1051 files changed, 46126 insertions, 12479 deletions
diff --git a/Documentation/ABI/testing/sysfs-platform-lg-laptop b/Documentation/ABI/testing/sysfs-platform-lg-laptop
new file mode 100644
index 000000000000..cf47749b19df
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-lg-laptop
@@ -0,0 +1,35 @@
+What:		/sys/devices/platform/lg-laptop/reader_mode
+Date:		October 2018
+KernelVersion:	4.20
+Contact:	Matan Ziv-Av <matan@svgalib.org>
+Description:
+		Control reader mode. 1 means on, 0 means off.
+
+What:		/sys/devices/platform/lg-laptop/fn_lock
+Date:		October 2018
+KernelVersion:	4.20
+Contact:	Matan Ziv-Av <matan@svgalib.org>
+Description:
+		Control FN lock mode. 1 means on, 0 means off.
+
+What:		/sys/devices/platform/lg-laptop/battery_care_limit
+Date:		October 2018
+KernelVersion:	4.20
+Contact:	Matan Ziv-Av <matan@svgalib.org>
+Description:
+		Maximal battery charge level. Accepted values are 80 or 100.
+
+What:		/sys/devices/platform/lg-laptop/fan_mode
+Date:		October 2018
+KernelVersion:	4.20
+Contact:	Matan Ziv-Av <matan@svgalib.org>
+Description:
+		Control fan mode. 1 for performance mode, 0 for silent mode.
+
+What:		/sys/devices/platform/lg-laptop/usb_charge
+Date:		October 2018
+KernelVersion:	4.20
+Contact:	Matan Ziv-Av <matan@svgalib.org>
+Description:
+		Control USB port charging when device is turned off.
+		1 means on, 0 means off.
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 8384c681a4b2..476722b7b636 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1879,10 +1879,8 @@ following two functions.

   wbc_init_bio(@wbc, @bio)
	Should be called for each bio carrying writeback data and
-	associates the bio with the inode's owner cgroup and the
-	corresponding request queue.  This must be called after
-	a queue (device) has been associated with the bio and
-	before submission.
+	associates the bio with the inode's owner cgroup.  Can be
+	called anytime between bio allocation and submission.

   wbc_account_io(@wbc, @page, @bytes)
	Should be called for each data segment being written out.
@@ -1901,7 +1899,7 @@ the configuration, the bio may be executed at a lower priority and if
 the writeback session is holding shared resources, e.g. a journal
 entry, may lead to priority inversion.  There is no one easy solution
 for the problem.  Filesystems can try to work around specific problem
-cases by skipping wbc_init_bio() or using bio_associate_create_blkg()
+cases by skipping wbc_init_bio() or using bio_associate_blkcg()
 directly.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index b90fe3b6bc6c..81d1d5a74728 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1068,7 +1068,7 @@
			earlyprintk=serial[,0x...[,baudrate]]
			earlyprintk=ttySn[,baudrate]
			earlyprintk=dbgp[debugController#]
-			earlyprintk=pciserial,bus:device.function[,baudrate]
+			earlyprintk=pciserial[,force],bus:device.function[,baudrate]
			earlyprintk=xdbc[xhciController#]

			earlyprintk is useful when the kernel crashes before
@@ -1100,6 +1100,10 @@
			The sclp output can only be used on s390.

+			The optional "force" to "pciserial" enables use of a
+			PCI device even when its classcode is not of the
+			UART class.
+
	edac_report=	[HW,EDAC] Control how to report EDAC event
			Format: {"on" | "off" | "force"}
			on: enable EDAC to report H/W event.
May be overridden diff --git a/Documentation/crypto/asymmetric-keys.txt b/Documentation/crypto/asymmetric-keys.txt index 5969bf42562a..8763866b11cf 100644 --- a/Documentation/crypto/asymmetric-keys.txt +++ b/Documentation/crypto/asymmetric-keys.txt @@ -183,6 +183,10 @@ and looks like the following: void (*describe)(const struct key *key, struct seq_file *m); void (*destroy)(void *payload); + int (*query)(const struct kernel_pkey_params *params, + struct kernel_pkey_query *info); + int (*eds_op)(struct kernel_pkey_params *params, + const void *in, void *out); int (*verify_signature)(const struct key *key, const struct public_key_signature *sig); }; @@ -207,12 +211,22 @@ There are a number of operations defined by the subtype: asymmetric key will look after freeing the fingerprint and releasing the reference on the subtype module. - (3) verify_signature(). + (3) query(). - Optional. These are the entry points for the key usage operations. - Currently there is only the one defined. If not set, the caller will be - given -ENOTSUPP. The subtype may do anything it likes to implement an - operation, including offloading to hardware. + Mandatory. This is a function for querying the capabilities of a key. + + (4) eds_op(). + + Optional. This is the entry point for the encryption, decryption and + signature creation operations (which are distinguished by the operation ID + in the parameter struct). The subtype may do anything it likes to + implement an operation, including offloading to hardware. + + (5) verify_signature(). + + Optional. This is the entry point for signature verification. The + subtype may do anything it likes to implement an operation, including + offloading to hardware. ========================== @@ -234,6 +248,8 @@ Examples of blob formats for which parsers could be implemented include: - X.509 ASN.1 stream. - Pointer to TPM key. - Pointer to UEFI key. + - PKCS#8 private key [RFC 5208]. + - PKCS#5 encrypted private key [RFC 2898]. During key instantiation each parser in the list is tried until one doesn't return -EBADMSG. diff --git a/Documentation/devicetree/bindings/arm/cpu-capacity.txt b/Documentation/devicetree/bindings/arm/cpu-capacity.txt index 9b5685a1d15d..84262cdb8d29 100644 --- a/Documentation/devicetree/bindings/arm/cpu-capacity.txt +++ b/Documentation/devicetree/bindings/arm/cpu-capacity.txt @@ -59,9 +59,11 @@ mhz values (normalized w.r.t. the highest value found while parsing the DT). =========================================== Example 1 (ARM 64-bit, 6-cpu system, two clusters): -capacities-dmips-mhz are scaled w.r.t. 1024 (cpu@0 and cpu@1) -supposing cluster0@max-freq=1100 and custer1@max-freq=850, -final capacities are 1024 for cluster0 and 446 for cluster1 +The capacities-dmips-mhz or DMIPS/MHz values (scaled to 1024) +are 1024 and 578 for cluster0 and cluster1. Further normalization +is done by the operating system based on cluster0@max-freq=1100 and +custer1@max-freq=850, final capacities are 1024 for cluster0 and +446 for cluster1 (576*850/1100). cpus { #address-cells = <2>; diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt index 1333db9acfee..7f696362a4a1 100644 --- a/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt +++ b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt @@ -21,10 +21,29 @@ PROPERTIES the register region. An optional second element specifies the base address and size of the alias register region. 
+- clocks: + Usage: required + Value type: <prop-encoded-array> + Definition: reference to the pll parents. + +- clock-names: + Usage: required + Value type: <stringlist> + Definition: must be "pll8_vote", "pxo". + +- clock-output-names: + Usage: optional + Value type: <string> + Definition: Name of the output clock. Typically acpuX_aux where X is a + CPU number starting at 0. + Example: clock-controller@2088000 { compatible = "qcom,kpss-acc-v2"; reg = <0x02088000 0x1000>, <0x02008000 0x1000>; + clocks = <&gcc PLL8_VOTE>, <&gcc PXO_SRC>; + clock-names = "pll8_vote", "pxo"; + clock-output-names = "acpu0_aux"; }; diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,kpss-gcc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-gcc.txt new file mode 100644 index 000000000000..e628758950e1 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-gcc.txt @@ -0,0 +1,44 @@ +Krait Processor Sub-system (KPSS) Global Clock Controller (GCC) + +PROPERTIES + +- compatible: + Usage: required + Value type: <string> + Definition: should be one of the following. The generic compatible + "qcom,kpss-gcc" should also be included. + "qcom,kpss-gcc-ipq8064", "qcom,kpss-gcc" + "qcom,kpss-gcc-apq8064", "qcom,kpss-gcc" + "qcom,kpss-gcc-msm8974", "qcom,kpss-gcc" + "qcom,kpss-gcc-msm8960", "qcom,kpss-gcc" + +- reg: + Usage: required + Value type: <prop-encoded-array> + Definition: base address and size of the register region + +- clocks: + Usage: required + Value type: <prop-encoded-array> + Definition: reference to the pll parents. + +- clock-names: + Usage: required + Value type: <stringlist> + Definition: must be "pll8_vote", "pxo". + +- clock-output-names: + Usage: required + Value type: <string> + Definition: Name of the output clock. Typically acpu_l2_aux indicating + an L2 cache auxiliary clock. + +Example: + + l2cc: clock-controller@2011000 { + compatible = "qcom,kpss-gcc-ipq8064", "qcom,kpss-gcc"; + reg = <0x2011000 0x1000>; + clocks = <&gcc PLL8_VOTE>, <&gcc PXO_SRC>; + clock-names = "pll8_vote", "pxo"; + clock-output-names = "acpu_l2_aux"; + }; diff --git a/Documentation/devicetree/bindings/clock/actions,owl-cmu.txt b/Documentation/devicetree/bindings/clock/actions,owl-cmu.txt index d1e60d297387..2ef86ae96df8 100644 --- a/Documentation/devicetree/bindings/clock/actions,owl-cmu.txt +++ b/Documentation/devicetree/bindings/clock/actions,owl-cmu.txt @@ -13,6 +13,7 @@ Required Properties: region. - clocks: Reference to the parent clocks ("hosc", "losc") - #clock-cells: should be 1. +- #reset-cells: should be 1. Each clock is assigned an identifier, and client nodes can use this identifier to specify the clock which they consume. @@ -36,6 +37,7 @@ Example: Clock Management Unit node: reg = <0x0 0xe0160000 0x0 0x1000>; clocks = <&hosc>, <&losc>; #clock-cells = <1>; + #reset-cells = <1>; }; Example: UART controller node that consumes clock generated by the clock diff --git a/Documentation/devicetree/bindings/clock/at91-clock.txt b/Documentation/devicetree/bindings/clock/at91-clock.txt index 8f8f95056f3d..e9f70fcdfe80 100644 --- a/Documentation/devicetree/bindings/clock/at91-clock.txt +++ b/Documentation/devicetree/bindings/clock/at91-clock.txt @@ -4,6 +4,8 @@ This binding uses the common clock binding[1]. 
[1] Documentation/devicetree/bindings/clock/clock-bindings.txt +Slow Clock controller: + Required properties: - compatible : shall be one of the following: "atmel,at91sam9x5-sckc" or @@ -16,84 +18,6 @@ Required properties: "atmel,at91sam9x5-clk-slow-rc-osc": at91 internal slow RC oscillator - - "atmel,<chip>-pmc": - at91 PMC (Power Management Controller) - All at91 specific clocks (clocks defined below) must be child - node of the PMC node. - <chip> can be: at91rm9200, at91sam9260, at91sam9261, - at91sam9263, at91sam9g45, at91sam9n12, at91sam9rl, at91sam9x5, - sama5d2, sama5d3 or sama5d4. - - "atmel,at91sam9x5-clk-slow" (under sckc node) - or - "atmel,at91sam9260-clk-slow" (under pmc node): - at91 slow clk - - "atmel,at91rm9200-clk-main-osc" - "atmel,at91sam9x5-clk-main-rc-osc" - at91 main clk sources - - "atmel,at91sam9x5-clk-main" - "atmel,at91rm9200-clk-main": - at91 main clock - - "atmel,at91rm9200-clk-master" or - "atmel,at91sam9x5-clk-master": - at91 master clock - - "atmel,at91sam9x5-clk-peripheral" or - "atmel,at91rm9200-clk-peripheral": - at91 peripheral clocks - - "atmel,at91rm9200-clk-pll" or - "atmel,at91sam9g45-clk-pll" or - "atmel,at91sam9g20-clk-pllb" or - "atmel,sama5d3-clk-pll": - at91 pll clocks - - "atmel,at91sam9x5-clk-plldiv": - at91 plla divisor - - "atmel,at91rm9200-clk-programmable" or - "atmel,at91sam9g45-clk-programmable" or - "atmel,at91sam9x5-clk-programmable": - at91 programmable clocks - - "atmel,at91sam9x5-clk-smd": - at91 SMD (Soft Modem) clock - - "atmel,at91rm9200-clk-system": - at91 system clocks - - "atmel,at91rm9200-clk-usb" or - "atmel,at91sam9x5-clk-usb" or - "atmel,at91sam9n12-clk-usb": - at91 usb clock - - "atmel,at91sam9x5-clk-utmi": - at91 utmi clock - - "atmel,sama5d4-clk-h32mx": - at91 h32mx clock - - "atmel,sama5d2-clk-generated": - at91 generated clock - - "atmel,sama5d2-clk-audio-pll-frac": - at91 audio fractional pll - - "atmel,sama5d2-clk-audio-pll-pad": - at91 audio pll CLK_AUDIO output pin - - "atmel,sama5d2-clk-audio-pll-pmc" - at91 audio pll output on AUDIOPLLCLK that feeds the PMC - and can be used by peripheral clock or generic clock - - "atmel,sama5d2-clk-i2s-mux" (under pmc node): - at91 I2S clock source selection - -Required properties for SCKC node: - reg : defines the IO memory reserved for the SCKC. - #size-cells : shall be 0 (reg is used to encode clk id). - #address-cells : shall be 1 (reg is used to encode clk id). @@ -109,428 +33,30 @@ For example: /* put at91 slow clocks here */ }; +Power Management Controller (PMC): -Required properties for internal slow RC oscillator: -- #clock-cells : from common clock binding; shall be set to 0. -- clock-frequency : define the internal RC oscillator frequency. - -Optional properties: -- clock-accuracy : define the internal RC oscillator accuracy. - -For example: - slow_rc_osc: slow_rc_osc { - compatible = "atmel,at91sam9x5-clk-slow-rc-osc"; - clock-frequency = <32768>; - clock-accuracy = <50000000>; - }; - -Required properties for slow oscillator: -- #clock-cells : from common clock binding; shall be set to 0. -- clocks : shall encode the main osc source clk sources (see atmel datasheet). +Required properties: +- compatible : shall be "atmel,<chip>-pmc", "syscon": + <chip> can be: at91rm9200, at91sam9260, at91sam9261, + at91sam9263, at91sam9g45, at91sam9n12, at91sam9rl, at91sam9g15, + at91sam9g25, at91sam9g35, at91sam9x25, at91sam9x35, at91sam9x5, + sama5d2, sama5d3 or sama5d4. +- #clock-cells : from common clock binding; shall be set to 2. 
The first entry + is the type of the clock (core, system, peripheral or generated) and the + second entry its index as provided by the datasheet +- clocks : Must contain an entry for each entry in clock-names. +- clock-names: Must include the following entries: "slow_clk", "main_xtal" Optional properties: - atmel,osc-bypass : boolean property. Set this when a clock signal is directly provided on XIN. For example: - slow_osc: slow_osc { - compatible = "atmel,at91rm9200-clk-slow-osc"; - #clock-cells = <0>; - clocks = <&slow_xtal>; - }; - -Required properties for slow clock: -- #clock-cells : from common clock binding; shall be set to 0. -- clocks : shall encode the slow clk sources (see atmel datasheet). - -For example: - clk32k: slck { - compatible = "atmel,at91sam9x5-clk-slow"; - #clock-cells = <0>; - clocks = <&slow_rc_osc &slow_osc>; - }; - -Required properties for PMC node: -- reg : defines the IO memory reserved for the PMC. -- #size-cells : shall be 0 (reg is used to encode clk id). -- #address-cells : shall be 1 (reg is used to encode clk id). -- interrupts : shall be set to PMC interrupt line. -- interrupt-controller : tell that the PMC is an interrupt controller. -- #interrupt-cells : must be set to 1. The first cell encodes the interrupt id, - and reflect the bit position in the PMC_ER/DR/SR registers. - You can use the dt macros defined in dt-bindings/clock/at91.h. - 0 (AT91_PMC_MOSCS) -> main oscillator ready - 1 (AT91_PMC_LOCKA) -> PLL A ready - 2 (AT91_PMC_LOCKB) -> PLL B ready - 3 (AT91_PMC_MCKRDY) -> master clock ready - 6 (AT91_PMC_LOCKU) -> UTMI PLL clock ready - 8 .. 15 (AT91_PMC_PCKRDY(id)) -> programmable clock ready - 16 (AT91_PMC_MOSCSELS) -> main oscillator selected - 17 (AT91_PMC_MOSCRCS) -> RC main oscillator stabilized - 18 (AT91_PMC_CFDEV) -> clock failure detected - -For example: - pmc: pmc@fffffc00 { - compatible = "atmel,sama5d3-pmc"; - interrupts = <1 4 7>; - interrupt-controller; - #interrupt-cells = <2>; - #size-cells = <0>; - #address-cells = <1>; - - /* put at91 clocks here */ - }; - -Required properties for main clock internal RC oscillator: -- interrupts : shall be set to "<0>". -- clock-frequency : define the internal RC oscillator frequency. - -Optional properties: -- clock-accuracy : define the internal RC oscillator accuracy. - -For example: - main_rc_osc: main_rc_osc { - compatible = "atmel,at91sam9x5-clk-main-rc-osc"; - interrupt-parent = <&pmc>; - interrupts = <0>; - clock-frequency = <12000000>; - clock-accuracy = <50000000>; - }; - -Required properties for main clock oscillator: -- interrupts : shall be set to "<0>". -- #clock-cells : from common clock binding; shall be set to 0. -- clocks : shall encode the main osc source clk sources (see atmel datasheet). - -Optional properties: -- atmel,osc-bypass : boolean property. Specified if a clock signal is provided - on XIN. - - clock signal is directly provided on XIN pin. - -For example: - main_osc: main_osc { - compatible = "atmel,at91rm9200-clk-main-osc"; - interrupt-parent = <&pmc>; - interrupts = <0>; - #clock-cells = <0>; - clocks = <&main_xtal>; - }; - -Required properties for main clock: -- interrupts : shall be set to "<0>". -- #clock-cells : from common clock binding; shall be set to 0. -- clocks : shall encode the main clk sources (see atmel datasheet). 
- -For example: - main: mainck { - compatible = "atmel,at91sam9x5-clk-main"; - interrupt-parent = <&pmc>; - interrupts = <0>; - #clock-cells = <0>; - clocks = <&main_rc_osc &main_osc>; - }; - -Required properties for master clock: -- interrupts : shall be set to "<3>". -- #clock-cells : from common clock binding; shall be set to 0. -- clocks : shall be the master clock sources (see atmel datasheet) phandles. - e.g. "<&ck32k>, <&main>, <&plla>, <&pllb>". -- atmel,clk-output-range : minimum and maximum clock frequency (two u32 - fields). - e.g. output = <0 133000000>; <=> 0 to 133MHz. -- atmel,clk-divisors : master clock divisors table (four u32 fields). - 0 <=> reserved value. - e.g. divisors = <1 2 4 6>; -- atmel,master-clk-have-div3-pres : some SoC use the reserved value 7 in the - PRES field as CLOCK_DIV3 (e.g sam9x5). - -For example: - mck: mck { - compatible = "atmel,at91rm9200-clk-master"; - interrupt-parent = <&pmc>; - interrupts = <3>; - #clock-cells = <0>; - atmel,clk-output-range = <0 133000000>; - atmel,clk-divisors = <1 2 4 0>; - }; - -Required properties for peripheral clocks: -- #size-cells : shall be 0 (reg is used to encode clk id). -- #address-cells : shall be 1 (reg is used to encode clk id). -- clocks : shall be the master clock phandle. - e.g. clocks = <&mck>; -- name: device tree node describing a specific peripheral clock. - * #clock-cells : from common clock binding; shall be set to 0. - * reg: peripheral id. See Atmel's datasheets to get a full - list of peripheral ids. - * atmel,clk-output-range : minimum and maximum clock frequency - (two u32 fields). Only valid on at91sam9x5-clk-peripheral - compatible IPs. - -For example: - periph: periphck { - compatible = "atmel,at91sam9x5-clk-peripheral"; - #size-cells = <0>; - #address-cells = <1>; - clocks = <&mck>; - - ssc0_clk { - #clock-cells = <0>; - reg = <2>; - atmel,clk-output-range = <0 133000000>; - }; - - usart0_clk { - #clock-cells = <0>; - reg = <3>; - atmel,clk-output-range = <0 66000000>; - }; - }; - - -Required properties for pll clocks: -- interrupts : shall be set to "<1>". -- #clock-cells : from common clock binding; shall be set to 0. -- clocks : shall be the main clock phandle. -- reg : pll id. - 0 -> PLL A - 1 -> PLL B -- atmel,clk-input-range : minimum and maximum source clock frequency (two u32 - fields). - e.g. input = <1 32000000>; <=> 1 to 32MHz. -- #atmel,pll-clk-output-range-cells : number of cells reserved for pll output - range description. Sould be set to 2, 3 - or 4. - * 1st and 2nd cells represent the frequency range (min-max). - * 3rd cell is optional and represents the OUT field value for the given - range. - * 4th cell is optional and represents the ICPLL field (PLLICPR - register) -- atmel,pll-clk-output-ranges : pll output frequency ranges + optional parameter - depending on #atmel,pll-output-range-cells - property value. - -For example: - plla: pllack { - compatible = "atmel,at91sam9g45-clk-pll"; - interrupt-parent = <&pmc>; - interrupts = <1>; - #clock-cells = <0>; - clocks = <&main>; - reg = <0>; - atmel,clk-input-range = <2000000 32000000>; - #atmel,pll-clk-output-range-cells = <4>; - atmel,pll-clk-output-ranges = <74500000 800000000 0 0 - 69500000 750000000 1 0 - 64500000 700000000 2 0 - 59500000 650000000 3 0 - 54500000 600000000 0 1 - 49500000 550000000 1 1 - 44500000 500000000 2 1 - 40000000 450000000 3 1>; - }; - -Required properties for plldiv clocks (plldiv = pll / 2): -- #clock-cells : from common clock binding; shall be set to 0. -- clocks : shall be the plla clock phandle. 
- -The pll divisor is equal to 2 and cannot be changed. - -For example: - plladiv: plladivck { - compatible = "atmel,at91sam9x5-clk-plldiv"; - #clock-cells = <0>; - clocks = <&plla>; - }; - -Required properties for programmable clocks: -- #size-cells : shall be 0 (reg is used to encode clk id). -- #address-cells : shall be 1 (reg is used to encode clk id). -- clocks : shall be the programmable clock source phandles. - e.g. clocks = <&clk32k>, <&main>, <&plla>, <&pllb>; -- name: device tree node describing a specific prog clock. - * #clock-cells : from common clock binding; shall be set to 0. - * reg : programmable clock id (register offset from PCKx - register). - * interrupts : shall be set to "<(8 + id)>". - -For example: - prog: progck { - compatible = "atmel,at91sam9g45-clk-programmable"; - #size-cells = <0>; - #address-cells = <1>; - interrupt-parent = <&pmc>; - clocks = <&clk32k>, <&main>, <&plladiv>, <&utmi>, <&mck>; - - prog0 { - #clock-cells = <0>; - reg = <0>; - interrupts = <8>; - }; - - prog1 { - #clock-cells = <0>; - reg = <1>; - interrupts = <9>; - }; - }; - - -Required properties for smd clock: -- #clock-cells : from common clock binding; shall be set to 0. -- clocks : shall be the smd clock source phandles. - e.g. clocks = <&plladiv>, <&utmi>; - -For example: - smd: smdck { - compatible = "atmel,at91sam9x5-clk-smd"; - #clock-cells = <0>; - clocks = <&plladiv>, <&utmi>; - }; - -Required properties for system clocks: -- #size-cells : shall be 0 (reg is used to encode clk id). -- #address-cells : shall be 1 (reg is used to encode clk id). -- name: device tree node describing a specific system clock. - * #clock-cells : from common clock binding; shall be set to 0. - * reg: system clock id (bit position in SCER/SCDR/SCSR registers). - See Atmel's datasheet to get a full list of system clock ids. - -For example: - system: systemck { - compatible = "atmel,at91rm9200-clk-system"; - #address-cells = <1>; - #size-cells = <0>; - - ddrck { - #clock-cells = <0>; - reg = <2>; - clocks = <&mck>; - }; - - uhpck { - #clock-cells = <0>; - reg = <6>; - clocks = <&usb>; - }; - - udpck { - #clock-cells = <0>; - reg = <7>; - clocks = <&usb>; - }; - }; - - -Required properties for usb clock: -- #clock-cells : from common clock binding; shall be set to 0. -- clocks : shall be the smd clock source phandles. - e.g. clocks = <&pllb>; -- atmel,clk-divisors (only available for "atmel,at91rm9200-clk-usb"): - usb clock divisor table. - e.g. divisors = <1 2 4 0>; - -For example: - usb: usbck { - compatible = "atmel,at91sam9x5-clk-usb"; - #clock-cells = <0>; - clocks = <&plladiv>, <&utmi>; - }; - - usb: usbck { - compatible = "atmel,at91rm9200-clk-usb"; - #clock-cells = <0>; - clocks = <&pllb>; - atmel,clk-divisors = <1 2 4 0>; - }; - - -Required properties for utmi clock: -- interrupts : shall be set to "<AT91_PMC_LOCKU IRQ_TYPE_LEVEL_HIGH>". -- #clock-cells : from common clock binding; shall be set to 0. -- clocks : shall be the main clock source phandle. - -For example: - utmi: utmick { - compatible = "atmel,at91sam9x5-clk-utmi"; - interrupt-parent = <&pmc>; - interrupts = <AT91_PMC_LOCKU IRQ_TYPE_LEVEL_HIGH>; - #clock-cells = <0>; - clocks = <&main>; - }; - -Required properties for 32 bits bus Matrix clock (h32mx clock): -- #clock-cells : from common clock binding; shall be set to 0. -- clocks : shall be the master clock source phandle. 
- -For example: - h32ck: h32mxck { - #clock-cells = <0>; - compatible = "atmel,sama5d4-clk-h32mx"; - clocks = <&mck>; - }; - -Required properties for generated clocks: -- #size-cells : shall be 0 (reg is used to encode clk id). -- #address-cells : shall be 1 (reg is used to encode clk id). -- clocks : shall be the generated clock source phandles. - e.g. clocks = <&clk32k>, <&main>, <&plladiv>, <&utmi>, <&mck>, <&audio_pll_pmc>; -- name: device tree node describing a specific generated clock. - * #clock-cells : from common clock binding; shall be set to 0. - * reg: peripheral id. See Atmel's datasheets to get a full - list of peripheral ids. - * atmel,clk-output-range : minimum and maximum clock frequency - (two u32 fields). - -For example: - gck { - compatible = "atmel,sama5d2-clk-generated"; - #address-cells = <1>; - #size-cells = <0>; - clocks = <&clk32k>, <&main>, <&plladiv>, <&utmi>, <&mck>, <&audio_pll_pmc>; - - tcb0_gclk: tcb0_gclk { - #clock-cells = <0>; - reg = <35>; - atmel,clk-output-range = <0 83000000>; - }; - - pwm_gclk: pwm_gclk { - #clock-cells = <0>; - reg = <38>; - atmel,clk-output-range = <0 83000000>; - }; - }; - -Required properties for I2S mux clocks: -- #size-cells : shall be 0 (reg is used to encode I2S bus id). -- #address-cells : shall be 1 (reg is used to encode I2S bus id). -- name: device tree node describing a specific mux clock. - * #clock-cells : from common clock binding; shall be set to 0. - * clocks : shall be the mux clock parent phandles; shall be 2 phandles: - peripheral and generated clock; the first phandle shall belong to the - peripheral clock and the second one shall belong to the generated - clock; "clock-indices" property can be user to specify - the correct order. - * reg: I2S bus id of the corresponding mux clock. - e.g. reg = <0>; for i2s0, reg = <1>; for i2s1 - -For example: - i2s_clkmux { - compatible = "atmel,sama5d2-clk-i2s-mux"; - #address-cells = <1>; - #size-cells = <0>; - - i2s0muxck: i2s0_muxclk { - clocks = <&i2s0_clk>, <&i2s0_gclk>; - #clock-cells = <0>; - reg = <0>; - }; - - i2s1muxck: i2s1_muxclk { - clocks = <&i2s1_clk>, <&i2s1_gclk>; - #clock-cells = <0>; - reg = <1>; - }; + pmc: pmc@f0018000 { + compatible = "atmel,sama5d4-pmc", "syscon"; + reg = <0xf0018000 0x120>; + interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; + #clock-cells = <2>; + clocks = <&clk32k>, <&main_xtal>; + clock-names = "slow_clk", "main_xtal"; }; diff --git a/Documentation/devicetree/bindings/clock/hi3670-clock.txt b/Documentation/devicetree/bindings/clock/hi3670-clock.txt new file mode 100644 index 000000000000..66f3697eca78 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/hi3670-clock.txt @@ -0,0 +1,43 @@ +* Hisilicon Hi3670 Clock Controller + +The Hi3670 clock controller generates and supplies clock to various +controllers within the Hi3670 SoC. + +Required Properties: + +- compatible: the compatible should be one of the following strings to + indicate the clock controller functionality. + + - "hisilicon,hi3670-crgctrl" + - "hisilicon,hi3670-pctrl" + - "hisilicon,hi3670-pmuctrl" + - "hisilicon,hi3670-sctrl" + - "hisilicon,hi3670-iomcu" + - "hisilicon,hi3670-media1-crg" + - "hisilicon,hi3670-media2-crg" + +- reg: physical base address of the controller and length of memory mapped + region. + +- #clock-cells: should be 1. + +Each clock is assigned an identifier and client nodes use this identifier +to specify the clock which they consume. + +All these identifier could be found in <dt-bindings/clock/hi3670-clock.h>. 
+ +Examples: + crg_ctrl: clock-controller@fff35000 { + compatible = "hisilicon,hi3670-crgctrl", "syscon"; + reg = <0x0 0xfff35000 0x0 0x1000>; + #clock-cells = <1>; + }; + + uart0: serial@fdf02000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x0 0xfdf02000 0x0 0x1000>; + interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&crg_ctrl HI3670_CLK_GATE_UART0>, + <&crg_ctrl HI3670_PCLK>; + clock-names = "uartclk", "apb_pclk"; + }; diff --git a/Documentation/devicetree/bindings/clock/ingenic,cgu.txt b/Documentation/devicetree/bindings/clock/ingenic,cgu.txt index f8d4134ae409..ba5a442026b7 100644 --- a/Documentation/devicetree/bindings/clock/ingenic,cgu.txt +++ b/Documentation/devicetree/bindings/clock/ingenic,cgu.txt @@ -6,8 +6,11 @@ to provide many different clock signals derived from only 2 external source clocks. Required properties: -- compatible : Should be "ingenic,<soctype>-cgu". - For example "ingenic,jz4740-cgu" or "ingenic,jz4780-cgu". +- compatible : Should be one of: + * ingenic,jz4740-cgu + * ingenic,jz4725b-cgu + * ingenic,jz4770-cgu + * ingenic,jz4780-cgu - reg : The address & length of the CGU registers. - clocks : List of phandle & clock specifiers for clocks external to the CGU. Two such external clocks should be specified - first the external crystal diff --git a/Documentation/devicetree/bindings/clock/qcom,camcc.txt b/Documentation/devicetree/bindings/clock/qcom,camcc.txt new file mode 100644 index 000000000000..c5eb6694fda9 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/qcom,camcc.txt @@ -0,0 +1,18 @@ +Qualcomm Camera Clock & Reset Controller Binding +------------------------------------------------ + +Required properties : +- compatible : shall contain "qcom,sdm845-camcc". +- reg : shall contain base register location and length. +- #clock-cells : from common clock binding, shall contain 1. +- #reset-cells : from common reset binding, shall contain 1. +- #power-domain-cells : from generic power domain binding, shall contain 1. + +Example: + camcc: clock-controller@ad00000 { + compatible = "qcom,sdm845-camcc"; + reg = <0xad00000 0x10000>; + #clock-cells = <1>; + #reset-cells = <1>; + #power-domain-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt index 664ea1fd6c76..52d9345c9927 100644 --- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt +++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt @@ -19,6 +19,9 @@ Required properties : "qcom,gcc-msm8996" "qcom,gcc-msm8998" "qcom,gcc-mdm9615" + "qcom,gcc-qcs404" + "qcom,gcc-sdm630" + "qcom,gcc-sdm660" "qcom,gcc-sdm845" - reg : shall contain base register location and length diff --git a/Documentation/devicetree/bindings/clock/qcom,hfpll.txt b/Documentation/devicetree/bindings/clock/qcom,hfpll.txt new file mode 100644 index 000000000000..ec02a024424c --- /dev/null +++ b/Documentation/devicetree/bindings/clock/qcom,hfpll.txt @@ -0,0 +1,60 @@ +High-Frequency PLL (HFPLL) + +PROPERTIES + +- compatible: + Usage: required + Value type: <string>: + shall contain only one of the following. The generic + compatible "qcom,hfpll" should be also included. + + "qcom,hfpll-ipq8064", "qcom,hfpll" + "qcom,hfpll-apq8064", "qcom,hfpll" + "qcom,hfpll-msm8974", "qcom,hfpll" + "qcom,hfpll-msm8960", "qcom,hfpll" + +- reg: + Usage: required + Value type: <prop-encoded-array> + Definition: address and size of HPLL registers. An optional second + element specifies the address and size of the alias + register region. 
+ +- clocks: + Usage: required + Value type: <prop-encoded-array> + Definition: reference to the xo clock. + +- clock-names: + Usage: required + Value type: <stringlist> + Definition: must be "xo". + +- clock-output-names: + Usage: required + Value type: <string> + Definition: Name of the PLL. Typically hfpllX where X is a CPU number + starting at 0. Otherwise hfpll_Y where Y is more specific + such as "l2". + +Example: + +1) An HFPLL for the L2 cache. + + clock-controller@f9016000 { + compatible = "qcom,hfpll-ipq8064", "qcom,hfpll"; + reg = <0xf9016000 0x30>; + clocks = <&xo_board>; + clock-names = "xo"; + clock-output-names = "hfpll_l2"; + }; + +2) An HFPLL for CPU0. This HFPLL has the alias register region. + + clock-controller@f908a000 { + compatible = "qcom,hfpll-ipq8064", "qcom,hfpll"; + reg = <0xf908a000 0x30>, <0xf900a000 0x30>; + clocks = <&xo_board>; + clock-names = "xo"; + clock-output-names = "hfpll0"; + }; diff --git a/Documentation/devicetree/bindings/clock/qcom,krait-cc.txt b/Documentation/devicetree/bindings/clock/qcom,krait-cc.txt new file mode 100644 index 000000000000..030ba60dab08 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/qcom,krait-cc.txt @@ -0,0 +1,34 @@ +Krait Clock Controller + +PROPERTIES + +- compatible: + Usage: required + Value type: <string> + Definition: must be one of: + "qcom,krait-cc-v1" + "qcom,krait-cc-v2" + +- #clock-cells: + Usage: required + Value type: <u32> + Definition: must be 1 + +- clocks: + Usage: required + Value type: <prop-encoded-array> + Definition: reference to the clock parents of hfpll, secondary muxes. + +- clock-names: + Usage: required + Value type: <stringlist> + Definition: must be "hfpll0", "hfpll1", "acpu0_aux", "acpu1_aux", "qsb". + +Example: + + kraitcc: clock-controller { + compatible = "qcom,krait-cc-v1"; + clocks = <&hfpll0>, <&hfpll1>, <&acpu0_aux>, <&acpu1_aux>, <qsb>; + clock-names = "hfpll0", "hfpll1", "acpu0_aux", "acpu1_aux", "qsb"; + #clock-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt index db542abadb75..916a601b76a7 100644 --- a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt +++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt @@ -13,9 +13,13 @@ They provide the following functionalities: Required Properties: - compatible: Must be one of: + - "renesas,r7s9210-cpg-mssr" for the r7s9210 SoC (RZ/A2) - "renesas,r8a7743-cpg-mssr" for the r8a7743 SoC (RZ/G1M) + - "renesas,r8a7744-cpg-mssr" for the r8a7744 SoC (RZ/G1N) - "renesas,r8a7745-cpg-mssr" for the r8a7745 SoC (RZ/G1E) - "renesas,r8a77470-cpg-mssr" for the r8a77470 SoC (RZ/G1C) + - "renesas,r8a774a1-cpg-mssr" for the r8a774a1 SoC (RZ/G2M) + - "renesas,r8a774c0-cpg-mssr" for the r8a774c0 SoC (RZ/G2E) - "renesas,r8a7790-cpg-mssr" for the r8a7790 SoC (R-Car H2) - "renesas,r8a7791-cpg-mssr" for the r8a7791 SoC (R-Car M2-W) - "renesas,r8a7792-cpg-mssr" for the r8a7792 SoC (R-Car V2H) @@ -35,12 +39,13 @@ Required Properties: - clocks: References to external parent clocks, one entry for each entry in clock-names - clock-names: List of external parent clock names. 
Valid names are: - - "extal" (r8a7743, r8a7745, r8a77470, r8a7790, r8a7791, r8a7792, - r8a7793, r8a7794, r8a7795, r8a7796, r8a77965, r8a77970, - r8a77980, r8a77990, r8a77995) - - "extalr" (r8a7795, r8a7796, r8a77965, r8a77970, r8a77980) - - "usb_extal" (r8a7743, r8a7745, r8a77470, r8a7790, r8a7791, r8a7793, - r8a7794) + - "extal" (r7s9210, r8a7743, r8a7744, r8a7745, r8a77470, r8a774a1, + r8a774c0, r8a7790, r8a7791, r8a7792, r8a7793, r8a7794, + r8a7795, r8a7796, r8a77965, r8a77970, r8a77980, r8a77990, + r8a77995) + - "extalr" (r8a774a1, r8a7795, r8a7796, r8a77965, r8a77970, r8a77980) + - "usb_extal" (r8a7743, r8a7744, r8a7745, r8a77470, r8a7790, r8a7791, + r8a7793, r8a7794) - #clock-cells: Must be 2 - For CPG core clocks, the two clock specifier cells must be "CPG_CORE" diff --git a/Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt b/Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.txt index a9b35265fa13..513f03466aba 100644 --- a/Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt +++ b/Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.txt @@ -1,20 +1,22 @@ -Innolux TV123WAM 12.3 inch eDP 2K display panel +Innolux P120ZDG-BF1 12.02 inch eDP 2K display panel This binding is compatible with the simple-panel binding, which is specified in simple-panel.txt in this directory. Required properties: -- compatible: should be "innolux,tv123wam" +- compatible: should be "innolux,p120zdg-bf1" - power-supply: regulator to provide the supply voltage Optional properties: - enable-gpios: GPIO pin to enable or disable the panel - backlight: phandle of the backlight device attached to the panel +- no-hpd: If HPD isn't hooked up; add this property. Example: panel_edp: panel-edp { - compatible = "innolux,tv123wam"; + compatible = "innolux,p120zdg-bf1"; enable-gpios = <&msmgpio 31 GPIO_ACTIVE_LOW>; power-supply = <&pm8916_l2>; backlight = <&backlight>; + no-hpd; }; diff --git a/Documentation/devicetree/bindings/display/panel/simple-panel.txt b/Documentation/devicetree/bindings/display/panel/simple-panel.txt index 45a457ad38f0..b2b872c710f2 100644 --- a/Documentation/devicetree/bindings/display/panel/simple-panel.txt +++ b/Documentation/devicetree/bindings/display/panel/simple-panel.txt @@ -11,6 +11,9 @@ Optional properties: - ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing - enable-gpios: GPIO pin to enable or disable the panel - backlight: phandle of the backlight device attached to the panel +- no-hpd: This panel is supposed to communicate that it's ready via HPD + (hot plug detect) signal, but the signal isn't hooked up so we should + hardcode the max delay from the panel spec when powering up the panel. 
Example: diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt index 091c8dfd3229..b245363d6d60 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt @@ -3,6 +3,7 @@ Required properties: - compatible : - "fsl,imx7ulp-lpi2c" for LPI2C compatible with the one integrated on i.MX7ULP soc + - "fsl,imx8qxp-lpi2c" for LPI2C compatible with the one integrated on i.MX8QXP soc - reg : address and length of the lpi2c master registers - interrupts : lpi2c interrupt - clocks : lpi2c clock specifier diff --git a/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt b/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt index 06a363d9ccef..b9a1d7402128 100644 --- a/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt +++ b/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt @@ -7,6 +7,7 @@ Required properties: for da850 - compatible = "ti,da850-ecap", "ti,am3352-ecap", "ti,am33xx-ecap"; for dra746 - compatible = "ti,dra746-ecap", "ti,am3352-ecap"; for 66ak2g - compatible = "ti,k2g-ecap", "ti,am3352-ecap"; + for am654 - compatible = "ti,am654-ecap", "ti,am3352-ecap"; - #pwm-cells: should be 3. See pwm.txt in this directory for a description of the cells format. The PWM channel index ranges from 0 to 4. The only third cell flag supported by this binding is PWM_POLARITY_INVERTED. diff --git a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt index e1ef6afbe3a7..7f31fe7e2093 100644 --- a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt +++ b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt @@ -3,7 +3,9 @@ Required Properties: - compatible: should be "renesas,pwm-rcar" and one of the following. - "renesas,pwm-r8a7743": for RZ/G1M + - "renesas,pwm-r8a7744": for RZ/G1N - "renesas,pwm-r8a7745": for RZ/G1E + - "renesas,pwm-r8a774a1": for RZ/G2M - "renesas,pwm-r8a7778": for R-Car M1A - "renesas,pwm-r8a7779": for R-Car H1 - "renesas,pwm-r8a7790": for R-Car H2 @@ -12,6 +14,8 @@ Required Properties: - "renesas,pwm-r8a7795": for R-Car H3 - "renesas,pwm-r8a7796": for R-Car M3-W - "renesas,pwm-r8a77965": for R-Car M3-N + - "renesas,pwm-r8a77970": for R-Car V3M + - "renesas,pwm-r8a77980": for R-Car V3H - "renesas,pwm-r8a77990": for R-Car E3 - "renesas,pwm-r8a77995": for R-Car D3 - reg: base address and length of the registers block for the PWM. diff --git a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt index d53a16715da6..848a92b53d81 100644 --- a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt +++ b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt @@ -2,13 +2,19 @@ Required Properties: - - compatible: should be one of the following. + - compatible: must contain one or more of the following: - "renesas,tpu-r8a73a4": for R8A73A4 (R-Mobile APE6) compatible PWM controller. - "renesas,tpu-r8a7740": for R8A7740 (R-Mobile A1) compatible PWM controller. - "renesas,tpu-r8a7743": for R8A7743 (RZ/G1M) compatible PWM controller. + - "renesas,tpu-r8a7744": for R8A7744 (RZ/G1N) compatible PWM controller. - "renesas,tpu-r8a7745": for R8A7745 (RZ/G1E) compatible PWM controller. - "renesas,tpu-r8a7790": for R8A7790 (R-Car H2) compatible PWM controller. - - "renesas,tpu": for generic R-Car and RZ/G1 TPU PWM controller. + - "renesas,tpu-r8a77970": for R8A77970 (R-Car V3M) compatible PWM + controller. 
+ - "renesas,tpu-r8a77980": for R8A77980 (R-Car V3H) compatible PWM + controller. + - "renesas,tpu": for the generic TPU PWM controller; this is a fallback for + the entries listed above. - reg: Base address and length of each memory resource used by the PWM controller hardware module. diff --git a/Documentation/devicetree/bindings/timer/csky,gx6605s-timer.txt b/Documentation/devicetree/bindings/timer/csky,gx6605s-timer.txt new file mode 100644 index 000000000000..6b04344f4bea --- /dev/null +++ b/Documentation/devicetree/bindings/timer/csky,gx6605s-timer.txt @@ -0,0 +1,42 @@ +================= +gx6605s SOC Timer +================= + +The timer is used in gx6605s soc as system timer and the driver +contain clk event and clk source. + +============================== +timer node bindings definition +============================== + + Description: Describes gx6605s SOC timer + + PROPERTIES + + - compatible + Usage: required + Value type: <string> + Definition: must be "csky,gx6605s-timer" + - reg + Usage: required + Value type: <u32 u32> + Definition: <phyaddr size> in soc from cpu view + - clocks + Usage: required + Value type: phandle + clock specifier cells + Definition: must be input clk node + - interrupt + Usage: required + Value type: <u32> + Definition: must be timer irq num defined by soc + +Examples: +--------- + + timer0: timer@20a000 { + compatible = "csky,gx6605s-timer"; + reg = <0x0020a000 0x400>; + clocks = <&dummy_apb_clk>; + interrupts = <10>; + interrupt-parent = <&intc>; + }; diff --git a/Documentation/devicetree/bindings/timer/csky,mptimer.txt b/Documentation/devicetree/bindings/timer/csky,mptimer.txt new file mode 100644 index 000000000000..15cfec08fbb8 --- /dev/null +++ b/Documentation/devicetree/bindings/timer/csky,mptimer.txt @@ -0,0 +1,42 @@ +============================ +C-SKY Multi-processors Timer +============================ + +C-SKY multi-processors timer is designed for C-SKY SMP system and the +regs is accessed by cpu co-processor 4 registers with mtcr/mfcr. + + - PTIM_CTLR "cr<0, 14>" Control reg to start reset timer. + - PTIM_TSR "cr<1, 14>" Interrupt cleanup status reg. + - PTIM_CCVR "cr<3, 14>" Current counter value reg. + - PTIM_LVR "cr<6, 14>" Window value reg to triger next event. + +============================== +timer node bindings definition +============================== + + Description: Describes SMP timer + + PROPERTIES + + - compatible + Usage: required + Value type: <string> + Definition: must be "csky,mptimer" + - clocks + Usage: required + Value type: <node> + Definition: must be input clk node + - interrupts + Usage: required + Value type: <u32> + Definition: must be timer irq num defined by soc + +Examples: +--------- + + timer: timer { + compatible = "csky,mptimer"; + clocks = <&dummy_apb_clk>; + interrupts = <16>; + interrupt-parent = <&intc>; + }; diff --git a/Documentation/filesystems/ceph.txt b/Documentation/filesystems/ceph.txt index 8bf62240e10d..1177052701e1 100644 --- a/Documentation/filesystems/ceph.txt +++ b/Documentation/filesystems/ceph.txt @@ -151,6 +151,11 @@ Mount Options Report overall filesystem usage in statfs instead of using the root directory quota. + nocopyfrom + Don't use the RADOS 'copy-from' operation to perform remote object + copies. Currently, it's only used in copy_file_range, which will revert + to the default VFS implementation if this option is used. 
+
 
 More Information
 ================
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index 51c136c821bf..eef7d9d259e8 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -286,6 +286,12 @@ pointed by REDIRECT. This should not be possible on local system as setting
 "trusted." xattrs will require CAP_SYS_ADMIN. But it should be possible
 for untrusted layers like from a pen drive.
 
+Note: redirect_dir={off|nofollow|follow(*)} conflicts with metacopy=on, and
+results in an error.
+
+(*) redirect_dir=follow only conflicts with metacopy=on if upperdir=... is
+given.
+
 Sharing and copying layers
 --------------------------
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 321d74b73937..cf43bc4dbf31 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -623,6 +623,11 @@ in your dentry operations instead.
	On success you get a new struct file sharing the mount/dentry with the
	original, on failure - ERR_PTR().
 --
+[mandatory]
+	->clone_file_range() and ->dedupe_file_range() have been replaced with
+	->remap_file_range().  See Documentation/filesystems/vfs.txt for more
+	information.
+--
 [recommended]
	->lookup() instances doing an equivalent of
		if (IS_ERR(inode))
diff --git a/Documentation/filesystems/ubifs-authentication.md b/Documentation/filesystems/ubifs-authentication.md
new file mode 100644
index 000000000000..028b3e2e25f9
--- /dev/null
+++ b/Documentation/filesystems/ubifs-authentication.md
@@ -0,0 +1,426 @@
+% UBIFS Authentication
+% sigma star gmbh
+% 2018
+
+# Introduction
+
+UBIFS utilizes the fscrypt framework to provide confidentiality for file
+contents and file names. This prevents attacks where an attacker is able to
+read the contents of the filesystem at a single point in time. A classic
+example is a lost smartphone where the attacker is unable to read personal
+data stored on the device without the filesystem decryption key.
+
+In its current state, however, UBIFS encryption does not prevent attacks where
+the attacker is able to modify the filesystem contents and the user uses the
+device afterwards. In such a scenario an attacker can modify filesystem
+contents arbitrarily without the user noticing. One example is to modify a
+binary to perform a malicious action when executed [DMC-CBC-ATTACK]. Since
+most of the filesystem metadata of UBIFS is stored in the clear, this makes it
+fairly easy to swap files and replace their contents.
+
+Other full disk encryption systems like dm-crypt cover all filesystem metadata,
+which makes such attacks more complicated, but not impossible, especially if
+the attacker is given access to the device at multiple points in time. For
+dm-crypt and other filesystems that build upon the Linux block IO layer, the
+dm-integrity or dm-verity subsystems [DM-INTEGRITY, DM-VERITY] can be used to
+get full data authentication at the block layer. These can also be combined
+with dm-crypt [CRYPTSETUP2].
+
+This document describes an approach to get file contents _and_ full metadata
+authentication for UBIFS. Since UBIFS uses fscrypt for file contents and file
+name encryption, the authentication system could be tied into fscrypt such that
+existing features like key derivation can be utilized. It should however also
+be possible to use UBIFS authentication without using encryption.
+ + +## MTD, UBI & UBIFS + +On Linux, the MTD (Memory Technology Devices) subsystem provides a uniform +interface to access raw flash devices. One of the more prominent subsystems that +work on top of MTD is UBI (Unsorted Block Images). It provides volume management +for flash devices and is thus somewhat similar to LVM for block devices. In +addition, it deals with flash-specific wear-leveling and transparent I/O error +handling. UBI offers logical erase blocks (LEBs) to the layers on top of it +and maps them transparently to physical erase blocks (PEBs) on the flash. + +UBIFS is a filesystem for raw flash which operates on top of UBI. Thus, wear +leveling and some flash specifics are left to UBI, while UBIFS focuses on +scalability, performance and recoverability. + + + + +------------+ +*******+ +-----------+ +-----+ + | | * UBIFS * | UBI-BLOCK | | ... | + | JFFS/JFFS2 | +*******+ +-----------+ +-----+ + | | +-----------------------------+ +-----------+ +-----+ + | | | UBI | | MTD-BLOCK | | ... | + +------------+ +-----------------------------+ +-----------+ +-----+ + +------------------------------------------------------------------+ + | MEMORY TECHNOLOGY DEVICES (MTD) | + +------------------------------------------------------------------+ + +-----------------------------+ +--------------------------+ +-----+ + | NAND DRIVERS | | NOR DRIVERS | | ... | + +-----------------------------+ +--------------------------+ +-----+ + + Figure 1: Linux kernel subsystems for dealing with raw flash + + + +Internally, UBIFS maintains multiple data structures which are persisted on +the flash: + +- *Index*: an on-flash B+ tree where the leaf nodes contain filesystem data +- *Journal*: an additional data structure to collect FS changes before updating + the on-flash index and reduce flash wear. +- *Tree Node Cache (TNC)*: an in-memory B+ tree that reflects the current FS + state to avoid frequent flash reads. It is basically the in-memory + representation of the index, but contains additional attributes. +- *LEB property tree (LPT)*: an on-flash B+ tree for free space accounting per + UBI LEB. + +In the remainder of this section we will cover the on-flash UBIFS data +structures in more detail. The TNC is of less importance here since it is never +persisted onto the flash directly. More details on UBIFS can also be found in +[UBIFS-WP]. + + +### UBIFS Index & Tree Node Cache + +Basic on-flash UBIFS entities are called *nodes*. UBIFS knows different types +of nodes. Eg. data nodes (`struct ubifs_data_node`) which store chunks of file +contents or inode nodes (`struct ubifs_ino_node`) which represent VFS inodes. +Almost all types of nodes share a common header (`ubifs_ch`) containing basic +information like node type, node length, a sequence number, etc. (see +`fs/ubifs/ubifs-media.h`in kernel source). Exceptions are entries of the LPT +and some less important node types like padding nodes which are used to pad +unusable content at the end of LEBs. + +To avoid re-writing the whole B+ tree on every single change, it is implemented +as *wandering tree*, where only the changed nodes are re-written and previous +versions of them are obsoleted without erasing them right away. As a result, +the index is not stored in a single place on the flash, but *wanders* around +and there are obsolete parts on the flash as long as the LEB containing them is +not reused by UBIFS. 
To find the most recent version of the index, UBIFS stores +a special node called *master node* into UBI LEB 1 which always points to the +most recent root node of the UBIFS index. For recoverability, the master node +is additionally duplicated to LEB 2. Mounting UBIFS is thus a simple read of +LEB 1 and 2 to get the current master node and from there get the location of +the most recent on-flash index. + +The TNC is the in-memory representation of the on-flash index. It contains some +additional runtime attributes per node which are not persisted. One of these is +a dirty-flag which marks nodes that have to be persisted the next time the +index is written onto the flash. The TNC acts as a write-back cache and all +modifications of the on-flash index are done through the TNC. Like other caches, +the TNC does not have to mirror the full index into memory, but reads parts of +it from flash whenever needed. A *commit* is the UBIFS operation of updating the +on-flash filesystem structures like the index. On every commit, the TNC nodes +marked as dirty are written to the flash to update the persisted index. + + +### Journal + +To avoid wearing out the flash, the index is only persisted (*commited*) when +certain conditions are met (eg. `fsync(2)`). The journal is used to record +any changes (in form of inode nodes, data nodes etc.) between commits +of the index. During mount, the journal is read from the flash and replayed +onto the TNC (which will be created on-demand from the on-flash index). + +UBIFS reserves a bunch of LEBs just for the journal called *log area*. The +amount of log area LEBs is configured on filesystem creation (using +`mkfs.ubifs`) and stored in the superblock node. The log area contains only +two types of nodes: *reference nodes* and *commit start nodes*. A commit start +node is written whenever an index commit is performed. Reference nodes are +written on every journal update. Each reference node points to the position of +other nodes (inode nodes, data nodes etc.) on the flash that are part of this +journal entry. These nodes are called *buds* and describe the actual filesystem +changes including their data. + +The log area is maintained as a ring. Whenever the journal is almost full, +a commit is initiated. This also writes a commit start node so that during +mount, UBIFS will seek for the most recent commit start node and just replay +every reference node after that. Every reference node before the commit start +node will be ignored as they are already part of the on-flash index. + +When writing a journal entry, UBIFS first ensures that enough space is +available to write the reference node and buds part of this entry. Then, the +reference node is written and afterwards the buds describing the file changes. +On replay, UBIFS will record every reference node and inspect the location of +the referenced LEBs to discover the buds. If these are corrupt or missing, +UBIFS will attempt to recover them by re-reading the LEB. This is however only +done for the last referenced LEB of the journal. Only this can become corrupt +because of a power cut. If the recovery fails, UBIFS will not mount. An error +for every other LEB will directly cause UBIFS to fail the mount operation. 
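To make the replay rule above concrete, here is a deliberately simplified Python model (not the kernel code): the log area is a list of (type, payload) tuples, the most recent commit start node is located, and only reference nodes written after it are selected for replay. The node types and payload strings are invented for illustration.

    # Toy model of selecting journal entries to replay from the log area.
    # Real UBIFS nodes carry sequence numbers, CRCs and LEB:offset references;
    # the tuples below stand in for commit start ("CS") and reference ("REF") nodes.
    def select_replay_nodes(log_area):
        last_cs = None
        for i, (ntype, _) in enumerate(log_area):
            if ntype == "CS":
                last_cs = i
        if last_cs is None:
            return []
        # Everything before the last commit start node is already part of the
        # on-flash index; only later reference nodes point to buds to replay.
        return [payload for ntype, payload in log_area[last_cs + 1:]
                if ntype == "REF"]

    log = [("CS", None), ("REF", "buds in LEB 10"), ("CS", None),
           ("REF", "buds in LEB 12"), ("REF", "buds in LEB 13")]
    assert select_replay_nodes(log) == ["buds in LEB 12", "buds in LEB 13"]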
+ + + | ---- LOG AREA ---- | ---------- MAIN AREA ------------ | + + -----+------+-----+--------+---- ------+-----+-----+--------------- + \ | | | | / / | | | \ + / CS | REF | REF | | \ \ DENT | INO | INO | / + \ | | | | / / | | | \ + ----+------+-----+--------+--- -------+-----+-----+---------------- + | | ^ ^ + | | | | + +------------------------+ | + | | + +-------------------------------+ + + + Figure 2: UBIFS flash layout of log area with commit start nodes + (CS) and reference nodes (REF) pointing to main area + containing their buds + + +### LEB Property Tree/Table + +The LEB property tree is used to store per-LEB information. This includes the +LEB type and amount of free and *dirty* (old, obsolete content) space [1] on +the LEB. The type is important, because UBIFS never mixes index nodes with data +nodes on a single LEB and thus each LEB has a specific purpose. This again is +useful for free space calculations. See [UBIFS-WP] for more details. + +The LEB property tree again is a B+ tree, but it is much smaller than the +index. Due to its smaller size it is always written as one chunk on every +commit. Thus, saving the LPT is an atomic operation. + + +[1] Since LEBs can only be appended and never overwritten, there is a +difference between free space ie. the remaining space left on the LEB to be +written to without erasing it and previously written content that is obsolete +but can't be overwritten without erasing the full LEB. + + +# UBIFS Authentication + +This chapter introduces UBIFS authentication which enables UBIFS to verify +the authenticity and integrity of metadata and file contents stored on flash. + + +## Threat Model + +UBIFS authentication enables detection of offline data modification. While it +does not prevent it, it enables (trusted) code to check the integrity and +authenticity of on-flash file contents and filesystem metadata. This covers +attacks where file contents are swapped. + +UBIFS authentication will not protect against rollback of full flash contents. +Ie. an attacker can still dump the flash and restore it at a later time without +detection. It will also not protect against partial rollback of individual +index commits. That means that an attacker is able to partially undo changes. +This is possible because UBIFS does not immediately overwrites obsolete +versions of the index tree or the journal, but instead marks them as obsolete +and garbage collection erases them at a later time. An attacker can use this by +erasing parts of the current tree and restoring old versions that are still on +the flash and have not yet been erased. This is possible, because every commit +will always write a new version of the index root node and the master node +without overwriting the previous version. This is further helped by the +wear-leveling operations of UBI which copies contents from one physical +eraseblock to another and does not atomically erase the first eraseblock. + +UBIFS authentication does not cover attacks where an attacker is able to +execute code on the device after the authentication key was provided. +Additional measures like secure boot and trusted boot have to be taken to +ensure that only trusted code is executed on a device. + + +## Authentication + +To be able to fully trust data read from flash, all UBIFS data structures +stored on flash are authenticated. That is: + +- The index which includes file contents, file metadata like extended + attributes, file length etc. 
+- The journal which also contains file contents and metadata by recording changes + to the filesystem +- The LPT which stores UBI LEB metadata which UBIFS uses for free space accounting + + +### Index Authentication + +Through UBIFS' concept of a wandering tree, it already takes care of only +updating and persisting changed parts from leaf node up to the root node +of the full B+ tree. This enables us to augment the index nodes of the tree +with a hash over each node's child nodes. As a result, the index basically also +a Merkle tree. Since the leaf nodes of the index contain the actual filesystem +data, the hashes of their parent index nodes thus cover all the file contents +and file metadata. When a file changes, the UBIFS index is updated accordingly +from the leaf nodes up to the root node including the master node. This process +can be hooked to recompute the hash only for each changed node at the same time. +Whenever a file is read, UBIFS can verify the hashes from each leaf node up to +the root node to ensure the node's integrity. + +To ensure the authenticity of the whole index, the UBIFS master node stores a +keyed hash (HMAC) over its own contents and a hash of the root node of the index +tree. As mentioned above, the master node is always written to the flash whenever +the index is persisted (ie. on index commit). + +Using this approach only UBIFS index nodes and the master node are changed to +include a hash. All other types of nodes will remain unchanged. This reduces +the storage overhead which is precious for users of UBIFS (ie. embedded +devices). + + + +---------------+ + | Master Node | + | (hash) | + +---------------+ + | + v + +-------------------+ + | Index Node #1 | + | | + | branch0 branchn | + | (hash) (hash) | + +-------------------+ + | ... | (fanout: 8) + | | + +-------+ +------+ + | | + v v + +-------------------+ +-------------------+ + | Index Node #2 | | Index Node #3 | + | | | | + | branch0 branchn | | branch0 branchn | + | (hash) (hash) | | (hash) (hash) | + +-------------------+ +-------------------+ + | ... | ... | + v v v + +-----------+ +----------+ +-----------+ + | Data Node | | INO Node | | DENT Node | + +-----------+ +----------+ +-----------+ + + + Figure 3: Coverage areas of index node hash and master node HMAC + + + +The most important part for robustness and power-cut safety is to atomically +persist the hash and file contents. Here the existing UBIFS logic for how +changed nodes are persisted is already designed for this purpose such that +UBIFS can safely recover if a power-cut occurs while persisting. Adding +hashes to index nodes does not change this since each hash will be persisted +atomically together with its respective node. + + +### Journal Authentication + +The journal is authenticated too. Since the journal is continuously written +it is necessary to also add authentication information frequently to the +journal so that in case of a powercut not too much data can't be authenticated. +This is done by creating a continuous hash beginning from the commit start node +over the previous reference nodes, the current reference node, and the bud +nodes. From time to time whenever it is suitable authentication nodes are added +between the bud nodes. This new node type contains a HMAC over the current state +of the hash chain. That way a journal can be authenticated up to the last +authentication node. The tail of the journal which may not have a authentication +node cannot be authenticated and is skipped during journal replay. 
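+
+The following is a minimal sketch of that scheme using the kernel crypto API
+(an illustration only, not the actual fs/ubifs/ code; key setup and most
+error handling are omitted and the helper names are invented):
+
+    #include <crypto/hash.h>
+
+    /* Running hash over the nodes (reference nodes and buds) as they are
+     * written to the journal. */
+    static int journal_hash_node(struct shash_desc *log_hash,
+                                 const void *node, int len)
+    {
+            return crypto_shash_update(log_hash, node, len);
+    }
+
+    /* Payload of an authentication node: an HMAC over the current state of
+     * the hash chain.  The running hash must not be finalized, so its state
+     * is exported into a scratch descriptor first. */
+    static int journal_auth_hmac(struct crypto_shash *hash_tfm,
+                                 struct crypto_shash *hmac_tfm, /* keyed */
+                                 struct shash_desc *log_hash, u8 *hmac)
+    {
+            u8 state[512];   /* >= crypto_shash_statesize(hash_tfm) */
+            u8 digest[64];   /* >= crypto_shash_digestsize(hash_tfm) */
+            SHASH_DESC_ON_STACK(tmp, hash_tfm);
+            SHASH_DESC_ON_STACK(hd, hmac_tfm);
+            int err;
+
+            tmp->tfm = hash_tfm;
+            hd->tfm = hmac_tfm;
+
+            err = crypto_shash_export(log_hash, state);
+            if (!err)
+                    err = crypto_shash_import(tmp, state);
+            if (!err)
+                    err = crypto_shash_final(tmp, digest);
+            if (!err)
+                    err = crypto_shash_digest(hd, digest,
+                                              crypto_shash_digestsize(hash_tfm),
+                                              hmac);
+            return err;
+    }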
+ +We get this picture for journal authentication: + + ,,,,,,,, + ,......,........................................... + ,. CS , hash1.----. hash2.----. + ,. | , . |hmac . |hmac + ,. v , . v . v + ,.REF#0,-> bud -> bud -> bud.-> auth -> bud -> bud.-> auth ... + ,..|...,........................................... + , | , + , | ,,,,,,,,,,,,,,, + . | hash3,----. + , | , |hmac + , v , v + , REF#1 -> bud -> bud,-> auth ... + ,,,|,,,,,,,,,,,,,,,,,, + v + REF#2 -> ... + | + V + ... + +Since the hash also includes the reference nodes an attacker cannot reorder or +skip any journal heads for replay. An attacker can only remove bud nodes or +reference nodes from the end of the journal, effectively rewinding the +filesystem at maximum back to the last commit. + +The location of the log area is stored in the master node. Since the master +node is authenticated with a HMAC as described above, it is not possible to +tamper with that without detection. The size of the log area is specified when +the filesystem is created using `mkfs.ubifs` and stored in the superblock node. +To avoid tampering with this and other values stored there, a HMAC is added to +the superblock struct. The superblock node is stored in LEB 0 and is only +modified on feature flag or similar changes, but never on file changes. + + +### LPT Authentication + +The location of the LPT root node on the flash is stored in the UBIFS master +node. Since the LPT is written and read atomically on every commit, there is +no need to authenticate individual nodes of the tree. It suffices to +protect the integrity of the full LPT by a simple hash stored in the master +node. Since the master node itself is authenticated, the LPTs authenticity can +be verified by verifying the authenticity of the master node and comparing the +LTP hash stored there with the hash computed from the read on-flash LPT. + + +## Key Management + +For simplicity, UBIFS authentication uses a single key to compute the HMACs +of superblock, master, commit start and reference nodes. This key has to be +available on creation of the filesystem (`mkfs.ubifs`) to authenticate the +superblock node. Further, it has to be available on mount of the filesystem +to verify authenticated nodes and generate new HMACs for changes. + +UBIFS authentication is intended to operate side-by-side with UBIFS encryption +(fscrypt) to provide confidentiality and authenticity. Since UBIFS encryption +has a different approach of encryption policies per directory, there can be +multiple fscrypt master keys and there might be folders without encryption. +UBIFS authentication on the other hand has an all-or-nothing approach in the +sense that it either authenticates everything of the filesystem or nothing. +Because of this and because UBIFS authentication should also be usable without +encryption, it does not share the same master key with fscrypt, but manages +a dedicated authentication key. + +The API for providing the authentication key has yet to be defined, but the +key can eg. be provided by userspace through a keyring similar to the way it +is currently done in fscrypt. It should however be noted that the current +fscrypt approach has shown its flaws and the userspace API will eventually +change [FSCRYPT-POLICY2]. + +Nevertheless, it will be possible for a user to provide a single passphrase +or key in userspace that covers UBIFS authentication and encryption. 
This can +be solved by the corresponding userspace tools which derive a second key for +authentication in addition to the derived fscrypt master key used for +encryption. + +To be able to check if the proper key is available on mount, the UBIFS +superblock node will additionally store a hash of the authentication key. This +approach is similar to the approach proposed for fscrypt encryption policy v2 +[FSCRYPT-POLICY2]. + + +# Future Extensions + +In certain cases where a vendor wants to provide an authenticated filesystem +image to customers, it should be possible to do so without sharing the secret +UBIFS authentication key. Instead, in addition the each HMAC a digital +signature could be stored where the vendor shares the public key alongside the +filesystem image. In case this filesystem has to be modified afterwards, +UBIFS can exchange all digital signatures with HMACs on first mount similar +to the way the IMA/EVM subsystem deals with such situations. The HMAC key +will then have to be provided beforehand in the normal way. + + +# References + +[CRYPTSETUP2] http://www.saout.de/pipermail/dm-crypt/2017-November/005745.html + +[DMC-CBC-ATTACK] http://www.jakoblell.com/blog/2013/12/22/practical-malleability-attack-against-cbc-encrypted-luks-partitions/ + +[DM-INTEGRITY] https://www.kernel.org/doc/Documentation/device-mapper/dm-integrity.txt + +[DM-VERITY] https://www.kernel.org/doc/Documentation/device-mapper/verity.txt + +[FSCRYPT-POLICY2] https://www.spinics.net/lists/linux-ext4/msg58710.html + +[UBIFS-WP] http://www.linux-mtd.infradead.org/doc/ubifs_whitepaper.pdf diff --git a/Documentation/filesystems/ubifs.txt b/Documentation/filesystems/ubifs.txt index a0a61d2f389f..acc80442a3bb 100644 --- a/Documentation/filesystems/ubifs.txt +++ b/Documentation/filesystems/ubifs.txt @@ -91,6 +91,13 @@ chk_data_crc do not skip checking CRCs on data nodes compr=none override default compressor and set it to "none" compr=lzo override default compressor and set it to "lzo" compr=zlib override default compressor and set it to "zlib" +auth_key= specify the key used for authenticating the filesystem. + Passing this option makes authentication mandatory. + The passed key must be present in the kernel keyring + and must be of type 'logon' +auth_hash_name= The hash algorithm used for authentication. Used for + both hashing and for creating HMACs. Typical values + include "sha256" or "sha512" Quick usage instructions diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index a6c6a8af48a2..5f71a252e2e0 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -883,8 +883,9 @@ struct file_operations { unsigned (*mmap_capabilities)(struct file *); #endif ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int); - int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, u64); - int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, u64); + loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t len, unsigned int remap_flags); int (*fadvise)(struct file *, loff_t, loff_t, int); }; @@ -960,11 +961,18 @@ otherwise noted. copy_file_range: called by the copy_file_range(2) system call. - clone_file_range: called by the ioctl(2) system call for FICLONERANGE and - FICLONE commands. - - dedupe_file_range: called by the ioctl(2) system call for FIDEDUPERANGE - command. 
+ remap_file_range: called by the ioctl(2) system call for FICLONERANGE and + FICLONE and FIDEDUPERANGE commands to remap file ranges. An + implementation should remap len bytes at pos_in of the source file into + the dest file at pos_out. Implementations must handle callers passing + in len == 0; this means "remap to the end of the source file". The + return value should the number of bytes remapped, or the usual + negative error code if errors occurred before any bytes were remapped. + The remap_flags parameter accepts REMAP_FILE_* flags. If + REMAP_FILE_DEDUP is set then the implementation must only remap if the + requested file ranges have identical contents. If REMAP_CAN_SHORTEN is + set, the caller is ok with the implementation shortening the request + length to satisfy alignment or EOF requirements (or any other reason). fadvise: possibly called by the fadvise64() system call. diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt index 7b6a2b2bdc98..8da26c6dd886 100644 --- a/Documentation/kbuild/makefiles.txt +++ b/Documentation/kbuild/makefiles.txt @@ -537,21 +537,6 @@ more details, with real examples. The third parameter may be a text as in this example, but it may also be an expanded variable or a macro. - cc-fullversion - cc-fullversion is useful when the exact version of gcc is needed. - One typical use-case is when a specific GCC version is broken. - cc-fullversion points out a more specific version than cc-version does. - - Example: - #arch/powerpc/Makefile - $(Q)if test "$(cc-fullversion)" = "040200" ; then \ - echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \ - false ; \ - fi - - In this example for a specific GCC version the build will error out - explaining to the user why it stops. - cc-cross-prefix cc-cross-prefix is used to check if there exists a $(CC) in path with one of the listed prefixes. The first prefix where there exist a diff --git a/Documentation/laptops/lg-laptop.rst b/Documentation/laptops/lg-laptop.rst new file mode 100644 index 000000000000..e486fe7ddc35 --- /dev/null +++ b/Documentation/laptops/lg-laptop.rst @@ -0,0 +1,81 @@ +.. SPDX-License-Identifier: GPL-2.0+ +LG Gram laptop extra features +============================= + +By Matan Ziv-Av <matan@svgalib.org> + + +Hotkeys +------- + +The following FN keys are ignored by the kernel without this driver: +- FN-F1 (LG control panel) - Generates F15 +- FN-F5 (Touchpad toggle) - Generates F13 +- FN-F6 (Airplane mode) - Generates RFKILL +- FN-F8 (Keyboard backlight) - Generates F16. + This key also changes keyboard backlight mode. +- FN-F9 (Reader mode) - Generates F14 + +The rest of the FN key work without a need for a special driver. + + +Reader mode +----------- + +Writing 0/1 to /sys/devices/platform/lg-laptop/reader_mode disables/enables +reader mode. In this mode the screen colors change (blue color reduced), +and the reader mode indicator LED (on F9 key) turns on. + + +FN Lock +------- + +Writing 0/1 to /sys/devices/platform/lg-laptop/fn_lock disables/enables +FN lock. + + +Battery care limit +------------------ + +Writing 80/100 to /sys/devices/platform/lg-laptop/battery_care_limit +sets the maximum capacity to charge the battery. Limiting the charge +reduces battery capacity loss over time. + +This value is reset to 100 when the kernel boots. + + +Fan mode +-------- + +Writing 1/0 to /sys/devices/platform/lg-laptop/fan_mode disables/enables +the fan silent mode. 
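+
+As an illustration only (this helper is not part of the driver), the
+attributes above can be driven from a small userspace program; the sysfs
+paths are exactly those documented in this file::
+
+    #include <stdio.h>
+
+    /* Write a value ("0"/"1", or "80"/"100" for battery_care_limit) to one
+     * of the lg-laptop sysfs attributes. */
+    static int lg_set(const char *attr, const char *value)
+    {
+            char path[128];
+            FILE *f;
+
+            snprintf(path, sizeof(path),
+                     "/sys/devices/platform/lg-laptop/%s", attr);
+            f = fopen(path, "w");
+            if (!f)
+                    return -1;
+            fputs(value, f);
+            return fclose(f);
+    }
+
+    int main(void)
+    {
+            return lg_set("fan_mode", "0");  /* 0 enables fan silent mode */
+    }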
+ + +USB charge +---------- + +Writing 0/1 to /sys/devices/platform/lg-laptop/usb_charge disables/enables +charging another device from the USB port while the device is turned off. + +This value is reset to 0 when the kernel boots. + + +LEDs +~~~~ + +The are two LED devices supported by the driver: + +Keyboard backlight +------------------ + +A led device named kbd_led controls the keyboard backlight. There are three +lighting level: off (0), low (127) and high (255). + +The keyboard backlight is also controlled by the key combination FN-F8 +which cycles through those levels. + + +Touchpad indicator LED +---------------------- + +On the F5 key. Controlled by led device names tpad_led. diff --git a/Documentation/networking/ice.rst b/Documentation/networking/ice.rst index 1e4948c9e989..4d118b827bbb 100644 --- a/Documentation/networking/ice.rst +++ b/Documentation/networking/ice.rst @@ -20,7 +20,7 @@ Enabling the driver The driver is enabled via the standard kernel configuration system, using the make command:: - make oldconfig/silentoldconfig/menuconfig/etc. + make oldconfig/menuconfig/etc. The driver is located in the menu structure at: diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 163b5ff1073c..32b21571adfe 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -316,6 +316,17 @@ tcp_frto - INTEGER By default it's enabled with a non-zero value. 0 disables F-RTO. +tcp_fwmark_accept - BOOLEAN + If set, incoming connections to listening sockets that do not have a + socket mark will set the mark of the accepting socket to the fwmark of + the incoming SYN packet. This will cause all packets on that connection + (starting from the first SYNACK) to be sent with that fwmark. The + listening socket's mark is unchanged. Listening sockets that already + have a fwmark set via setsockopt(SOL_SOCKET, SO_MARK, ...) are + unaffected. + + Default: 0 + tcp_invalid_ratelimit - INTEGER Limit the maximal rate for sending duplicate acknowledgments in response to incoming TCP packets that are for an existing diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst index 757808526d9a..878ebfda7eef 100644 --- a/Documentation/process/index.rst +++ b/Documentation/process/index.rst @@ -25,6 +25,7 @@ Below are the essential guides that every developer should read. code-of-conduct-interpretation development-process submitting-patches + programming-language coding-style maintainer-pgp-guide email-clients diff --git a/Documentation/process/programming-language.rst b/Documentation/process/programming-language.rst new file mode 100644 index 000000000000..e5f5f065dc24 --- /dev/null +++ b/Documentation/process/programming-language.rst @@ -0,0 +1,45 @@ +.. _programming_language: + +Programming Language +==================== + +The kernel is written in the C programming language [c-language]_. +More precisely, the kernel is typically compiled with ``gcc`` [gcc]_ +under ``-std=gnu89`` [gcc-c-dialect-options]_: the GNU dialect of ISO C90 +(including some C99 features). + +This dialect contains many extensions to the language [gnu-extensions]_, +and many of them are used within the kernel as a matter of course. + +There is some support for compiling the kernel with ``clang`` [clang]_ +and ``icc`` [icc]_ for several of the architectures, although at the time +of writing it is not completed, requiring third-party patches. 
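+
+As a small illustration (this snippet is not taken from the kernel sources),
+statement expressions and ``typeof``, two of the GNU extensions relied upon
+throughout the tree, allow writing macros that evaluate their argument only
+once::
+
+    #include <stdio.h>
+
+    /* Statement expression + typeof: the macro argument is evaluated once. */
+    #define double_of(x)                    \
+            ({                              \
+                    typeof(x) __x = (x);    \
+                    __x + __x;              \
+            })
+
+    int main(void)
+    {
+            int i = 20;
+
+            printf("%d\n", double_of(i++)); /* prints 40; i is now 21 */
+            return 0;
+    }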
+ +Attributes +---------- + +One of the common extensions used throughout the kernel are attributes +[gcc-attribute-syntax]_. Attributes allow to introduce +implementation-defined semantics to language entities (like variables, +functions or types) without having to make significant syntactic changes +to the language (e.g. adding a new keyword) [n2049]_. + +In some cases, attributes are optional (i.e. a compiler not supporting them +should still produce proper code, even if it is slower or does not perform +as many compile-time checks/diagnostics). + +The kernel defines pseudo-keywords (e.g. ``__pure``) instead of using +directly the GNU attribute syntax (e.g. ``__attribute__((__pure__))``) +in order to feature detect which ones can be used and/or to shorten the code. + +Please refer to ``include/linux/compiler_attributes.h`` for more information. + +.. [c-language] http://www.open-std.org/jtc1/sc22/wg14/www/standards +.. [gcc] https://gcc.gnu.org +.. [clang] https://clang.llvm.org +.. [icc] https://software.intel.com/en-us/c-compilers +.. [gcc-c-dialect-options] https://gcc.gnu.org/onlinedocs/gcc/C-Dialect-Options.html +.. [gnu-extensions] https://gcc.gnu.org/onlinedocs/gcc/C-Extensions.html +.. [gcc-attribute-syntax] https://gcc.gnu.org/onlinedocs/gcc/Attribute-Syntax.html +.. [n2049] http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2049.pdf + diff --git a/Documentation/security/keys/core.rst b/Documentation/security/keys/core.rst index 9ce7256c6edb..9521c4207f01 100644 --- a/Documentation/security/keys/core.rst +++ b/Documentation/security/keys/core.rst @@ -859,6 +859,7 @@ The keyctl syscall functions are: and either the buffer length or the OtherInfo length exceeds the allowed length. + * Restrict keyring linkage:: long keyctl(KEYCTL_RESTRICT_KEYRING, key_serial_t keyring, @@ -890,6 +891,116 @@ The keyctl syscall functions are: applicable to the asymmetric key type. + * Query an asymmetric key:: + + long keyctl(KEYCTL_PKEY_QUERY, + key_serial_t key_id, unsigned long reserved, + struct keyctl_pkey_query *info); + + Get information about an asymmetric key. The information is returned in + the keyctl_pkey_query struct:: + + __u32 supported_ops; + __u32 key_size; + __u16 max_data_size; + __u16 max_sig_size; + __u16 max_enc_size; + __u16 max_dec_size; + __u32 __spare[10]; + + ``supported_ops`` contains a bit mask of flags indicating which ops are + supported. This is constructed from a bitwise-OR of:: + + KEYCTL_SUPPORTS_{ENCRYPT,DECRYPT,SIGN,VERIFY} + + ``key_size`` indicated the size of the key in bits. + + ``max_*_size`` indicate the maximum sizes in bytes of a blob of data to be + signed, a signature blob, a blob to be encrypted and a blob to be + decrypted. + + ``__spare[]`` must be set to 0. This is intended for future use to hand + over one or more passphrases needed unlock a key. + + If successful, 0 is returned. If the key is not an asymmetric key, + EOPNOTSUPP is returned. 
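+
+  As a sketch of how userspace might act on the information returned by
+  KEYCTL_PKEY_QUERY (assuming the ``linux/keyctl.h`` UAPI header of this
+  release exports the structure and the ``KEYCTL_SUPPORTS_*`` constants)::
+
+      #include <stdio.h>
+      #include <linux/keyctl.h>
+
+      /* Decide from a filled-in query result whether the key can sign a
+       * digest of the given length. */
+      static int can_sign(const struct keyctl_pkey_query *q, size_t len)
+      {
+              if (!(q->supported_ops & KEYCTL_SUPPORTS_SIGN)) {
+                      fprintf(stderr, "key cannot create signatures\n");
+                      return 0;
+              }
+              if (len > q->max_data_size) {
+                      fprintf(stderr, "digest too large for this %u-bit key\n",
+                              q->key_size);
+                      return 0;
+              }
+              printf("signature will be at most %u bytes\n",
+                     (unsigned int)q->max_sig_size);
+              return 1;
+      }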
+ + + * Encrypt, decrypt, sign or verify a blob using an asymmetric key:: + + long keyctl(KEYCTL_PKEY_ENCRYPT, + const struct keyctl_pkey_params *params, + const char *info, + const void *in, + void *out); + + long keyctl(KEYCTL_PKEY_DECRYPT, + const struct keyctl_pkey_params *params, + const char *info, + const void *in, + void *out); + + long keyctl(KEYCTL_PKEY_SIGN, + const struct keyctl_pkey_params *params, + const char *info, + const void *in, + void *out); + + long keyctl(KEYCTL_PKEY_VERIFY, + const struct keyctl_pkey_params *params, + const char *info, + const void *in, + const void *in2); + + Use an asymmetric key to perform a public-key cryptographic operation a + blob of data. For encryption and verification, the asymmetric key may + only need the public parts to be available, but for decryption and signing + the private parts are required also. + + The parameter block pointed to by params contains a number of integer + values:: + + __s32 key_id; + __u32 in_len; + __u32 out_len; + __u32 in2_len; + + ``key_id`` is the ID of the asymmetric key to be used. ``in_len`` and + ``in2_len`` indicate the amount of data in the in and in2 buffers and + ``out_len`` indicates the size of the out buffer as appropriate for the + above operations. + + For a given operation, the in and out buffers are used as follows:: + + Operation ID in,in_len out,out_len in2,in2_len + ======================= =============== =============== =============== + KEYCTL_PKEY_ENCRYPT Raw data Encrypted data - + KEYCTL_PKEY_DECRYPT Encrypted data Raw data - + KEYCTL_PKEY_SIGN Raw data Signature - + KEYCTL_PKEY_VERIFY Raw data - Signature + + ``info`` is a string of key=value pairs that supply supplementary + information. These include: + + ``enc=<encoding>`` The encoding of the encrypted/signature blob. This + can be "pkcs1" for RSASSA-PKCS1-v1.5 or + RSAES-PKCS1-v1.5; "pss" for "RSASSA-PSS"; "oaep" for + "RSAES-OAEP". If omitted or is "raw", the raw output + of the encryption function is specified. + + ``hash=<algo>`` If the data buffer contains the output of a hash + function and the encoding includes some indication of + which hash function was used, the hash function can be + specified with this, eg. "hash=sha256". + + The ``__spare[]`` space in the parameter block must be set to 0. This is + intended, amongst other things, to allow the passing of passphrases + required to unlock a key. + + If successful, encrypt, decrypt and sign all return the amount of data + written into the output buffer. Verification returns 0 on success. + + Kernel Services =============== @@ -1483,6 +1594,112 @@ The structure has a number of fields, some of which are mandatory: attempted key link operation. If there is no match, -EINVAL is returned. + * ``int (*asym_eds_op)(struct kernel_pkey_params *params, + const void *in, void *out);`` + ``int (*asym_verify_signature)(struct kernel_pkey_params *params, + const void *in, const void *in2);`` + + These methods are optional. If provided the first allows a key to be + used to encrypt, decrypt or sign a blob of data, and the second allows a + key to verify a signature. 
+ + In all cases, the following information is provided in the params block:: + + struct kernel_pkey_params { + struct key *key; + const char *encoding; + const char *hash_algo; + char *info; + __u32 in_len; + union { + __u32 out_len; + __u32 in2_len; + }; + enum kernel_pkey_operation op : 8; + }; + + This includes the key to be used; a string indicating the encoding to use + (for instance, "pkcs1" may be used with an RSA key to indicate + RSASSA-PKCS1-v1.5 or RSAES-PKCS1-v1.5 encoding or "raw" if no encoding); + the name of the hash algorithm used to generate the data for a signature + (if appropriate); the sizes of the input and output (or second input) + buffers; and the ID of the operation to be performed. + + For a given operation ID, the input and output buffers are used as + follows:: + + Operation ID in,in_len out,out_len in2,in2_len + ======================= =============== =============== =============== + kernel_pkey_encrypt Raw data Encrypted data - + kernel_pkey_decrypt Encrypted data Raw data - + kernel_pkey_sign Raw data Signature - + kernel_pkey_verify Raw data - Signature + + asym_eds_op() deals with encryption, decryption and signature creation as + specified by params->op. Note that params->op is also set for + asym_verify_signature(). + + Encrypting and signature creation both take raw data in the input buffer + and return the encrypted result in the output buffer. Padding may have + been added if an encoding was set. In the case of signature creation, + depending on the encoding, the padding created may need to indicate the + digest algorithm - the name of which should be supplied in hash_algo. + + Decryption takes encrypted data in the input buffer and returns the raw + data in the output buffer. Padding will get checked and stripped off if + an encoding was set. + + Verification takes raw data in the input buffer and the signature in the + second input buffer and checks that the one matches the other. Padding + will be validated. Depending on the encoding, the digest algorithm used + to generate the raw data may need to be indicated in hash_algo. + + If successful, asym_eds_op() should return the number of bytes written + into the output buffer. asym_verify_signature() should return 0. + + A variety of errors may be returned, including EOPNOTSUPP if the operation + is not supported; EKEYREJECTED if verification fails; ENOPKG if the + required crypto isn't available. + + + * ``int (*asym_query)(const struct kernel_pkey_params *params, + struct kernel_pkey_query *info);`` + + This method is optional. If provided it allows information about the + public or asymmetric key held in the key to be determined. + + The parameter block is as for asym_eds_op() and co. but in_len and out_len + are unused. The encoding and hash_algo fields should be used to reduce + the returned buffer/data sizes as appropriate. + + If successful, the following information is filled in:: + + struct kernel_pkey_query { + __u32 supported_ops; + __u32 key_size; + __u16 max_data_size; + __u16 max_sig_size; + __u16 max_enc_size; + __u16 max_dec_size; + }; + + The supported_ops field will contain a bitmask indicating what operations + are supported by the key, including encryption of a blob, decryption of a + blob, signing a blob and verifying the signature on a blob. The following + constants are defined for this:: + + KEYCTL_SUPPORTS_{ENCRYPT,DECRYPT,SIGN,VERIFY} + + The key_size field is the size of the key in bits. 
max_data_size and + max_sig_size are the maximum raw data and signature sizes for creation and + verification of a signature; max_enc_size and max_dec_size are the maximum + raw data and signature sizes for encryption and decryption. The + max_*_size fields are measured in bytes. + + If successful, 0 will be returned. If the key doesn't support this, + EOPNOTSUPP will be returned. + + Request-Key Callback Service ============================ diff --git a/Documentation/security/self-protection.rst b/Documentation/security/self-protection.rst index e1ca698e0006..f584fb74b4ff 100644 --- a/Documentation/security/self-protection.rst +++ b/Documentation/security/self-protection.rst @@ -302,11 +302,11 @@ sure structure holes are cleared. Memory poisoning ---------------- -When releasing memory, it is best to poison the contents (clear stack on -syscall return, wipe heap memory on a free), to avoid reuse attacks that -rely on the old contents of memory. This frustrates many uninitialized -variable attacks, stack content exposures, heap content exposures, and -use-after-free attacks. +When releasing memory, it is best to poison the contents, to avoid reuse +attacks that rely on the old contents of memory. E.g., clear stack on a +syscall return (``CONFIG_GCC_PLUGIN_STACKLEAK``), wipe heap memory on a +free. This frustrates many uninitialized variable attacks, stack content +exposures, heap content exposures, and use-after-free attacks. Destination tracking -------------------- diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 37a679501ddc..1b8775298cf7 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -89,6 +89,7 @@ show up in /proc/sys/kernel: - shmmni - softlockup_all_cpu_backtrace - soft_watchdog +- stack_erasing - stop-a [ SPARC only ] - sysrq ==> Documentation/admin-guide/sysrq.rst - sysctl_writes_strict @@ -987,6 +988,23 @@ detect a hard lockup condition. ============================================================== +stack_erasing + +This parameter can be used to control kernel stack erasing at the end +of syscalls for kernels built with CONFIG_GCC_PLUGIN_STACKLEAK. + +That erasing reduces the information which kernel stack leak bugs +can reveal and blocks some uninitialized stack variable attacks. +The tradeoff is the performance impact: on a single CPU system kernel +compilation sees a 1% slowdown, other systems and workloads may vary. + + 0: kernel stack erasing is disabled, STACKLEAK_METRICS are not updated. + + 1: kernel stack erasing is enabled (default), it is performed before + returning to the userspace at the end of syscalls. + +============================================================== + tainted: Non-zero if the kernel has been tainted. Numeric values, which can be diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt index 702898633b00..73aaaa3da436 100644 --- a/Documentation/x86/x86_64/mm.txt +++ b/Documentation/x86/x86_64/mm.txt @@ -146,3 +146,6 @@ Their order is preserved but their base will be offset early at boot time. Be very careful vs. KASLR when changing anything here. The KASLR address range must not overlap with anything except the KASAN shadow area, which is correct as KASAN disables KASLR. 
+ +For both 4- and 5-level layouts, the STACKLEAK_POISON value in the last 2MB +hole: ffffffffffff4111 diff --git a/MAINTAINERS b/MAINTAINERS index 1c0f771b859e..f4855974f325 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -376,7 +376,7 @@ F: drivers/platform/x86/i2c-multi-instantiate.c ACPI PMIC DRIVERS M: "Rafael J. Wysocki" <rjw@rjwysocki.net> M: Len Brown <lenb@kernel.org> -R: Andy Shevchenko <andy@infradead.org> +R: Andy Shevchenko <andriy.shevchenko@linux.intel.com> R: Mika Westerberg <mika.westerberg@linux.intel.com> L: linux-acpi@vger.kernel.org Q: https://patchwork.kernel.org/project/linux-acpi/list/ @@ -3737,6 +3737,11 @@ L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/compal-laptop.c +COMPILER ATTRIBUTES +M: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com> +S: Maintained +F: include/linux/compiler_attributes.h + CONEXANT ACCESSRUNNER USB DRIVER L: accessrunner-general@lists.sourceforge.net W: http://accessrunner.sourceforge.net/ @@ -4207,6 +4212,12 @@ M: Pali Rohár <pali.rohar@gmail.com> S: Maintained F: drivers/platform/x86/dell-rbtn.* +DELL REMOTE BIOS UPDATE DRIVER +M: Stuart Hayes <stuart.w.hayes@gmail.com> +L: platform-driver-x86@vger.kernel.org +S: Maintained +F: drivers/platform/x86/dell_rbu.c + DELL LAPTOP SMM DRIVER M: Pali Rohár <pali.rohar@gmail.com> S: Maintained @@ -4214,10 +4225,11 @@ F: drivers/hwmon/dell-smm-hwmon.c F: include/uapi/linux/i8k.h DELL SYSTEMS MANAGEMENT BASE DRIVER (dcdbas) -M: Doug Warzecha <Douglas_Warzecha@dell.com> +M: Stuart Hayes <stuart.w.hayes@gmail.com> +L: platform-driver-x86@vger.kernel.org S: Maintained F: Documentation/dcdbas.txt -F: drivers/firmware/dcdbas.* +F: drivers/platform/x86/dcdbas.* DELL WMI NOTIFICATIONS DRIVER M: Matthew Garrett <mjg59@srcf.ucam.org> @@ -5871,6 +5883,14 @@ L: linux-i2c@vger.kernel.org S: Maintained F: drivers/i2c/busses/i2c-cpm.c +FREESCALE IMX LPI2C DRIVER +M: Dong Aisheng <aisheng.dong@nxp.com> +L: linux-i2c@vger.kernel.org +L: linux-imx@nxp.com +S: Maintained +F: drivers/i2c/busses/i2c-imx-lpi2c.c +F: Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt + FREESCALE IMX / MXC FEC DRIVER M: Fugang Duan <fugang.duan@nxp.com> L: netdev@vger.kernel.org @@ -7347,6 +7367,12 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Supported F: sound/soc/intel/ +INTEL ATOMISP2 DUMMY / POWER-MANAGEMENT DRIVER +M: Hans de Goede <hdegoede@redhat.com> +L: platform-driver-x86@vger.kernel.org +S: Maintained +F: drivers/platform/x86/intel_atomisp2_pm.c + INTEL C600 SERIES SAS CONTROLLER DRIVER M: Intel SCU Linux support <intel-linux-scu@intel.com> M: Artur Paszkiewicz <artur.paszkiewicz@intel.com> @@ -7533,7 +7559,6 @@ M: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com> M: Vishwanath Somayaji <vishwanath.somayaji@intel.com> L: platform-driver-x86@vger.kernel.org S: Maintained -F: arch/x86/include/asm/pmc_core.h F: drivers/platform/x86/intel_pmc_core* INTEL PMC/P-Unit IPC DRIVER @@ -7577,7 +7602,8 @@ F: drivers/infiniband/hw/i40iw/ F: include/uapi/rdma/i40iw-abi.h INTEL TELEMETRY DRIVER -M: Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com> +M: Rajneesh Bhardwaj <rajneesh.bhardwaj@linux.intel.com> +M: "David E. 
Box" <david.e.box@linux.intel.com> L: platform-driver-x86@vger.kernel.org S: Maintained F: arch/x86/include/asm/intel_telemetry.h @@ -8310,6 +8336,14 @@ W: http://legousb.sourceforge.net/ S: Maintained F: drivers/usb/misc/legousbtower.c +LG LAPTOP EXTRAS +M: Matan Ziv-Av <matan@svgalib.org> +L: platform-driver-x86@vger.kernel.org +S: Maintained +F: Documentation/ABI/testing/sysfs-platform-lg-laptop +F: Documentation/laptops/lg-laptop.rst +F: drivers/platform/x86/lg-laptop.c + LG2160 MEDIA DRIVER M: Michael Krufky <mkrufky@linuxtv.org> L: linux-media@vger.kernel.org @@ -15829,7 +15863,6 @@ F: net/vmw_vsock/virtio_transport_common.c F: net/vmw_vsock/virtio_transport.c F: drivers/net/vsockmon.c F: drivers/vhost/vsock.c -F: drivers/vhost/vsock.h F: tools/testing/vsock/ VIRTIO CONSOLE DRIVER @@ -1,8 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 -PATCHLEVEL = 19 +PATCHLEVEL = 20 SUBLEVEL = 0 -EXTRAVERSION = +EXTRAVERSION = -rc1 NAME = "People's Front" # *DOCUMENTATION* @@ -485,7 +485,7 @@ ifneq ($(KBUILD_SRC),) $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree) endif -ifeq ($(cc-name),clang) +ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) ifneq ($(CROSS_COMPILE),) CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%)) GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD))) @@ -702,7 +702,7 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong KBUILD_CFLAGS += $(stackp-flags-y) -ifeq ($(cc-name),clang) +ifdef CONFIG_CC_IS_CLANG KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,) KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier) KBUILD_CFLAGS += $(call cc-disable-warning, gnu) diff --git a/arch/Kconfig b/arch/Kconfig index ed27fd262627..e1e540ffa979 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -429,6 +429,13 @@ config SECCOMP_FILTER See Documentation/userspace-api/seccomp_filter.rst for details. +config HAVE_ARCH_STACKLEAK + bool + help + An architecture should select this if it has the code which + fills the used part of the kernel stack with the STACKLEAK_POISON + value before returning from system calls. 
+ config HAVE_STACKPROTECTOR bool help diff --git a/arch/arm/boot/dts/stm32mp157c.dtsi b/arch/arm/boot/dts/stm32mp157c.dtsi index c50c36baba75..8bf1c17f8cef 100644 --- a/arch/arm/boot/dts/stm32mp157c.dtsi +++ b/arch/arm/boot/dts/stm32mp157c.dtsi @@ -923,7 +923,7 @@ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>; clocks = <&rcc HASH1>; resets = <&rcc HASH1_R>; - dmas = <&mdma1 31 0x10 0x1000A02 0x0 0x0 0x0>; + dmas = <&mdma1 31 0x10 0x1000A02 0x0 0x0>; dma-names = "in"; dma-maxburst = <2>; status = "disabled"; diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig index e5ad0708849a..c8e198631d41 100644 --- a/arch/arm/common/Kconfig +++ b/arch/arm/common/Kconfig @@ -7,6 +7,9 @@ config DMABOUNCE bool select ZONE_DMA +config KRAIT_L2_ACCESSORS + bool + config SHARP_LOCOMO bool diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile index 3157be413297..219a260bbe5f 100644 --- a/arch/arm/common/Makefile +++ b/arch/arm/common/Makefile @@ -7,6 +7,7 @@ obj-y += firmware.o obj-$(CONFIG_SA1111) += sa1111.o obj-$(CONFIG_DMABOUNCE) += dmabounce.o +obj-$(CONFIG_KRAIT_L2_ACCESSORS) += krait-l2-accessors.o obj-$(CONFIG_SHARP_LOCOMO) += locomo.o obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o obj-$(CONFIG_SHARP_SCOOP) += scoop.o diff --git a/arch/arm/common/krait-l2-accessors.c b/arch/arm/common/krait-l2-accessors.c new file mode 100644 index 000000000000..9a97ddadecd6 --- /dev/null +++ b/arch/arm/common/krait-l2-accessors.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018, The Linux Foundation. All rights reserved. + +#include <linux/spinlock.h> +#include <linux/export.h> + +#include <asm/barrier.h> +#include <asm/krait-l2-accessors.h> + +static DEFINE_RAW_SPINLOCK(krait_l2_lock); + +void krait_set_l2_indirect_reg(u32 addr, u32 val) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&krait_l2_lock, flags); + /* + * Select the L2 window by poking l2cpselr, then write to the window + * via l2cpdr. + */ + asm volatile ("mcr p15, 3, %0, c15, c0, 6 @ l2cpselr" : : "r" (addr)); + isb(); + asm volatile ("mcr p15, 3, %0, c15, c0, 7 @ l2cpdr" : : "r" (val)); + isb(); + + raw_spin_unlock_irqrestore(&krait_l2_lock, flags); +} +EXPORT_SYMBOL(krait_set_l2_indirect_reg); + +u32 krait_get_l2_indirect_reg(u32 addr) +{ + u32 val; + unsigned long flags; + + raw_spin_lock_irqsave(&krait_l2_lock, flags); + /* + * Select the L2 window by poking l2cpselr, then read from the window + * via l2cpdr. 
+ */ + asm volatile ("mcr p15, 3, %0, c15, c0, 6 @ l2cpselr" : : "r" (addr)); + isb(); + asm volatile ("mrc p15, 3, %0, c15, c0, 7 @ l2cpdr" : "=r" (val)); + + raw_spin_unlock_irqrestore(&krait_l2_lock, flags); + + return val; +} +EXPORT_SYMBOL(krait_get_l2_indirect_reg); diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 63af6234c1b6..1c7616815a86 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -1,6 +1,7 @@ CONFIG_SYSVIPC=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y CONFIG_CGROUPS=y CONFIG_BLK_DEV_INITRD=y CONFIG_EMBEDDED=y diff --git a/arch/arm/include/asm/krait-l2-accessors.h b/arch/arm/include/asm/krait-l2-accessors.h new file mode 100644 index 000000000000..a5f2cdd6445f --- /dev/null +++ b/arch/arm/include/asm/krait-l2-accessors.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASMARM_KRAIT_L2_ACCESSORS_H +#define __ASMARM_KRAIT_L2_ACCESSORS_H + +extern void krait_set_l2_indirect_reg(u32 addr, u32 val); +extern u32 krait_get_l2_indirect_reg(u32 addr); + +#endif diff --git a/arch/arm/mach-davinci/include/mach/clock.h b/arch/arm/mach-davinci/include/mach/clock.h deleted file mode 100644 index 42ed4f2f5ce4..000000000000 --- a/arch/arm/mach-davinci/include/mach/clock.h +++ /dev/null @@ -1,21 +0,0 @@ -/* - * arch/arm/mach-davinci/include/mach/clock.h - * - * Clock control driver for DaVinci - header file - * - * Authors: Vladimir Barinov <source@mvista.com> - * - * 2007 (c) MontaVista Software, Inc. This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. - */ -#ifndef __ASM_ARCH_DAVINCI_CLOCK_H -#define __ASM_ARCH_DAVINCI_CLOCK_H - -struct clk; - -int davinci_clk_reset_assert(struct clk *c); -int davinci_clk_reset_deassert(struct clk *c); - -#endif diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c index af318d958fd2..3d191fd52910 100644 --- a/arch/arm/mach-omap1/board-ams-delta.c +++ b/arch/arm/mach-omap1/board-ams-delta.c @@ -773,7 +773,7 @@ static struct plat_serial8250_port ams_delta_modem_ports[] = { { .membase = IOMEM(MODEM_VIRT), .mapbase = MODEM_PHYS, - .irq = -EINVAL, /* changed later */ + .irq = IRQ_NOTCONNECTED, /* changed later */ .flags = UPF_BOOT_AUTOCONF, .irqflags = IRQF_TRIGGER_RISING, .iotype = UPIO_MEM, @@ -864,8 +864,7 @@ static int __init modem_nreset_init(void) /* - * This function expects MODEM IRQ number already assigned to the port - * and fails if it's not. + * This function expects MODEM IRQ number already assigned to the port. * The MODEM device requires its RESET# pin kept high during probe. 
* That requirement can be fulfilled in several ways: * - with a descriptor of already functional modem_nreset regulator @@ -888,9 +887,6 @@ static int __init ams_delta_modem_init(void) if (!machine_is_ams_delta()) return -ENODEV; - if (ams_delta_modem_ports[0].irq < 0) - return ams_delta_modem_ports[0].irq; - omap_cfg_reg(M14_1510_GPIO2); /* Initialize the modem_nreset regulator consumer before use */ diff --git a/arch/arm/plat-orion/mpp.c b/arch/arm/plat-orion/mpp.c index 5b4ff9373c89..8a6880d528b6 100644 --- a/arch/arm/plat-orion/mpp.c +++ b/arch/arm/plat-orion/mpp.c @@ -28,10 +28,15 @@ void __init orion_mpp_conf(unsigned int *mpp_list, unsigned int variant_mask, unsigned int mpp_max, void __iomem *dev_bus) { unsigned int mpp_nr_regs = (1 + mpp_max/8); - u32 mpp_ctrl[mpp_nr_regs]; + u32 mpp_ctrl[8]; int i; printk(KERN_DEBUG "initial MPP regs:"); + if (mpp_nr_regs > ARRAY_SIZE(mpp_ctrl)) { + printk(KERN_ERR "orion_mpp_conf: invalid mpp_max\n"); + return; + } + for (i = 0; i < mpp_nr_regs; i++) { mpp_ctrl[i] = readl(mpp_ctrl_addr(i, dev_bus)); printk(" %08x", mpp_ctrl[i]); diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index b4e994cd3a42..6cb9fc7e9382 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -134,6 +134,7 @@ vdso_install: archclean: $(Q)$(MAKE) $(clean)=$(boot) +ifeq ($(KBUILD_EXTMOD),) # We need to generate vdso-offsets.h before compiling certain files in kernel/. # In order to do that, we should use the archprepare target, but we can't since # asm-offsets.h is included in some files used to generate vdso-offsets.h, and @@ -143,6 +144,7 @@ archclean: prepare: vdso_prepare vdso_prepare: prepare0 $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h +endif define archhelp echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)' diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 3cb995606e60..c9a57d11330b 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -308,6 +308,9 @@ CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y CONFIG_SERIAL_MVEBU_UART=y CONFIG_SERIAL_DEV_BUS=y CONFIG_VIRTIO_CONSOLE=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m CONFIG_TCG_TPM=y CONFIG_TCG_TIS_I2C_INFINEON=y CONFIG_I2C_CHARDEV=y diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 9234013e759e..21a81b59a0cc 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h @@ -96,6 +96,7 @@ static inline unsigned long __percpu_##op(void *ptr, \ : [val] "Ir" (val)); \ break; \ default: \ + ret = 0; \ BUILD_BUG(); \ } \ \ @@ -125,6 +126,7 @@ static inline unsigned long __percpu_read(void *ptr, int size) ret = READ_ONCE(*(u64 *)ptr); break; default: + ret = 0; BUILD_BUG(); } @@ -194,6 +196,7 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val, : [val] "r" (val)); break; default: + ret = 0; BUILD_BUG(); } diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c index f46d57c31443..6b5037ed15b2 100644 --- a/arch/arm64/kernel/crash_dump.c +++ b/arch/arm64/kernel/crash_dump.c @@ -58,7 +58,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, /** * elfcorehdr_read - read from ELF core header * @buf: buffer where the data is placed - * @csize: number of bytes to read + * @count: number of bytes to read * @ppos: address in the memory * * This function reads @count bytes from elf core header which exists diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c 
index 9b65132e789a..2a5b338b2542 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -23,7 +23,9 @@ #include <linux/slab.h> #include <linux/stop_machine.h> #include <linux/sched/debug.h> +#include <linux/set_memory.h> #include <linux/stringify.h> +#include <linux/vmalloc.h> #include <asm/traps.h> #include <asm/ptrace.h> #include <asm/cacheflush.h> @@ -42,10 +44,21 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); static void __kprobes post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *); +static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode) +{ + void *addrs[1]; + u32 insns[1]; + + addrs[0] = addr; + insns[0] = opcode; + + return aarch64_insn_patch_text(addrs, insns, 1); +} + static void __kprobes arch_prepare_ss_slot(struct kprobe *p) { /* prepare insn slot */ - p->ainsn.api.insn[0] = cpu_to_le32(p->opcode); + patch_text(p->ainsn.api.insn, p->opcode); flush_icache_range((uintptr_t) (p->ainsn.api.insn), (uintptr_t) (p->ainsn.api.insn) + @@ -118,15 +131,15 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) return 0; } -static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode) +void *alloc_insn_page(void) { - void *addrs[1]; - u32 insns[1]; + void *page; - addrs[0] = (void *)addr; - insns[0] = (u32)opcode; + page = vmalloc_exec(PAGE_SIZE); + if (page) + set_memory_ro((unsigned long)page, 1); - return aarch64_insn_patch_text(addrs, insns, 1); + return page; } /* arm kprobe: install breakpoint in text */ diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index ce99c58cd1f1..d9a4c2d6dd8b 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -497,25 +497,3 @@ void arch_setup_new_exec(void) { current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0; } - -#ifdef CONFIG_GCC_PLUGIN_STACKLEAK -void __used stackleak_check_alloca(unsigned long size) -{ - unsigned long stack_left; - unsigned long current_sp = current_stack_pointer; - struct stack_info info; - - BUG_ON(!on_accessible_stack(current, current_sp, &info)); - - stack_left = current_sp - info.low; - - /* - * There's a good chance we're almost out of stack space if this - * is true. Using panic() over BUG() is more likely to give - * reliable debugging output. 
- */ - if (size >= stack_left) - panic("alloca() over the kernel stack boundary\n"); -} -EXPORT_SYMBOL(stackleak_check_alloca); -#endif diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 3a703e5d4e32..a3ac26284845 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -160,6 +160,7 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, __dma_unmap_area(phys_to_virt(paddr), size, dir); } +#ifdef CONFIG_IOMMU_DMA static int __swiotlb_get_sgtable_page(struct sg_table *sgt, struct page *page, size_t size) { @@ -188,6 +189,7 @@ static int __swiotlb_mmap_pfn(struct vm_area_struct *vma, return ret; } +#endif /* CONFIG_IOMMU_DMA */ static int __init atomic_pool_init(void) { diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c index 2e1c0ea22eb0..e9d6824ae94d 100644 --- a/arch/c6x/kernel/setup.c +++ b/arch/c6x/kernel/setup.c @@ -290,7 +290,6 @@ notrace void __init machine_init(unsigned long dt_ptr) void __init setup_arch(char **cmdline_p) { - int bootmap_size; struct memblock_region *reg; printk(KERN_INFO "Initializing kernel\n"); @@ -347,16 +346,6 @@ void __init setup_arch(char **cmdline_p) init_mm.end_data = memory_start; init_mm.brk = memory_start; - /* - * Give all the memory to the bootmap allocator, tell it to put the - * boot mem_map at the start of memory - */ - bootmap_size = init_bootmem_node(NODE_DATA(0), - memory_start >> PAGE_SHIFT, - PAGE_OFFSET >> PAGE_SHIFT, - memory_end >> PAGE_SHIFT); - memblock_reserve(memory_start, bootmap_size); - unflatten_and_copy_device_tree(); c6x_cache_init(); @@ -391,22 +380,9 @@ void __init setup_arch(char **cmdline_p) /* Initialize the coherent memory allocator */ coherent_mem_init(dma_start, dma_size); - /* - * Free all memory as a starting point. - */ - free_bootmem(PAGE_OFFSET, memory_end - PAGE_OFFSET); - - /* - * Then reserve memory which is already being used. - */ - for_each_memblock(reserved, reg) { - pr_debug("reserved - 0x%08x-0x%08x\n", - (u32) reg->base, (u32) reg->size); - reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); - } - max_low_pfn = PFN_DOWN(memory_end); min_low_pfn = PFN_UP(memory_start); + max_pfn = max_low_pfn; max_mapnr = max_low_pfn - min_low_pfn; /* Get kmalloc into gear */ diff --git a/arch/csky/Kconfig.debug b/arch/csky/Kconfig.debug index 48cf6ff9df4a..22a162cd99e8 100644 --- a/arch/csky/Kconfig.debug +++ b/arch/csky/Kconfig.debug @@ -1,9 +1 @@ -menu "C-SKY Debug Options" -config CSKY_BUILTIN_DTB - string "Use kernel builtin dtb" - help - User could define the dtb instead of the one which is passed from - bootloader. - Sometimes for debug, we want to use a built-in dtb and then we needn't - modify bootloader at all. 
-endmenu +# dummy file, do not delete diff --git a/arch/csky/Makefile b/arch/csky/Makefile index 67a4ae1fba2b..c639fc167895 100644 --- a/arch/csky/Makefile +++ b/arch/csky/Makefile @@ -65,26 +65,15 @@ libs-y += arch/csky/lib/ \ $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name) boot := arch/csky/boot -ifneq '$(CONFIG_CSKY_BUILTIN_DTB)' '""' core-y += $(boot)/dts/ -endif all: zImage - -dtbs: scripts - $(Q)$(MAKE) $(build)=$(boot)/dts - -%.dtb %.dtb.S %.dtb.o: scripts - $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@ - -zImage Image uImage: vmlinux dtbs +zImage Image uImage: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ archclean: $(Q)$(MAKE) $(clean)=$(boot) - $(Q)$(MAKE) $(clean)=$(boot)/dts - rm -rf arch/csky/include/generated define archhelp echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/zImage)' diff --git a/arch/csky/boot/dts/Makefile b/arch/csky/boot/dts/Makefile index 305e81a5e91e..c57ad3c880bf 100644 --- a/arch/csky/boot/dts/Makefile +++ b/arch/csky/boot/dts/Makefile @@ -1,13 +1,3 @@ dtstree := $(srctree)/$(src) -ifneq '$(CONFIG_CSKY_BUILTIN_DTB)' '""' -builtindtb-y := $(patsubst "%",%,$(CONFIG_CSKY_BUILTIN_DTB)) -dtb-y += $(builtindtb-y).dtb -obj-y += $(builtindtb-y).dtb.o -.SECONDARY: $(obj)/$(builtindtb-y).dtb.S -else dtb-y := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) -endif - -always += $(dtb-y) -clean-files += *.dtb *.dtb.S diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 15a84cfd0719..68410490e12f 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -128,7 +128,7 @@ cflags-y += -ffreestanding # clang's output will be based upon the build machine. So for clang we simply # unconditionally specify -EB or -EL as appropriate. # -ifeq ($(cc-name),clang) +ifdef CONFIG_CC_IS_CLANG cflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -EL else diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index 34605ca21498..58a0315ad743 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -10,7 +10,7 @@ ccflags-vdso := \ $(filter -march=%,$(KBUILD_CFLAGS)) \ -D__VDSO__ -ifeq ($(cc-name),clang) +ifdef CONFIG_CC_IS_CLANG ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS)) endif diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 2d51b2bd4aa1..8be31261aec8 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -930,10 +930,6 @@ config FSL_GTM help Freescale General-purpose Timers support -# Yes MCA RS/6000s exist but Linux-PPC does not currently support any -config MCA - bool - # Platforms that what PCI turned unconditionally just do select PCI # in their config node. Platforms that want to choose at config # time should select PPC_PCI_CHOICE @@ -944,7 +940,6 @@ config PCI bool "PCI support" if PPC_PCI_CHOICE default y if !40x && !CPM2 && !PPC_8xx && !PPC_83xx \ && !PPC_85xx && !PPC_86xx && !GAMECUBE_COMMON - default PCI_QSPAN if PPC_8xx select GENERIC_PCI_IOMAP help Find out whether your system includes a PCI bus. PCI is the name of @@ -958,14 +953,6 @@ config PCI_DOMAINS config PCI_SYSCALL def_bool PCI -config PCI_QSPAN - bool "QSpan PCI" - depends on PPC_8xx - select PPC_I8259 - help - Say Y here if you have a system based on a Motorola 8xx-series - embedded processor with a QSPAN PCI interface, otherwise say N. 
- config PCI_8260 bool depends on PCI && 8260 diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 17be664dafa2..8a2ce14d68d0 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -96,7 +96,7 @@ aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1) aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2 endif -ifneq ($(cc-name),clang) +ifndef CONFIG_CC_IS_CLANG cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align endif @@ -175,7 +175,7 @@ endif # Work around gcc code-gen bugs with -pg / -fno-omit-frame-pointer in gcc <= 4.8 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=44199 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52828 -ifneq ($(cc-name),clang) +ifndef CONFIG_CC_IS_CLANG CC_FLAGS_FTRACE += $(call cc-ifversion, -lt, 0409, -mno-sched-epilog) endif endif diff --git a/arch/powerpc/boot/dts/fsl/t2080rdb.dts b/arch/powerpc/boot/dts/fsl/t2080rdb.dts index 55c0210a771d..092a400740f8 100644 --- a/arch/powerpc/boot/dts/fsl/t2080rdb.dts +++ b/arch/powerpc/boot/dts/fsl/t2080rdb.dts @@ -77,12 +77,12 @@ }; ethernet@f0000 { - phy-handle = <&xg_cs4315_phy1>; + phy-handle = <&xg_cs4315_phy2>; phy-connection-type = "xgmii"; }; ethernet@f2000 { - phy-handle = <&xg_cs4315_phy2>; + phy-handle = <&xg_cs4315_phy1>; phy-connection-type = "xgmii"; }; diff --git a/arch/powerpc/boot/dts/mpc885ads.dts b/arch/powerpc/boot/dts/mpc885ads.dts index 5b037f51741d..3aa300afbbca 100644 --- a/arch/powerpc/boot/dts/mpc885ads.dts +++ b/arch/powerpc/boot/dts/mpc885ads.dts @@ -72,7 +72,7 @@ #address-cells = <1>; #size-cells = <1>; device_type = "soc"; - ranges = <0x0 0xff000000 0x4000>; + ranges = <0x0 0xff000000 0x28000>; bus-frequency = <0>; // Temporary -- will go away once kernel uses ranges for get_immrbase(). @@ -224,6 +224,17 @@ #size-cells = <0>; }; }; + + crypto@20000 { + compatible = "fsl,sec1.2", "fsl,sec1.0"; + reg = <0x20000 0x8000>; + interrupts = <1 1>; + interrupt-parent = <&PIC>; + fsl,num-channels = <1>; + fsl,channel-fifo-len = <24>; + fsl,exec-units-mask = <0x4c>; + fsl,descriptor-types-mask = <0x05000154>; + }; }; chosen { diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h index 31733a95bbd0..3d5acd2b113a 100644 --- a/arch/powerpc/include/asm/code-patching.h +++ b/arch/powerpc/include/asm/code-patching.h @@ -36,6 +36,11 @@ int raw_patch_instruction(unsigned int *addr, unsigned int instr); int patch_instruction_site(s32 *addr, unsigned int instr); int patch_branch_site(s32 *site, unsigned long target, int flags); +static inline unsigned long patch_site_addr(s32 *site) +{ + return (unsigned long)site + *site; +} + int instr_is_relative_branch(unsigned int instr); int instr_is_relative_link_branch(unsigned int instr); int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr); diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h index 4f547752ae79..fa05aa566ece 100644 --- a/arch/powerpc/include/asm/mmu-8xx.h +++ b/arch/powerpc/include/asm/mmu-8xx.h @@ -34,20 +34,12 @@ * respectively NA for All or X for Supervisor and no access for User. * Then we use the APG to say whether accesses are according to Page rules or * "all Supervisor" rules (Access to all) - * We also use the 2nd APG bit for _PAGE_ACCESSED when having SWAP: - * When that bit is not set access is done iaw "all user" - * which means no access iaw page rules. - * Therefore, we define 4 APG groups. 
lsb is _PMD_USER, 2nd is _PAGE_ACCESSED - * 0x => No access => 11 (all accesses performed as user iaw page definition) - * 10 => No user => 01 (all accesses performed according to page definition) - * 11 => User => 00 (all accesses performed as supervisor iaw page definition) + * Therefore, we define 2 APG groups. lsb is _PMD_USER + * 0 => No user => 01 (all accesses performed according to page definition) + * 1 => User => 00 (all accesses performed as supervisor iaw page definition) * We define all 16 groups so that all other bits of APG can take any value */ -#ifdef CONFIG_SWAP -#define MI_APG_INIT 0xf4f4f4f4 -#else #define MI_APG_INIT 0x44444444 -#endif /* The effective page number register. When read, contains the information * about the last instruction TLB miss. When MI_RPN is written, bits in @@ -115,20 +107,12 @@ * Supervisor and no access for user and NA for ALL. * Then we use the APG to say whether accesses are according to Page rules or * "all Supervisor" rules (Access to all) - * We also use the 2nd APG bit for _PAGE_ACCESSED when having SWAP: - * When that bit is not set access is done iaw "all user" - * which means no access iaw page rules. - * Therefore, we define 4 APG groups. lsb is _PMD_USER, 2nd is _PAGE_ACCESSED - * 0x => No access => 11 (all accesses performed as user iaw page definition) - * 10 => No user => 01 (all accesses performed according to page definition) - * 11 => User => 00 (all accesses performed as supervisor iaw page definition) + * Therefore, we define 2 APG groups. lsb is _PMD_USER + * 0 => No user => 01 (all accesses performed according to page definition) + * 1 => User => 00 (all accesses performed as supervisor iaw page definition) * We define all 16 groups so that all other bits of APG can take any value */ -#ifdef CONFIG_SWAP -#define MD_APG_INIT 0xf4f4f4f4 -#else #define MD_APG_INIT 0x44444444 -#endif /* The effective page number register. When read, contains the information * about the last instruction TLB miss. When MD_RPN is written, bits in @@ -180,12 +164,6 @@ */ #define SPRN_M_TW 799 -/* APGs */ -#define M_APG0 0x00000000 -#define M_APG1 0x00000020 -#define M_APG2 0x00000040 -#define M_APG3 0x00000060 - #ifdef CONFIG_PPC_MM_SLICES #include <asm/nohash/32/slice.h> #define SLICE_ARRAY_SIZE (1 << (32 - SLICE_LOW_SHIFT - 1)) @@ -251,6 +229,15 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) BUG(); } +/* patch sites */ +extern s32 patch__itlbmiss_linmem_top; +extern s32 patch__dtlbmiss_linmem_top, patch__dtlbmiss_immr_jmp; +extern s32 patch__fixupdar_linmem_top; + +extern s32 patch__itlbmiss_exit_1, patch__itlbmiss_exit_2; +extern s32 patch__dtlbmiss_exit_1, patch__dtlbmiss_exit_2, patch__dtlbmiss_exit_3; +extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf; + #endif /* !__ASSEMBLY__ */ #if defined(CONFIG_PPC_4K_PAGES) diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index bb38dd67d47d..1b06add4f092 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -5,6 +5,7 @@ #include <linux/spinlock.h> #include <asm/page.h> #include <linux/time.h> +#include <linux/cpumask.h> /* * Definitions for talking to the RTAS on CHRP machines. 
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 134a573a9f2d..3b67b9533c82 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -31,6 +31,7 @@ #include <asm/asm-offsets.h> #include <asm/ptrace.h> #include <asm/export.h> +#include <asm/code-patching-asm.h> #if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000 /* By simply checking Address >= 0x80000000, we know if its a kernel address */ @@ -318,8 +319,8 @@ InstructionTLBMiss: cmpli cr0, r11, PAGE_OFFSET@h #ifndef CONFIG_PIN_TLB_TEXT /* It is assumed that kernel code fits into the first 8M page */ -_ENTRY(ITLBMiss_cmp) - cmpli cr7, r11, (PAGE_OFFSET + 0x0800000)@h +0: cmpli cr7, r11, (PAGE_OFFSET + 0x0800000)@h + patch_site 0b, patch__itlbmiss_linmem_top #endif #endif #endif @@ -353,13 +354,14 @@ _ENTRY(ITLBMiss_cmp) #if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE) mtcr r12 #endif - -#ifdef CONFIG_SWAP - rlwinm r11, r10, 31, _PAGE_ACCESSED >> 1 -#endif /* Load the MI_TWC with the attributes for this "segment." */ mtspr SPRN_MI_TWC, r11 /* Set segment attributes */ +#ifdef CONFIG_SWAP + rlwinm r11, r10, 32-5, _PAGE_PRESENT + and r11, r11, r10 + rlwimi r10, r11, 0, _PAGE_PRESENT +#endif li r11, RPN_PATTERN | 0x200 /* The Linux PTE won't go exactly into the MMU TLB. * Software indicator bits 20 and 23 must be clear. @@ -372,16 +374,17 @@ _ENTRY(ITLBMiss_cmp) mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ /* Restore registers */ -_ENTRY(itlb_miss_exit_1) - mfspr r10, SPRN_SPRG_SCRATCH0 +0: mfspr r10, SPRN_SPRG_SCRATCH0 mfspr r11, SPRN_SPRG_SCRATCH1 #if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE) mfspr r12, SPRN_SPRG_SCRATCH2 #endif rfi + patch_site 0b, patch__itlbmiss_exit_1 + #ifdef CONFIG_PERF_EVENTS -_ENTRY(itlb_miss_perf) - lis r10, (itlb_miss_counter - PAGE_OFFSET)@ha + patch_site 0f, patch__itlbmiss_perf +0: lis r10, (itlb_miss_counter - PAGE_OFFSET)@ha lwz r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10) addi r11, r11, 1 stw r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10) @@ -435,11 +438,11 @@ DataStoreTLBMiss: #ifndef CONFIG_PIN_TLB_IMMR cmpli cr0, r11, VIRT_IMMR_BASE@h #endif -_ENTRY(DTLBMiss_cmp) - cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h +0: cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h + patch_site 0b, patch__dtlbmiss_linmem_top #ifndef CONFIG_PIN_TLB_IMMR -_ENTRY(DTLBMiss_jmp) - beq- DTLBMissIMMR +0: beq- DTLBMissIMMR + patch_site 0b, patch__dtlbmiss_immr_jmp #endif blt cr7, DTLBMissLinear lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha @@ -470,14 +473,22 @@ _ENTRY(DTLBMiss_jmp) * above. */ rlwimi r11, r10, 0, _PAGE_GUARDED -#ifdef CONFIG_SWAP - /* _PAGE_ACCESSED has to be set. We use second APG bit for that, 0 - * on that bit will represent a Non Access group - */ - rlwinm r11, r10, 31, _PAGE_ACCESSED >> 1 -#endif mtspr SPRN_MD_TWC, r11 + /* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set. + * We also need to know if the insn is a load/store, so: + * Clear _PAGE_PRESENT and load that which will + * trap into DTLB Error with store bit set accordinly. + */ + /* PRESENT=0x1, ACCESSED=0x20 + * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5)); + * r10 = (r10 & ~PRESENT) | r11; + */ +#ifdef CONFIG_SWAP + rlwinm r11, r10, 32-5, _PAGE_PRESENT + and r11, r11, r10 + rlwimi r10, r11, 0, _PAGE_PRESENT +#endif /* The Linux PTE won't go exactly into the MMU TLB. * Software indicator bits 24, 25, 26, and 27 must be * set. 
All other Linux PTE bits control the behavior @@ -489,14 +500,16 @@ _ENTRY(DTLBMiss_jmp) /* Restore registers */ mtspr SPRN_DAR, r11 /* Tag DAR */ -_ENTRY(dtlb_miss_exit_1) - mfspr r10, SPRN_SPRG_SCRATCH0 + +0: mfspr r10, SPRN_SPRG_SCRATCH0 mfspr r11, SPRN_SPRG_SCRATCH1 mfspr r12, SPRN_SPRG_SCRATCH2 rfi + patch_site 0b, patch__dtlbmiss_exit_1 + #ifdef CONFIG_PERF_EVENTS -_ENTRY(dtlb_miss_perf) - lis r10, (dtlb_miss_counter - PAGE_OFFSET)@ha + patch_site 0f, patch__dtlbmiss_perf +0: lis r10, (dtlb_miss_counter - PAGE_OFFSET)@ha lwz r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10) addi r11, r11, 1 stw r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10) @@ -637,8 +650,8 @@ InstructionBreakpoint: */ DTLBMissIMMR: mtcr r12 - /* Set 512k byte guarded page and mark it valid and accessed */ - li r10, MD_PS512K | MD_GUARDED | MD_SVALID | M_APG2 + /* Set 512k byte guarded page and mark it valid */ + li r10, MD_PS512K | MD_GUARDED | MD_SVALID mtspr SPRN_MD_TWC, r10 mfspr r10, SPRN_IMMR /* Get current IMMR */ rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */ @@ -648,16 +661,17 @@ DTLBMissIMMR: li r11, RPN_PATTERN mtspr SPRN_DAR, r11 /* Tag DAR */ -_ENTRY(dtlb_miss_exit_2) - mfspr r10, SPRN_SPRG_SCRATCH0 + +0: mfspr r10, SPRN_SPRG_SCRATCH0 mfspr r11, SPRN_SPRG_SCRATCH1 mfspr r12, SPRN_SPRG_SCRATCH2 rfi + patch_site 0b, patch__dtlbmiss_exit_2 DTLBMissLinear: mtcr r12 - /* Set 8M byte page and mark it valid and accessed */ - li r11, MD_PS8MEG | MD_SVALID | M_APG2 + /* Set 8M byte page and mark it valid */ + li r11, MD_PS8MEG | MD_SVALID mtspr SPRN_MD_TWC, r11 rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */ ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ @@ -666,28 +680,29 @@ DTLBMissLinear: li r11, RPN_PATTERN mtspr SPRN_DAR, r11 /* Tag DAR */ -_ENTRY(dtlb_miss_exit_3) - mfspr r10, SPRN_SPRG_SCRATCH0 + +0: mfspr r10, SPRN_SPRG_SCRATCH0 mfspr r11, SPRN_SPRG_SCRATCH1 mfspr r12, SPRN_SPRG_SCRATCH2 rfi + patch_site 0b, patch__dtlbmiss_exit_3 #ifndef CONFIG_PIN_TLB_TEXT ITLBMissLinear: mtcr r12 - /* Set 8M byte page and mark it valid,accessed */ - li r11, MI_PS8MEG | MI_SVALID | M_APG2 + /* Set 8M byte page and mark it valid */ + li r11, MI_PS8MEG | MI_SVALID mtspr SPRN_MI_TWC, r11 rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */ ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ _PAGE_PRESENT mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ -_ENTRY(itlb_miss_exit_2) - mfspr r10, SPRN_SPRG_SCRATCH0 +0: mfspr r10, SPRN_SPRG_SCRATCH0 mfspr r11, SPRN_SPRG_SCRATCH1 mfspr r12, SPRN_SPRG_SCRATCH2 rfi + patch_site 0b, patch__itlbmiss_exit_2 #endif /* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions @@ -705,8 +720,10 @@ FixupDAR:/* Entry point for dcbx workaround. 
*/ mfspr r11, SPRN_M_TW /* Get level 1 table */ blt+ 3f rlwinm r11, r10, 16, 0xfff8 -_ENTRY(FixupDAR_cmp) - cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h + +0: cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h + patch_site 0b, patch__fixupdar_linmem_top + /* create physical page address from effective address */ tophys(r11, r10) blt- cr7, 201f @@ -960,7 +977,7 @@ initial_mmu: ori r8, r8, MI_EVALID /* Mark it valid */ mtspr SPRN_MI_EPN, r8 li r8, MI_PS8MEG /* Set 8M byte page */ - ori r8, r8, MI_SVALID | M_APG2 /* Make it valid, APG 2 */ + ori r8, r8, MI_SVALID /* Make it valid */ mtspr SPRN_MI_TWC, r8 li r8, MI_BOOTINIT /* Create RPN for address 0 */ mtspr SPRN_MI_RPN, r8 /* Store TLB entry */ @@ -987,7 +1004,7 @@ initial_mmu: ori r8, r8, MD_EVALID /* Mark it valid */ mtspr SPRN_MD_EPN, r8 li r8, MD_PS512K | MD_GUARDED /* Set 512k byte page */ - ori r8, r8, MD_SVALID | M_APG2 /* Make it valid and accessed */ + ori r8, r8, MD_SVALID /* Make it valid */ mtspr SPRN_MD_TWC, r8 mr r8, r9 /* Create paddr for TLB */ ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */ diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 4d5322cfad25..96f34730010f 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -590,12 +590,11 @@ void flush_all_to_thread(struct task_struct *tsk) if (tsk->thread.regs) { preempt_disable(); BUG_ON(tsk != current); - save_all(tsk); - #ifdef CONFIG_SPE if (tsk->thread.regs->msr & MSR_SPE) tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); #endif + save_all(tsk); preempt_enable(); } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index bf8def2159c3..d65b961661fb 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2337,8 +2337,7 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu) kvmppc_core_prepare_to_enter(vcpu); return; } - dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC - / tb_ticks_per_sec; + dec_nsec = tb_to_ns(vcpu->arch.dec_expires - now); hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL); vcpu->arch.timer_running = 1; } diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index fa888bfc347e..9f5b8c01c4e1 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c @@ -61,11 +61,10 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) dec_time = vcpu->arch.dec; /* - * Guest timebase ticks at the same frequency as host decrementer. - * So use the host decrementer calculations for decrementer emulation. + * Guest timebase ticks at the same frequency as host timebase. + * So use the host timebase calculations for decrementer emulation. 
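Both KVM hunks here (book3s_hv.c above and emulate.c) drop open-coded conversions in favour of tb_to_ns(). The removed book3s_hv.c line spells out what that conversion is: nanoseconds = timebase ticks * NSEC_PER_SEC / tb_ticks_per_sec. A direct sketch of an equivalent helper (illustration only; the in-tree helper is assumed to avoid both the runtime division and the possible 64-bit overflow of the multiply, typically via a precomputed scale/shift pair):

        #include <linux/types.h>
        #include <linux/math64.h>       /* div64_u64() */
        #include <linux/time64.h>       /* NSEC_PER_SEC */

        extern unsigned long tb_ticks_per_sec;  /* timebase frequency */

        static u64 example_tb_to_ns(u64 ticks)
        {
                /* fine as an illustration; ticks * NSEC_PER_SEC can overflow u64 */
                return div64_u64(ticks * NSEC_PER_SEC, tb_ticks_per_sec);
        }

The emulate.c change additionally drops the round trip through decrementer_clockevent.mult/shift, a ns-to-ticks conversion pair maintained for the host clockevent; since, as the rewritten comment notes, the guest timebase runs at the host timebase frequency, converting via the timebase is the more direct calculation.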
*/ - dec_time = dec_time << decrementer_clockevent.shift; - do_div(dec_time, decrementer_clockevent.mult); + dec_time = tb_to_ns(dec_time); dec_nsec = do_div(dec_time, NSEC_PER_SEC); hrtimer_start(&vcpu->arch.dec_timer, ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL); diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c index 36484a2ef915..01b7f5107c3a 100644 --- a/arch/powerpc/mm/8xx_mmu.c +++ b/arch/powerpc/mm/8xx_mmu.c @@ -13,6 +13,7 @@ */ #include <linux/memblock.h> +#include <linux/mmu_context.h> #include <asm/fixmap.h> #include <asm/code-patching.h> @@ -79,7 +80,7 @@ void __init MMU_init_hw(void) for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) { mtspr(SPRN_MD_CTR, ctr | (i << 8)); mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID); - mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID | M_APG2); + mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID); mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT); addr += LARGE_PAGE_SIZE_8M; mem -= LARGE_PAGE_SIZE_8M; @@ -97,22 +98,13 @@ static void __init mmu_mapin_immr(void) map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG); } -/* Address of instructions to patch */ -#ifndef CONFIG_PIN_TLB_IMMR -extern unsigned int DTLBMiss_jmp; -#endif -extern unsigned int DTLBMiss_cmp, FixupDAR_cmp; -#ifndef CONFIG_PIN_TLB_TEXT -extern unsigned int ITLBMiss_cmp; -#endif - -static void __init mmu_patch_cmp_limit(unsigned int *addr, unsigned long mapped) +static void __init mmu_patch_cmp_limit(s32 *site, unsigned long mapped) { - unsigned int instr = *addr; + unsigned int instr = *(unsigned int *)patch_site_addr(site); instr &= 0xffff0000; instr |= (unsigned long)__va(mapped) >> 16; - patch_instruction(addr, instr); + patch_instruction_site(site, instr); } unsigned long __init mmu_mapin_ram(unsigned long top) @@ -123,17 +115,17 @@ unsigned long __init mmu_mapin_ram(unsigned long top) mapped = 0; mmu_mapin_immr(); #ifndef CONFIG_PIN_TLB_IMMR - patch_instruction(&DTLBMiss_jmp, PPC_INST_NOP); + patch_instruction_site(&patch__dtlbmiss_immr_jmp, PPC_INST_NOP); #endif #ifndef CONFIG_PIN_TLB_TEXT - mmu_patch_cmp_limit(&ITLBMiss_cmp, 0); + mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0); #endif } else { mapped = top & ~(LARGE_PAGE_SIZE_8M - 1); } - mmu_patch_cmp_limit(&DTLBMiss_cmp, mapped); - mmu_patch_cmp_limit(&FixupDAR_cmp, mapped); + mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped); + mmu_patch_cmp_limit(&patch__fixupdar_linmem_top, mapped); /* If the size of RAM is not an exact power of two, we may not * have covered RAM in its entirety with 8 MiB diff --git a/arch/powerpc/perf/8xx-pmu.c b/arch/powerpc/perf/8xx-pmu.c index 6c0020d1c561..e38f74e9e7a4 100644 --- a/arch/powerpc/perf/8xx-pmu.c +++ b/arch/powerpc/perf/8xx-pmu.c @@ -31,9 +31,6 @@ extern unsigned long itlb_miss_counter, dtlb_miss_counter; extern atomic_t instruction_counter; -extern unsigned int itlb_miss_perf, dtlb_miss_perf; -extern unsigned int itlb_miss_exit_1, itlb_miss_exit_2; -extern unsigned int dtlb_miss_exit_1, dtlb_miss_exit_2, dtlb_miss_exit_3; static atomic_t insn_ctr_ref; static atomic_t itlb_miss_ref; @@ -103,22 +100,22 @@ static int mpc8xx_pmu_add(struct perf_event *event, int flags) break; case PERF_8xx_ID_ITLB_LOAD_MISS: if (atomic_inc_return(&itlb_miss_ref) == 1) { - unsigned long target = (unsigned long)&itlb_miss_perf; + unsigned long target = patch_site_addr(&patch__itlbmiss_perf); - patch_branch(&itlb_miss_exit_1, target, 0); + patch_branch_site(&patch__itlbmiss_exit_1, target, 0); #ifndef CONFIG_PIN_TLB_TEXT - patch_branch(&itlb_miss_exit_2, target, 0); + 
patch_branch_site(&patch__itlbmiss_exit_2, target, 0); #endif } val = itlb_miss_counter; break; case PERF_8xx_ID_DTLB_LOAD_MISS: if (atomic_inc_return(&dtlb_miss_ref) == 1) { - unsigned long target = (unsigned long)&dtlb_miss_perf; + unsigned long target = patch_site_addr(&patch__dtlbmiss_perf); - patch_branch(&dtlb_miss_exit_1, target, 0); - patch_branch(&dtlb_miss_exit_2, target, 0); - patch_branch(&dtlb_miss_exit_3, target, 0); + patch_branch_site(&patch__dtlbmiss_exit_1, target, 0); + patch_branch_site(&patch__dtlbmiss_exit_2, target, 0); + patch_branch_site(&patch__dtlbmiss_exit_3, target, 0); } val = dtlb_miss_counter; break; @@ -180,17 +177,17 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags) break; case PERF_8xx_ID_ITLB_LOAD_MISS: if (atomic_dec_return(&itlb_miss_ref) == 0) { - patch_instruction(&itlb_miss_exit_1, insn); + patch_instruction_site(&patch__itlbmiss_exit_1, insn); #ifndef CONFIG_PIN_TLB_TEXT - patch_instruction(&itlb_miss_exit_2, insn); + patch_instruction_site(&patch__itlbmiss_exit_2, insn); #endif } break; case PERF_8xx_ID_DTLB_LOAD_MISS: if (atomic_dec_return(&dtlb_miss_ref) == 0) { - patch_instruction(&dtlb_miss_exit_1, insn); - patch_instruction(&dtlb_miss_exit_2, insn); - patch_instruction(&dtlb_miss_exit_3, insn); + patch_instruction_site(&patch__dtlbmiss_exit_1, insn); + patch_instruction_site(&patch__dtlbmiss_exit_2, insn); + patch_instruction_site(&patch__dtlbmiss_exit_3, insn); } break; } diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig index 2a9d66254ffc..5326ece36120 100644 --- a/arch/powerpc/platforms/40x/Kconfig +++ b/arch/powerpc/platforms/40x/Kconfig @@ -29,6 +29,7 @@ config KILAUEA select 405EX select PPC40x_SIMPLE select PPC4xx_PCI_EXPRESS + select PCI select PCI_MSI select PPC4xx_MSI help diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index f024efd5a4c2..9a85d350b1b6 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig @@ -21,6 +21,7 @@ config BLUESTONE depends on 44x select PPC44x_SIMPLE select APM821xx + select PCI select PCI_MSI select PPC4xx_MSI select PPC4xx_PCI_EXPRESS @@ -200,6 +201,7 @@ config AKEBONO select SWIOTLB select 476FPE select PPC4xx_PCI_EXPRESS + select PCI select PCI_MSI select PPC4xx_HSTA_MSI select I2C diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c index 8bd590af488a..794487313cc8 100644 --- a/arch/powerpc/platforms/pseries/lparcfg.c +++ b/arch/powerpc/platforms/pseries/lparcfg.c @@ -26,6 +26,7 @@ #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/uaccess.h> +#include <linux/hugetlb.h> #include <asm/lppaca.h> #include <asm/hvcall.h> #include <asm/firmware.h> @@ -36,6 +37,7 @@ #include <asm/vio.h> #include <asm/mmu.h> #include <asm/machdep.h> +#include <asm/drmem.h> #include "pseries.h" @@ -433,6 +435,16 @@ static void parse_em_data(struct seq_file *m) seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]); } +static void maxmem_data(struct seq_file *m) +{ + unsigned long maxmem = 0; + + maxmem += drmem_info->n_lmbs * drmem_info->lmb_size; + maxmem += hugetlb_total_pages() * PAGE_SIZE; + + seq_printf(m, "MaxMem=%ld\n", maxmem); +} + static int pseries_lparcfg_data(struct seq_file *m, void *v) { int partition_potential_processors; @@ -491,6 +503,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) seq_printf(m, "slb_size=%d\n", mmu_slb_size); #endif parse_em_data(m); + maxmem_data(m); return 0; } diff --git 
a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile index 69e7fb47bcaa..878f9c1d3615 100644 --- a/arch/powerpc/xmon/Makefile +++ b/arch/powerpc/xmon/Makefile @@ -11,6 +11,12 @@ UBSAN_SANITIZE := n ORIG_CFLAGS := $(KBUILD_CFLAGS) KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS)) +ifdef CONFIG_CC_IS_CLANG +# clang stores addresses on the stack causing the frame size to blow +# out. See https://github.com/ClangBuiltLinux/linux/issues/252 +KBUILD_CFLAGS += -Wframe-larger-than=4096 +endif + ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) obj-y += xmon.o nonstdio.o spr_access.o diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index d86842c21710..55da93f4e818 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -107,7 +107,6 @@ config ARCH_RV32I select GENERIC_LIB_ASHRDI3 select GENERIC_LIB_LSHRDI3 select GENERIC_LIB_UCMPDI2 - select GENERIC_LIB_UMODDI3 config ARCH_RV64I bool "RV64I" diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index 36473d7dbaac..07fa9ea75fea 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -1,6 +1,3 @@ -CONFIG_SMP=y -CONFIG_PCI=y -CONFIG_PCIE_XILINX=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_IKCONFIG=y @@ -11,10 +8,15 @@ CONFIG_CFS_BANDWIDTH=y CONFIG_CGROUP_BPF=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y +CONFIG_CHECKPOINT_RESTORE=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y -CONFIG_CHECKPOINT_RESTORE=y CONFIG_BPF_SYSCALL=y +CONFIG_SMP=y +CONFIG_PCI=y +CONFIG_PCIE_XILINX=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -59,6 +61,7 @@ CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_STORAGE=y CONFIG_USB_UAS=y CONFIG_VIRTIO_MMIO=y +CONFIG_SIFIVE_PLIC=y CONFIG_RAS=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y @@ -72,8 +75,5 @@ CONFIG_NFS_V4=y CONFIG_NFS_V4_1=y CONFIG_NFS_V4_2=y CONFIG_ROOT_NFS=y -# CONFIG_RCU_TRACE is not set CONFIG_CRYPTO_USER_API_HASH=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_SIFIVE_PLIC=y +# CONFIG_RCU_TRACE is not set diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h index a1ef503d616e..697fc23b0d5a 100644 --- a/arch/riscv/include/asm/elf.h +++ b/arch/riscv/include/asm/elf.h @@ -16,9 +16,6 @@ #include <asm/auxvec.h> #include <asm/byteorder.h> -/* TODO: Move definition into include/uapi/linux/elf-em.h */ -#define EM_RISCV 0xF3 - /* * These are used to set parameters in the core dumps. */ diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 5493f3228704..0339087aa652 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -28,7 +28,7 @@ bool has_fpu __read_mostly; void riscv_fill_hwcap(void) { - struct device_node *node; + struct device_node *node = NULL; const char *isa; size_t i; static unsigned long isa2hwcap[256] = {0}; @@ -44,9 +44,11 @@ void riscv_fill_hwcap(void) /* * We don't support running Linux on hertergenous ISA systems. For - * now, we just check the ISA of the first processor. + * now, we just check the ISA of the first "okay" processor. 
*/ - node = of_find_node_by_type(NULL, "cpu"); + while ((node = of_find_node_by_type(node, "cpu"))) + if (riscv_of_processor_hartid(node) >= 0) + break; if (!node) { pr_warning("Unable to find \"cpu\" devicetree entry"); return; diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 67b3e6b3ce5d..47c871394ccb 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -1849,16 +1849,12 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs { u64 saved_fault_address = current_thread_info()->fault_address; u8 saved_fault_code = get_thread_fault_code(); - mm_segment_t old_fs; perf_callchain_store(entry, regs->tpc); if (!current->mm) return; - old_fs = get_fs(); - set_fs(USER_DS); - flushw_user(); pagefault_disable(); @@ -1870,7 +1866,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs pagefault_enable(); - set_fs(old_fs); set_thread_fault_code(saved_fault_code); current_thread_info()->fault_address = saved_fault_address; } diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index bb68c805b891..ff9389a1c9f3 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -47,9 +47,9 @@ sys_call_table32: .word sys_recvfrom, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate /*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_sendto, sys_shutdown .word sys_socketpair, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64 -/*140*/ .word sys_sendfile64, sys_nis_syscall, compat_sys_futex, sys_gettid, compat_sys_getrlimit +/*140*/ .word sys_sendfile64, sys_getpeername, compat_sys_futex, sys_gettid, compat_sys_getrlimit .word compat_sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write -/*150*/ .word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64 +/*150*/ .word sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64 .word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount /*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall .word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys_setxattr diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index 8d80b27502e6..7e524efed584 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c @@ -261,7 +261,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data) if (err == 0) { spin_unlock(&line->lock); return IRQ_NONE; - } else if (err < 0) { + } else if ((err < 0) && (err != -EAGAIN)) { line->head = line->buffer; line->tail = line->buffer; } @@ -284,7 +284,7 @@ int line_setup_irq(int fd, int input, int output, struct line *line, void *data) if (err) return err; if (output) - err = um_request_irq(driver->write_irq, fd, IRQ_NONE, + err = um_request_irq(driver->write_irq, fd, IRQ_WRITE, line_write_interrupt, IRQF_SHARED, driver->write_irq_name, data); return err; diff --git a/arch/um/drivers/port_user.c b/arch/um/drivers/port_user.c index 9a8e1b64c22e..5f56d11b886f 100644 --- a/arch/um/drivers/port_user.c +++ b/arch/um/drivers/port_user.c @@ -168,7 +168,7 @@ int port_connection(int fd, int *socket, int *pid_out) { int new, err; char *argv[] = { "/usr/sbin/in.telnetd", "-L", - "/usr/lib/uml/port-helper", NULL }; + OS_LIB_PATH "/uml/port-helper", NULL }; struct port_pre_exec_data data; new = accept(fd, NULL, 0); diff 
--git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c index 10d8d20eb9ec..046fa9ea0ccc 100644 --- a/arch/um/drivers/vector_kern.c +++ b/arch/um/drivers/vector_kern.c @@ -1118,16 +1118,11 @@ static int vector_net_close(struct net_device *dev) os_close_file(vp->fds->tx_fd); vp->fds->tx_fd = -1; } - if (vp->bpf != NULL) - kfree(vp->bpf); - if (vp->fds->remote_addr != NULL) - kfree(vp->fds->remote_addr); - if (vp->transport_data != NULL) - kfree(vp->transport_data); - if (vp->header_rxbuffer != NULL) - kfree(vp->header_rxbuffer); - if (vp->header_txbuffer != NULL) - kfree(vp->header_txbuffer); + kfree(vp->bpf); + kfree(vp->fds->remote_addr); + kfree(vp->transport_data); + kfree(vp->header_rxbuffer); + kfree(vp->header_txbuffer); if (vp->rx_queue != NULL) destroy_queue(vp->rx_queue); if (vp->tx_queue != NULL) diff --git a/arch/um/drivers/vector_user.c b/arch/um/drivers/vector_user.c index 4d6a78e31089..3d8cdbdb4e66 100644 --- a/arch/um/drivers/vector_user.c +++ b/arch/um/drivers/vector_user.c @@ -267,8 +267,7 @@ cleanup: os_close_file(rxfd); if (txfd >= 0) os_close_file(txfd); - if (result != NULL) - kfree(result); + kfree(result); return NULL; } @@ -434,8 +433,7 @@ cleanup: if (fd >= 0) os_close_file(fd); if (result != NULL) { - if (result->remote_addr != NULL) - kfree(result->remote_addr); + kfree(result->remote_addr); kfree(result); } return NULL; diff --git a/arch/um/include/shared/aio.h b/arch/um/include/shared/aio.h deleted file mode 100644 index 423bae9153f8..000000000000 --- a/arch/um/include/shared/aio.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (C) 2004 Jeff Dike (jdike@karaya.com) - * Licensed under the GPL - */ - -#ifndef AIO_H__ -#define AIO_H__ - -enum aio_type { AIO_READ, AIO_WRITE, AIO_MMAP }; - -struct aio_thread_reply { - void *data; - int err; -}; - -struct aio_context { - int reply_fd; - struct aio_context *next; -}; - -#define INIT_AIO_CONTEXT { .reply_fd = -1, \ - .next = NULL } - -extern int submit_aio(enum aio_type type, int fd, char *buf, int len, - unsigned long long offset, int reply_fd, - struct aio_context *aio); - -#endif diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index 6b7f3827d6e4..8360fa3f676d 100644 --- a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c @@ -244,8 +244,7 @@ static void garbage_collect_irq_entries(void) to_free = NULL; } walk = walk->next; - if (to_free != NULL) - kfree(to_free); + kfree(to_free); } } diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index cced82946042..0e8b6158f224 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -19,7 +19,7 @@ #include <skas.h> /* - * Note this is constrained to return 0, -EFAULT, -EACCESS, -ENOMEM by + * Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by * segv(). 
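The run of uml cleanups above (vector_kern.c, vector_user.c, irq.c) all reduce the same pattern: an "if (ptr != NULL)" guard around kfree(). The guard is redundant because kfree(NULL) is defined to be a no-op (the allocator returns early on a NULL or zero-size pointer), so the unconditional call is equivalent and shorter:

        /* pattern being removed (shape taken from the hunks above) */
        if (vp->bpf != NULL)
                kfree(vp->bpf);

        /* equivalent after the cleanup */
        kfree(vp->bpf);

Note that the destroy_queue() calls in vector_net_close() keep their NULL checks; only kfree()'s documented tolerance of NULL is being relied on here.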
*/ int handle_page_fault(unsigned long address, unsigned long ip, diff --git a/arch/um/os-Linux/Makefile b/arch/um/os-Linux/Makefile index ada473bf6f46..455b500afe97 100644 --- a/arch/um/os-Linux/Makefile +++ b/arch/um/os-Linux/Makefile @@ -6,18 +6,14 @@ # Don't instrument UML-specific code KCOV_INSTRUMENT := n -obj-y = aio.o execvp.o file.o helper.o irq.o main.o mem.o process.o \ +obj-y = execvp.o file.o helper.o irq.o main.o mem.o process.o \ registers.o sigio.o signal.o start_up.o time.o tty.o \ umid.o user_syms.o util.o drivers/ skas/ obj-$(CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA) += elf_aux.o -USER_OBJS := $(user-objs-y) aio.o elf_aux.o execvp.o file.o helper.o irq.o \ +USER_OBJS := $(user-objs-y) elf_aux.o execvp.o file.o helper.o irq.o \ main.o mem.o process.o registers.o sigio.o signal.o start_up.o time.o \ tty.o umid.o util.o -HAVE_AIO_ABI := $(shell [ -r /usr/include/linux/aio_abi.h ] && \ - echo -DHAVE_AIO_ABI ) -CFLAGS_aio.o += $(HAVE_AIO_ABI) - include arch/um/scripts/Makefile.rules diff --git a/arch/um/os-Linux/aio.c b/arch/um/os-Linux/aio.c deleted file mode 100644 index 014eb35fd13b..000000000000 --- a/arch/um/os-Linux/aio.c +++ /dev/null @@ -1,390 +0,0 @@ -/* - * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) - * Licensed under the GPL - */ - -#include <unistd.h> -#include <sched.h> -#include <signal.h> -#include <errno.h> -#include <sys/time.h> -#include <asm/unistd.h> -#include <aio.h> -#include <init.h> -#include <kern_util.h> -#include <os.h> - -struct aio_thread_req { - enum aio_type type; - int io_fd; - unsigned long long offset; - char *buf; - int len; - struct aio_context *aio; -}; - -#if defined(HAVE_AIO_ABI) -#include <linux/aio_abi.h> - -/* - * If we have the headers, we are going to build with AIO enabled. - * If we don't have aio in libc, we define the necessary stubs here. - */ - -#if !defined(HAVE_AIO_LIBC) - -static long io_setup(int n, aio_context_t *ctxp) -{ - return syscall(__NR_io_setup, n, ctxp); -} - -static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp) -{ - return syscall(__NR_io_submit, ctx, nr, iocbpp); -} - -static long io_getevents(aio_context_t ctx_id, long min_nr, long nr, - struct io_event *events, struct timespec *timeout) -{ - return syscall(__NR_io_getevents, ctx_id, min_nr, nr, events, timeout); -} - -#endif - -/* - * The AIO_MMAP cases force the mmapped page into memory here - * rather than in whatever place first touches the data. I used - * to do this by touching the page, but that's delicate because - * gcc is prone to optimizing that away. So, what's done here - * is we read from the descriptor from which the page was - * mapped. The caller is required to pass an offset which is - * inside the page that was mapped. Thus, when the read - * returns, we know that the page is in the page cache, and - * that it now backs the mmapped area. 
- */ - -static int do_aio(aio_context_t ctx, enum aio_type type, int fd, char *buf, - int len, unsigned long long offset, struct aio_context *aio) -{ - struct iocb *iocbp = & ((struct iocb) { - .aio_data = (unsigned long) aio, - .aio_fildes = fd, - .aio_buf = (unsigned long) buf, - .aio_nbytes = len, - .aio_offset = offset - }); - char c; - - switch (type) { - case AIO_READ: - iocbp->aio_lio_opcode = IOCB_CMD_PREAD; - break; - case AIO_WRITE: - iocbp->aio_lio_opcode = IOCB_CMD_PWRITE; - break; - case AIO_MMAP: - iocbp->aio_lio_opcode = IOCB_CMD_PREAD; - iocbp->aio_buf = (unsigned long) &c; - iocbp->aio_nbytes = sizeof(c); - break; - default: - printk(UM_KERN_ERR "Bogus op in do_aio - %d\n", type); - return -EINVAL; - } - - return (io_submit(ctx, 1, &iocbp) > 0) ? 0 : -errno; -} - -/* Initialized in an initcall and unchanged thereafter */ -static aio_context_t ctx = 0; - -static int aio_thread(void *arg) -{ - struct aio_thread_reply reply; - struct io_event event; - int err, n, reply_fd; - - os_fix_helper_signals(); - while (1) { - n = io_getevents(ctx, 1, 1, &event, NULL); - if (n < 0) { - if (errno == EINTR) - continue; - printk(UM_KERN_ERR "aio_thread - io_getevents failed, " - "errno = %d\n", errno); - } - else { - reply = ((struct aio_thread_reply) - { .data = (void *) (long) event.data, - .err = event.res }); - reply_fd = ((struct aio_context *) reply.data)->reply_fd; - err = write(reply_fd, &reply, sizeof(reply)); - if (err != sizeof(reply)) - printk(UM_KERN_ERR "aio_thread - write failed, " - "fd = %d, err = %d\n", reply_fd, errno); - } - } - return 0; -} - -#endif - -static int do_not_aio(struct aio_thread_req *req) -{ - char c; - unsigned long long actual; - int n; - - actual = lseek64(req->io_fd, req->offset, SEEK_SET); - if (actual != req->offset) - return -errno; - - switch (req->type) { - case AIO_READ: - n = read(req->io_fd, req->buf, req->len); - break; - case AIO_WRITE: - n = write(req->io_fd, req->buf, req->len); - break; - case AIO_MMAP: - n = read(req->io_fd, &c, sizeof(c)); - break; - default: - printk(UM_KERN_ERR "do_not_aio - bad request type : %d\n", - req->type); - return -EINVAL; - } - - if (n < 0) - return -errno; - return 0; -} - -/* These are initialized in initcalls and not changed */ -static int aio_req_fd_r = -1; -static int aio_req_fd_w = -1; -static int aio_pid = -1; -static unsigned long aio_stack; - -static int not_aio_thread(void *arg) -{ - struct aio_thread_req req; - struct aio_thread_reply reply; - int err; - - os_fix_helper_signals(); - while (1) { - err = read(aio_req_fd_r, &req, sizeof(req)); - if (err != sizeof(req)) { - if (err < 0) - printk(UM_KERN_ERR "not_aio_thread - " - "read failed, fd = %d, err = %d\n", - aio_req_fd_r, - errno); - else { - printk(UM_KERN_ERR "not_aio_thread - short " - "read, fd = %d, length = %d\n", - aio_req_fd_r, err); - } - continue; - } - err = do_not_aio(&req); - reply = ((struct aio_thread_reply) { .data = req.aio, - .err = err }); - err = write(req.aio->reply_fd, &reply, sizeof(reply)); - if (err != sizeof(reply)) - printk(UM_KERN_ERR "not_aio_thread - write failed, " - "fd = %d, err = %d\n", req.aio->reply_fd, errno); - } - - return 0; -} - -static int init_aio_24(void) -{ - int fds[2], err; - - err = os_pipe(fds, 1, 1); - if (err) - goto out; - - aio_req_fd_w = fds[0]; - aio_req_fd_r = fds[1]; - - err = os_set_fd_block(aio_req_fd_w, 0); - if (err) - goto out_close_pipe; - - err = run_helper_thread(not_aio_thread, NULL, - CLONE_FILES | CLONE_VM, &aio_stack); - if (err < 0) - goto out_close_pipe; - - aio_pid = err; 
- goto out; - -out_close_pipe: - close(fds[0]); - close(fds[1]); - aio_req_fd_w = -1; - aio_req_fd_r = -1; -out: -#ifndef HAVE_AIO_ABI - printk(UM_KERN_INFO "/usr/include/linux/aio_abi.h not present during " - "build\n"); -#endif - printk(UM_KERN_INFO "2.6 host AIO support not used - falling back to " - "I/O thread\n"); - return 0; -} - -#ifdef HAVE_AIO_ABI -#define DEFAULT_24_AIO 0 -static int init_aio_26(void) -{ - int err; - - if (io_setup(256, &ctx)) { - err = -errno; - printk(UM_KERN_ERR "aio_thread failed to initialize context, " - "err = %d\n", errno); - return err; - } - - err = run_helper_thread(aio_thread, NULL, - CLONE_FILES | CLONE_VM, &aio_stack); - if (err < 0) - return err; - - aio_pid = err; - - printk(UM_KERN_INFO "Using 2.6 host AIO\n"); - return 0; -} - -static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len, - unsigned long long offset, struct aio_context *aio) -{ - struct aio_thread_reply reply; - int err; - - err = do_aio(ctx, type, io_fd, buf, len, offset, aio); - if (err) { - reply = ((struct aio_thread_reply) { .data = aio, - .err = err }); - err = write(aio->reply_fd, &reply, sizeof(reply)); - if (err != sizeof(reply)) { - err = -errno; - printk(UM_KERN_ERR "submit_aio_26 - write failed, " - "fd = %d, err = %d\n", aio->reply_fd, -err); - } - else err = 0; - } - - return err; -} - -#else -#define DEFAULT_24_AIO 1 -static int init_aio_26(void) -{ - return -ENOSYS; -} - -static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len, - unsigned long long offset, struct aio_context *aio) -{ - return -ENOSYS; -} -#endif - -/* Initialized in an initcall and unchanged thereafter */ -static int aio_24 = DEFAULT_24_AIO; - -static int __init set_aio_24(char *name, int *add) -{ - aio_24 = 1; - return 0; -} - -__uml_setup("aio=2.4", set_aio_24, -"aio=2.4\n" -" This is used to force UML to use 2.4-style AIO even when 2.6 AIO is\n" -" available. 2.4 AIO is a single thread that handles one request at a\n" -" time, synchronously. 2.6 AIO is a thread which uses the 2.6 AIO \n" -" interface to handle an arbitrary number of pending requests. 2.6 AIO \n" -" is not available in tt mode, on 2.4 hosts, or when UML is built with\n" -" /usr/include/linux/aio_abi.h not available. Many distributions don't\n" -" include aio_abi.h, so you will need to copy it from a kernel tree to\n" -" your /usr/include/linux in order to build an AIO-capable UML\n\n" -); - -static int init_aio(void) -{ - int err; - - if (!aio_24) { - err = init_aio_26(); - if (err && (errno == ENOSYS)) { - printk(UM_KERN_INFO "2.6 AIO not supported on the " - "host - reverting to 2.4 AIO\n"); - aio_24 = 1; - } - else return err; - } - - if (aio_24) - return init_aio_24(); - - return 0; -} - -/* - * The reason for the __initcall/__uml_exitcall asymmetry is that init_aio - * needs to be called when the kernel is running because it calls run_helper, - * which needs get_free_page. exit_aio is a __uml_exitcall because the generic - * kernel does not run __exitcalls on shutdown, and can't because many of them - * break when called outside of module unloading. 
- */ -__initcall(init_aio); - -static void exit_aio(void) -{ - if (aio_pid != -1) { - os_kill_process(aio_pid, 1); - free_stack(aio_stack, 0); - } -} - -__uml_exitcall(exit_aio); - -static int submit_aio_24(enum aio_type type, int io_fd, char *buf, int len, - unsigned long long offset, struct aio_context *aio) -{ - struct aio_thread_req req = { .type = type, - .io_fd = io_fd, - .offset = offset, - .buf = buf, - .len = len, - .aio = aio, - }; - int err; - - err = write(aio_req_fd_w, &req, sizeof(req)); - if (err == sizeof(req)) - err = 0; - else err = -errno; - - return err; -} - -int submit_aio(enum aio_type type, int io_fd, char *buf, int len, - unsigned long long offset, int reply_fd, - struct aio_context *aio) -{ - aio->reply_fd = reply_fd; - if (aio_24) - return submit_aio_24(type, io_fd, buf, len, offset, aio); - else - return submit_aio_26(type, io_fd, buf, len, offset, aio); -} diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c index c94c3bd70ccd..df4a985716eb 100644 --- a/arch/um/os-Linux/skas/process.c +++ b/arch/um/os-Linux/skas/process.c @@ -610,6 +610,11 @@ int start_idle_thread(void *stack, jmp_buf *switch_buf) fatal_sigsegv(); } longjmp(*switch_buf, 1); + + /* unreachable */ + printk(UM_KERN_ERR "impossible long jump!"); + fatal_sigsegv(); + return 0; } void initial_thread_cb_skas(void (*proc)(void *), void *arg) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c51c989c19c0..ba7e3464ee92 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -129,6 +129,7 @@ config X86 select HAVE_ARCH_PREL32_RELOCATIONS select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_THREAD_STRUCT_WHITELIST + select HAVE_ARCH_STACKLEAK select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c index 8f0c4c9fc904..51079fc9298f 100644 --- a/arch/x86/boot/cpucheck.c +++ b/arch/x86/boot/cpucheck.c @@ -113,7 +113,7 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) { int err; - memset(&cpu.flags, 0, sizeof cpu.flags); + memset(&cpu.flags, 0, sizeof(cpu.flags)); cpu.level = 3; if (has_eflag(X86_EFLAGS_AC)) diff --git a/arch/x86/boot/early_serial_console.c b/arch/x86/boot/early_serial_console.c index b25c53527a94..023bf1c3de8b 100644 --- a/arch/x86/boot/early_serial_console.c +++ b/arch/x86/boot/early_serial_console.c @@ -50,7 +50,7 @@ static void parse_earlyprintk(void) int pos = 0; int port = 0; - if (cmdline_find_option("earlyprintk", arg, sizeof arg) > 0) { + if (cmdline_find_option("earlyprintk", arg, sizeof(arg)) > 0) { char *e; if (!strncmp(arg, "serial", 6)) { @@ -124,7 +124,7 @@ static void parse_console_uart8250(void) * console=uart8250,io,0x3f8,115200n8 * need to make sure it is last one console ! 
*/ - if (cmdline_find_option("console", optstr, sizeof optstr) <= 0) + if (cmdline_find_option("console", optstr, sizeof(optstr)) <= 0) return; options = optstr; diff --git a/arch/x86/boot/edd.c b/arch/x86/boot/edd.c index 223e42527077..6c176b6a42ad 100644 --- a/arch/x86/boot/edd.c +++ b/arch/x86/boot/edd.c @@ -76,7 +76,7 @@ static int get_edd_info(u8 devno, struct edd_info *ei) { struct biosregs ireg, oreg; - memset(ei, 0, sizeof *ei); + memset(ei, 0, sizeof(*ei)); /* Check Extensions Present */ @@ -133,7 +133,7 @@ void query_edd(void) struct edd_info ei, *edp; u32 *mbrptr; - if (cmdline_find_option("edd", eddarg, sizeof eddarg) > 0) { + if (cmdline_find_option("edd", eddarg, sizeof(eddarg)) > 0) { if (!strcmp(eddarg, "skipmbr") || !strcmp(eddarg, "skip")) { do_edd = 1; do_mbr = 0; @@ -166,7 +166,7 @@ void query_edd(void) */ if (!get_edd_info(devno, &ei) && boot_params.eddbuf_entries < EDDMAXNR) { - memcpy(edp, &ei, sizeof ei); + memcpy(edp, &ei, sizeof(ei)); edp++; boot_params.eddbuf_entries++; } diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c index 9bcea386db65..73532543d689 100644 --- a/arch/x86/boot/main.c +++ b/arch/x86/boot/main.c @@ -36,8 +36,8 @@ static void copy_boot_params(void) const struct old_cmdline * const oldcmd = (const struct old_cmdline *)OLD_CL_ADDRESS; - BUILD_BUG_ON(sizeof boot_params != 4096); - memcpy(&boot_params.hdr, &hdr, sizeof hdr); + BUILD_BUG_ON(sizeof(boot_params) != 4096); + memcpy(&boot_params.hdr, &hdr, sizeof(hdr)); if (!boot_params.hdr.cmd_line_ptr && oldcmd->cl_magic == OLD_CL_MAGIC) { diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c index d9c28c87e477..7df2b28207be 100644 --- a/arch/x86/boot/memory.c +++ b/arch/x86/boot/memory.c @@ -26,7 +26,7 @@ static int detect_memory_e820(void) initregs(&ireg); ireg.ax = 0xe820; - ireg.cx = sizeof buf; + ireg.cx = sizeof(buf); ireg.edx = SMAP; ireg.di = (size_t)&buf; diff --git a/arch/x86/boot/regs.c b/arch/x86/boot/regs.c index c0fb356a3092..2fe3616ba161 100644 --- a/arch/x86/boot/regs.c +++ b/arch/x86/boot/regs.c @@ -21,7 +21,7 @@ void initregs(struct biosregs *reg) { - memset(reg, 0, sizeof *reg); + memset(reg, 0, sizeof(*reg)); reg->eflags |= X86_EFLAGS_CF; reg->ds = ds(); reg->es = ds(); diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c index ba3e100654db..3ecc11a9c440 100644 --- a/arch/x86/boot/video-vesa.c +++ b/arch/x86/boot/video-vesa.c @@ -62,7 +62,7 @@ static int vesa_probe(void) if (mode & ~0x1ff) continue; - memset(&vminfo, 0, sizeof vminfo); /* Just in case... */ + memset(&vminfo, 0, sizeof(vminfo)); /* Just in case... */ ireg.ax = 0x4f01; ireg.cx = mode; @@ -109,7 +109,7 @@ static int vesa_set_mode(struct mode_info *mode) int is_graphic; u16 vesa_mode = mode->mode - VIDEO_FIRST_VESA; - memset(&vminfo, 0, sizeof vminfo); /* Just in case... */ + memset(&vminfo, 0, sizeof(vminfo)); /* Just in case... */ initregs(&ireg); ireg.ax = 0x4f01; @@ -241,7 +241,7 @@ void vesa_store_edid(void) struct biosregs ireg, oreg; /* Apparently used as a nonsense token... 
*/ - memset(&boot_params.edid_info, 0x13, sizeof boot_params.edid_info); + memset(&boot_params.edid_info, 0x13, sizeof(boot_params.edid_info)); if (vginfo.version < 0x0200) return; /* EDID requires VBE 2.0+ */ diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c index 77780e386e9b..ac89b6624a40 100644 --- a/arch/x86/boot/video.c +++ b/arch/x86/boot/video.c @@ -115,7 +115,7 @@ static unsigned int get_entry(void) } else if ((key >= '0' && key <= '9') || (key >= 'A' && key <= 'Z') || (key >= 'a' && key <= 'z')) { - if (len < sizeof entry_buf) { + if (len < sizeof(entry_buf)) { entry_buf[len++] = key; putchar(key); } diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 708b46a54578..25e5a6bda8c3 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -329,8 +329,22 @@ For 32-bit we have the following conventions - kernel is built with #endif +.macro STACKLEAK_ERASE_NOCLOBBER +#ifdef CONFIG_GCC_PLUGIN_STACKLEAK + PUSH_AND_CLEAR_REGS + call stackleak_erase + POP_REGS +#endif +.endm + #endif /* CONFIG_X86_64 */ +.macro STACKLEAK_ERASE +#ifdef CONFIG_GCC_PLUGIN_STACKLEAK + call stackleak_erase +#endif +.endm + /* * This does 'call enter_from_user_mode' unless we can avoid it based on * kernel config or using the static jump infrastructure. diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 687e47f8a796..d309f30cf7af 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -46,6 +46,8 @@ #include <asm/frame.h> #include <asm/nospec-branch.h> +#include "calling.h" + .section .entry.text, "ax" /* @@ -712,6 +714,7 @@ ENTRY(ret_from_fork) /* When we fork, we trace the syscall return in the child, too. */ movl %esp, %eax call syscall_return_slowpath + STACKLEAK_ERASE jmp restore_all /* kernel thread */ @@ -886,6 +889,8 @@ ENTRY(entry_SYSENTER_32) ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ "jmp .Lsyscall_32_done", X86_FEATURE_XENPV + STACKLEAK_ERASE + /* Opportunistic SYSEXIT */ TRACE_IRQS_ON /* User mode traces as IRQs on. */ @@ -997,6 +1002,8 @@ ENTRY(entry_INT80_32) call do_int80_syscall_32 .Lsyscall_32_done: + STACKLEAK_ERASE + restore_all: TRACE_IRQS_IRET SWITCH_TO_ENTRY_STACK diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 4d7a2d9d44cf..ce25d84023c0 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -266,6 +266,8 @@ syscall_return_via_sysret: * We are on the trampoline stack. All regs except RDI are live. * We can do future final exit work right here. */ + STACKLEAK_ERASE_NOCLOBBER + SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi popq %rdi @@ -625,6 +627,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode) * We are on the trampoline stack. All regs except RDI are live. * We can do future final exit work right here. */ + STACKLEAK_ERASE_NOCLOBBER SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 7d0df78db727..8eaf8952c408 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -261,6 +261,11 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe) /* Opportunistic SYSRET */ sysret32_from_system_call: + /* + * We are not going to return to userspace from the trampoline + * stack. So let's erase the thread stack right now. + */ + STACKLEAK_ERASE TRACE_IRQS_ON /* User mode traces as IRQs on. 
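The calling.h and entry_*.S hunks above wire the STACKLEAK gcc-plugin's erase pass into the x86 return-to-user paths (the x86 Kconfig hunk earlier selects HAVE_ARCH_STACKLEAK to advertise this). STACKLEAK_ERASE expands to a plain call to stackleak_erase(); the _NOCLOBBER variant brackets the call with PUSH_AND_CLEAR_REGS/POP_REGS because on the 64-bit sysret trampolines the user-visible register state is still live at that point, as the surrounding comments note. As a much-simplified model of what the erase step does (a sketch under assumptions, not the kernel/stackleak.c implementation):

        /*
         * On syscall exit, poison every word of the thread stack between the
         * deepest stack-pointer value reached while handling the syscall and
         * the (much shallower) stack pointer of the exit path, so data left
         * behind by this syscall cannot leak into the next one.
         * Stacks grow down, so lowest_used_sp <= exit_sp.
         */
        static void example_stackleak_erase(unsigned long lowest_used_sp,
                                            unsigned long exit_sp,
                                            unsigned long poison)
        {
                unsigned long p;

                for (p = lowest_used_sp; p < exit_sp; p += sizeof(p))
                        *(unsigned long *)p = poison;
        }

The plugin side of the feature instruments function prologues to track that deepest stack pointer; the entry-code hooks added here only decide where the erase runs.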
*/ movq RBX(%rsp), %rbx /* pt_regs->rbx */ movq RBP(%rsp), %rbp /* pt_regs->rbp */ diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 0fb8659b20d8..273c62e81546 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4535,7 +4535,7 @@ __init int intel_pmu_init(void) } } - snprintf(pmu_name_str, sizeof pmu_name_str, "%s", name); + snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name); if (version >= 2 && extra_attr) { x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr, diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h index fab4df16a3c4..22c4dfe65992 100644 --- a/arch/x86/include/asm/compat.h +++ b/arch/x86/include/asm/compat.h @@ -217,11 +217,18 @@ static inline bool in_x32_syscall(void) return false; } -static inline bool in_compat_syscall(void) +static inline bool in_32bit_syscall(void) { return in_ia32_syscall() || in_x32_syscall(); } + +#ifdef CONFIG_COMPAT +static inline bool in_compat_syscall(void) +{ + return in_32bit_syscall(); +} #define in_compat_syscall in_compat_syscall /* override the generic impl */ +#endif struct compat_siginfo; int __copy_siginfo_to_user32(struct compat_siginfo __user *to, diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 89a048c2faec..28c4a502b419 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -331,6 +331,8 @@ #define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ #define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */ +#define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */ +#define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */ /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ #define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index c18ed65287d5..cf350639e76d 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -76,9 +76,7 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name #define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1 static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs) { - if (in_compat_syscall()) - return true; - return false; + return in_32bit_syscall(); } #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */ #endif /* !COMPILE_OFFSETS */ diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index fba54ca23b2a..26942ad63830 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -361,7 +361,6 @@ extern struct paravirt_patch_template pv_ops; __visible extern const char start_##ops##_##name[], end_##ops##_##name[]; \ asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name)) -unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len); unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len); unsigned paravirt_patch_default(u8 type, void *insnbuf, unsigned long addr, unsigned len); @@ -651,7 +650,6 @@ void paravirt_leave_lazy_mmu(void); void paravirt_flush_lazy_mmu(void); void _paravirt_nop(void); -u32 _paravirt_ident_32(u32); u64 _paravirt_ident_64(u64); #define paravirt_nop ((void *)_paravirt_nop) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 323a313947e0..d760611cfc35 100644 --- a/arch/x86/include/asm/tlbflush.h +++ 
b/arch/x86/include/asm/tlbflush.h @@ -453,6 +453,12 @@ static inline void __native_flush_tlb_one_user(unsigned long addr) */ static inline void __flush_tlb_all(void) { + /* + * This is to catch users with enabled preemption and the PGE feature + * and don't trigger the warning in __native_flush_tlb(). + */ + VM_WARN_ON_ONCE(preemptible()); + if (boot_cpu_has(X86_FEATURE_PGE)) { __flush_tlb_global(); } else { diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index cbbd57ae06ee..ffb181f959d2 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1074,7 +1074,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) #endif c->x86_cache_alignment = c->x86_clflush_size; - memset(&c->x86_capability, 0, sizeof c->x86_capability); + memset(&c->x86_capability, 0, sizeof(c->x86_capability)); c->extended_cpuid_level = 0; if (!have_cpuid_p()) @@ -1317,7 +1317,7 @@ static void identify_cpu(struct cpuinfo_x86 *c) c->x86_virt_bits = 32; #endif c->x86_cache_alignment = c->x86_clflush_size; - memset(&c->x86_capability, 0, sizeof c->x86_capability); + memset(&c->x86_capability, 0, sizeof(c->x86_capability)); generic_identify(c); diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 8cb3c02980cf..8c66d2fc8f81 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -2215,7 +2215,7 @@ static int mce_device_create(unsigned int cpu) if (dev) return 0; - dev = kzalloc(sizeof *dev, GFP_KERNEL); + dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->id = cpu; diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index b9bc8a1a584e..2637ff09d6a0 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -666,8 +666,8 @@ static ssize_t pf_show(struct device *dev, } static DEVICE_ATTR_WO(reload); -static DEVICE_ATTR(version, 0400, version_show, NULL); -static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL); +static DEVICE_ATTR(version, 0444, version_show, NULL); +static DEVICE_ATTR(processor_flags, 0444, pf_show, NULL); static struct attribute *mc_default_attrs[] = { &dev_attr_version.attr, diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index e12ee86906c6..86e277f8daf4 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -798,7 +798,7 @@ static void generic_set_all(void) local_irq_restore(flags); /* Use the atomic bitops to update the global mask */ - for (count = 0; count < sizeof mask * 8; ++count) { + for (count = 0; count < sizeof(mask) * 8; ++count) { if (mask & 0x01) set_bit(count, &smp_changes_mask); mask >>= 1; diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 40eee6cc4124..2e173d47b450 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c @@ -174,12 +174,12 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) case MTRRIOC_SET_PAGE_ENTRY: case MTRRIOC_DEL_PAGE_ENTRY: case MTRRIOC_KILL_PAGE_ENTRY: - if (copy_from_user(&sentry, arg, sizeof sentry)) + if (copy_from_user(&sentry, arg, sizeof(sentry))) return -EFAULT; break; case MTRRIOC_GET_ENTRY: case MTRRIOC_GET_PAGE_ENTRY: - if (copy_from_user(&gentry, arg, sizeof gentry)) + if (copy_from_user(&gentry, arg, sizeof(gentry))) return -EFAULT; break; #ifdef CONFIG_COMPAT @@ -332,7 +332,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) switch (cmd) { case MTRRIOC_GET_ENTRY: case 
MTRRIOC_GET_PAGE_ENTRY: - if (copy_to_user(arg, &gentry, sizeof gentry)) + if (copy_to_user(arg, &gentry, sizeof(gentry))) err = -EFAULT; break; #ifdef CONFIG_COMPAT diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 5e801c8c8ce7..374a52fa5296 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -213,8 +213,9 @@ static unsigned int mem32_serial_in(unsigned long addr, int offset) * early_pci_serial_init() * * This function is invoked when the early_printk param starts with "pciserial" - * The rest of the param should be ",B:D.F,baud" where B, D & F describe the - * location of a PCI device that must be a UART device. + * The rest of the param should be "[force],B:D.F,baud", where B, D & F describe + * the location of a PCI device that must be a UART device. "force" is optional + * and overrides the use of an UART device with a wrong PCI class code. */ static __init void early_pci_serial_init(char *s) { @@ -224,17 +225,23 @@ static __init void early_pci_serial_init(char *s) u32 classcode, bar0; u16 cmdreg; char *e; + int force = 0; - - /* - * First, part the param to get the BDF values - */ if (*s == ',') ++s; if (*s == 0) return; + /* Force the use of an UART device with wrong class code */ + if (!strncmp(s, "force,", 6)) { + force = 1; + s += 6; + } + + /* + * Part the param to get the BDF values + */ bus = (u8)simple_strtoul(s, &e, 16); s = e; if (*s != ':') @@ -253,7 +260,7 @@ static __init void early_pci_serial_init(char *s) s++; /* - * Second, find the device from the BDF + * Find the device from the BDF */ cmdreg = read_pci_config(bus, slot, func, PCI_COMMAND); classcode = read_pci_config(bus, slot, func, PCI_CLASS_REVISION); @@ -264,8 +271,10 @@ static __init void early_pci_serial_init(char *s) */ if (((classcode >> 16 != PCI_CLASS_COMMUNICATION_MODEM) && (classcode >> 16 != PCI_CLASS_COMMUNICATION_SERIAL)) || - (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */ - return; + (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */ { + if (!force) + return; + } /* * Determine if it is IO or memory mapped @@ -289,7 +298,7 @@ static __init void early_pci_serial_init(char *s) } /* - * Lastly, initialize the hardware + * Initialize the hardware */ if (*s) { if (strcmp(s, "nocfg") == 0) diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 5dc377dc9d7b..7663a8eb602b 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -385,7 +385,7 @@ static void __init copy_bootdata(char *real_mode_data) */ sme_map_bootdata(real_mode_data); - memcpy(&boot_params, real_mode_data, sizeof boot_params); + memcpy(&boot_params, real_mode_data, sizeof(boot_params)); sanitize_boot_params(&boot_params); cmd_line_ptr = get_cmd_line_ptr(); if (cmd_line_ptr) { diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index ef688804f80d..4588414e2561 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -115,14 +115,14 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg) err = -EBADF; break; } - if (copy_from_user(®s, uregs, sizeof regs)) { + if (copy_from_user(®s, uregs, sizeof(regs))) { err = -EFAULT; break; } err = rdmsr_safe_regs_on_cpu(cpu, regs); if (err) break; - if (copy_to_user(uregs, ®s, sizeof regs)) + if (copy_to_user(uregs, ®s, sizeof(regs))) err = -EFAULT; break; @@ -131,14 +131,14 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg) err = -EBADF; break; } - if (copy_from_user(®s, uregs, sizeof regs)) { + if 
(copy_from_user(®s, uregs, sizeof(regs))) { err = -EFAULT; break; } err = wrmsr_safe_regs_on_cpu(cpu, regs); if (err) break; - if (copy_to_user(uregs, ®s, sizeof regs)) + if (copy_to_user(uregs, ®s, sizeof(regs))) err = -EFAULT; break; diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index e4d4df37922a..c0e0101133f3 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -56,17 +56,6 @@ asm (".pushsection .entry.text, \"ax\"\n" ".type _paravirt_nop, @function\n\t" ".popsection"); -/* identity function, which can be inlined */ -u32 notrace _paravirt_ident_32(u32 x) -{ - return x; -} - -u64 notrace _paravirt_ident_64(u64 x) -{ - return x; -} - void __init default_banner(void) { printk(KERN_INFO "Booting paravirtualized kernel on %s\n", @@ -102,6 +91,12 @@ static unsigned paravirt_patch_call(void *insnbuf, const void *target, } #ifdef CONFIG_PARAVIRT_XXL +/* identity function, which can be inlined */ +u64 notrace _paravirt_ident_64(u64 x) +{ + return x; +} + static unsigned paravirt_patch_jmp(void *insnbuf, const void *target, unsigned long addr, unsigned len) { @@ -146,13 +141,11 @@ unsigned paravirt_patch_default(u8 type, void *insnbuf, else if (opfunc == _paravirt_nop) ret = 0; +#ifdef CONFIG_PARAVIRT_XXL /* identity functions just return their single argument */ - else if (opfunc == _paravirt_ident_32) - ret = paravirt_patch_ident_32(insnbuf, len); else if (opfunc == _paravirt_ident_64) ret = paravirt_patch_ident_64(insnbuf, len); -#ifdef CONFIG_PARAVIRT_XXL else if (type == PARAVIRT_PATCH(cpu.iret) || type == PARAVIRT_PATCH(cpu.usergs_sysret64)) /* If operation requires a jmp, then jmp */ @@ -309,13 +302,8 @@ struct pv_info pv_info = { #endif }; -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE) -/* 32-bit pagetable entries */ -#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32) -#else /* 64-bit pagetable entries */ #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64) -#endif struct paravirt_patch_template pv_ops = { /* Init ops. 
*/ @@ -483,5 +471,5 @@ NOKPROBE_SYMBOL(native_set_debugreg); NOKPROBE_SYMBOL(native_load_idt); #endif -EXPORT_SYMBOL_GPL(pv_ops); +EXPORT_SYMBOL(pv_ops); EXPORT_SYMBOL_GPL(pv_info); diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c index 6368c22fa1fa..de138d3912e4 100644 --- a/arch/x86/kernel/paravirt_patch_32.c +++ b/arch/x86/kernel/paravirt_patch_32.c @@ -10,24 +10,18 @@ DEF_NATIVE(cpu, iret, "iret"); DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax"); DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3"); DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax"); -#endif - -#if defined(CONFIG_PARAVIRT_SPINLOCKS) -DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)"); -DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); -#endif - -unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len) -{ - /* arg in %eax, return in %eax */ - return 0; -} unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len) { /* arg in %edx:%eax, return in %edx:%eax */ return 0; } +#endif + +#if defined(CONFIG_PARAVIRT_SPINLOCKS) +DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)"); +DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); +#endif extern bool pv_is_native_spin_unlock(void); extern bool pv_is_native_vcpu_is_preempted(void); diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c index 7ca9cb726f4d..9d9e04b31077 100644 --- a/arch/x86/kernel/paravirt_patch_64.c +++ b/arch/x86/kernel/paravirt_patch_64.c @@ -15,27 +15,19 @@ DEF_NATIVE(cpu, wbinvd, "wbinvd"); DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq"); DEF_NATIVE(cpu, swapgs, "swapgs"); -#endif - -DEF_NATIVE(, mov32, "mov %edi, %eax"); DEF_NATIVE(, mov64, "mov %rdi, %rax"); -#if defined(CONFIG_PARAVIRT_SPINLOCKS) -DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)"); -DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); -#endif - -unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len) -{ - return paravirt_patch_insns(insnbuf, len, - start__mov32, end__mov32); -} - unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len) { return paravirt_patch_insns(insnbuf, len, start__mov64, end__mov64); } +#endif + +#if defined(CONFIG_PARAVIRT_SPINLOCKS) +DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)"); +DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax"); +#endif extern bool pv_is_native_spin_unlock(void); extern bool pv_is_native_vcpu_is_preempted(void); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 31b4755369f0..0e0b4288a4b2 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -701,10 +701,10 @@ static void __set_personality_x32(void) current->mm->context.ia32_compat = TIF_X32; current->personality &= ~READ_IMPLIES_EXEC; /* - * in_compat_syscall() uses the presence of the x32 syscall bit + * in_32bit_syscall() uses the presence of the x32 syscall bit * flag to determine compat status. The x86 mmap() code relies on * the syscall bitness so set x32 syscall bit right here to make - * in_compat_syscall() work during exec(). + * in_32bit_syscall() work during exec(). * * Pretend to come from a x32 execve. 
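The compat.h hunk earlier introduces in_32bit_syscall() (true for ia32 or x32 syscalls) and turns in_compat_syscall() into a CONFIG_COMPAT-only wrapper around it; the ftrace.h, process_64.c and sys_x86_64.c changes then switch callers whose real question is "does this syscall use 32-bit address-space limits?" over to the new helper. A hypothetical caller, with the decision logic borrowed from the find_start_end() hunk that follows (the function itself is invented for illustration):

        static unsigned long example_mmap_end(unsigned long addr)
        {
                /* ia32 and x32 tasks get the 32-bit task size ... */
                if (in_32bit_syscall())
                        return task_size_32bit();

                /* ... native 64-bit only goes above DEFAULT_MAP_WINDOW when the
                 * caller passes a hint above it */
                return task_size_64bit(addr > DEFAULT_MAP_WINDOW);
        }

in_compat_syscall() keeps its old meaning for genuinely compat-ABI code, but is now only overridden when CONFIG_COMPAT is enabled.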
*/ diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 6a78d4b36a79..f7476ce23b6e 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -105,7 +105,7 @@ out: static void find_start_end(unsigned long addr, unsigned long flags, unsigned long *begin, unsigned long *end) { - if (!in_compat_syscall() && (flags & MAP_32BIT)) { + if (!in_32bit_syscall() && (flags & MAP_32BIT)) { /* This is usually used needed to map code in small model, so it needs to be in the first 31bit. Limit it to that. This means we need to move the @@ -122,7 +122,7 @@ static void find_start_end(unsigned long addr, unsigned long flags, } *begin = get_mmap_base(1); - if (in_compat_syscall()) + if (in_32bit_syscall()) *end = task_size_32bit(); else *end = task_size_64bit(addr > DEFAULT_MAP_WINDOW); @@ -193,7 +193,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, return addr; /* for MAP_32BIT mappings we force the legacy mmap base */ - if (!in_compat_syscall() && (flags & MAP_32BIT)) + if (!in_32bit_syscall() && (flags & MAP_32BIT)) goto bottomup; /* requesting a specific address */ @@ -217,9 +217,10 @@ get_unmapped_area: * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area * in the full address space. * - * !in_compat_syscall() check to avoid high addresses for x32. + * !in_32bit_syscall() check to avoid high addresses for x32 + * (and make it no op on native i386). */ - if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall()) + if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall()) info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW; info.align_mask = 0; diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 8f6dcd88202e..9b7c4ca8f0a7 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -306,7 +306,7 @@ __visible void __noreturn handle_stack_overflow(const char *message, die(message, regs, 0); /* Be absolutely certain we don't return. */ - panic(message); + panic("%s", message); } #endif diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 34edf198708f..78e430f4e15c 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1509,7 +1509,7 @@ static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt, return emulate_gp(ctxt, index << 3 | 0x2); addr = dt.address + index * 8; - return linear_read_system(ctxt, addr, desc, sizeof *desc); + return linear_read_system(ctxt, addr, desc, sizeof(*desc)); } static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, @@ -1522,7 +1522,7 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, struct desc_struct desc; u16 sel; - memset (dt, 0, sizeof *dt); + memset(dt, 0, sizeof(*dt)); if (!ops->get_segment(ctxt, &sel, &desc, &base3, VCPU_SREG_LDTR)) return; @@ -1586,7 +1586,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, if (rc != X86EMUL_CONTINUE) return rc; - return linear_write_system(ctxt, addr, desc, sizeof *desc); + return linear_write_system(ctxt, addr, desc, sizeof(*desc)); } static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, @@ -1604,7 +1604,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 dummy; u32 base3 = 0; - memset(&seg_desc, 0, sizeof seg_desc); + memset(&seg_desc, 0, sizeof(seg_desc)); if (ctxt->mode == X86EMUL_MODE_REAL) { /* set real mode segment descriptor (keep limit etc. 
for @@ -3075,17 +3075,17 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt, int ret; u32 new_tss_base = get_desc_base(new_desc); - ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg); + ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); if (ret != X86EMUL_CONTINUE) return ret; save_state_to_tss16(ctxt, &tss_seg); - ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg); + ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); if (ret != X86EMUL_CONTINUE) return ret; - ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg); + ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg)); if (ret != X86EMUL_CONTINUE) return ret; @@ -3094,7 +3094,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt, ret = linear_write_system(ctxt, new_tss_base, &tss_seg.prev_task_link, - sizeof tss_seg.prev_task_link); + sizeof(tss_seg.prev_task_link)); if (ret != X86EMUL_CONTINUE) return ret; } @@ -3216,7 +3216,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt, u32 eip_offset = offsetof(struct tss_segment_32, eip); u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector); - ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg); + ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); if (ret != X86EMUL_CONTINUE) return ret; @@ -3228,7 +3228,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt, if (ret != X86EMUL_CONTINUE) return ret; - ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg); + ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg)); if (ret != X86EMUL_CONTINUE) return ret; @@ -3237,7 +3237,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt, ret = linear_write_system(ctxt, new_tss_base, &tss_seg.prev_task_link, - sizeof tss_seg.prev_task_link); + sizeof(tss_seg.prev_task_link)); if (ret != X86EMUL_CONTINUE) return ret; } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 3cd227ff807f..89db20f8cb70 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2409,7 +2409,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) r = kvm_apic_state_fixup(vcpu, s, true); if (r) return r; - memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s); + memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s)); recalculate_apic_map(vcpu->kvm); kvm_apic_set_version(vcpu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 66d66d77caee..5cd5647120f2 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2924,7 +2924,7 @@ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, unsigned size; r = -EFAULT; - if (copy_from_user(&msrs, user_msrs, sizeof msrs)) + if (copy_from_user(&msrs, user_msrs, sizeof(msrs))) goto out; r = -E2BIG; @@ -3091,11 +3091,11 @@ long kvm_arch_dev_ioctl(struct file *filp, unsigned n; r = -EFAULT; - if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list)) + if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) goto out; n = msr_list.nmsrs; msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs; - if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list)) + if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) goto out; r = -E2BIG; if (n < msr_list.nmsrs) @@ -3117,7 +3117,7 @@ long kvm_arch_dev_ioctl(struct file *filp, struct kvm_cpuid2 cpuid; r = -EFAULT; - if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) + if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) goto out; r = 
kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, @@ -3126,7 +3126,7 @@ long kvm_arch_dev_ioctl(struct file *filp, goto out; r = -EFAULT; - if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) + if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) goto out; r = 0; break; @@ -3894,7 +3894,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_interrupt irq; r = -EFAULT; - if (copy_from_user(&irq, argp, sizeof irq)) + if (copy_from_user(&irq, argp, sizeof(irq))) goto out; r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); break; @@ -3912,7 +3912,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_cpuid cpuid; r = -EFAULT; - if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) + if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) goto out; r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); break; @@ -3922,7 +3922,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_cpuid2 cpuid; r = -EFAULT; - if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) + if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) goto out; r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, cpuid_arg->entries); @@ -3933,14 +3933,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_cpuid2 cpuid; r = -EFAULT; - if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) + if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) goto out; r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, cpuid_arg->entries); if (r) goto out; r = -EFAULT; - if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) + if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) goto out; r = 0; break; @@ -3961,13 +3961,13 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_tpr_access_ctl tac; r = -EFAULT; - if (copy_from_user(&tac, argp, sizeof tac)) + if (copy_from_user(&tac, argp, sizeof(tac))) goto out; r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); if (r) goto out; r = -EFAULT; - if (copy_to_user(argp, &tac, sizeof tac)) + if (copy_to_user(argp, &tac, sizeof(tac))) goto out; r = 0; break; @@ -3980,7 +3980,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, if (!lapic_in_kernel(vcpu)) goto out; r = -EFAULT; - if (copy_from_user(&va, argp, sizeof va)) + if (copy_from_user(&va, argp, sizeof(va))) goto out; idx = srcu_read_lock(&vcpu->kvm->srcu); r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); @@ -3991,7 +3991,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, u64 mcg_cap; r = -EFAULT; - if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap)) + if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap))) goto out; r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); break; @@ -4000,7 +4000,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_x86_mce mce; r = -EFAULT; - if (copy_from_user(&mce, argp, sizeof mce)) + if (copy_from_user(&mce, argp, sizeof(mce))) goto out; r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); break; @@ -4536,7 +4536,7 @@ long kvm_arch_vm_ioctl(struct file *filp, if (kvm->created_vcpus) goto set_identity_unlock; r = -EFAULT; - if (copy_from_user(&ident_addr, argp, sizeof ident_addr)) + if (copy_from_user(&ident_addr, argp, sizeof(ident_addr))) goto set_identity_unlock; r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); set_identity_unlock: @@ -4620,7 +4620,7 @@ set_identity_unlock: if (r) goto get_irqchip_out; r = -EFAULT; - if (copy_to_user(argp, chip, sizeof *chip)) + if (copy_to_user(argp, chip, sizeof(*chip))) goto get_irqchip_out; r = 0; get_irqchip_out: @@ -4666,7 +4666,7 @@ set_identity_unlock: } case KVM_SET_PIT: { r = -EFAULT; - if (copy_from_user(&u.ps, argp, sizeof u.ps)) + if (copy_from_user(&u.ps, argp, sizeof(u.ps))) 
goto out; r = -ENXIO; if (!kvm->arch.vpit) @@ -8205,7 +8205,7 @@ static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) sregs->efer = vcpu->arch.efer; sregs->apic_base = kvm_get_apic_base(vcpu); - memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap); + memset(sregs->interrupt_bitmap, 0, sizeof(sregs->interrupt_bitmap)); if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) set_bit(vcpu->arch.interrupt.nr, @@ -8509,7 +8509,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) fpu->last_opcode = fxsave->fop; fpu->last_ip = fxsave->rip; fpu->last_dp = fxsave->rdp; - memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space); + memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space)); vcpu_put(vcpu); return 0; @@ -8530,7 +8530,7 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) fxsave->fop = fpu->last_opcode; fxsave->rip = fpu->last_ip; fxsave->rdp = fpu->last_dp; - memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space); + memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space)); vcpu_put(vcpu); return 0; diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 00b296617ca4..92e4c4b85bba 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -92,7 +92,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area * in the full address space. */ - info.high_limit = in_compat_syscall() ? + info.high_limit = in_32bit_syscall() ? task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW); info.align_mask = PAGE_MASK & ~huge_page_mask(h); @@ -116,7 +116,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area * in the full address space. */ - if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall()) + if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall()) info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW; info.align_mask = PAGE_MASK & ~huge_page_mask(h); diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 1e95d57760cf..db3165714521 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -166,7 +166,7 @@ unsigned long get_mmap_base(int is_legacy) struct mm_struct *mm = current->mm; #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES - if (in_compat_syscall()) { + if (in_32bit_syscall()) { return is_legacy ? mm->mmap_compat_legacy_base : mm->mmap_compat_base; } diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index a80fdd7fb40f..abffa0be80da 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c @@ -399,9 +399,17 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) n = simple_strtoul(emu_cmdline, &emu_cmdline, 0); ret = -1; for_each_node_mask(i, physnode_mask) { + /* + * The reason we pass in blk[0] is due to + * numa_remove_memblk_from() called by + * emu_setup_memblk() will delete entry 0 + * and then move everything else up in the pi.blk + * array. Therefore we should always be looking + * at blk[0]. 
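The comment above notes that numa_remove_memblk_from(), reached via emu_setup_memblk(), deletes entry 0 and shifts the remaining pi.blk entries down, which is why the hunk below indexes blk[0] instead of blk[i]. A minimal userspace sketch of the same consume-from-front pattern (hypothetical types and names, not the kernel's):

#include <stdio.h>
#include <string.h>

struct blk { int start, end; };

/* stands in for numa_remove_memblk_from(): drop entry 0, shift the rest down */
static void remove_front(struct blk *arr, int *n)
{
	(*n)--;
	memmove(&arr[0], &arr[1], *n * sizeof(arr[0]));
}

int main(void)
{
	struct blk arr[] = { { 0, 10 }, { 10, 20 }, { 20, 30 } };
	int n = 3;

	/* each pass removes entry 0, so always consume arr[0];
	 * indexing arr[i] here would skip every other element */
	while (n > 0) {
		printf("consume [%d,%d)\n", arr[0].start, arr[0].end);
		remove_front(arr, &n);
	}
	return 0;
}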
+ */ ret = split_nodes_size_interleave_uniform(&ei, &pi, - pi.blk[i].start, pi.blk[i].end, 0, - n, &pi.blk[i], nid); + pi.blk[0].start, pi.blk[0].end, 0, + n, &pi.blk[0], nid); if (ret < 0) break; if (ret < n) { diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index f799076e3d57..db7a10082238 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -2309,9 +2309,13 @@ void __kernel_map_pages(struct page *page, int numpages, int enable) /* * We should perform an IPI and flush all tlbs, - * but that can deadlock->flush only current cpu: + * but that can deadlock->flush only current cpu. + * Preemption needs to be disabled around __flush_tlb_all() due to + * CR3 reload in __native_flush_tlb(). */ + preempt_disable(); __flush_tlb_all(); + preempt_enable(); arch_flush_lazy_mmu_mode(); } diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index 0b08067c45f3..b629f6992d9f 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -130,7 +130,7 @@ static void regex_init(int use_real_mode) REG_EXTENDED|REG_NOSUB); if (err) { - regerror(err, &sym_regex_c[i], errbuf, sizeof errbuf); + regerror(err, &sym_regex_c[i], errbuf, sizeof(errbuf)); die("%s", errbuf); } } @@ -405,7 +405,7 @@ static void read_shdrs(FILE *fp) } for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; - if (fread(&shdr, sizeof shdr, 1, fp) != 1) + if (fread(&shdr, sizeof(shdr), 1, fp) != 1) die("Cannot read ELF section headers %d/%d: %s\n", i, ehdr.e_shnum, strerror(errno)); sec->shdr.sh_name = elf_word_to_cpu(shdr.sh_name); diff --git a/arch/x86/um/asm/elf.h b/arch/x86/um/asm/elf.h index 413f3519d9a1..c907b20d4993 100644 --- a/arch/x86/um/asm/elf.h +++ b/arch/x86/um/asm/elf.h @@ -194,7 +194,7 @@ extern unsigned long um_vdso_addr; typedef unsigned long elf_greg_t; -#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) +#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t)) typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef struct user_i387_struct elf_fpregset_t; diff --git a/arch/x86/um/shared/sysdep/ptrace_32.h b/arch/x86/um/shared/sysdep/ptrace_32.h index b94a108de1dc..db8478a83a09 100644 --- a/arch/x86/um/shared/sysdep/ptrace_32.h +++ b/arch/x86/um/shared/sysdep/ptrace_32.h @@ -8,22 +8,10 @@ #define MAX_FP_NR HOST_FPX_SIZE -static inline void update_debugregs(int seq) {} - -/* syscall emulation path in ptrace */ - -#ifndef PTRACE_SYSEMU -#define PTRACE_SYSEMU 31 -#endif - void set_using_sysemu(int value); int get_using_sysemu(void); extern int sysemu_supported; -#ifndef PTRACE_SYSEMU_SINGLESTEP -#define PTRACE_SYSEMU_SINGLESTEP 32 -#endif - #define UPT_SYSCALL_ARG1(r) UPT_BX(r) #define UPT_SYSCALL_ARG2(r) UPT_CX(r) #define UPT_SYSCALL_ARG3(r) UPT_DX(r) diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 60c141af222b..d29b7365da8d 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -1,7 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -config ZONE_DMA - def_bool y - config XTENSA def_bool y select ARCH_HAS_SG_CHAIN diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile index dc9e0ba7122c..294846117fc2 100644 --- a/arch/xtensa/boot/Makefile +++ b/arch/xtensa/boot/Makefile @@ -33,7 +33,7 @@ uImage: $(obj)/uImage boot-elf boot-redboot: $(addprefix $(obj)/,$(subdir-y)) $(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS) -OBJCOPYFLAGS = --strip-all -R .comment -R .note.gnu.build-id -O binary +OBJCOPYFLAGS = --strip-all -R .comment -R .notes -O binary vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) diff --git 
a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index b727b18a68ac..b80a430453b1 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -131,6 +131,7 @@ SECTIONS .fixup : { *(.fixup) } EXCEPTION_TABLE(16) + NOTES /* Data section */ _sdata = .; @@ -296,38 +297,11 @@ SECTIONS _end = .; - .xt.lit : { *(.xt.lit) } - .xt.prop : { *(.xt.prop) } - - .debug 0 : { *(.debug) } - .line 0 : { *(.line) } - .debug_srcinfo 0 : { *(.debug_srcinfo) } - .debug_sfnames 0 : { *(.debug_sfnames) } - .debug_aranges 0 : { *(.debug_aranges) } - .debug_pubnames 0 : { *(.debug_pubnames) } - .debug_info 0 : { *(.debug_info) } - .debug_abbrev 0 : { *(.debug_abbrev) } - .debug_line 0 : { *(.debug_line) } - .debug_frame 0 : { *(.debug_frame) } - .debug_str 0 : { *(.debug_str) } - .debug_loc 0 : { *(.debug_loc) } - .debug_macinfo 0 : { *(.debug_macinfo) } - .debug_weaknames 0 : { *(.debug_weaknames) } - .debug_funcnames 0 : { *(.debug_funcnames) } - .debug_typenames 0 : { *(.debug_typenames) } - .debug_varnames 0 : { *(.debug_varnames) } - - .xt.insn 0 : - { - *(.xt.insn) - *(.gnu.linkonce.x*) - } + DWARF_DEBUG - .xt.lit 0 : - { - *(.xt.lit) - *(.gnu.linkonce.p*) - } + .xt.prop 0 : { KEEP(*(.xt.prop .xt.prop.* .gnu.linkonce.prop.*)) } + .xt.insn 0 : { KEEP(*(.xt.insn .xt.insn.* .gnu.linkonce.x*)) } + .xt.lit 0 : { KEEP(*(.xt.lit .xt.lit.* .gnu.linkonce.p*)) } /* Sections to be discarded */ DISCARDS diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 9750a48f491b..30a48bba4a47 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -71,7 +71,7 @@ void __init zones_init(void) { /* All pages are DMA-able, so we put them all in the DMA zone. */ unsigned long zones_size[MAX_NR_ZONES] = { - [ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET, + [ZONE_NORMAL] = max_low_pfn - ARCH_PFN_OFFSET, #ifdef CONFIG_HIGHMEM [ZONE_HIGHMEM] = max_pfn - max_low_pfn, #endif diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index d9a7916ff0ab..9fe5952d117d 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -642,7 +642,7 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) uint64_t serial_nr; rcu_read_lock(); - serial_nr = __bio_blkcg(bio)->css.serial_nr; + serial_nr = bio_blkcg(bio)->css.serial_nr; /* * Check whether blkcg has changed. The condition may trigger @@ -651,7 +651,7 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr)) goto out; - bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio)); + bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio)); /* * Update blkg_path for bfq_log_* functions. 
We cache this * path, and update it here, for the following diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 6075100f03a5..3a27d31fcda6 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -4384,7 +4384,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, rcu_read_lock(); - bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio)); + bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio)); if (!bfqg) { bfqq = &bfqd->oom_bfqq; goto out; diff --git a/block/bio.c b/block/bio.c index bbfeb4ee2892..d5368a445561 100644 --- a/block/bio.c +++ b/block/bio.c @@ -609,9 +609,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src) bio->bi_iter = bio_src->bi_iter; bio->bi_io_vec = bio_src->bi_io_vec; - bio_clone_blkg_association(bio, bio_src); - - blkcg_bio_issue_init(bio); + bio_clone_blkcg_association(bio, bio_src); } EXPORT_SYMBOL(__bio_clone_fast); @@ -1256,7 +1254,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q, /* * success */ - if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) || + if ((iov_iter_rw(iter) == WRITE && (!map_data || !map_data->null_mapped)) || (map_data && map_data->from_user)) { ret = bio_copy_from_iter(bio, iter); if (ret) @@ -1956,151 +1954,69 @@ EXPORT_SYMBOL(bioset_init_from_src); #ifdef CONFIG_BLK_CGROUP -/** - * bio_associate_blkg - associate a bio with the a blkg - * @bio: target bio - * @blkg: the blkg to associate - * - * This tries to associate @bio with the specified blkg. Association failure - * is handled by walking up the blkg tree. Therefore, the blkg associated can - * be anything between @blkg and the root_blkg. This situation only happens - * when a cgroup is dying and then the remaining bios will spill to the closest - * alive blkg. - * - * A reference will be taken on the @blkg and will be released when @bio is - * freed. - */ -int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg) -{ - if (unlikely(bio->bi_blkg)) - return -EBUSY; - bio->bi_blkg = blkg_tryget_closest(blkg); - return 0; -} - -/** - * __bio_associate_blkg_from_css - internal blkg association function - * - * This in the core association function that all association paths rely on. - * A blkg reference is taken which is released upon freeing of the bio. - */ -static int __bio_associate_blkg_from_css(struct bio *bio, - struct cgroup_subsys_state *css) -{ - struct request_queue *q = bio->bi_disk->queue; - struct blkcg_gq *blkg; - int ret; - - rcu_read_lock(); - - if (!css || !css->parent) - blkg = q->root_blkg; - else - blkg = blkg_lookup_create(css_to_blkcg(css), q); - - ret = bio_associate_blkg(bio, blkg); - - rcu_read_unlock(); - return ret; -} - -/** - * bio_associate_blkg_from_css - associate a bio with a specified css - * @bio: target bio - * @css: target css - * - * Associate @bio with the blkg found by combining the css's blkg and the - * request_queue of the @bio. This falls back to the queue's root_blkg if - * the association fails with the css. - */ -int bio_associate_blkg_from_css(struct bio *bio, - struct cgroup_subsys_state *css) -{ - if (unlikely(bio->bi_blkg)) - return -EBUSY; - return __bio_associate_blkg_from_css(bio, css); -} -EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css); - #ifdef CONFIG_MEMCG /** - * bio_associate_blkg_from_page - associate a bio with the page's blkg + * bio_associate_blkcg_from_page - associate a bio with the page's blkcg * @bio: target bio * @page: the page to lookup the blkcg from * - * Associate @bio with the blkg from @page's owning memcg and the respective - * request_queue. 
If cgroup_e_css returns NULL, fall back to the queue's - * root_blkg. - * - * Note: this must be called after bio has an associated device. + * Associate @bio with the blkcg from @page's owning memcg. This works like + * every other associate function wrt references. */ -int bio_associate_blkg_from_page(struct bio *bio, struct page *page) +int bio_associate_blkcg_from_page(struct bio *bio, struct page *page) { - struct cgroup_subsys_state *css; - int ret; + struct cgroup_subsys_state *blkcg_css; - if (unlikely(bio->bi_blkg)) + if (unlikely(bio->bi_css)) return -EBUSY; if (!page->mem_cgroup) return 0; - - rcu_read_lock(); - - css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys); - - ret = __bio_associate_blkg_from_css(bio, css); - - rcu_read_unlock(); - return ret; + blkcg_css = cgroup_get_e_css(page->mem_cgroup->css.cgroup, + &io_cgrp_subsys); + bio->bi_css = blkcg_css; + return 0; } #endif /* CONFIG_MEMCG */ /** - * bio_associate_create_blkg - associate a bio with a blkg from q - * @q: request_queue where bio is going + * bio_associate_blkcg - associate a bio with the specified blkcg * @bio: target bio + * @blkcg_css: css of the blkcg to associate + * + * Associate @bio with the blkcg specified by @blkcg_css. Block layer will + * treat @bio as if it were issued by a task which belongs to the blkcg. * - * Associate @bio with the blkg found from the bio's css and the request_queue. - * If one is not found, bio_lookup_blkg creates the blkg. This falls back to - * the queue's root_blkg if association fails. + * This function takes an extra reference of @blkcg_css which will be put + * when @bio is released. The caller must own @bio and is responsible for + * synchronizing calls to this function. */ -int bio_associate_create_blkg(struct request_queue *q, struct bio *bio) +int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css) { - struct cgroup_subsys_state *css; - int ret = 0; - - /* someone has already associated this bio with a blkg */ - if (bio->bi_blkg) - return ret; - - rcu_read_lock(); - - css = blkcg_css(); - - ret = __bio_associate_blkg_from_css(bio, css); - - rcu_read_unlock(); - return ret; + if (unlikely(bio->bi_css)) + return -EBUSY; + css_get(blkcg_css); + bio->bi_css = blkcg_css; + return 0; } +EXPORT_SYMBOL_GPL(bio_associate_blkcg); /** - * bio_reassociate_blkg - reassociate a bio with a blkg from q - * @q: request_queue where bio is going + * bio_associate_blkg - associate a bio with the specified blkg * @bio: target bio + * @blkg: the blkg to associate * - * When submitting a bio, multiple recursive calls to make_request() may occur. - * This causes the initial associate done in blkcg_bio_issue_check() to be - * incorrect and reference the prior request_queue. This performs reassociation - * when this situation happens. + * Associate @bio with the blkg specified by @blkg. This is the queue specific + * blkcg information associated with the @bio, a reference will be taken on the + * @blkg and will be freed when the bio is freed. 
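As a usage sketch of the restored interface (kernel-side and not standalone; it mirrors the blkcg_iolatency_throttle() hunk later in this patch, so every call below also appears elsewhere in the diff):

/* sketch: associate @bio with the issuer's blkcg and the queue's blkg */
static void example_bio_associate(struct request_queue *q, struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);			/* blkcg of the issuing context */
	bio_associate_blkcg(bio, &blkcg->css);	/* takes a css reference */

	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		spin_unlock_irq(q->queue_lock);
	}
	if (blkg)
		bio_associate_blkg(bio, blkg);	/* takes a blkg reference */
	rcu_read_unlock();
}

Both references are dropped again in bio_disassociate_task(), shown a little further down.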
*/ -int bio_reassociate_blkg(struct request_queue *q, struct bio *bio) +int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg) { - if (bio->bi_blkg) { - blkg_put(bio->bi_blkg); - bio->bi_blkg = NULL; - } - - return bio_associate_create_blkg(q, bio); + if (unlikely(bio->bi_blkg)) + return -EBUSY; + if (!blkg_try_get(blkg)) + return -ENODEV; + bio->bi_blkg = blkg; + return 0; } /** @@ -2113,6 +2029,10 @@ void bio_disassociate_task(struct bio *bio) put_io_context(bio->bi_ioc); bio->bi_ioc = NULL; } + if (bio->bi_css) { + css_put(bio->bi_css); + bio->bi_css = NULL; + } if (bio->bi_blkg) { blkg_put(bio->bi_blkg); bio->bi_blkg = NULL; @@ -2120,16 +2040,16 @@ void bio_disassociate_task(struct bio *bio) } /** - * bio_clone_blkg_association - clone blkg association from src to dst bio + * bio_clone_blkcg_association - clone blkcg association from src to dst bio * @dst: destination bio * @src: source bio */ -void bio_clone_blkg_association(struct bio *dst, struct bio *src) +void bio_clone_blkcg_association(struct bio *dst, struct bio *src) { - if (src->bi_blkg) - bio_associate_blkg(dst, src->bi_blkg); + if (src->bi_css) + WARN_ON(bio_associate_blkcg(dst, src->bi_css)); } -EXPORT_SYMBOL_GPL(bio_clone_blkg_association); +EXPORT_SYMBOL_GPL(bio_clone_blkcg_association); #endif /* CONFIG_BLK_CGROUP */ static void __init biovec_init_slabs(void) diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 992da5592c6e..c630e02836a8 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -84,37 +84,6 @@ static void blkg_free(struct blkcg_gq *blkg) kfree(blkg); } -static void __blkg_release(struct rcu_head *rcu) -{ - struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head); - - percpu_ref_exit(&blkg->refcnt); - - /* release the blkcg and parent blkg refs this blkg has been holding */ - css_put(&blkg->blkcg->css); - if (blkg->parent) - blkg_put(blkg->parent); - - wb_congested_put(blkg->wb_congested); - - blkg_free(blkg); -} - -/* - * A group is RCU protected, but having an rcu lock does not mean that one - * can access all the fields of blkg and assume these are valid. For - * example, don't try to follow throtl_data and request queue links. - * - * Having a reference to blkg under an rcu allows accesses to only values - * local to groups like group stats and group rate limits. 
- */ -static void blkg_release(struct percpu_ref *ref) -{ - struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt); - - call_rcu(&blkg->rcu_head, __blkg_release); -} - /** * blkg_alloc - allocate a blkg * @blkcg: block cgroup the new blkg is associated with @@ -141,6 +110,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, blkg->q = q; INIT_LIST_HEAD(&blkg->q_node); blkg->blkcg = blkcg; + atomic_set(&blkg->refcnt, 1); /* root blkg uses @q->root_rl, init rl only for !root blkgs */ if (blkcg != &blkcg_root) { @@ -247,11 +217,6 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, blkg_get(blkg->parent); } - ret = percpu_ref_init(&blkg->refcnt, blkg_release, 0, - GFP_NOWAIT | __GFP_NOWARN); - if (ret) - goto err_cancel_ref; - /* invoke per-policy init */ for (i = 0; i < BLKCG_MAX_POLS; i++) { struct blkcg_policy *pol = blkcg_policy[i]; @@ -284,8 +249,6 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, blkg_put(blkg); return ERR_PTR(ret); -err_cancel_ref: - percpu_ref_exit(&blkg->refcnt); err_put_congested: wb_congested_put(wb_congested); err_put_css: @@ -296,7 +259,7 @@ err_free_blkg: } /** - * __blkg_lookup_create - lookup blkg, try to create one if not there + * blkg_lookup_create - lookup blkg, try to create one if not there * @blkcg: blkcg of interest * @q: request_queue of interest * @@ -305,11 +268,12 @@ err_free_blkg: * that all non-root blkg's have access to the parent blkg. This function * should be called under RCU read lock and @q->queue_lock. * - * Returns the blkg or the closest blkg if blkg_create fails as it walks - * down from root. + * Returns pointer to the looked up or created blkg on success, ERR_PTR() + * value on error. If @q is dead, returns ERR_PTR(-EINVAL). If @q is not + * dead and bypassing, returns ERR_PTR(-EBUSY). */ -struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, - struct request_queue *q) +struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, + struct request_queue *q) { struct blkcg_gq *blkg; @@ -321,7 +285,7 @@ struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, * we shouldn't allow anything to go through for a bypassing queue. */ if (unlikely(blk_queue_bypass(q))) - return q->root_blkg; + return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY); blkg = __blkg_lookup(blkcg, q, true); if (blkg) @@ -329,58 +293,23 @@ struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, /* * Create blkgs walking down from blkcg_root to @blkcg, so that all - * non-root blkgs have access to their parents. Returns the closest - * blkg to the intended blkg should blkg_create() fail. + * non-root blkgs have access to their parents. */ while (true) { struct blkcg *pos = blkcg; struct blkcg *parent = blkcg_parent(blkcg); - struct blkcg_gq *ret_blkg = q->root_blkg; - - while (parent) { - blkg = __blkg_lookup(parent, q, false); - if (blkg) { - /* remember closest blkg */ - ret_blkg = blkg; - break; - } + + while (parent && !__blkg_lookup(parent, q, false)) { pos = parent; parent = blkcg_parent(parent); } blkg = blkg_create(pos, q, NULL); - if (IS_ERR(blkg)) - return ret_blkg; - if (pos == blkcg) + if (pos == blkcg || IS_ERR(blkg)) return blkg; } } -/** - * blkg_lookup_create - find or create a blkg - * @blkcg: target block cgroup - * @q: target request_queue - * - * This looks up or creates the blkg representing the unique pair - * of the blkcg and the request_queue. 
- */ -struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, - struct request_queue *q) -{ - struct blkcg_gq *blkg = blkg_lookup(blkcg, q); - unsigned long flags; - - if (unlikely(!blkg)) { - spin_lock_irqsave(q->queue_lock, flags); - - blkg = __blkg_lookup_create(blkcg, q); - - spin_unlock_irqrestore(q->queue_lock, flags); - } - - return blkg; -} - static void blkg_destroy(struct blkcg_gq *blkg) { struct blkcg *blkcg = blkg->blkcg; @@ -424,7 +353,7 @@ static void blkg_destroy(struct blkcg_gq *blkg) * Put the reference taken at the time of creation so that when all * queues are gone, group can be destroyed. */ - percpu_ref_kill(&blkg->refcnt); + blkg_put(blkg); } /** @@ -452,6 +381,29 @@ static void blkg_destroy_all(struct request_queue *q) } /* + * A group is RCU protected, but having an rcu lock does not mean that one + * can access all the fields of blkg and assume these are valid. For + * example, don't try to follow throtl_data and request queue links. + * + * Having a reference to blkg under an rcu allows accesses to only values + * local to groups like group stats and group rate limits. + */ +void __blkg_release_rcu(struct rcu_head *rcu_head) +{ + struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head); + + /* release the blkcg and parent blkg refs this blkg has been holding */ + css_put(&blkg->blkcg->css); + if (blkg->parent) + blkg_put(blkg->parent); + + wb_congested_put(blkg->wb_congested); + + blkg_free(blkg); +} +EXPORT_SYMBOL_GPL(__blkg_release_rcu); + +/* * The next function used by blk_queue_for_each_rl(). It's a bit tricky * because the root blkg uses @q->root_rl instead of its own rl. */ @@ -1796,7 +1748,8 @@ void blkcg_maybe_throttle_current(void) blkg = blkg_lookup(blkcg, q); if (!blkg) goto out; - if (!blkg_tryget(blkg)) + blkg = blkg_try_get(blkg); + if (!blkg) goto out; rcu_read_unlock(); diff --git a/block/blk-core.c b/block/blk-core.c index bc6ea87d10e0..ce12515f9b9b 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -785,6 +785,9 @@ void blk_cleanup_queue(struct request_queue *q) * prevent that q->request_fn() gets invoked after draining finished. 
*/ blk_freeze_queue(q); + + rq_qos_exit(q); + spin_lock_irq(lock); queue_flag_set(QUEUE_FLAG_DEAD, q); spin_unlock_irq(lock); @@ -2432,7 +2435,6 @@ blk_qc_t generic_make_request(struct bio *bio) if (q) blk_queue_exit(q); q = bio->bi_disk->queue; - bio_reassociate_blkg(q, bio); flags = 0; if (bio->bi_opf & REQ_NOWAIT) flags = BLK_MQ_REQ_NOWAIT; diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c index 28f80d227528..38c35c32aff2 100644 --- a/block/blk-iolatency.c +++ b/block/blk-iolatency.c @@ -482,12 +482,34 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock) { struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos); - struct blkcg_gq *blkg = bio->bi_blkg; + struct blkcg *blkcg; + struct blkcg_gq *blkg; + struct request_queue *q = rqos->q; bool issue_as_root = bio_issue_as_root_blkg(bio); if (!blk_iolatency_enabled(blkiolat)) return; + rcu_read_lock(); + blkcg = bio_blkcg(bio); + bio_associate_blkcg(bio, &blkcg->css); + blkg = blkg_lookup(blkcg, q); + if (unlikely(!blkg)) { + if (!lock) + spin_lock_irq(q->queue_lock); + blkg = blkg_lookup_create(blkcg, q); + if (IS_ERR(blkg)) + blkg = NULL; + if (!lock) + spin_unlock_irq(q->queue_lock); + } + if (!blkg) + goto out; + + bio_issue_init(&bio->bi_issue, bio_sectors(bio)); + bio_associate_blkg(bio, blkg); +out: + rcu_read_unlock(); while (blkg && blkg->parent) { struct iolatency_grp *iolat = blkg_to_lat(blkg); if (!iolat) { @@ -708,7 +730,7 @@ static void blkiolatency_timer_fn(struct timer_list *t) * We could be exiting, don't access the pd unless we have a * ref on the blkg. */ - if (!blkg_tryget(blkg)) + if (!blkg_try_get(blkg)) continue; iolat = blkg_to_lat(blkg); diff --git a/block/blk-merge.c b/block/blk-merge.c index 42a46744c11b..6b5ad275ed56 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -714,6 +714,31 @@ static void blk_account_io_merge(struct request *req) part_stat_unlock(); } } +/* + * Two cases of handling DISCARD merge: + * If max_discard_segments > 1, the driver takes every bio + * as a range and send them to controller together. The ranges + * needn't to be contiguous. + * Otherwise, the bios/requests will be handled as same as + * others which should be contiguous. + */ +static inline bool blk_discard_mergable(struct request *req) +{ + if (req_op(req) == REQ_OP_DISCARD && + queue_max_discard_segments(req->q) > 1) + return true; + return false; +} + +enum elv_merge blk_try_req_merge(struct request *req, struct request *next) +{ + if (blk_discard_mergable(req)) + return ELEVATOR_DISCARD_MERGE; + else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next)) + return ELEVATOR_BACK_MERGE; + + return ELEVATOR_NO_MERGE; +} /* * For non-mq, this has to be called with the request spinlock acquired. @@ -731,12 +756,6 @@ static struct request *attempt_merge(struct request_queue *q, if (req_op(req) != req_op(next)) return NULL; - /* - * not contiguous - */ - if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next)) - return NULL; - if (rq_data_dir(req) != rq_data_dir(next) || req->rq_disk != next->rq_disk || req_no_special_merge(next)) @@ -760,11 +779,19 @@ static struct request *attempt_merge(struct request_queue *q, * counts here. Handle DISCARDs separately, as they * have separate settings. 
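A standalone illustration of the decision blk_try_req_merge() encodes, using simplified stand-in types rather than the kernel structs: a queue advertising more than one discard segment may merge non-contiguous discard ranges, while everything else still requires the next request to start exactly where the current one ends.

#include <stdbool.h>
#include <stdio.h>

enum merge { NO_MERGE, BACK_MERGE, DISCARD_MERGE };

struct rq { bool discard; unsigned long pos, sectors; };

/* same shape as blk_try_req_merge(); max_discard_segments stands in for
 * queue_max_discard_segments(req->q) */
static enum merge try_merge(const struct rq *req, const struct rq *next,
			    unsigned int max_discard_segments)
{
	if (req->discard && max_discard_segments > 1)
		return DISCARD_MERGE;		/* ranges need not be contiguous */
	if (req->pos + req->sectors == next->pos)
		return BACK_MERGE;		/* contiguous back merge */
	return NO_MERGE;
}

int main(void)
{
	struct rq a = { false, 100, 8 }, b = { false, 108, 8 }, c = { false, 200, 8 };

	printf("%d %d\n", try_merge(&a, &b, 1), try_merge(&a, &c, 1));	/* 1 0 */
	a.discard = c.discard = true;
	printf("%d %d\n", try_merge(&a, &c, 4), try_merge(&a, &c, 1));	/* 2 0 */
	return 0;
}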
*/ - if (req_op(req) == REQ_OP_DISCARD) { + + switch (blk_try_req_merge(req, next)) { + case ELEVATOR_DISCARD_MERGE: if (!req_attempt_discard_merge(q, req, next)) return NULL; - } else if (!ll_merge_requests_fn(q, req, next)) + break; + case ELEVATOR_BACK_MERGE: + if (!ll_merge_requests_fn(q, req, next)) + return NULL; + break; + default: return NULL; + } /* * If failfast settings disagree or any of the two is already @@ -888,8 +915,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) enum elv_merge blk_try_merge(struct request *rq, struct bio *bio) { - if (req_op(rq) == REQ_OP_DISCARD && - queue_max_discard_segments(rq->q) > 1) + if (blk_discard_mergable(rq)) return ELEVATOR_DISCARD_MERGE; else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) return ELEVATOR_BACK_MERGE; diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 0641533597f1..844a454a7b3a 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -1007,8 +1007,6 @@ void blk_unregister_queue(struct gendisk *disk) kobject_del(&q->kobj); blk_trace_remove_sysfs(disk_to_dev(disk)); - rq_qos_exit(q); - mutex_lock(&q->sysfs_lock); if (q->request_fn || (q->mq_ops && q->elevator)) elv_unregister_queue(q); diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 4bda70e8db48..db1a3a2ae006 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -2115,11 +2115,21 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td) } #endif +static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio) +{ +#ifdef CONFIG_BLK_DEV_THROTTLING_LOW + /* fallback to root_blkg if we fail to get a blkg ref */ + if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV)) + bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg); + bio_issue_init(&bio->bi_issue, bio_sectors(bio)); +#endif +} + bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, struct bio *bio) { struct throtl_qnode *qn = NULL; - struct throtl_grp *tg = blkg_to_tg(blkg); + struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg); struct throtl_service_queue *sq; bool rw = bio_data_dir(bio); bool throttled = false; @@ -2138,6 +2148,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, if (unlikely(blk_queue_bypass(q))) goto out_unlock; + blk_throtl_assoc_bio(tg, bio); blk_throtl_update_idletime(tg); sq = &tg->service_queue; diff --git a/block/bounce.c b/block/bounce.c index cf49fe02f65c..36869afc258c 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -276,9 +276,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask, } } - bio_clone_blkg_association(bio, bio_src); - - blkcg_bio_issue_init(bio); + bio_clone_blkcg_association(bio, bio_src); return bio; } diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 6a3d87dd3c1a..ed41aa978c4a 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -3759,7 +3759,7 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) uint64_t serial_nr; rcu_read_lock(); - serial_nr = __bio_blkcg(bio)->css.serial_nr; + serial_nr = bio_blkcg(bio)->css.serial_nr; rcu_read_unlock(); /* @@ -3824,7 +3824,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, struct cfq_group *cfqg; rcu_read_lock(); - cfqg = cfq_lookup_cfqg(cfqd, __bio_blkcg(bio)); + cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio)); if (!cfqg) { cfqq = &cfqd->oom_cfqq; goto out; diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig index f3702e533ff4..be70ca6c85d3 100644 --- 
a/crypto/asymmetric_keys/Kconfig +++ b/crypto/asymmetric_keys/Kconfig @@ -21,6 +21,18 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE appropriate hash algorithms (such as SHA-1) must be available. ENOPKG will be reported if the requisite algorithm is unavailable. +config ASYMMETRIC_TPM_KEY_SUBTYPE + tristate "Asymmetric TPM backed private key subtype" + depends on TCG_TPM + depends on TRUSTED_KEYS + select CRYPTO_HMAC + select CRYPTO_SHA1 + select CRYPTO_HASH_INFO + help + This option provides support for TPM backed private key type handling. + Operations such as sign, verify, encrypt, decrypt are performed by + the TPM after the private key is loaded. + config X509_CERTIFICATE_PARSER tristate "X.509 certificate parser" depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE @@ -31,6 +43,25 @@ config X509_CERTIFICATE_PARSER data and provides the ability to instantiate a crypto key from a public key packet found inside the certificate. +config PKCS8_PRIVATE_KEY_PARSER + tristate "PKCS#8 private key parser" + depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE + select ASN1 + select OID_REGISTRY + help + This option provides support for parsing PKCS#8 format blobs for + private key data and provides the ability to instantiate a crypto key + from that data. + +config TPM_KEY_PARSER + tristate "TPM private key parser" + depends on ASYMMETRIC_TPM_KEY_SUBTYPE + select ASN1 + help + This option provides support for parsing TPM format blobs for + private key data and provides the ability to instantiate a crypto key + from that data. + config PKCS7_MESSAGE_PARSER tristate "PKCS#7 message parser" depends on X509_CERTIFICATE_PARSER diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile index d4b2e1b2dc65..28b91adba2ae 100644 --- a/crypto/asymmetric_keys/Makefile +++ b/crypto/asymmetric_keys/Makefile @@ -11,6 +11,7 @@ asymmetric_keys-y := \ signature.o obj-$(CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE) += public_key.o +obj-$(CONFIG_ASYMMETRIC_TPM_KEY_SUBTYPE) += asym_tpm.o # # X.509 Certificate handling @@ -30,6 +31,19 @@ $(obj)/x509.asn1.o: $(obj)/x509.asn1.c $(obj)/x509.asn1.h $(obj)/x509_akid.asn1.o: $(obj)/x509_akid.asn1.c $(obj)/x509_akid.asn1.h # +# PKCS#8 private key handling +# +obj-$(CONFIG_PKCS8_PRIVATE_KEY_PARSER) += pkcs8_key_parser.o +pkcs8_key_parser-y := \ + pkcs8.asn1.o \ + pkcs8_parser.o + +$(obj)/pkcs8_parser.o: $(obj)/pkcs8.asn1.h +$(obj)/pkcs8-asn1.o: $(obj)/pkcs8.asn1.c $(obj)/pkcs8.asn1.h + +clean-files += pkcs8.asn1.c pkcs8.asn1.h + +# # PKCS#7 message handling # obj-$(CONFIG_PKCS7_MESSAGE_PARSER) += pkcs7_message.o @@ -61,3 +75,14 @@ verify_signed_pefile-y := \ $(obj)/mscode_parser.o: $(obj)/mscode.asn1.h $(obj)/mscode.asn1.h $(obj)/mscode.asn1.o: $(obj)/mscode.asn1.c $(obj)/mscode.asn1.h + +# +# TPM private key parsing +# +obj-$(CONFIG_TPM_KEY_PARSER) += tpm_key_parser.o +tpm_key_parser-y := \ + tpm.asn1.o \ + tpm_parser.o + +$(obj)/tpm_parser.o: $(obj)/tpm.asn1.h +$(obj)/tpm.asn1.o: $(obj)/tpm.asn1.c $(obj)/tpm.asn1.h diff --git a/crypto/asymmetric_keys/asym_tpm.c b/crypto/asymmetric_keys/asym_tpm.c new file mode 100644 index 000000000000..5d4c270463f6 --- /dev/null +++ b/crypto/asymmetric_keys/asym_tpm.c @@ -0,0 +1,988 @@ +// SPDX-License-Identifier: GPL-2.0 +#define pr_fmt(fmt) "ASYM-TPM: "fmt +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/seq_file.h> +#include <linux/scatterlist.h> +#include <linux/tpm.h> +#include <linux/tpm_command.h> +#include <crypto/akcipher.h> +#include <crypto/hash.h> +#include 
<crypto/sha.h> +#include <asm/unaligned.h> +#include <keys/asymmetric-subtype.h> +#include <keys/trusted.h> +#include <crypto/asym_tpm_subtype.h> +#include <crypto/public_key.h> + +#define TPM_ORD_FLUSHSPECIFIC 186 +#define TPM_ORD_LOADKEY2 65 +#define TPM_ORD_UNBIND 30 +#define TPM_ORD_SIGN 60 +#define TPM_LOADKEY2_SIZE 59 +#define TPM_FLUSHSPECIFIC_SIZE 18 +#define TPM_UNBIND_SIZE 63 +#define TPM_SIGN_SIZE 63 + +#define TPM_RT_KEY 0x00000001 + +/* + * Load a TPM key from the blob provided by userspace + */ +static int tpm_loadkey2(struct tpm_buf *tb, + uint32_t keyhandle, unsigned char *keyauth, + const unsigned char *keyblob, int keybloblen, + uint32_t *newhandle) +{ + unsigned char nonceodd[TPM_NONCE_SIZE]; + unsigned char enonce[TPM_NONCE_SIZE]; + unsigned char authdata[SHA1_DIGEST_SIZE]; + uint32_t authhandle = 0; + unsigned char cont = 0; + uint32_t ordinal; + int ret; + + ordinal = htonl(TPM_ORD_LOADKEY2); + + /* session for loading the key */ + ret = oiap(tb, &authhandle, enonce); + if (ret < 0) { + pr_info("oiap failed (%d)\n", ret); + return ret; + } + + /* generate odd nonce */ + ret = tpm_get_random(NULL, nonceodd, TPM_NONCE_SIZE); + if (ret < 0) { + pr_info("tpm_get_random failed (%d)\n", ret); + return ret; + } + + /* calculate authorization HMAC value */ + ret = TSS_authhmac(authdata, keyauth, SHA1_DIGEST_SIZE, enonce, + nonceodd, cont, sizeof(uint32_t), &ordinal, + keybloblen, keyblob, 0, 0); + if (ret < 0) + return ret; + + /* build the request buffer */ + INIT_BUF(tb); + store16(tb, TPM_TAG_RQU_AUTH1_COMMAND); + store32(tb, TPM_LOADKEY2_SIZE + keybloblen); + store32(tb, TPM_ORD_LOADKEY2); + store32(tb, keyhandle); + storebytes(tb, keyblob, keybloblen); + store32(tb, authhandle); + storebytes(tb, nonceodd, TPM_NONCE_SIZE); + store8(tb, cont); + storebytes(tb, authdata, SHA1_DIGEST_SIZE); + + ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE); + if (ret < 0) { + pr_info("authhmac failed (%d)\n", ret); + return ret; + } + + ret = TSS_checkhmac1(tb->data, ordinal, nonceodd, keyauth, + SHA1_DIGEST_SIZE, 0, 0); + if (ret < 0) { + pr_info("TSS_checkhmac1 failed (%d)\n", ret); + return ret; + } + + *newhandle = LOAD32(tb->data, TPM_DATA_OFFSET); + return 0; +} + +/* + * Execute the FlushSpecific TPM command + */ +static int tpm_flushspecific(struct tpm_buf *tb, uint32_t handle) +{ + INIT_BUF(tb); + store16(tb, TPM_TAG_RQU_COMMAND); + store32(tb, TPM_FLUSHSPECIFIC_SIZE); + store32(tb, TPM_ORD_FLUSHSPECIFIC); + store32(tb, handle); + store32(tb, TPM_RT_KEY); + + return trusted_tpm_send(tb->data, MAX_BUF_SIZE); +} + +/* + * Decrypt a blob provided by userspace using a specific key handle. + * The handle is a well known handle or previously loaded by e.g. 
LoadKey2 + */ +static int tpm_unbind(struct tpm_buf *tb, + uint32_t keyhandle, unsigned char *keyauth, + const unsigned char *blob, uint32_t bloblen, + void *out, uint32_t outlen) +{ + unsigned char nonceodd[TPM_NONCE_SIZE]; + unsigned char enonce[TPM_NONCE_SIZE]; + unsigned char authdata[SHA1_DIGEST_SIZE]; + uint32_t authhandle = 0; + unsigned char cont = 0; + uint32_t ordinal; + uint32_t datalen; + int ret; + + ordinal = htonl(TPM_ORD_UNBIND); + datalen = htonl(bloblen); + + /* session for loading the key */ + ret = oiap(tb, &authhandle, enonce); + if (ret < 0) { + pr_info("oiap failed (%d)\n", ret); + return ret; + } + + /* generate odd nonce */ + ret = tpm_get_random(NULL, nonceodd, TPM_NONCE_SIZE); + if (ret < 0) { + pr_info("tpm_get_random failed (%d)\n", ret); + return ret; + } + + /* calculate authorization HMAC value */ + ret = TSS_authhmac(authdata, keyauth, SHA1_DIGEST_SIZE, enonce, + nonceodd, cont, sizeof(uint32_t), &ordinal, + sizeof(uint32_t), &datalen, + bloblen, blob, 0, 0); + if (ret < 0) + return ret; + + /* build the request buffer */ + INIT_BUF(tb); + store16(tb, TPM_TAG_RQU_AUTH1_COMMAND); + store32(tb, TPM_UNBIND_SIZE + bloblen); + store32(tb, TPM_ORD_UNBIND); + store32(tb, keyhandle); + store32(tb, bloblen); + storebytes(tb, blob, bloblen); + store32(tb, authhandle); + storebytes(tb, nonceodd, TPM_NONCE_SIZE); + store8(tb, cont); + storebytes(tb, authdata, SHA1_DIGEST_SIZE); + + ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE); + if (ret < 0) { + pr_info("authhmac failed (%d)\n", ret); + return ret; + } + + datalen = LOAD32(tb->data, TPM_DATA_OFFSET); + + ret = TSS_checkhmac1(tb->data, ordinal, nonceodd, + keyauth, SHA1_DIGEST_SIZE, + sizeof(uint32_t), TPM_DATA_OFFSET, + datalen, TPM_DATA_OFFSET + sizeof(uint32_t), + 0, 0); + if (ret < 0) { + pr_info("TSS_checkhmac1 failed (%d)\n", ret); + return ret; + } + + memcpy(out, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), + min(outlen, datalen)); + + return datalen; +} + +/* + * Sign a blob provided by userspace (that has had the hash function applied) + * using a specific key handle. The handle is assumed to have been previously + * loaded by e.g. LoadKey2. + * + * Note that the key signature scheme of the used key should be set to + * TPM_SS_RSASSAPKCS1v15_DER. This allows the hashed input to be of any size + * up to key_length_in_bytes - 11 and not be limited to size 20 like the + * TPM_SS_RSASSAPKCS1v15_SHA1 signature scheme. 
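The key_length_in_bytes - 11 bound is the standard RSASSA-PKCS1-v1_5 limit (0x00 0x01, at least eight 0xff padding bytes, then 0x00 before the payload), and it is the same check tpm_key_sign() performs further down as in_len > tk->key_len / 8 - 11. A quick arithmetic sketch, assuming a 2048-bit key as an example:

#include <stdio.h>

int main(void)
{
	unsigned int key_bits = 2048;			/* example modulus size */
	unsigned int max_in = key_bits / 8 - 11;	/* PKCS#1 v1.5 payload cap */
	unsigned int sha512_wrapped = 19 + 64;		/* DER prefix + SHA-512 digest */

	/* prints 245 and 83: even the largest DigestInfo fits comfortably */
	printf("max input %u, SHA-512 DigestInfo %u\n", max_in, sha512_wrapped);
	return 0;
}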
+ */ +static int tpm_sign(struct tpm_buf *tb, + uint32_t keyhandle, unsigned char *keyauth, + const unsigned char *blob, uint32_t bloblen, + void *out, uint32_t outlen) +{ + unsigned char nonceodd[TPM_NONCE_SIZE]; + unsigned char enonce[TPM_NONCE_SIZE]; + unsigned char authdata[SHA1_DIGEST_SIZE]; + uint32_t authhandle = 0; + unsigned char cont = 0; + uint32_t ordinal; + uint32_t datalen; + int ret; + + ordinal = htonl(TPM_ORD_SIGN); + datalen = htonl(bloblen); + + /* session for loading the key */ + ret = oiap(tb, &authhandle, enonce); + if (ret < 0) { + pr_info("oiap failed (%d)\n", ret); + return ret; + } + + /* generate odd nonce */ + ret = tpm_get_random(NULL, nonceodd, TPM_NONCE_SIZE); + if (ret < 0) { + pr_info("tpm_get_random failed (%d)\n", ret); + return ret; + } + + /* calculate authorization HMAC value */ + ret = TSS_authhmac(authdata, keyauth, SHA1_DIGEST_SIZE, enonce, + nonceodd, cont, sizeof(uint32_t), &ordinal, + sizeof(uint32_t), &datalen, + bloblen, blob, 0, 0); + if (ret < 0) + return ret; + + /* build the request buffer */ + INIT_BUF(tb); + store16(tb, TPM_TAG_RQU_AUTH1_COMMAND); + store32(tb, TPM_SIGN_SIZE + bloblen); + store32(tb, TPM_ORD_SIGN); + store32(tb, keyhandle); + store32(tb, bloblen); + storebytes(tb, blob, bloblen); + store32(tb, authhandle); + storebytes(tb, nonceodd, TPM_NONCE_SIZE); + store8(tb, cont); + storebytes(tb, authdata, SHA1_DIGEST_SIZE); + + ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE); + if (ret < 0) { + pr_info("authhmac failed (%d)\n", ret); + return ret; + } + + datalen = LOAD32(tb->data, TPM_DATA_OFFSET); + + ret = TSS_checkhmac1(tb->data, ordinal, nonceodd, + keyauth, SHA1_DIGEST_SIZE, + sizeof(uint32_t), TPM_DATA_OFFSET, + datalen, TPM_DATA_OFFSET + sizeof(uint32_t), + 0, 0); + if (ret < 0) { + pr_info("TSS_checkhmac1 failed (%d)\n", ret); + return ret; + } + + memcpy(out, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), + min(datalen, outlen)); + + return datalen; +} +/* + * Maximum buffer size for the BER/DER encoded public key. The public key + * is of the form SEQUENCE { INTEGER n, INTEGER e } where n is a maximum 2048 + * bit key and e is usually 65537 + * The encoding overhead is: + * - max 4 bytes for SEQUENCE + * - max 4 bytes for INTEGER n type/length + * - 257 bytes of n + * - max 2 bytes for INTEGER e type/length + * - 3 bytes of e + */ +#define PUB_KEY_BUF_SIZE (4 + 4 + 257 + 2 + 3) + +/* + * Provide a part of a description of the key for /proc/keys. 
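The PUB_KEY_BUF_SIZE figure above can be checked with a little arithmetic, assuming the worst case the comment describes (2048-bit modulus with a leading 0x00 byte, e = 65537, long-form DER lengths for the large items):

#include <stdio.h>

int main(void)
{
	unsigned int n_content = 2048 / 8 + 1;	/* modulus plus leading 0x00 */
	unsigned int n_hdr = 1 + 3;		/* tag 0x02, length 0x82 xx xx */
	unsigned int e_content = 3;		/* 65537 = 0x01 0x00 0x01 */
	unsigned int e_hdr = 1 + 1;		/* tag 0x02, short-form length */
	unsigned int seq_hdr = 1 + 3;		/* tag 0x30, length 0x82 xx xx */

	/* prints 270, matching (4 + 4 + 257 + 2 + 3) */
	printf("%u\n", seq_hdr + n_hdr + n_content + e_hdr + e_content);
	return 0;
}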
+ */ +static void asym_tpm_describe(const struct key *asymmetric_key, + struct seq_file *m) +{ + struct tpm_key *tk = asymmetric_key->payload.data[asym_crypto]; + + if (!tk) + return; + + seq_printf(m, "TPM1.2/Blob"); +} + +static void asym_tpm_destroy(void *payload0, void *payload3) +{ + struct tpm_key *tk = payload0; + + if (!tk) + return; + + kfree(tk->blob); + tk->blob_len = 0; + + kfree(tk); +} + +/* How many bytes will it take to encode the length */ +static inline uint32_t definite_length(uint32_t len) +{ + if (len <= 127) + return 1; + if (len <= 255) + return 2; + return 3; +} + +static inline uint8_t *encode_tag_length(uint8_t *buf, uint8_t tag, + uint32_t len) +{ + *buf++ = tag; + + if (len <= 127) { + buf[0] = len; + return buf + 1; + } + + if (len <= 255) { + buf[0] = 0x81; + buf[1] = len; + return buf + 2; + } + + buf[0] = 0x82; + put_unaligned_be16(len, buf + 1); + return buf + 3; +} + +static uint32_t derive_pub_key(const void *pub_key, uint32_t len, uint8_t *buf) +{ + uint8_t *cur = buf; + uint32_t n_len = definite_length(len) + 1 + len + 1; + uint32_t e_len = definite_length(3) + 1 + 3; + uint8_t e[3] = { 0x01, 0x00, 0x01 }; + + /* SEQUENCE */ + cur = encode_tag_length(cur, 0x30, n_len + e_len); + /* INTEGER n */ + cur = encode_tag_length(cur, 0x02, len + 1); + cur[0] = 0x00; + memcpy(cur + 1, pub_key, len); + cur += len + 1; + cur = encode_tag_length(cur, 0x02, sizeof(e)); + memcpy(cur, e, sizeof(e)); + cur += sizeof(e); + + return cur - buf; +} + +/* + * Determine the crypto algorithm name. + */ +static int determine_akcipher(const char *encoding, const char *hash_algo, + char alg_name[CRYPTO_MAX_ALG_NAME]) +{ + if (strcmp(encoding, "pkcs1") == 0) { + if (!hash_algo) { + strcpy(alg_name, "pkcs1pad(rsa)"); + return 0; + } + + if (snprintf(alg_name, CRYPTO_MAX_ALG_NAME, "pkcs1pad(rsa,%s)", + hash_algo) >= CRYPTO_MAX_ALG_NAME) + return -EINVAL; + + return 0; + } + + if (strcmp(encoding, "raw") == 0) { + strcpy(alg_name, "rsa"); + return 0; + } + + return -ENOPKG; +} + +/* + * Query information about a key. + */ +static int tpm_key_query(const struct kernel_pkey_params *params, + struct kernel_pkey_query *info) +{ + struct tpm_key *tk = params->key->payload.data[asym_crypto]; + int ret; + char alg_name[CRYPTO_MAX_ALG_NAME]; + struct crypto_akcipher *tfm; + uint8_t der_pub_key[PUB_KEY_BUF_SIZE]; + uint32_t der_pub_key_len; + int len; + + /* TPM only works on private keys, public keys still done in software */ + ret = determine_akcipher(params->encoding, params->hash_algo, alg_name); + if (ret < 0) + return ret; + + tfm = crypto_alloc_akcipher(alg_name, 0, 0); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + der_pub_key_len = derive_pub_key(tk->pub_key, tk->pub_key_len, + der_pub_key); + + ret = crypto_akcipher_set_pub_key(tfm, der_pub_key, der_pub_key_len); + if (ret < 0) + goto error_free_tfm; + + len = crypto_akcipher_maxsize(tfm); + + info->key_size = tk->key_len; + info->max_data_size = tk->key_len / 8; + info->max_sig_size = len; + info->max_enc_size = len; + info->max_dec_size = tk->key_len / 8; + + info->supported_ops = KEYCTL_SUPPORTS_ENCRYPT | + KEYCTL_SUPPORTS_DECRYPT | + KEYCTL_SUPPORTS_VERIFY | + KEYCTL_SUPPORTS_SIGN; + + ret = 0; +error_free_tfm: + crypto_free_akcipher(tfm); + pr_devel("<==%s() = %d\n", __func__, ret); + return ret; +} + +/* + * Encryption operation is performed with the public key. 
Hence it is done + * in software + */ +static int tpm_key_encrypt(struct tpm_key *tk, + struct kernel_pkey_params *params, + const void *in, void *out) +{ + char alg_name[CRYPTO_MAX_ALG_NAME]; + struct crypto_akcipher *tfm; + struct akcipher_request *req; + struct crypto_wait cwait; + struct scatterlist in_sg, out_sg; + uint8_t der_pub_key[PUB_KEY_BUF_SIZE]; + uint32_t der_pub_key_len; + int ret; + + pr_devel("==>%s()\n", __func__); + + ret = determine_akcipher(params->encoding, params->hash_algo, alg_name); + if (ret < 0) + return ret; + + tfm = crypto_alloc_akcipher(alg_name, 0, 0); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + der_pub_key_len = derive_pub_key(tk->pub_key, tk->pub_key_len, + der_pub_key); + + ret = crypto_akcipher_set_pub_key(tfm, der_pub_key, der_pub_key_len); + if (ret < 0) + goto error_free_tfm; + + req = akcipher_request_alloc(tfm, GFP_KERNEL); + if (!req) + goto error_free_tfm; + + sg_init_one(&in_sg, in, params->in_len); + sg_init_one(&out_sg, out, params->out_len); + akcipher_request_set_crypt(req, &in_sg, &out_sg, params->in_len, + params->out_len); + crypto_init_wait(&cwait); + akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP, + crypto_req_done, &cwait); + + ret = crypto_akcipher_encrypt(req); + ret = crypto_wait_req(ret, &cwait); + + if (ret == 0) + ret = req->dst_len; + + akcipher_request_free(req); +error_free_tfm: + crypto_free_akcipher(tfm); + pr_devel("<==%s() = %d\n", __func__, ret); + return ret; +} + +/* + * Decryption operation is performed with the private key in the TPM. + */ +static int tpm_key_decrypt(struct tpm_key *tk, + struct kernel_pkey_params *params, + const void *in, void *out) +{ + struct tpm_buf *tb; + uint32_t keyhandle; + uint8_t srkauth[SHA1_DIGEST_SIZE]; + uint8_t keyauth[SHA1_DIGEST_SIZE]; + int r; + + pr_devel("==>%s()\n", __func__); + + if (params->hash_algo) + return -ENOPKG; + + if (strcmp(params->encoding, "pkcs1")) + return -ENOPKG; + + tb = kzalloc(sizeof(*tb), GFP_KERNEL); + if (!tb) + return -ENOMEM; + + /* TODO: Handle a non-all zero SRK authorization */ + memset(srkauth, 0, sizeof(srkauth)); + + r = tpm_loadkey2(tb, SRKHANDLE, srkauth, + tk->blob, tk->blob_len, &keyhandle); + if (r < 0) { + pr_devel("loadkey2 failed (%d)\n", r); + goto error; + } + + /* TODO: Handle a non-all zero key authorization */ + memset(keyauth, 0, sizeof(keyauth)); + + r = tpm_unbind(tb, keyhandle, keyauth, + in, params->in_len, out, params->out_len); + if (r < 0) + pr_devel("tpm_unbind failed (%d)\n", r); + + if (tpm_flushspecific(tb, keyhandle) < 0) + pr_devel("flushspecific failed (%d)\n", r); + +error: + kzfree(tb); + pr_devel("<==%s() = %d\n", __func__, r); + return r; +} + +/* + * Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2]. 
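These fixed DER prefixes are simply prepended to the caller's digest before the PKCS#1 padding is applied; tpm_key_sign() further down does exactly that with a kzalloc() and two memcpy() calls. A standalone sketch for SHA-256, reusing the bytes of digest_info_sha256 defined just below:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* DigestInfo prefix for SHA-256, same bytes as digest_info_sha256 */
static const unsigned char sha256_prefix[] = {
	0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
	0x05, 0x00, 0x04, 0x20
};

/* returns a malloc'd buffer holding prefix || digest, length in *out_len */
static unsigned char *wrap_digest(const unsigned char *digest, size_t digest_len,
				  size_t *out_len)
{
	unsigned char *buf = malloc(sizeof(sha256_prefix) + digest_len);

	if (!buf)
		return NULL;
	memcpy(buf, sha256_prefix, sizeof(sha256_prefix));
	memcpy(buf + sizeof(sha256_prefix), digest, digest_len);
	*out_len = sizeof(sha256_prefix) + digest_len;
	return buf;
}

int main(void)
{
	unsigned char digest[32] = { 0 };	/* stand-in SHA-256 output */
	size_t len;
	unsigned char *wrapped = wrap_digest(digest, sizeof(digest), &len);

	if (!wrapped)
		return 1;
	printf("wrapped length: %zu\n", len);	/* 19 + 32 = 51 */
	free(wrapped);
	return 0;
}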
+ */ +static const u8 digest_info_md5[] = { + 0x30, 0x20, 0x30, 0x0c, 0x06, 0x08, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* OID */ + 0x05, 0x00, 0x04, 0x10 +}; + +static const u8 digest_info_sha1[] = { + 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, + 0x2b, 0x0e, 0x03, 0x02, 0x1a, + 0x05, 0x00, 0x04, 0x14 +}; + +static const u8 digest_info_rmd160[] = { + 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, + 0x2b, 0x24, 0x03, 0x02, 0x01, + 0x05, 0x00, 0x04, 0x14 +}; + +static const u8 digest_info_sha224[] = { + 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, + 0x05, 0x00, 0x04, 0x1c +}; + +static const u8 digest_info_sha256[] = { + 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, + 0x05, 0x00, 0x04, 0x20 +}; + +static const u8 digest_info_sha384[] = { + 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, + 0x05, 0x00, 0x04, 0x30 +}; + +static const u8 digest_info_sha512[] = { + 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, + 0x05, 0x00, 0x04, 0x40 +}; + +static const struct asn1_template { + const char *name; + const u8 *data; + size_t size; +} asn1_templates[] = { +#define _(X) { #X, digest_info_##X, sizeof(digest_info_##X) } + _(md5), + _(sha1), + _(rmd160), + _(sha256), + _(sha384), + _(sha512), + _(sha224), + { NULL } +#undef _ +}; + +static const struct asn1_template *lookup_asn1(const char *name) +{ + const struct asn1_template *p; + + for (p = asn1_templates; p->name; p++) + if (strcmp(name, p->name) == 0) + return p; + return NULL; +} + +/* + * Sign operation is performed with the private key in the TPM. + */ +static int tpm_key_sign(struct tpm_key *tk, + struct kernel_pkey_params *params, + const void *in, void *out) +{ + struct tpm_buf *tb; + uint32_t keyhandle; + uint8_t srkauth[SHA1_DIGEST_SIZE]; + uint8_t keyauth[SHA1_DIGEST_SIZE]; + void *asn1_wrapped = NULL; + uint32_t in_len = params->in_len; + int r; + + pr_devel("==>%s()\n", __func__); + + if (strcmp(params->encoding, "pkcs1")) + return -ENOPKG; + + if (params->hash_algo) { + const struct asn1_template *asn1 = + lookup_asn1(params->hash_algo); + + if (!asn1) + return -ENOPKG; + + /* request enough space for the ASN.1 template + input hash */ + asn1_wrapped = kzalloc(in_len + asn1->size, GFP_KERNEL); + if (!asn1_wrapped) + return -ENOMEM; + + /* Copy ASN.1 template, then the input */ + memcpy(asn1_wrapped, asn1->data, asn1->size); + memcpy(asn1_wrapped + asn1->size, in, in_len); + + in = asn1_wrapped; + in_len += asn1->size; + } + + if (in_len > tk->key_len / 8 - 11) { + r = -EOVERFLOW; + goto error_free_asn1_wrapped; + } + + r = -ENOMEM; + tb = kzalloc(sizeof(*tb), GFP_KERNEL); + if (!tb) + goto error_free_asn1_wrapped; + + /* TODO: Handle a non-all zero SRK authorization */ + memset(srkauth, 0, sizeof(srkauth)); + + r = tpm_loadkey2(tb, SRKHANDLE, srkauth, + tk->blob, tk->blob_len, &keyhandle); + if (r < 0) { + pr_devel("loadkey2 failed (%d)\n", r); + goto error_free_tb; + } + + /* TODO: Handle a non-all zero key authorization */ + memset(keyauth, 0, sizeof(keyauth)); + + r = tpm_sign(tb, keyhandle, keyauth, in, in_len, out, params->out_len); + if (r < 0) + pr_devel("tpm_sign failed (%d)\n", r); + + if (tpm_flushspecific(tb, keyhandle) < 0) + pr_devel("flushspecific failed (%d)\n", r); + +error_free_tb: + kzfree(tb); +error_free_asn1_wrapped: + kfree(asn1_wrapped); + pr_devel("<==%s() = %d\n", __func__, r); + return r; +} + +/* + * Do encryption, decryption and 
signing ops. + */ +static int tpm_key_eds_op(struct kernel_pkey_params *params, + const void *in, void *out) +{ + struct tpm_key *tk = params->key->payload.data[asym_crypto]; + int ret = -EOPNOTSUPP; + + /* Perform the encryption calculation. */ + switch (params->op) { + case kernel_pkey_encrypt: + ret = tpm_key_encrypt(tk, params, in, out); + break; + case kernel_pkey_decrypt: + ret = tpm_key_decrypt(tk, params, in, out); + break; + case kernel_pkey_sign: + ret = tpm_key_sign(tk, params, in, out); + break; + default: + BUG(); + } + + return ret; +} + +/* + * Verify a signature using a public key. + */ +static int tpm_key_verify_signature(const struct key *key, + const struct public_key_signature *sig) +{ + const struct tpm_key *tk = key->payload.data[asym_crypto]; + struct crypto_wait cwait; + struct crypto_akcipher *tfm; + struct akcipher_request *req; + struct scatterlist sig_sg, digest_sg; + char alg_name[CRYPTO_MAX_ALG_NAME]; + uint8_t der_pub_key[PUB_KEY_BUF_SIZE]; + uint32_t der_pub_key_len; + void *output; + unsigned int outlen; + int ret; + + pr_devel("==>%s()\n", __func__); + + BUG_ON(!tk); + BUG_ON(!sig); + BUG_ON(!sig->s); + + if (!sig->digest) + return -ENOPKG; + + ret = determine_akcipher(sig->encoding, sig->hash_algo, alg_name); + if (ret < 0) + return ret; + + tfm = crypto_alloc_akcipher(alg_name, 0, 0); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + der_pub_key_len = derive_pub_key(tk->pub_key, tk->pub_key_len, + der_pub_key); + + ret = crypto_akcipher_set_pub_key(tfm, der_pub_key, der_pub_key_len); + if (ret < 0) + goto error_free_tfm; + + ret = -ENOMEM; + req = akcipher_request_alloc(tfm, GFP_KERNEL); + if (!req) + goto error_free_tfm; + + ret = -ENOMEM; + outlen = crypto_akcipher_maxsize(tfm); + output = kmalloc(outlen, GFP_KERNEL); + if (!output) + goto error_free_req; + + sg_init_one(&sig_sg, sig->s, sig->s_size); + sg_init_one(&digest_sg, output, outlen); + akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size, + outlen); + crypto_init_wait(&cwait); + akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP, + crypto_req_done, &cwait); + + /* Perform the verification calculation. This doesn't actually do the + * verification, but rather calculates the hash expected by the + * signature and returns that to us. + */ + ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait); + if (ret) + goto out_free_output; + + /* Do the actual verification step. 
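 * The recovered digest must match the caller-supplied sig->digest both
 * in length and in content; any mismatch is reported as -EKEYREJECTED.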
*/ + if (req->dst_len != sig->digest_size || + memcmp(sig->digest, output, sig->digest_size) != 0) + ret = -EKEYREJECTED; + +out_free_output: + kfree(output); +error_free_req: + akcipher_request_free(req); +error_free_tfm: + crypto_free_akcipher(tfm); + pr_devel("<==%s() = %d\n", __func__, ret); + if (WARN_ON_ONCE(ret > 0)) + ret = -EINVAL; + return ret; +} + +/* + * Parse enough information out of TPM_KEY structure: + * TPM_STRUCT_VER -> 4 bytes + * TPM_KEY_USAGE -> 2 bytes + * TPM_KEY_FLAGS -> 4 bytes + * TPM_AUTH_DATA_USAGE -> 1 byte + * TPM_KEY_PARMS -> variable + * UINT32 PCRInfoSize -> 4 bytes + * BYTE* -> PCRInfoSize bytes + * TPM_STORE_PUBKEY + * UINT32 encDataSize; + * BYTE* -> encDataSize; + * + * TPM_KEY_PARMS: + * TPM_ALGORITHM_ID -> 4 bytes + * TPM_ENC_SCHEME -> 2 bytes + * TPM_SIG_SCHEME -> 2 bytes + * UINT32 parmSize -> 4 bytes + * BYTE* -> variable + */ +static int extract_key_parameters(struct tpm_key *tk) +{ + const void *cur = tk->blob; + uint32_t len = tk->blob_len; + const void *pub_key; + uint32_t sz; + uint32_t key_len; + + if (len < 11) + return -EBADMSG; + + /* Ensure this is a legacy key */ + if (get_unaligned_be16(cur + 4) != 0x0015) + return -EBADMSG; + + /* Skip to TPM_KEY_PARMS */ + cur += 11; + len -= 11; + + if (len < 12) + return -EBADMSG; + + /* Make sure this is an RSA key */ + if (get_unaligned_be32(cur) != 0x00000001) + return -EBADMSG; + + /* Make sure this is TPM_ES_RSAESPKCSv15 encoding scheme */ + if (get_unaligned_be16(cur + 4) != 0x0002) + return -EBADMSG; + + /* Make sure this is TPM_SS_RSASSAPKCS1v15_DER signature scheme */ + if (get_unaligned_be16(cur + 6) != 0x0003) + return -EBADMSG; + + sz = get_unaligned_be32(cur + 8); + if (len < sz + 12) + return -EBADMSG; + + /* Move to TPM_RSA_KEY_PARMS */ + len -= 12; + cur += 12; + + /* Grab the RSA key length */ + key_len = get_unaligned_be32(cur); + + switch (key_len) { + case 512: + case 1024: + case 1536: + case 2048: + break; + default: + return -EINVAL; + } + + /* Move just past TPM_KEY_PARMS */ + cur += sz; + len -= sz; + + if (len < 4) + return -EBADMSG; + + sz = get_unaligned_be32(cur); + if (len < 4 + sz) + return -EBADMSG; + + /* Move to TPM_STORE_PUBKEY */ + cur += 4 + sz; + len -= 4 + sz; + + /* Grab the size of the public key, it should jive with the key size */ + sz = get_unaligned_be32(cur); + if (sz > 256) + return -EINVAL; + + pub_key = cur + 4; + + tk->key_len = key_len; + tk->pub_key = pub_key; + tk->pub_key_len = sz; + + return 0; +} + +/* Given the blob, parse it and load it into the TPM */ +struct tpm_key *tpm_key_create(const void *blob, uint32_t blob_len) +{ + int r; + struct tpm_key *tk; + + r = tpm_is_tpm2(NULL); + if (r < 0) + goto error; + + /* We don't support TPM2 yet */ + if (r > 0) { + r = -ENODEV; + goto error; + } + + r = -ENOMEM; + tk = kzalloc(sizeof(struct tpm_key), GFP_KERNEL); + if (!tk) + goto error; + + tk->blob = kmemdup(blob, blob_len, GFP_KERNEL); + if (!tk->blob) + goto error_memdup; + + tk->blob_len = blob_len; + + r = extract_key_parameters(tk); + if (r < 0) + goto error_extract; + + return tk; + +error_extract: + kfree(tk->blob); + tk->blob_len = 0; +error_memdup: + kfree(tk); +error: + return ERR_PTR(r); +} +EXPORT_SYMBOL_GPL(tpm_key_create); + +/* + * TPM-based asymmetric key subtype + */ +struct asymmetric_key_subtype asym_tpm_subtype = { + .owner = THIS_MODULE, + .name = "asym_tpm", + .name_len = sizeof("asym_tpm") - 1, + .describe = asym_tpm_describe, + .destroy = asym_tpm_destroy, + .query = tpm_key_query, + .eds_op = tpm_key_eds_op, + 
.verify_signature = tpm_key_verify_signature, +}; +EXPORT_SYMBOL_GPL(asym_tpm_subtype); + +MODULE_DESCRIPTION("TPM based asymmetric key subtype"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL v2"); diff --git a/crypto/asymmetric_keys/asymmetric_keys.h b/crypto/asymmetric_keys/asymmetric_keys.h index ca8e9ac34ce6..7be1ccf4fa9f 100644 --- a/crypto/asymmetric_keys/asymmetric_keys.h +++ b/crypto/asymmetric_keys/asymmetric_keys.h @@ -16,3 +16,6 @@ extern struct asymmetric_key_id *asymmetric_key_hex_to_key_id(const char *id); extern int __asymmetric_key_hex_to_key_id(const char *id, struct asymmetric_key_id *match_id, size_t hexlen); + +extern int asymmetric_key_eds_op(struct kernel_pkey_params *params, + const void *in, void *out); diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c index 26539e9a8bda..69a0788a7de5 100644 --- a/crypto/asymmetric_keys/asymmetric_type.c +++ b/crypto/asymmetric_keys/asymmetric_type.c @@ -18,6 +18,7 @@ #include <linux/slab.h> #include <linux/ctype.h> #include <keys/system_keyring.h> +#include <keys/user-type.h> #include "asymmetric_keys.h" MODULE_LICENSE("GPL"); @@ -538,6 +539,45 @@ out: return ret; } +int asymmetric_key_eds_op(struct kernel_pkey_params *params, + const void *in, void *out) +{ + const struct asymmetric_key_subtype *subtype; + struct key *key = params->key; + int ret; + + pr_devel("==>%s()\n", __func__); + + if (key->type != &key_type_asymmetric) + return -EINVAL; + subtype = asymmetric_key_subtype(key); + if (!subtype || + !key->payload.data[0]) + return -EINVAL; + if (!subtype->eds_op) + return -ENOTSUPP; + + ret = subtype->eds_op(params, in, out); + + pr_devel("<==%s() = %d\n", __func__, ret); + return ret; +} + +static int asymmetric_key_verify_signature(struct kernel_pkey_params *params, + const void *in, const void *in2) +{ + struct public_key_signature sig = { + .s_size = params->in2_len, + .digest_size = params->in_len, + .encoding = params->encoding, + .hash_algo = params->hash_algo, + .digest = (void *)in, + .s = (void *)in2, + }; + + return verify_signature(params->key, &sig); +} + struct key_type key_type_asymmetric = { .name = "asymmetric", .preparse = asymmetric_key_preparse, @@ -548,6 +588,9 @@ struct key_type key_type_asymmetric = { .destroy = asymmetric_key_destroy, .describe = asymmetric_key_describe, .lookup_restriction = asymmetric_lookup_restriction, + .asym_query = query_asymmetric_key, + .asym_eds_op = asymmetric_key_eds_op, + .asym_verify_signature = asymmetric_key_verify_signature, }; EXPORT_SYMBOL_GPL(key_type_asymmetric); diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c index 0f134162cef4..f0d56e1a8b7e 100644 --- a/crypto/asymmetric_keys/pkcs7_parser.c +++ b/crypto/asymmetric_keys/pkcs7_parser.c @@ -271,6 +271,7 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen, switch (ctx->last_oid) { case OID_rsaEncryption: ctx->sinfo->sig->pkey_algo = "rsa"; + ctx->sinfo->sig->encoding = "pkcs1"; break; default: printk("Unsupported pkey algo: %u\n", ctx->last_oid); diff --git a/crypto/asymmetric_keys/pkcs8.asn1 b/crypto/asymmetric_keys/pkcs8.asn1 new file mode 100644 index 000000000000..702c41a3c713 --- /dev/null +++ b/crypto/asymmetric_keys/pkcs8.asn1 @@ -0,0 +1,24 @@ +-- +-- This is the unencrypted variant +-- +PrivateKeyInfo ::= SEQUENCE { + version Version, + privateKeyAlgorithm PrivateKeyAlgorithmIdentifier, + privateKey PrivateKey, + attributes [0] IMPLICIT Attributes OPTIONAL +} + +Version ::= INTEGER ({ 
pkcs8_note_version }) + +PrivateKeyAlgorithmIdentifier ::= AlgorithmIdentifier ({ pkcs8_note_algo }) + +PrivateKey ::= OCTET STRING ({ pkcs8_note_key }) + +Attributes ::= SET OF Attribute + +Attribute ::= ANY + +AlgorithmIdentifier ::= SEQUENCE { + algorithm OBJECT IDENTIFIER ({ pkcs8_note_OID }), + parameters ANY OPTIONAL +} diff --git a/crypto/asymmetric_keys/pkcs8_parser.c b/crypto/asymmetric_keys/pkcs8_parser.c new file mode 100644 index 000000000000..5f6a7ecc9765 --- /dev/null +++ b/crypto/asymmetric_keys/pkcs8_parser.c @@ -0,0 +1,184 @@ +/* PKCS#8 Private Key parser [RFC 5208]. + * + * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#define pr_fmt(fmt) "PKCS8: "fmt +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/oid_registry.h> +#include <keys/asymmetric-subtype.h> +#include <keys/asymmetric-parser.h> +#include <crypto/public_key.h> +#include "pkcs8.asn1.h" + +struct pkcs8_parse_context { + struct public_key *pub; + unsigned long data; /* Start of data */ + enum OID last_oid; /* Last OID encountered */ + enum OID algo_oid; /* Algorithm OID */ + u32 key_size; + const void *key; +}; + +/* + * Note an OID when we find one for later processing when we know how to + * interpret it. + */ +int pkcs8_note_OID(void *context, size_t hdrlen, + unsigned char tag, + const void *value, size_t vlen) +{ + struct pkcs8_parse_context *ctx = context; + + ctx->last_oid = look_up_OID(value, vlen); + if (ctx->last_oid == OID__NR) { + char buffer[50]; + + sprint_oid(value, vlen, buffer, sizeof(buffer)); + pr_info("Unknown OID: [%lu] %s\n", + (unsigned long)value - ctx->data, buffer); + } + return 0; +} + +/* + * Note the version number of the ASN.1 blob. + */ +int pkcs8_note_version(void *context, size_t hdrlen, + unsigned char tag, + const void *value, size_t vlen) +{ + if (vlen != 1 || ((const u8 *)value)[0] != 0) { + pr_warn("Unsupported PKCS#8 version\n"); + return -EBADMSG; + } + return 0; +} + +/* + * Note the public algorithm. + */ +int pkcs8_note_algo(void *context, size_t hdrlen, + unsigned char tag, + const void *value, size_t vlen) +{ + struct pkcs8_parse_context *ctx = context; + + if (ctx->last_oid != OID_rsaEncryption) + return -ENOPKG; + + ctx->pub->pkey_algo = "rsa"; + return 0; +} + +/* + * Note the key data of the ASN.1 blob. + */ +int pkcs8_note_key(void *context, size_t hdrlen, + unsigned char tag, + const void *value, size_t vlen) +{ + struct pkcs8_parse_context *ctx = context; + + ctx->key = value; + ctx->key_size = vlen; + return 0; +} + +/* + * Parse a PKCS#8 private key blob. 
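 *
 * A suitable unencrypted DER blob can typically be produced from a PEM
 * RSA key with something like
 *
 *	openssl pkcs8 -topk8 -nocrypt -outform DER -in key.pem -out key.p8
 *
 * and fed to this parser through the asymmetric key type, e.g.
 *
 *	keyctl padd asymmetric "" @u < key.p8
 *
 * (both command lines are illustrative, not part of this file).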
+ */ +static struct public_key *pkcs8_parse(const void *data, size_t datalen) +{ + struct pkcs8_parse_context ctx; + struct public_key *pub; + long ret; + + memset(&ctx, 0, sizeof(ctx)); + + ret = -ENOMEM; + ctx.pub = kzalloc(sizeof(struct public_key), GFP_KERNEL); + if (!ctx.pub) + goto error; + + ctx.data = (unsigned long)data; + + /* Attempt to decode the private key */ + ret = asn1_ber_decoder(&pkcs8_decoder, &ctx, data, datalen); + if (ret < 0) + goto error_decode; + + ret = -ENOMEM; + pub = ctx.pub; + pub->key = kmemdup(ctx.key, ctx.key_size, GFP_KERNEL); + if (!pub->key) + goto error_decode; + + pub->keylen = ctx.key_size; + pub->key_is_private = true; + return pub; + +error_decode: + kfree(ctx.pub); +error: + return ERR_PTR(ret); +} + +/* + * Attempt to parse a data blob for a key as a PKCS#8 private key. + */ +static int pkcs8_key_preparse(struct key_preparsed_payload *prep) +{ + struct public_key *pub; + + pub = pkcs8_parse(prep->data, prep->datalen); + if (IS_ERR(pub)) + return PTR_ERR(pub); + + pr_devel("Cert Key Algo: %s\n", pub->pkey_algo); + pub->id_type = "PKCS8"; + + /* We're pinning the module by being linked against it */ + __module_get(public_key_subtype.owner); + prep->payload.data[asym_subtype] = &public_key_subtype; + prep->payload.data[asym_key_ids] = NULL; + prep->payload.data[asym_crypto] = pub; + prep->payload.data[asym_auth] = NULL; + prep->quotalen = 100; + return 0; +} + +static struct asymmetric_key_parser pkcs8_key_parser = { + .owner = THIS_MODULE, + .name = "pkcs8", + .parse = pkcs8_key_preparse, +}; + +/* + * Module stuff + */ +static int __init pkcs8_key_init(void) +{ + return register_asymmetric_key_parser(&pkcs8_key_parser); +} + +static void __exit pkcs8_key_exit(void) +{ + unregister_asymmetric_key_parser(&pkcs8_key_parser); +} + +module_init(pkcs8_key_init); +module_exit(pkcs8_key_exit); + +MODULE_DESCRIPTION("PKCS#8 certificate parser"); +MODULE_LICENSE("GPL"); diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index e929fe1e4106..f5d85b47fcc6 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -60,6 +60,165 @@ static void public_key_destroy(void *payload0, void *payload3) } /* + * Determine the crypto algorithm name. + */ +static +int software_key_determine_akcipher(const char *encoding, + const char *hash_algo, + const struct public_key *pkey, + char alg_name[CRYPTO_MAX_ALG_NAME]) +{ + int n; + + if (strcmp(encoding, "pkcs1") == 0) { + /* The data wangled by the RSA algorithm is typically padded + * and encoded in some manner, such as EMSA-PKCS1-1_5 [RFC3447 + * sec 8.2]. + */ + if (!hash_algo) + n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, + "pkcs1pad(%s)", + pkey->pkey_algo); + else + n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, + "pkcs1pad(%s,%s)", + pkey->pkey_algo, hash_algo); + return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0; + } + + if (strcmp(encoding, "raw") == 0) { + strcpy(alg_name, pkey->pkey_algo); + return 0; + } + + return -ENOPKG; +} + +/* + * Query information about a key. 
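 * For RSA the underlying crypto_akcipher_maxsize() is the modulus size
 * in bytes, so key_size below is reported in bits while the max_*_size
 * fields are byte counts bounded by the modulus.  Decryption and signing
 * are only advertised when the payload carries a private key.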
+ */ +static int software_key_query(const struct kernel_pkey_params *params, + struct kernel_pkey_query *info) +{ + struct crypto_akcipher *tfm; + struct public_key *pkey = params->key->payload.data[asym_crypto]; + char alg_name[CRYPTO_MAX_ALG_NAME]; + int ret, len; + + ret = software_key_determine_akcipher(params->encoding, + params->hash_algo, + pkey, alg_name); + if (ret < 0) + return ret; + + tfm = crypto_alloc_akcipher(alg_name, 0, 0); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + if (pkey->key_is_private) + ret = crypto_akcipher_set_priv_key(tfm, + pkey->key, pkey->keylen); + else + ret = crypto_akcipher_set_pub_key(tfm, + pkey->key, pkey->keylen); + if (ret < 0) + goto error_free_tfm; + + len = crypto_akcipher_maxsize(tfm); + info->key_size = len * 8; + info->max_data_size = len; + info->max_sig_size = len; + info->max_enc_size = len; + info->max_dec_size = len; + info->supported_ops = (KEYCTL_SUPPORTS_ENCRYPT | + KEYCTL_SUPPORTS_VERIFY); + if (pkey->key_is_private) + info->supported_ops |= (KEYCTL_SUPPORTS_DECRYPT | + KEYCTL_SUPPORTS_SIGN); + ret = 0; + +error_free_tfm: + crypto_free_akcipher(tfm); + pr_devel("<==%s() = %d\n", __func__, ret); + return ret; +} + +/* + * Do encryption, decryption and signing ops. + */ +static int software_key_eds_op(struct kernel_pkey_params *params, + const void *in, void *out) +{ + const struct public_key *pkey = params->key->payload.data[asym_crypto]; + struct akcipher_request *req; + struct crypto_akcipher *tfm; + struct crypto_wait cwait; + struct scatterlist in_sg, out_sg; + char alg_name[CRYPTO_MAX_ALG_NAME]; + int ret; + + pr_devel("==>%s()\n", __func__); + + ret = software_key_determine_akcipher(params->encoding, + params->hash_algo, + pkey, alg_name); + if (ret < 0) + return ret; + + tfm = crypto_alloc_akcipher(alg_name, 0, 0); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + req = akcipher_request_alloc(tfm, GFP_KERNEL); + if (!req) + goto error_free_tfm; + + if (pkey->key_is_private) + ret = crypto_akcipher_set_priv_key(tfm, + pkey->key, pkey->keylen); + else + ret = crypto_akcipher_set_pub_key(tfm, + pkey->key, pkey->keylen); + if (ret) + goto error_free_req; + + sg_init_one(&in_sg, in, params->in_len); + sg_init_one(&out_sg, out, params->out_len); + akcipher_request_set_crypt(req, &in_sg, &out_sg, params->in_len, + params->out_len); + crypto_init_wait(&cwait); + akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP, + crypto_req_done, &cwait); + + /* Perform the encryption calculation. */ + switch (params->op) { + case kernel_pkey_encrypt: + ret = crypto_akcipher_encrypt(req); + break; + case kernel_pkey_decrypt: + ret = crypto_akcipher_decrypt(req); + break; + case kernel_pkey_sign: + ret = crypto_akcipher_sign(req); + break; + default: + BUG(); + } + + ret = crypto_wait_req(ret, &cwait); + if (ret == 0) + ret = req->dst_len; + +error_free_req: + akcipher_request_free(req); +error_free_tfm: + crypto_free_akcipher(tfm); + pr_devel("<==%s() = %d\n", __func__, ret); + return ret; +} + +/* * Verify a signature using a public key. 
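 *
 * The caller supplies a fully populated descriptor; a sketch, with
 * illustrative field values and a 32-byte SHA-256 digest:
 *
 *	struct public_key_signature sig = {
 *		.pkey_algo	= "rsa",
 *		.encoding	= "pkcs1",
 *		.hash_algo	= "sha256",
 *		.digest		= digest,
 *		.digest_size	= 32,
 *		.s		= sig_blob,
 *		.s_size		= sig_len,
 *	};
 *
 *	err = public_key_verify_signature(pkey, &sig);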
*/ int public_key_verify_signature(const struct public_key *pkey, @@ -69,8 +228,7 @@ int public_key_verify_signature(const struct public_key *pkey, struct crypto_akcipher *tfm; struct akcipher_request *req; struct scatterlist sig_sg, digest_sg; - const char *alg_name; - char alg_name_buf[CRYPTO_MAX_ALG_NAME]; + char alg_name[CRYPTO_MAX_ALG_NAME]; void *output; unsigned int outlen; int ret; @@ -81,21 +239,11 @@ int public_key_verify_signature(const struct public_key *pkey, BUG_ON(!sig); BUG_ON(!sig->s); - if (!sig->digest) - return -ENOPKG; - - alg_name = sig->pkey_algo; - if (strcmp(sig->pkey_algo, "rsa") == 0) { - /* The data wangled by the RSA algorithm is typically padded - * and encoded in some manner, such as EMSA-PKCS1-1_5 [RFC3447 - * sec 8.2]. - */ - if (snprintf(alg_name_buf, CRYPTO_MAX_ALG_NAME, - "pkcs1pad(rsa,%s)", sig->hash_algo - ) >= CRYPTO_MAX_ALG_NAME) - return -EINVAL; - alg_name = alg_name_buf; - } + ret = software_key_determine_akcipher(sig->encoding, + sig->hash_algo, + pkey, alg_name); + if (ret < 0) + return ret; tfm = crypto_alloc_akcipher(alg_name, 0, 0); if (IS_ERR(tfm)) @@ -106,7 +254,12 @@ int public_key_verify_signature(const struct public_key *pkey, if (!req) goto error_free_tfm; - ret = crypto_akcipher_set_pub_key(tfm, pkey->key, pkey->keylen); + if (pkey->key_is_private) + ret = crypto_akcipher_set_priv_key(tfm, + pkey->key, pkey->keylen); + else + ret = crypto_akcipher_set_pub_key(tfm, + pkey->key, pkey->keylen); if (ret) goto error_free_req; @@ -167,6 +320,8 @@ struct asymmetric_key_subtype public_key_subtype = { .name_len = sizeof("public_key") - 1, .describe = public_key_describe, .destroy = public_key_destroy, + .query = software_key_query, + .eds_op = software_key_eds_op, .verify_signature = public_key_verify_signature_2, }; EXPORT_SYMBOL_GPL(public_key_subtype); diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c index 28198314bc39..ad95a58c6642 100644 --- a/crypto/asymmetric_keys/signature.c +++ b/crypto/asymmetric_keys/signature.c @@ -16,7 +16,9 @@ #include <linux/export.h> #include <linux/err.h> #include <linux/slab.h> +#include <linux/keyctl.h> #include <crypto/public_key.h> +#include <keys/user-type.h> #include "asymmetric_keys.h" /* @@ -37,6 +39,99 @@ void public_key_signature_free(struct public_key_signature *sig) EXPORT_SYMBOL_GPL(public_key_signature_free); /** + * query_asymmetric_key - Get information about an aymmetric key. + * @params: Various parameters. + * @info: Where to put the information. + */ +int query_asymmetric_key(const struct kernel_pkey_params *params, + struct kernel_pkey_query *info) +{ + const struct asymmetric_key_subtype *subtype; + struct key *key = params->key; + int ret; + + pr_devel("==>%s()\n", __func__); + + if (key->type != &key_type_asymmetric) + return -EINVAL; + subtype = asymmetric_key_subtype(key); + if (!subtype || + !key->payload.data[0]) + return -EINVAL; + if (!subtype->query) + return -ENOTSUPP; + + ret = subtype->query(params, info); + + pr_devel("<==%s() = %d\n", __func__, ret); + return ret; +} +EXPORT_SYMBOL_GPL(query_asymmetric_key); + +/** + * encrypt_blob - Encrypt data using an asymmetric key + * @params: Various parameters + * @data: Data blob to be encrypted, length params->data_len + * @enc: Encrypted data buffer, length params->enc_len + * + * Encrypt the specified data blob using the private key specified by + * params->key. The encrypted data is wrapped in an encoding if + * params->encoding is specified (eg. "pkcs1"). 
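 *
 * A minimal calling sketch (buffer sizes and the key reference are
 * illustrative and would normally come from query_asymmetric_key()):
 *
 *	struct kernel_pkey_params params = {
 *		.key		= key,
 *		.encoding	= "pkcs1",
 *		.in_len		= data_len,
 *		.out_len	= enc_len,
 *	};
 *
 *	n = encrypt_blob(&params, data, enc);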
+ * + * Returns the length of the data placed in the encrypted data buffer or an + * error. + */ +int encrypt_blob(struct kernel_pkey_params *params, + const void *data, void *enc) +{ + params->op = kernel_pkey_encrypt; + return asymmetric_key_eds_op(params, data, enc); +} +EXPORT_SYMBOL_GPL(encrypt_blob); + +/** + * decrypt_blob - Decrypt data using an asymmetric key + * @params: Various parameters + * @enc: Encrypted data to be decrypted, length params->enc_len + * @data: Decrypted data buffer, length params->data_len + * + * Decrypt the specified data blob using the private key specified by + * params->key. The decrypted data is wrapped in an encoding if + * params->encoding is specified (eg. "pkcs1"). + * + * Returns the length of the data placed in the decrypted data buffer or an + * error. + */ +int decrypt_blob(struct kernel_pkey_params *params, + const void *enc, void *data) +{ + params->op = kernel_pkey_decrypt; + return asymmetric_key_eds_op(params, enc, data); +} +EXPORT_SYMBOL_GPL(decrypt_blob); + +/** + * create_signature - Sign some data using an asymmetric key + * @params: Various parameters + * @data: Data blob to be signed, length params->data_len + * @enc: Signature buffer, length params->enc_len + * + * Sign the specified data blob using the private key specified by params->key. + * The signature is wrapped in an encoding if params->encoding is specified + * (eg. "pkcs1"). If the encoding needs to know the digest type, this can be + * passed through params->hash_algo (eg. "sha1"). + * + * Returns the length of the data placed in the signature buffer or an error. + */ +int create_signature(struct kernel_pkey_params *params, + const void *data, void *enc) +{ + params->op = kernel_pkey_sign; + return asymmetric_key_eds_op(params, data, enc); +} +EXPORT_SYMBOL_GPL(create_signature); + +/** * verify_signature - Initiate the use of an asymmetric key to verify a signature * @key: The asymmetric key to verify against * @sig: The signature to check diff --git a/crypto/asymmetric_keys/tpm.asn1 b/crypto/asymmetric_keys/tpm.asn1 new file mode 100644 index 000000000000..d7f194232f30 --- /dev/null +++ b/crypto/asymmetric_keys/tpm.asn1 @@ -0,0 +1,5 @@ +-- +-- Unencryted TPM Blob. For details of the format, see: +-- http://david.woodhou.se/draft-woodhouse-cert-best-practice.html#I-D.mavrogiannopoulos-tpmuri +-- +PrivateKeyInfo ::= OCTET STRING ({ tpm_note_key }) diff --git a/crypto/asymmetric_keys/tpm_parser.c b/crypto/asymmetric_keys/tpm_parser.c new file mode 100644 index 000000000000..96405d8dcd98 --- /dev/null +++ b/crypto/asymmetric_keys/tpm_parser.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 +#define pr_fmt(fmt) "TPM-PARSER: "fmt +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <keys/asymmetric-subtype.h> +#include <keys/asymmetric-parser.h> +#include <crypto/asym_tpm_subtype.h> +#include "tpm.asn1.h" + +struct tpm_parse_context { + const void *blob; + u32 blob_len; +}; + +/* + * Note the key data of the ASN.1 blob. + */ +int tpm_note_key(void *context, size_t hdrlen, + unsigned char tag, + const void *value, size_t vlen) +{ + struct tpm_parse_context *ctx = context; + + ctx->blob = value; + ctx->blob_len = vlen; + + return 0; +} + +/* + * Parse a TPM-encrypted private key blob. 
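 *
 * The wrapper described by tpm.asn1 is a single OCTET STRING, so a
 * well-formed blob begins with tag 0x04 and a DER length, e.g.
 *
 *	04 82 02 3a  <0x23a bytes of TPM_KEY structure>
 *
 * tpm_note_key() records a pointer to that inner TPM_KEY blob and its
 * length; the detailed parsing is done later by extract_key_parameters()
 * in asym_tpm.c.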
+ */ +static struct tpm_key *tpm_parse(const void *data, size_t datalen) +{ + struct tpm_parse_context ctx; + long ret; + + memset(&ctx, 0, sizeof(ctx)); + + /* Attempt to decode the private key */ + ret = asn1_ber_decoder(&tpm_decoder, &ctx, data, datalen); + if (ret < 0) + goto error; + + return tpm_key_create(ctx.blob, ctx.blob_len); + +error: + return ERR_PTR(ret); +} +/* + * Attempt to parse a data blob for a key as a TPM private key blob. + */ +static int tpm_key_preparse(struct key_preparsed_payload *prep) +{ + struct tpm_key *tk; + + /* + * TPM 1.2 keys are max 2048 bits long, so assume the blob is no + * more than 4x that + */ + if (prep->datalen > 256 * 4) + return -EMSGSIZE; + + tk = tpm_parse(prep->data, prep->datalen); + + if (IS_ERR(tk)) + return PTR_ERR(tk); + + /* We're pinning the module by being linked against it */ + __module_get(asym_tpm_subtype.owner); + prep->payload.data[asym_subtype] = &asym_tpm_subtype; + prep->payload.data[asym_key_ids] = NULL; + prep->payload.data[asym_crypto] = tk; + prep->payload.data[asym_auth] = NULL; + prep->quotalen = 100; + return 0; +} + +static struct asymmetric_key_parser tpm_key_parser = { + .owner = THIS_MODULE, + .name = "tpm_parser", + .parse = tpm_key_preparse, +}; + +static int __init tpm_key_init(void) +{ + return register_asymmetric_key_parser(&tpm_key_parser); +} + +static void __exit tpm_key_exit(void) +{ + unregister_asymmetric_key_parser(&tpm_key_parser); +} + +module_init(tpm_key_init); +module_exit(tpm_key_exit); + +MODULE_DESCRIPTION("TPM private key-blob parser"); +MODULE_LICENSE("GPL v2"); diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index b6cabac4b62b..991f4d735a4e 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -199,35 +199,32 @@ int x509_note_pkey_algo(void *context, size_t hdrlen, case OID_md4WithRSAEncryption: ctx->cert->sig->hash_algo = "md4"; - ctx->cert->sig->pkey_algo = "rsa"; - break; + goto rsa_pkcs1; case OID_sha1WithRSAEncryption: ctx->cert->sig->hash_algo = "sha1"; - ctx->cert->sig->pkey_algo = "rsa"; - break; + goto rsa_pkcs1; case OID_sha256WithRSAEncryption: ctx->cert->sig->hash_algo = "sha256"; - ctx->cert->sig->pkey_algo = "rsa"; - break; + goto rsa_pkcs1; case OID_sha384WithRSAEncryption: ctx->cert->sig->hash_algo = "sha384"; - ctx->cert->sig->pkey_algo = "rsa"; - break; + goto rsa_pkcs1; case OID_sha512WithRSAEncryption: ctx->cert->sig->hash_algo = "sha512"; - ctx->cert->sig->pkey_algo = "rsa"; - break; + goto rsa_pkcs1; case OID_sha224WithRSAEncryption: ctx->cert->sig->hash_algo = "sha224"; - ctx->cert->sig->pkey_algo = "rsa"; - break; + goto rsa_pkcs1; } +rsa_pkcs1: + ctx->cert->sig->pkey_algo = "rsa"; + ctx->cert->sig->encoding = "pkcs1"; ctx->algo_oid = ctx->last_oid; return 0; } diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index 812476e46821..cfc04e15fd97 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -392,7 +392,8 @@ static int pkcs1pad_sign(struct akcipher_request *req) if (!ctx->key_size) return -EINVAL; - digest_size = digest_info->size; + if (digest_info) + digest_size = digest_info->size; if (req->src_len + digest_size > ctx->key_size - 11) return -EOVERFLOW; @@ -412,8 +413,9 @@ static int pkcs1pad_sign(struct akcipher_request *req) memset(req_ctx->in_buf + 1, 0xff, ps_end - 1); req_ctx->in_buf[ps_end] = 0x00; - memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data, - digest_info->size); + if (digest_info) + memcpy(req_ctx->in_buf + ps_end + 1, 
digest_info->data, + digest_info->size); pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, ctx->key_size - 1 - req->src_len, req->src); @@ -475,10 +477,13 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) goto done; pos++; - if (crypto_memneq(out_buf + pos, digest_info->data, digest_info->size)) - goto done; + if (digest_info) { + if (crypto_memneq(out_buf + pos, digest_info->data, + digest_info->size)) + goto done; - pos += digest_info->size; + pos += digest_info->size; + } err = 0; @@ -608,11 +613,14 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) hash_name = crypto_attr_alg_name(tb[2]); if (IS_ERR(hash_name)) - return PTR_ERR(hash_name); + hash_name = NULL; - digest_info = rsa_lookup_asn1(hash_name); - if (!digest_info) - return -EINVAL; + if (hash_name) { + digest_info = rsa_lookup_asn1(hash_name); + if (!digest_info) + return -EINVAL; + } else + digest_info = NULL; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -632,14 +640,29 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) err = -ENAMETOOLONG; - if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, - "pkcs1pad(%s,%s)", rsa_alg->base.cra_name, hash_name) >= - CRYPTO_MAX_ALG_NAME || - snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "pkcs1pad(%s,%s)", - rsa_alg->base.cra_driver_name, hash_name) >= - CRYPTO_MAX_ALG_NAME) - goto out_drop_alg; + if (!hash_name) { + if (snprintf(inst->alg.base.cra_name, + CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", + rsa_alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME) + goto out_drop_alg; + + if (snprintf(inst->alg.base.cra_driver_name, + CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", + rsa_alg->base.cra_driver_name) >= + CRYPTO_MAX_ALG_NAME) + goto out_drop_alg; + } else { + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, + "pkcs1pad(%s,%s)", rsa_alg->base.cra_name, + hash_name) >= CRYPTO_MAX_ALG_NAME) + goto out_drop_alg; + + if (snprintf(inst->alg.base.cra_driver_name, + CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)", + rsa_alg->base.cra_driver_name, + hash_name) >= CRYPTO_MAX_ALG_NAME) + goto out_drop_alg; + } inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = rsa_alg->base.cra_priority; diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index a7c2673ffd36..824ae985ad93 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -126,6 +126,7 @@ int acpi_device_get_power(struct acpi_device *device, int *state) return 0; } +EXPORT_SYMBOL(acpi_device_get_power); static int acpi_dev_pm_explicit_set(struct acpi_device *adev, int state) { diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c index 3b25a643058c..21b9b2f2470a 100644 --- a/drivers/auxdisplay/panel.c +++ b/drivers/auxdisplay/panel.c @@ -155,10 +155,9 @@ struct logical_input { int release_data; } std; struct { /* valid when type == INPUT_TYPE_KBD */ - /* strings can be non null-terminated */ - char press_str[sizeof(void *) + sizeof(int)]; - char repeat_str[sizeof(void *) + sizeof(int)]; - char release_str[sizeof(void *) + sizeof(int)]; + char press_str[sizeof(void *) + sizeof(int)] __nonstring; + char repeat_str[sizeof(void *) + sizeof(int)] __nonstring; + char release_str[sizeof(void *) + sizeof(int)] __nonstring; } kbd; } u; }; diff --git a/drivers/block/brd.c b/drivers/block/brd.c index df8103dd40ac..c18586fccb6f 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -396,15 +396,14 @@ static struct brd_device *brd_alloc(int 
i) disk->first_minor = i * max_part; disk->fops = &brd_fops; disk->private_data = brd; - disk->queue = brd->brd_queue; disk->flags = GENHD_FL_EXT_DEVT; sprintf(disk->disk_name, "ram%d", i); set_capacity(disk, rd_size * 2); - disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO; + brd->brd_queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO; /* Tell the block layer that this is not a rotational device */ - blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue); + blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, brd->brd_queue); return brd; @@ -436,6 +435,7 @@ static struct brd_device *brd_init_one(int i, bool *new) brd = brd_alloc(i); if (brd) { + brd->brd_disk->queue = brd->brd_queue; add_disk(brd->brd_disk); list_add_tail(&brd->brd_list, &brd_devices); } @@ -503,8 +503,14 @@ static int __init brd_init(void) /* point of no return */ - list_for_each_entry(brd, &brd_devices, brd_list) + list_for_each_entry(brd, &brd_devices, brd_list) { + /* + * associate with queue just before adding disk for + * avoiding to mess up failure path + */ + brd->brd_disk->queue = brd->brd_queue; add_disk(brd->brd_disk); + } blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS, THIS_MODULE, brd_probe, NULL, NULL); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 55fd104f1ed4..fa8204214ac0 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1856,7 +1856,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock, /* THINK if (signal_pending) return ... ? */ - iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size); + iov_iter_kvec(&msg.msg_iter, WRITE, &iov, 1, size); if (sock == connection->data.socket) { rcu_read_lock(); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index fc67fd853375..61c392752fe4 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -516,7 +516,7 @@ static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flag struct msghdr msg = { .msg_flags = (flags ? 
flags : MSG_WAITALL | MSG_NOSIGNAL) }; - iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size); + iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, size); return sock_recvmsg(sock, &msg, msg.msg_flags); } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index abad6d15f956..cb0cc8685076 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -77,7 +77,6 @@ #include <linux/falloc.h> #include <linux/uio.h> #include <linux/ioprio.h> -#include <linux/blk-cgroup.h> #include "loop.h" @@ -269,7 +268,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos) struct iov_iter i; ssize_t bw; - iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len); + iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len); file_start_write(file); bw = vfs_iter_write(file, &i, ppos, 0); @@ -347,7 +346,7 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq, ssize_t len; rq_for_each_segment(bvec, rq, iter) { - iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len); + iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len); len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0); if (len < 0) return len; @@ -388,7 +387,7 @@ static int lo_read_transfer(struct loop_device *lo, struct request *rq, b.bv_offset = 0; b.bv_len = bvec.bv_len; - iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len); + iov_iter_bvec(&i, READ, &b, 1, b.bv_len); len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0); if (len < 0) { ret = len; @@ -555,8 +554,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, } atomic_set(&cmd->ref, 2); - iov_iter_bvec(&iter, ITER_BVEC | rw, bvec, - segments, blk_rq_bytes(rq)); + iov_iter_bvec(&iter, rw, bvec, segments, blk_rq_bytes(rq)); iter.iov_offset = offset; cmd->iocb.ki_pos = pos; @@ -1761,8 +1759,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, /* always use the first bio's css */ #ifdef CONFIG_BLK_CGROUP - if (cmd->use_aio && rq->bio && rq->bio->bi_blkg) { - cmd->css = &bio_blkcg(rq->bio)->css; + if (cmd->use_aio && rq->bio && rq->bio->bi_css) { + cmd->css = rq->bio->bi_css; css_get(cmd->css); } else #endif diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index dfc8de6ce525..a7daa8acbab3 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -1942,8 +1942,8 @@ static int exec_drive_taskfile(struct driver_data *dd, dev_warn(&dd->pdev->dev, "data movement but " "sect_count is 0\n"); - err = -EINVAL; - goto abort; + err = -EINVAL; + goto abort; } } } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 14a51254c3db..4d4d6129ff66 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -473,7 +473,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) u32 nbd_cmd_flags = 0; int sent = nsock->sent, skip = 0; - iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); + iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request)); switch (req_op(req)) { case REQ_OP_DISCARD: @@ -564,8 +564,7 @@ send_pages: dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", req, bvec.bv_len); - iov_iter_bvec(&from, ITER_BVEC | WRITE, - &bvec, 1, bvec.bv_len); + iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len); if (skip) { if (skip >= iov_iter_count(&from)) { skip -= iov_iter_count(&from); @@ -624,7 +623,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) int ret = 0; reply.magic = 0; - iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); + iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply)); result = 
sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); if (result <= 0) { if (!nbd_disconnected(config)) @@ -678,8 +677,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) struct bio_vec bvec; rq_for_each_segment(bvec, req, iter) { - iov_iter_bvec(&to, ITER_BVEC | READ, - &bvec, 1, bvec.bv_len); + iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len); result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", @@ -1073,7 +1071,7 @@ static void send_disconnects(struct nbd_device *nbd) for (i = 0; i < config->num_connections; i++) { struct nbd_sock *nsock = config->socks[i]; - iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); + iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request)); mutex_lock(&nsock->tx_lock); ret = sock_xmit(nbd, i, 1, &from, 0, NULL); if (ret <= 0) diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 73ed5f3a862d..8e5140bbf241 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1500,9 +1500,6 @@ rbd_osd_req_create(struct rbd_obj_request *obj_req, unsigned int num_ops) rbd_dev->header.object_prefix, obj_req->ex.oe_objno)) goto err_req; - if (ceph_osdc_alloc_messages(req, GFP_NOIO)) - goto err_req; - return req; err_req: @@ -1945,6 +1942,10 @@ static int __rbd_img_fill_request(struct rbd_img_request *img_req) } if (ret) return ret; + + ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO); + if (ret) + return ret; } return 0; @@ -2374,8 +2375,7 @@ static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes) if (!obj_req->osd_req) return -ENOMEM; - ret = osd_req_op_cls_init(obj_req->osd_req, 0, CEPH_OSD_OP_CALL, "rbd", - "copyup"); + ret = osd_req_op_cls_init(obj_req->osd_req, 0, "rbd", "copyup"); if (ret) return ret; @@ -2405,6 +2405,10 @@ static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes) rbd_assert(0); } + ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO); + if (ret) + return ret; + rbd_obj_request_submit(obj_req); return 0; } @@ -3784,10 +3788,6 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, ceph_oloc_copy(&req->r_base_oloc, oloc); req->r_flags = CEPH_OSD_FLAG_READ; - ret = ceph_osdc_alloc_messages(req, GFP_KERNEL); - if (ret) - goto out_req; - pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); if (IS_ERR(pages)) { ret = PTR_ERR(pages); @@ -3798,6 +3798,10 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false, true); + ret = ceph_osdc_alloc_messages(req, GFP_KERNEL); + if (ret) + goto out_req; + ceph_osdc_start_request(osdc, req, false); ret = ceph_osdc_wait_request(osdc, req); if (ret >= 0) @@ -6067,7 +6071,7 @@ static ssize_t rbd_remove_single_major(struct bus_type *bus, * create control files in sysfs * /sys/bus/rbd/... 
*/ -static int rbd_sysfs_init(void) +static int __init rbd_sysfs_init(void) { int ret; @@ -6082,13 +6086,13 @@ static int rbd_sysfs_init(void) return ret; } -static void rbd_sysfs_cleanup(void) +static void __exit rbd_sysfs_cleanup(void) { bus_unregister(&rbd_bus_type); device_unregister(&rbd_root_dev); } -static int rbd_slab_init(void) +static int __init rbd_slab_init(void) { rbd_assert(!rbd_img_request_cache); rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0); diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 1deafb4db60c..81cdb4eaca07 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -287,6 +287,7 @@ source "drivers/clk/actions/Kconfig" source "drivers/clk/bcm/Kconfig" source "drivers/clk/hisilicon/Kconfig" source "drivers/clk/imgtec/Kconfig" +source "drivers/clk/ingenic/Kconfig" source "drivers/clk/keystone/Kconfig" source "drivers/clk/mediatek/Kconfig" source "drivers/clk/meson/Kconfig" diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index ad11421bdacd..72be7a38cff1 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -72,7 +72,8 @@ obj-$(CONFIG_H8300) += h8300/ obj-$(CONFIG_ARCH_HISI) += hisilicon/ obj-y += imgtec/ obj-$(CONFIG_ARCH_MXC) += imx/ -obj-$(CONFIG_MACH_INGENIC) += ingenic/ +obj-y += ingenic/ +obj-$(CONFIG_ARCH_K3) += keystone/ obj-$(CONFIG_ARCH_KEYSTONE) += keystone/ obj-$(CONFIG_MACH_LOONGSON32) += loongson1/ obj-y += mediatek/ diff --git a/drivers/clk/actions/Kconfig b/drivers/clk/actions/Kconfig index dc38c85a4833..04f0a6355726 100644 --- a/drivers/clk/actions/Kconfig +++ b/drivers/clk/actions/Kconfig @@ -2,6 +2,7 @@ config CLK_ACTIONS bool "Clock driver for Actions Semi SoCs" depends on ARCH_ACTIONS || COMPILE_TEST select REGMAP_MMIO + select RESET_CONTROLLER default ARCH_ACTIONS if CLK_ACTIONS diff --git a/drivers/clk/actions/Makefile b/drivers/clk/actions/Makefile index 78c17d56f991..ccfdf9781cef 100644 --- a/drivers/clk/actions/Makefile +++ b/drivers/clk/actions/Makefile @@ -7,6 +7,7 @@ clk-owl-y += owl-divider.o clk-owl-y += owl-factor.o clk-owl-y += owl-composite.o clk-owl-y += owl-pll.o +clk-owl-y += owl-reset.o # SoC support obj-$(CONFIG_CLK_OWL_S700) += owl-s700.o diff --git a/drivers/clk/actions/owl-common.c b/drivers/clk/actions/owl-common.c index 61c1071b5180..32dd29e0a37e 100644 --- a/drivers/clk/actions/owl-common.c +++ b/drivers/clk/actions/owl-common.c @@ -39,7 +39,7 @@ static void owl_clk_set_regmap(const struct owl_clk_desc *desc, } int owl_clk_regmap_init(struct platform_device *pdev, - const struct owl_clk_desc *desc) + struct owl_clk_desc *desc) { void __iomem *base; struct regmap *regmap; @@ -57,6 +57,7 @@ int owl_clk_regmap_init(struct platform_device *pdev, } owl_clk_set_regmap(desc, regmap); + desc->regmap = regmap; return 0; } diff --git a/drivers/clk/actions/owl-common.h b/drivers/clk/actions/owl-common.h index 4fd726ec54a6..5a866a8b913d 100644 --- a/drivers/clk/actions/owl-common.h +++ b/drivers/clk/actions/owl-common.h @@ -26,6 +26,9 @@ struct owl_clk_desc { struct owl_clk_common **clks; unsigned long num_clks; struct clk_hw_onecell_data *hw_clks; + const struct owl_reset_map *resets; + unsigned long num_resets; + struct regmap *regmap; }; static inline struct owl_clk_common * @@ -35,7 +38,7 @@ static inline struct owl_clk_common * } int owl_clk_regmap_init(struct platform_device *pdev, - const struct owl_clk_desc *desc); + struct owl_clk_desc *desc); int owl_clk_probe(struct device *dev, struct clk_hw_onecell_data *hw_clks); #endif /* _OWL_COMMON_H_ */ diff --git 
a/drivers/clk/actions/owl-reset.c b/drivers/clk/actions/owl-reset.c new file mode 100644 index 000000000000..203f8f34a8d4 --- /dev/null +++ b/drivers/clk/actions/owl-reset.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// +// Actions Semi Owl SoCs Reset Management Unit driver +// +// Copyright (c) 2018 Linaro Ltd. +// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + +#include <linux/delay.h> +#include <linux/regmap.h> +#include <linux/reset-controller.h> + +#include "owl-reset.h" + +static int owl_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct owl_reset *reset = to_owl_reset(rcdev); + const struct owl_reset_map *map = &reset->reset_map[id]; + + return regmap_update_bits(reset->regmap, map->reg, map->bit, 0); +} + +static int owl_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct owl_reset *reset = to_owl_reset(rcdev); + const struct owl_reset_map *map = &reset->reset_map[id]; + + return regmap_update_bits(reset->regmap, map->reg, map->bit, map->bit); +} + +static int owl_reset_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + owl_reset_assert(rcdev, id); + udelay(1); + owl_reset_deassert(rcdev, id); + + return 0; +} + +static int owl_reset_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct owl_reset *reset = to_owl_reset(rcdev); + const struct owl_reset_map *map = &reset->reset_map[id]; + u32 reg; + int ret; + + ret = regmap_read(reset->regmap, map->reg, ®); + if (ret) + return ret; + + /* + * The reset control API expects 0 if reset is not asserted, + * which is the opposite of what our hardware uses. + */ + return !(map->bit & reg); +} + +const struct reset_control_ops owl_reset_ops = { + .assert = owl_reset_assert, + .deassert = owl_reset_deassert, + .reset = owl_reset_reset, + .status = owl_reset_status, +}; diff --git a/drivers/clk/actions/owl-reset.h b/drivers/clk/actions/owl-reset.h new file mode 100644 index 000000000000..10f5774979a6 --- /dev/null +++ b/drivers/clk/actions/owl-reset.h @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// +// Actions Semi Owl SoCs Reset Management Unit driver +// +// Copyright (c) 2018 Linaro Ltd. 
+// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + +#ifndef _OWL_RESET_H_ +#define _OWL_RESET_H_ + +#include <linux/reset-controller.h> + +struct owl_reset_map { + u32 reg; + u32 bit; +}; + +struct owl_reset { + struct reset_controller_dev rcdev; + const struct owl_reset_map *reset_map; + struct regmap *regmap; +}; + +static inline struct owl_reset *to_owl_reset(struct reset_controller_dev *rcdev) +{ + return container_of(rcdev, struct owl_reset, rcdev); +} + +extern const struct reset_control_ops owl_reset_ops; + +#endif /* _OWL_RESET_H_ */ diff --git a/drivers/clk/actions/owl-s700.c b/drivers/clk/actions/owl-s700.c index 5e9531392ee5..a2f34d13fb54 100644 --- a/drivers/clk/actions/owl-s700.c +++ b/drivers/clk/actions/owl-s700.c @@ -20,8 +20,10 @@ #include "owl-gate.h" #include "owl-mux.h" #include "owl-pll.h" +#include "owl-reset.h" #include <dt-bindings/clock/actions,s700-cmu.h> +#include <dt-bindings/reset/actions,s700-reset.h> #define CMU_COREPLL (0x0000) #define CMU_DEVPLL (0x0004) @@ -569,20 +571,69 @@ static struct clk_hw_onecell_data s700_hw_clks = { .num = CLK_NR_CLKS, }; -static const struct owl_clk_desc s700_clk_desc = { +static const struct owl_reset_map s700_resets[] = { + [RESET_DE] = { CMU_DEVRST0, BIT(0) }, + [RESET_LCD0] = { CMU_DEVRST0, BIT(1) }, + [RESET_DSI] = { CMU_DEVRST0, BIT(2) }, + [RESET_CSI] = { CMU_DEVRST0, BIT(13) }, + [RESET_SI] = { CMU_DEVRST0, BIT(14) }, + [RESET_I2C0] = { CMU_DEVRST1, BIT(0) }, + [RESET_I2C1] = { CMU_DEVRST1, BIT(1) }, + [RESET_I2C2] = { CMU_DEVRST1, BIT(2) }, + [RESET_I2C3] = { CMU_DEVRST1, BIT(3) }, + [RESET_SPI0] = { CMU_DEVRST1, BIT(4) }, + [RESET_SPI1] = { CMU_DEVRST1, BIT(5) }, + [RESET_SPI2] = { CMU_DEVRST1, BIT(6) }, + [RESET_SPI3] = { CMU_DEVRST1, BIT(7) }, + [RESET_UART0] = { CMU_DEVRST1, BIT(8) }, + [RESET_UART1] = { CMU_DEVRST1, BIT(9) }, + [RESET_UART2] = { CMU_DEVRST1, BIT(10) }, + [RESET_UART3] = { CMU_DEVRST1, BIT(11) }, + [RESET_UART4] = { CMU_DEVRST1, BIT(12) }, + [RESET_UART5] = { CMU_DEVRST1, BIT(13) }, + [RESET_UART6] = { CMU_DEVRST1, BIT(14) }, + [RESET_KEY] = { CMU_DEVRST1, BIT(24) }, + [RESET_GPIO] = { CMU_DEVRST1, BIT(25) }, + [RESET_AUDIO] = { CMU_DEVRST1, BIT(29) }, +}; + +static struct owl_clk_desc s700_clk_desc = { .clks = s700_clks, .num_clks = ARRAY_SIZE(s700_clks), .hw_clks = &s700_hw_clks, + + .resets = s700_resets, + .num_resets = ARRAY_SIZE(s700_resets), }; static int s700_clk_probe(struct platform_device *pdev) { - const struct owl_clk_desc *desc; + struct owl_clk_desc *desc; + struct owl_reset *reset; + int ret; desc = &s700_clk_desc; owl_clk_regmap_init(pdev, desc); + /* + * FIXME: Reset controller registration should be moved to + * common code, once all SoCs of Owl family supports it. 
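 * Until then the registration below is self-contained: the reset map and
 * regmap are taken straight from the owl_clk_desc, and a peripheral node
 * can already claim a line through the usual reset binding, e.g.
 * (the phandle name is illustrative)
 *
 *	resets = <&cmu RESET_UART0>;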
+ */ + reset = devm_kzalloc(&pdev->dev, sizeof(*reset), GFP_KERNEL); + if (!reset) + return -ENOMEM; + + reset->rcdev.of_node = pdev->dev.of_node; + reset->rcdev.ops = &owl_reset_ops; + reset->rcdev.nr_resets = desc->num_resets; + reset->reset_map = desc->resets; + reset->regmap = desc->regmap; + + ret = devm_reset_controller_register(&pdev->dev, &reset->rcdev); + if (ret) + dev_err(&pdev->dev, "Failed to register reset controller\n"); + return owl_clk_probe(&pdev->dev, desc->hw_clks); } diff --git a/drivers/clk/actions/owl-s900.c b/drivers/clk/actions/owl-s900.c index 7f60ed6afe63..790890978424 100644 --- a/drivers/clk/actions/owl-s900.c +++ b/drivers/clk/actions/owl-s900.c @@ -19,8 +19,10 @@ #include "owl-gate.h" #include "owl-mux.h" #include "owl-pll.h" +#include "owl-reset.h" #include <dt-bindings/clock/actions,s900-cmu.h> +#include <dt-bindings/reset/actions,s900-reset.h> #define CMU_COREPLL (0x0000) #define CMU_DEVPLL (0x0004) @@ -684,20 +686,100 @@ static struct clk_hw_onecell_data s900_hw_clks = { .num = CLK_NR_CLKS, }; -static const struct owl_clk_desc s900_clk_desc = { +static const struct owl_reset_map s900_resets[] = { + [RESET_DMAC] = { CMU_DEVRST0, BIT(0) }, + [RESET_SRAMI] = { CMU_DEVRST0, BIT(1) }, + [RESET_DDR_CTL_PHY] = { CMU_DEVRST0, BIT(2) }, + [RESET_NANDC0] = { CMU_DEVRST0, BIT(3) }, + [RESET_SD0] = { CMU_DEVRST0, BIT(4) }, + [RESET_SD1] = { CMU_DEVRST0, BIT(5) }, + [RESET_PCM1] = { CMU_DEVRST0, BIT(6) }, + [RESET_DE] = { CMU_DEVRST0, BIT(7) }, + [RESET_LVDS] = { CMU_DEVRST0, BIT(8) }, + [RESET_SD2] = { CMU_DEVRST0, BIT(9) }, + [RESET_DSI] = { CMU_DEVRST0, BIT(10) }, + [RESET_CSI0] = { CMU_DEVRST0, BIT(11) }, + [RESET_BISP_AXI] = { CMU_DEVRST0, BIT(12) }, + [RESET_CSI1] = { CMU_DEVRST0, BIT(13) }, + [RESET_GPIO] = { CMU_DEVRST0, BIT(15) }, + [RESET_EDP] = { CMU_DEVRST0, BIT(16) }, + [RESET_AUDIO] = { CMU_DEVRST0, BIT(17) }, + [RESET_PCM0] = { CMU_DEVRST0, BIT(18) }, + [RESET_HDE] = { CMU_DEVRST0, BIT(21) }, + [RESET_GPU3D_PA] = { CMU_DEVRST0, BIT(22) }, + [RESET_IMX] = { CMU_DEVRST0, BIT(23) }, + [RESET_SE] = { CMU_DEVRST0, BIT(24) }, + [RESET_NANDC1] = { CMU_DEVRST0, BIT(25) }, + [RESET_SD3] = { CMU_DEVRST0, BIT(26) }, + [RESET_GIC] = { CMU_DEVRST0, BIT(27) }, + [RESET_GPU3D_PB] = { CMU_DEVRST0, BIT(28) }, + [RESET_DDR_CTL_PHY_AXI] = { CMU_DEVRST0, BIT(29) }, + [RESET_CMU_DDR] = { CMU_DEVRST0, BIT(30) }, + [RESET_DMM] = { CMU_DEVRST0, BIT(31) }, + [RESET_USB2HUB] = { CMU_DEVRST1, BIT(0) }, + [RESET_USB2HSIC] = { CMU_DEVRST1, BIT(1) }, + [RESET_HDMI] = { CMU_DEVRST1, BIT(2) }, + [RESET_HDCP2TX] = { CMU_DEVRST1, BIT(3) }, + [RESET_UART6] = { CMU_DEVRST1, BIT(4) }, + [RESET_UART0] = { CMU_DEVRST1, BIT(5) }, + [RESET_UART1] = { CMU_DEVRST1, BIT(6) }, + [RESET_UART2] = { CMU_DEVRST1, BIT(7) }, + [RESET_SPI0] = { CMU_DEVRST1, BIT(8) }, + [RESET_SPI1] = { CMU_DEVRST1, BIT(9) }, + [RESET_SPI2] = { CMU_DEVRST1, BIT(10) }, + [RESET_SPI3] = { CMU_DEVRST1, BIT(11) }, + [RESET_I2C0] = { CMU_DEVRST1, BIT(12) }, + [RESET_I2C1] = { CMU_DEVRST1, BIT(13) }, + [RESET_USB3] = { CMU_DEVRST1, BIT(14) }, + [RESET_UART3] = { CMU_DEVRST1, BIT(15) }, + [RESET_UART4] = { CMU_DEVRST1, BIT(16) }, + [RESET_UART5] = { CMU_DEVRST1, BIT(17) }, + [RESET_I2C2] = { CMU_DEVRST1, BIT(18) }, + [RESET_I2C3] = { CMU_DEVRST1, BIT(19) }, + [RESET_ETHERNET] = { CMU_DEVRST1, BIT(20) }, + [RESET_CHIPID] = { CMU_DEVRST1, BIT(21) }, + [RESET_I2C4] = { CMU_DEVRST1, BIT(22) }, + [RESET_I2C5] = { CMU_DEVRST1, BIT(23) }, + [RESET_CPU_SCNT] = { CMU_DEVRST1, BIT(30) } +}; + +static struct owl_clk_desc s900_clk_desc = { 
.clks = s900_clks, .num_clks = ARRAY_SIZE(s900_clks), .hw_clks = &s900_hw_clks, + + .resets = s900_resets, + .num_resets = ARRAY_SIZE(s900_resets), }; static int s900_clk_probe(struct platform_device *pdev) { - const struct owl_clk_desc *desc; + struct owl_clk_desc *desc; + struct owl_reset *reset; + int ret; desc = &s900_clk_desc; owl_clk_regmap_init(pdev, desc); + /* + * FIXME: Reset controller registration should be moved to + * common code, once all SoCs of Owl family supports it. + */ + reset = devm_kzalloc(&pdev->dev, sizeof(*reset), GFP_KERNEL); + if (!reset) + return -ENOMEM; + + reset->rcdev.of_node = pdev->dev.of_node; + reset->rcdev.ops = &owl_reset_ops; + reset->rcdev.nr_resets = desc->num_resets; + reset->reset_map = desc->resets; + reset->regmap = desc->regmap; + + ret = devm_reset_controller_register(&pdev->dev, &reset->rcdev); + if (ret) + dev_err(&pdev->dev, "Failed to register reset controller\n"); + return owl_clk_probe(&pdev->dev, desc->hw_clks); } diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile index facc169ebb68..c75df1cad60e 100644 --- a/drivers/clk/at91/Makefile +++ b/drivers/clk/at91/Makefile @@ -3,7 +3,7 @@ # Makefile for at91 specific clk # -obj-y += pmc.o sckc.o +obj-y += pmc.o sckc.o dt-compat.o obj-y += clk-slow.o clk-main.o clk-pll.o clk-plldiv.o clk-master.o obj-y += clk-system.o clk-peripheral.o clk-programmable.o @@ -14,3 +14,6 @@ obj-$(CONFIG_HAVE_AT91_SMD) += clk-smd.o obj-$(CONFIG_HAVE_AT91_H32MX) += clk-h32mx.o obj-$(CONFIG_HAVE_AT91_GENERATED_CLK) += clk-generated.o obj-$(CONFIG_HAVE_AT91_I2S_MUX_CLK) += clk-i2s-mux.o +obj-$(CONFIG_SOC_AT91SAM9) += at91sam9260.o at91sam9rl.o at91sam9x5.o +obj-$(CONFIG_SOC_SAMA5D4) += sama5d4.o +obj-$(CONFIG_SOC_SAMA5D2) += sama5d2.o diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c new file mode 100644 index 000000000000..b1af5a395423 --- /dev/null +++ b/drivers/clk/at91/at91sam9260.c @@ -0,0 +1,494 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/clk-provider.h> +#include <linux/mfd/syscon.h> +#include <linux/slab.h> + +#include <dt-bindings/clock/at91.h> + +#include "pmc.h" + +struct sck { + char *n; + char *p; + u8 id; +}; + +struct pck { + char *n; + u8 id; +}; + +struct at91sam926x_data { + const struct clk_pll_layout *plla_layout; + const struct clk_pll_characteristics *plla_characteristics; + const struct clk_pll_layout *pllb_layout; + const struct clk_pll_characteristics *pllb_characteristics; + const struct clk_master_characteristics *mck_characteristics; + const struct sck *sck; + const struct pck *pck; + u8 num_sck; + u8 num_pck; + u8 num_progck; + bool has_slck; +}; + +static const struct clk_master_characteristics sam9260_mck_characteristics = { + .output = { .min = 0, .max = 105000000 }, + .divisors = { 1, 2, 4, 0 }, +}; + +static u8 sam9260_plla_out[] = { 0, 2 }; + +static u16 sam9260_plla_icpll[] = { 1, 1 }; + +static struct clk_range sam9260_plla_outputs[] = { + { .min = 80000000, .max = 160000000 }, + { .min = 150000000, .max = 240000000 }, +}; + +static const struct clk_pll_characteristics sam9260_plla_characteristics = { + .input = { .min = 1000000, .max = 32000000 }, + .num_output = ARRAY_SIZE(sam9260_plla_outputs), + .output = sam9260_plla_outputs, + .icpll = sam9260_plla_icpll, + .out = sam9260_plla_out, +}; + +static u8 sam9260_pllb_out[] = { 1 }; + +static u16 sam9260_pllb_icpll[] = { 1 }; + +static struct clk_range sam9260_pllb_outputs[] = { + { .min = 70000000, .max = 130000000 }, +}; + +static const struct clk_pll_characteristics 
sam9260_pllb_characteristics = { + .input = { .min = 1000000, .max = 5000000 }, + .num_output = ARRAY_SIZE(sam9260_pllb_outputs), + .output = sam9260_pllb_outputs, + .icpll = sam9260_pllb_icpll, + .out = sam9260_pllb_out, +}; + +static const struct sck at91sam9260_systemck[] = { + { .n = "uhpck", .p = "usbck", .id = 6 }, + { .n = "udpck", .p = "usbck", .id = 7 }, + { .n = "pck0", .p = "prog0", .id = 8 }, + { .n = "pck1", .p = "prog1", .id = 9 }, +}; + +static const struct pck at91sam9260_periphck[] = { + { .n = "pioA_clk", .id = 2 }, + { .n = "pioB_clk", .id = 3 }, + { .n = "pioC_clk", .id = 4 }, + { .n = "adc_clk", .id = 5 }, + { .n = "usart0_clk", .id = 6 }, + { .n = "usart1_clk", .id = 7 }, + { .n = "usart2_clk", .id = 8 }, + { .n = "mci0_clk", .id = 9 }, + { .n = "udc_clk", .id = 10 }, + { .n = "twi0_clk", .id = 11 }, + { .n = "spi0_clk", .id = 12 }, + { .n = "spi1_clk", .id = 13 }, + { .n = "ssc0_clk", .id = 14 }, + { .n = "tc0_clk", .id = 17 }, + { .n = "tc1_clk", .id = 18 }, + { .n = "tc2_clk", .id = 19 }, + { .n = "ohci_clk", .id = 20 }, + { .n = "macb0_clk", .id = 21 }, + { .n = "isi_clk", .id = 22 }, + { .n = "usart3_clk", .id = 23 }, + { .n = "uart0_clk", .id = 24 }, + { .n = "uart1_clk", .id = 25 }, + { .n = "tc3_clk", .id = 26 }, + { .n = "tc4_clk", .id = 27 }, + { .n = "tc5_clk", .id = 28 }, +}; + +static struct at91sam926x_data at91sam9260_data = { + .plla_layout = &at91rm9200_pll_layout, + .plla_characteristics = &sam9260_plla_characteristics, + .pllb_layout = &at91rm9200_pll_layout, + .pllb_characteristics = &sam9260_pllb_characteristics, + .mck_characteristics = &sam9260_mck_characteristics, + .sck = at91sam9260_systemck, + .num_sck = ARRAY_SIZE(at91sam9260_systemck), + .pck = at91sam9260_periphck, + .num_pck = ARRAY_SIZE(at91sam9260_periphck), + .num_progck = 2, + .has_slck = true, +}; + +static const struct clk_master_characteristics sam9g20_mck_characteristics = { + .output = { .min = 0, .max = 133000000 }, + .divisors = { 1, 2, 4, 6 }, +}; + +static u8 sam9g20_plla_out[] = { 0, 1, 2, 3, 0, 1, 2, 3 }; + +static u16 sam9g20_plla_icpll[] = { 0, 0, 0, 0, 1, 1, 1, 1 }; + +static struct clk_range sam9g20_plla_outputs[] = { + { .min = 745000000, .max = 800000000 }, + { .min = 695000000, .max = 750000000 }, + { .min = 645000000, .max = 700000000 }, + { .min = 595000000, .max = 650000000 }, + { .min = 545000000, .max = 600000000 }, + { .min = 495000000, .max = 550000000 }, + { .min = 445000000, .max = 500000000 }, + { .min = 400000000, .max = 450000000 }, +}; + +static const struct clk_pll_characteristics sam9g20_plla_characteristics = { + .input = { .min = 2000000, .max = 32000000 }, + .num_output = ARRAY_SIZE(sam9g20_plla_outputs), + .output = sam9g20_plla_outputs, + .icpll = sam9g20_plla_icpll, + .out = sam9g20_plla_out, +}; + +static u8 sam9g20_pllb_out[] = { 0 }; + +static u16 sam9g20_pllb_icpll[] = { 0 }; + +static struct clk_range sam9g20_pllb_outputs[] = { + { .min = 30000000, .max = 100000000 }, +}; + +static const struct clk_pll_characteristics sam9g20_pllb_characteristics = { + .input = { .min = 2000000, .max = 32000000 }, + .num_output = ARRAY_SIZE(sam9g20_pllb_outputs), + .output = sam9g20_pllb_outputs, + .icpll = sam9g20_pllb_icpll, + .out = sam9g20_pllb_out, +}; + +static struct at91sam926x_data at91sam9g20_data = { + .plla_layout = &at91sam9g45_pll_layout, + .plla_characteristics = &sam9g20_plla_characteristics, + .pllb_layout = &at91sam9g20_pllb_layout, + .pllb_characteristics = &sam9g20_pllb_characteristics, + .mck_characteristics = 
&sam9g20_mck_characteristics, + .sck = at91sam9260_systemck, + .num_sck = ARRAY_SIZE(at91sam9260_systemck), + .pck = at91sam9260_periphck, + .num_pck = ARRAY_SIZE(at91sam9260_periphck), + .num_progck = 2, + .has_slck = true, +}; + +static const struct clk_master_characteristics sam9261_mck_characteristics = { + .output = { .min = 0, .max = 94000000 }, + .divisors = { 1, 2, 4, 0 }, +}; + +static struct clk_range sam9261_plla_outputs[] = { + { .min = 80000000, .max = 200000000 }, + { .min = 190000000, .max = 240000000 }, +}; + +static const struct clk_pll_characteristics sam9261_plla_characteristics = { + .input = { .min = 1000000, .max = 32000000 }, + .num_output = ARRAY_SIZE(sam9261_plla_outputs), + .output = sam9261_plla_outputs, + .icpll = sam9260_plla_icpll, + .out = sam9260_plla_out, +}; + +static u8 sam9261_pllb_out[] = { 1 }; + +static u16 sam9261_pllb_icpll[] = { 1 }; + +static struct clk_range sam9261_pllb_outputs[] = { + { .min = 70000000, .max = 130000000 }, +}; + +static const struct clk_pll_characteristics sam9261_pllb_characteristics = { + .input = { .min = 1000000, .max = 5000000 }, + .num_output = ARRAY_SIZE(sam9261_pllb_outputs), + .output = sam9261_pllb_outputs, + .icpll = sam9261_pllb_icpll, + .out = sam9261_pllb_out, +}; + +static const struct sck at91sam9261_systemck[] = { + { .n = "uhpck", .p = "usbck", .id = 6 }, + { .n = "udpck", .p = "usbck", .id = 7 }, + { .n = "pck0", .p = "prog0", .id = 8 }, + { .n = "pck1", .p = "prog1", .id = 9 }, + { .n = "pck2", .p = "prog2", .id = 10 }, + { .n = "pck3", .p = "prog3", .id = 11 }, + { .n = "hclk0", .p = "masterck", .id = 16 }, + { .n = "hclk1", .p = "masterck", .id = 17 }, +}; + +static const struct pck at91sam9261_periphck[] = { + { .n = "pioA_clk", .id = 2, }, + { .n = "pioB_clk", .id = 3, }, + { .n = "pioC_clk", .id = 4, }, + { .n = "usart0_clk", .id = 6, }, + { .n = "usart1_clk", .id = 7, }, + { .n = "usart2_clk", .id = 8, }, + { .n = "mci0_clk", .id = 9, }, + { .n = "udc_clk", .id = 10, }, + { .n = "twi0_clk", .id = 11, }, + { .n = "spi0_clk", .id = 12, }, + { .n = "spi1_clk", .id = 13, }, + { .n = "ssc0_clk", .id = 14, }, + { .n = "ssc1_clk", .id = 15, }, + { .n = "ssc2_clk", .id = 16, }, + { .n = "tc0_clk", .id = 17, }, + { .n = "tc1_clk", .id = 18, }, + { .n = "tc2_clk", .id = 19, }, + { .n = "ohci_clk", .id = 20, }, + { .n = "lcd_clk", .id = 21, }, +}; + +static struct at91sam926x_data at91sam9261_data = { + .plla_layout = &at91rm9200_pll_layout, + .plla_characteristics = &sam9261_plla_characteristics, + .pllb_layout = &at91rm9200_pll_layout, + .pllb_characteristics = &sam9261_pllb_characteristics, + .mck_characteristics = &sam9261_mck_characteristics, + .sck = at91sam9261_systemck, + .num_sck = ARRAY_SIZE(at91sam9261_systemck), + .pck = at91sam9261_periphck, + .num_pck = ARRAY_SIZE(at91sam9261_periphck), + .num_progck = 4, +}; + +static const struct clk_master_characteristics sam9263_mck_characteristics = { + .output = { .min = 0, .max = 120000000 }, + .divisors = { 1, 2, 4, 0 }, +}; + +static struct clk_range sam9263_pll_outputs[] = { + { .min = 80000000, .max = 200000000 }, + { .min = 190000000, .max = 240000000 }, +}; + +static const struct clk_pll_characteristics sam9263_pll_characteristics = { + .input = { .min = 1000000, .max = 32000000 }, + .num_output = ARRAY_SIZE(sam9263_pll_outputs), + .output = sam9263_pll_outputs, + .icpll = sam9260_plla_icpll, + .out = sam9260_plla_out, +}; + +static const struct sck at91sam9263_systemck[] = { + { .n = "uhpck", .p = "usbck", .id = 6 }, + { .n = "udpck", .p = "usbck", .id 
= 7 }, + { .n = "pck0", .p = "prog0", .id = 8 }, + { .n = "pck1", .p = "prog1", .id = 9 }, + { .n = "pck2", .p = "prog2", .id = 10 }, + { .n = "pck3", .p = "prog3", .id = 11 }, +}; + +static const struct pck at91sam9263_periphck[] = { + { .n = "pioA_clk", .id = 2, }, + { .n = "pioB_clk", .id = 3, }, + { .n = "pioCDE_clk", .id = 4, }, + { .n = "usart0_clk", .id = 7, }, + { .n = "usart1_clk", .id = 8, }, + { .n = "usart2_clk", .id = 9, }, + { .n = "mci0_clk", .id = 10, }, + { .n = "mci1_clk", .id = 11, }, + { .n = "can_clk", .id = 12, }, + { .n = "twi0_clk", .id = 13, }, + { .n = "spi0_clk", .id = 14, }, + { .n = "spi1_clk", .id = 15, }, + { .n = "ssc0_clk", .id = 16, }, + { .n = "ssc1_clk", .id = 17, }, + { .n = "ac97_clk", .id = 18, }, + { .n = "tcb_clk", .id = 19, }, + { .n = "pwm_clk", .id = 20, }, + { .n = "macb0_clk", .id = 21, }, + { .n = "g2de_clk", .id = 23, }, + { .n = "udc_clk", .id = 24, }, + { .n = "isi_clk", .id = 25, }, + { .n = "lcd_clk", .id = 26, }, + { .n = "dma_clk", .id = 27, }, + { .n = "ohci_clk", .id = 29, }, +}; + +static struct at91sam926x_data at91sam9263_data = { + .plla_layout = &at91rm9200_pll_layout, + .plla_characteristics = &sam9263_pll_characteristics, + .pllb_layout = &at91rm9200_pll_layout, + .pllb_characteristics = &sam9263_pll_characteristics, + .mck_characteristics = &sam9263_mck_characteristics, + .sck = at91sam9263_systemck, + .num_sck = ARRAY_SIZE(at91sam9263_systemck), + .pck = at91sam9263_periphck, + .num_pck = ARRAY_SIZE(at91sam9263_periphck), + .num_progck = 4, +}; + +static void __init at91sam926x_pmc_setup(struct device_node *np, + struct at91sam926x_data *data) +{ + const char *slowxtal_name, *mainxtal_name; + struct pmc_data *at91sam9260_pmc; + u32 usb_div[] = { 1, 2, 4, 0 }; + const char *parent_names[6]; + const char *slck_name; + struct regmap *regmap; + struct clk_hw *hw; + int i; + bool bypass; + + i = of_property_match_string(np, "clock-names", "slow_xtal"); + if (i < 0) + return; + + slowxtal_name = of_clk_get_parent_name(np, i); + + i = of_property_match_string(np, "clock-names", "main_xtal"); + if (i < 0) + return; + mainxtal_name = of_clk_get_parent_name(np, i); + + regmap = syscon_node_to_regmap(np); + if (IS_ERR(regmap)) + return; + + at91sam9260_pmc = pmc_data_allocate(PMC_MAIN + 1, + ndck(data->sck, data->num_sck), + ndck(data->pck, data->num_pck), 0); + if (!at91sam9260_pmc) + return; + + bypass = of_property_read_bool(np, "atmel,osc-bypass"); + + hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, + bypass); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_rm9200_main(regmap, "mainck", "main_osc"); + if (IS_ERR(hw)) + goto err_free; + + at91sam9260_pmc->chws[PMC_MAIN] = hw; + + if (data->has_slck) { + hw = clk_hw_register_fixed_rate_with_accuracy(NULL, + "slow_rc_osc", + NULL, 0, 32768, + 50000000); + if (IS_ERR(hw)) + goto err_free; + + parent_names[0] = "slow_rc_osc"; + parent_names[1] = "slow_xtal"; + hw = at91_clk_register_sam9260_slow(regmap, "slck", + parent_names, 2); + if (IS_ERR(hw)) + goto err_free; + + at91sam9260_pmc->chws[PMC_SLOW] = hw; + slck_name = "slck"; + } else { + slck_name = slowxtal_name; + } + + hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0, + data->plla_layout, + data->plla_characteristics); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_pll(regmap, "pllbck", "mainck", 1, + data->pllb_layout, + data->pllb_characteristics); + if (IS_ERR(hw)) + goto err_free; + + parent_names[0] = slck_name; + parent_names[1] = "mainck"; + parent_names[2] = "pllack"; 
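/*
 * Worked example (illustrative, not part of the patch): the pllack/pllbck
 * registrations above rely on the rate relation implemented by
 * clk_pll_recalc_rate() further down in this series,
 *
 *     rate = (parent_rate / DIV) * (MUL + 1),
 *
 * where the new guard returns 0 when DIV or MUL reads back as zero instead
 * of dividing by a zero DIV. With an 18.432 MHz main crystal, DIV = 9 and
 * MUL = 96 -- a commonly quoted at91sam9260 configuration, used here purely
 * for illustration -- this gives 18432000 / 9 = 2048000, then
 * 2048000 * 97 = 198656000, i.e. a ~198.656 MHz PLLA. The same arithmetic
 * as a self-contained sketch:
 */
static unsigned long demo_pll_rate(unsigned long parent_rate,
				   unsigned int div, unsigned int mul)
{
	if (!div || !mul)
		return 0;

	return (parent_rate / div) * (mul + 1);
}
/* demo_pll_rate(18432000, 9, 96) == 198656000 */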
+ parent_names[3] = "pllbck"; + hw = at91_clk_register_master(regmap, "masterck", 4, parent_names, + &at91rm9200_master_layout, + data->mck_characteristics); + if (IS_ERR(hw)) + goto err_free; + + at91sam9260_pmc->chws[PMC_MCK] = hw; + + hw = at91rm9200_clk_register_usb(regmap, "usbck", "pllbck", usb_div); + if (IS_ERR(hw)) + goto err_free; + + parent_names[0] = slck_name; + parent_names[1] = "mainck"; + parent_names[2] = "pllack"; + parent_names[3] = "pllbck"; + for (i = 0; i < data->num_progck; i++) { + char name[6]; + + snprintf(name, sizeof(name), "prog%d", i); + + hw = at91_clk_register_programmable(regmap, name, + parent_names, 4, i, + &at91rm9200_programmable_layout); + if (IS_ERR(hw)) + goto err_free; + } + + for (i = 0; i < data->num_sck; i++) { + hw = at91_clk_register_system(regmap, data->sck[i].n, + data->sck[i].p, + data->sck[i].id); + if (IS_ERR(hw)) + goto err_free; + + at91sam9260_pmc->shws[data->sck[i].id] = hw; + } + + for (i = 0; i < data->num_pck; i++) { + hw = at91_clk_register_peripheral(regmap, + data->pck[i].n, + "masterck", + data->pck[i].id); + if (IS_ERR(hw)) + goto err_free; + + at91sam9260_pmc->phws[data->pck[i].id] = hw; + } + + of_clk_add_hw_provider(np, of_clk_hw_pmc_get, at91sam9260_pmc); + + return; + +err_free: + pmc_data_free(at91sam9260_pmc); +} + +static void __init at91sam9260_pmc_setup(struct device_node *np) +{ + at91sam926x_pmc_setup(np, &at91sam9260_data); +} +CLK_OF_DECLARE_DRIVER(at91sam9260_pmc, "atmel,at91sam9260-pmc", + at91sam9260_pmc_setup); + +static void __init at91sam9261_pmc_setup(struct device_node *np) +{ + at91sam926x_pmc_setup(np, &at91sam9261_data); +} +CLK_OF_DECLARE_DRIVER(at91sam9261_pmc, "atmel,at91sam9261-pmc", + at91sam9261_pmc_setup); + +static void __init at91sam9263_pmc_setup(struct device_node *np) +{ + at91sam926x_pmc_setup(np, &at91sam9263_data); +} +CLK_OF_DECLARE_DRIVER(at91sam9263_pmc, "atmel,at91sam9263-pmc", + at91sam9263_pmc_setup); + +static void __init at91sam9g20_pmc_setup(struct device_node *np) +{ + at91sam926x_pmc_setup(np, &at91sam9g20_data); +} +CLK_OF_DECLARE_DRIVER(at91sam9g20_pmc, "atmel,at91sam9g20-pmc", + at91sam9g20_pmc_setup); diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c new file mode 100644 index 000000000000..5aeef68b4bdd --- /dev/null +++ b/drivers/clk/at91/at91sam9rl.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/clk-provider.h> +#include <linux/mfd/syscon.h> +#include <linux/slab.h> + +#include <dt-bindings/clock/at91.h> + +#include "pmc.h" + +static const struct clk_master_characteristics sam9rl_mck_characteristics = { + .output = { .min = 0, .max = 94000000 }, + .divisors = { 1, 2, 4, 0 }, +}; + +static u8 sam9rl_plla_out[] = { 0, 2 }; + +static struct clk_range sam9rl_plla_outputs[] = { + { .min = 80000000, .max = 200000000 }, + { .min = 190000000, .max = 240000000 }, +}; + +static const struct clk_pll_characteristics sam9rl_plla_characteristics = { + .input = { .min = 1000000, .max = 32000000 }, + .num_output = ARRAY_SIZE(sam9rl_plla_outputs), + .output = sam9rl_plla_outputs, + .out = sam9rl_plla_out, +}; + +static const struct { + char *n; + char *p; + u8 id; +} at91sam9rl_systemck[] = { + { .n = "pck0", .p = "prog0", .id = 8 }, + { .n = "pck1", .p = "prog1", .id = 9 }, +}; + +static const struct { + char *n; + u8 id; +} at91sam9rl_periphck[] = { + { .n = "pioA_clk", .id = 2, }, + { .n = "pioB_clk", .id = 3, }, + { .n = "pioC_clk", .id = 4, }, + { .n = "pioD_clk", .id = 5, }, + { .n = "usart0_clk", .id = 6, }, + { .n = "usart1_clk", 
.id = 7, }, + { .n = "usart2_clk", .id = 8, }, + { .n = "usart3_clk", .id = 9, }, + { .n = "mci0_clk", .id = 10, }, + { .n = "twi0_clk", .id = 11, }, + { .n = "twi1_clk", .id = 12, }, + { .n = "spi0_clk", .id = 13, }, + { .n = "ssc0_clk", .id = 14, }, + { .n = "ssc1_clk", .id = 15, }, + { .n = "tc0_clk", .id = 16, }, + { .n = "tc1_clk", .id = 17, }, + { .n = "tc2_clk", .id = 18, }, + { .n = "pwm_clk", .id = 19, }, + { .n = "adc_clk", .id = 20, }, + { .n = "dma0_clk", .id = 21, }, + { .n = "udphs_clk", .id = 22, }, + { .n = "lcd_clk", .id = 23, }, +}; + +static void __init at91sam9rl_pmc_setup(struct device_node *np) +{ + const char *slck_name, *mainxtal_name; + struct pmc_data *at91sam9rl_pmc; + const char *parent_names[6]; + struct regmap *regmap; + struct clk_hw *hw; + int i; + + i = of_property_match_string(np, "clock-names", "slow_clk"); + if (i < 0) + return; + + slck_name = of_clk_get_parent_name(np, i); + + i = of_property_match_string(np, "clock-names", "main_xtal"); + if (i < 0) + return; + mainxtal_name = of_clk_get_parent_name(np, i); + + regmap = syscon_node_to_regmap(np); + if (IS_ERR(regmap)) + return; + + at91sam9rl_pmc = pmc_data_allocate(PMC_MAIN + 1, + nck(at91sam9rl_systemck), + nck(at91sam9rl_periphck), 0); + if (!at91sam9rl_pmc) + return; + + hw = at91_clk_register_rm9200_main(regmap, "mainck", mainxtal_name); + if (IS_ERR(hw)) + goto err_free; + + at91sam9rl_pmc->chws[PMC_MAIN] = hw; + + hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0, + &at91rm9200_pll_layout, + &sam9rl_plla_characteristics); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck"); + if (IS_ERR(hw)) + goto err_free; + + at91sam9rl_pmc->chws[PMC_UTMI] = hw; + + parent_names[0] = slck_name; + parent_names[1] = "mainck"; + parent_names[2] = "pllack"; + parent_names[3] = "utmick"; + hw = at91_clk_register_master(regmap, "masterck", 4, parent_names, + &at91rm9200_master_layout, + &sam9rl_mck_characteristics); + if (IS_ERR(hw)) + goto err_free; + + at91sam9rl_pmc->chws[PMC_MCK] = hw; + + parent_names[0] = slck_name; + parent_names[1] = "mainck"; + parent_names[2] = "pllack"; + parent_names[3] = "utmick"; + parent_names[4] = "masterck"; + for (i = 0; i < 2; i++) { + char name[6]; + + snprintf(name, sizeof(name), "prog%d", i); + + hw = at91_clk_register_programmable(regmap, name, + parent_names, 5, i, + &at91rm9200_programmable_layout); + if (IS_ERR(hw)) + goto err_free; + } + + for (i = 0; i < ARRAY_SIZE(at91sam9rl_systemck); i++) { + hw = at91_clk_register_system(regmap, at91sam9rl_systemck[i].n, + at91sam9rl_systemck[i].p, + at91sam9rl_systemck[i].id); + if (IS_ERR(hw)) + goto err_free; + + at91sam9rl_pmc->shws[at91sam9rl_systemck[i].id] = hw; + } + + for (i = 0; i < ARRAY_SIZE(at91sam9rl_periphck); i++) { + hw = at91_clk_register_peripheral(regmap, + at91sam9rl_periphck[i].n, + "masterck", + at91sam9rl_periphck[i].id); + if (IS_ERR(hw)) + goto err_free; + + at91sam9rl_pmc->phws[at91sam9rl_periphck[i].id] = hw; + } + + of_clk_add_hw_provider(np, of_clk_hw_pmc_get, at91sam9rl_pmc); + + return; + +err_free: + pmc_data_free(at91sam9rl_pmc); +} +CLK_OF_DECLARE_DRIVER(at91sam9rl_pmc, "atmel,at91sam9rl-pmc", at91sam9rl_pmc_setup); diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c new file mode 100644 index 000000000000..2fe225a697df --- /dev/null +++ b/drivers/clk/at91/at91sam9x5.c @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/clk-provider.h> +#include <linux/mfd/syscon.h> +#include <linux/slab.h> + 
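/*
 * Side note (illustrative, not part of the patch): each of the new per-SoC
 * files in this series follows the same shape -- an __init setup function
 * that builds every clk_hw from the PMC regmap and exposes them through a
 * single of_clk_add_hw_provider() call, hooked up early via
 * CLK_OF_DECLARE_DRIVER() so that, unlike CLK_OF_DECLARE(), the node is not
 * marked as populated and a regular platform driver can still bind to it
 * later. A minimal, hedged skeleton of that pattern; the compatible string,
 * names and the simple one-clock provider below are made up, the real files
 * use of_clk_hw_pmc_get() with a pmc_data table instead.
 */
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static void __init demo_pmc_setup(struct device_node *np)
{
	struct regmap *regmap;
	struct clk_hw *hw;

	regmap = syscon_node_to_regmap(np);
	if (IS_ERR(regmap))
		return;

	/* register the SoC's clocks, e.g. a fixed-rate main oscillator ... */
	hw = clk_hw_register_fixed_rate(NULL, "demo_osc", NULL, 0, 12000000);
	if (IS_ERR(hw))
		return;

	/* ... then expose them to DT consumers */
	of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE_DRIVER(demo_pmc, "vendor,demo-pmc", demo_pmc_setup);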
+#include <dt-bindings/clock/at91.h> + +#include "pmc.h" + +static const struct clk_master_characteristics mck_characteristics = { + .output = { .min = 0, .max = 133333333 }, + .divisors = { 1, 2, 4, 3 }, + .have_div3_pres = 1, +}; + +static u8 plla_out[] = { 0, 1, 2, 3, 0, 1, 2, 3 }; + +static u16 plla_icpll[] = { 0, 0, 0, 0, 1, 1, 1, 1 }; + +static struct clk_range plla_outputs[] = { + { .min = 745000000, .max = 800000000 }, + { .min = 695000000, .max = 750000000 }, + { .min = 645000000, .max = 700000000 }, + { .min = 595000000, .max = 650000000 }, + { .min = 545000000, .max = 600000000 }, + { .min = 495000000, .max = 555000000 }, + { .min = 445000000, .max = 500000000 }, + { .min = 400000000, .max = 450000000 }, +}; + +static const struct clk_pll_characteristics plla_characteristics = { + .input = { .min = 2000000, .max = 32000000 }, + .num_output = ARRAY_SIZE(plla_outputs), + .output = plla_outputs, + .icpll = plla_icpll, + .out = plla_out, +}; + +static const struct { + char *n; + char *p; + u8 id; +} at91sam9x5_systemck[] = { + { .n = "ddrck", .p = "masterck", .id = 2 }, + { .n = "smdck", .p = "smdclk", .id = 4 }, + { .n = "uhpck", .p = "usbck", .id = 6 }, + { .n = "udpck", .p = "usbck", .id = 7 }, + { .n = "pck0", .p = "prog0", .id = 8 }, + { .n = "pck1", .p = "prog1", .id = 9 }, +}; + +struct pck { + char *n; + u8 id; +}; + +static const struct pck at91sam9x5_periphck[] = { + { .n = "pioAB_clk", .id = 2, }, + { .n = "pioCD_clk", .id = 3, }, + { .n = "smd_clk", .id = 4, }, + { .n = "usart0_clk", .id = 5, }, + { .n = "usart1_clk", .id = 6, }, + { .n = "usart2_clk", .id = 7, }, + { .n = "twi0_clk", .id = 9, }, + { .n = "twi1_clk", .id = 10, }, + { .n = "twi2_clk", .id = 11, }, + { .n = "mci0_clk", .id = 12, }, + { .n = "spi0_clk", .id = 13, }, + { .n = "spi1_clk", .id = 14, }, + { .n = "uart0_clk", .id = 15, }, + { .n = "uart1_clk", .id = 16, }, + { .n = "tcb0_clk", .id = 17, }, + { .n = "pwm_clk", .id = 18, }, + { .n = "adc_clk", .id = 19, }, + { .n = "dma0_clk", .id = 20, }, + { .n = "dma1_clk", .id = 21, }, + { .n = "uhphs_clk", .id = 22, }, + { .n = "udphs_clk", .id = 23, }, + { .n = "mci1_clk", .id = 26, }, + { .n = "ssc0_clk", .id = 28, }, +}; + +static const struct pck at91sam9g15_periphck[] = { + { .n = "lcdc_clk", .id = 25, }, + { /* sentinel */} +}; + +static const struct pck at91sam9g25_periphck[] = { + { .n = "usart3_clk", .id = 8, }, + { .n = "macb0_clk", .id = 24, }, + { .n = "isi_clk", .id = 25, }, + { /* sentinel */} +}; + +static const struct pck at91sam9g35_periphck[] = { + { .n = "macb0_clk", .id = 24, }, + { .n = "lcdc_clk", .id = 25, }, + { /* sentinel */} +}; + +static const struct pck at91sam9x25_periphck[] = { + { .n = "usart3_clk", .id = 8, }, + { .n = "macb0_clk", .id = 24, }, + { .n = "macb1_clk", .id = 27, }, + { .n = "can0_clk", .id = 29, }, + { .n = "can1_clk", .id = 30, }, + { /* sentinel */} +}; + +static const struct pck at91sam9x35_periphck[] = { + { .n = "macb0_clk", .id = 24, }, + { .n = "lcdc_clk", .id = 25, }, + { .n = "can0_clk", .id = 29, }, + { .n = "can1_clk", .id = 30, }, + { /* sentinel */} +}; + +static void __init at91sam9x5_pmc_setup(struct device_node *np, + const struct pck *extra_pcks, + bool has_lcdck) +{ + struct clk_range range = CLK_RANGE(0, 0); + const char *slck_name, *mainxtal_name; + struct pmc_data *at91sam9x5_pmc; + const char *parent_names[6]; + struct regmap *regmap; + struct clk_hw *hw; + int i; + bool bypass; + + i = of_property_match_string(np, "clock-names", "slow_clk"); + if (i < 0) + return; + + slck_name = 
of_clk_get_parent_name(np, i); + + i = of_property_match_string(np, "clock-names", "main_xtal"); + if (i < 0) + return; + mainxtal_name = of_clk_get_parent_name(np, i); + + regmap = syscon_node_to_regmap(np); + if (IS_ERR(regmap)) + return; + + at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1, + nck(at91sam9x5_systemck), + nck(at91sam9x35_periphck), 0); + if (!at91sam9x5_pmc) + return; + + hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000, + 50000000); + if (IS_ERR(hw)) + goto err_free; + + bypass = of_property_read_bool(np, "atmel,osc-bypass"); + + hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, + bypass); + if (IS_ERR(hw)) + goto err_free; + + parent_names[0] = "main_rc_osc"; + parent_names[1] = "main_osc"; + hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2); + if (IS_ERR(hw)) + goto err_free; + + at91sam9x5_pmc->chws[PMC_MAIN] = hw; + + hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0, + &at91rm9200_pll_layout, &plla_characteristics); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_plldiv(regmap, "plladivck", "pllack"); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck"); + if (IS_ERR(hw)) + goto err_free; + + at91sam9x5_pmc->chws[PMC_UTMI] = hw; + + parent_names[0] = slck_name; + parent_names[1] = "mainck"; + parent_names[2] = "plladivck"; + parent_names[3] = "utmick"; + hw = at91_clk_register_master(regmap, "masterck", 4, parent_names, + &at91sam9x5_master_layout, + &mck_characteristics); + if (IS_ERR(hw)) + goto err_free; + + at91sam9x5_pmc->chws[PMC_MCK] = hw; + + parent_names[0] = "plladivck"; + parent_names[1] = "utmick"; + hw = at91sam9x5_clk_register_usb(regmap, "usbck", parent_names, 2); + if (IS_ERR(hw)) + goto err_free; + + hw = at91sam9x5_clk_register_smd(regmap, "smdclk", parent_names, 2); + if (IS_ERR(hw)) + goto err_free; + + parent_names[0] = slck_name; + parent_names[1] = "mainck"; + parent_names[2] = "plladivck"; + parent_names[3] = "utmick"; + parent_names[4] = "mck"; + for (i = 0; i < 2; i++) { + char name[6]; + + snprintf(name, sizeof(name), "prog%d", i); + + hw = at91_clk_register_programmable(regmap, name, + parent_names, 5, i, + &at91sam9x5_programmable_layout); + if (IS_ERR(hw)) + goto err_free; + } + + for (i = 0; i < ARRAY_SIZE(at91sam9x5_systemck); i++) { + hw = at91_clk_register_system(regmap, at91sam9x5_systemck[i].n, + at91sam9x5_systemck[i].p, + at91sam9x5_systemck[i].id); + if (IS_ERR(hw)) + goto err_free; + + at91sam9x5_pmc->shws[at91sam9x5_systemck[i].id] = hw; + } + + if (has_lcdck) { + hw = at91_clk_register_system(regmap, "lcdck", "masterck", 3); + if (IS_ERR(hw)) + goto err_free; + + at91sam9x5_pmc->shws[3] = hw; + } + + for (i = 0; i < ARRAY_SIZE(at91sam9x5_periphck); i++) { + hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock, + at91sam9x5_periphck[i].n, + "masterck", + at91sam9x5_periphck[i].id, + &range); + if (IS_ERR(hw)) + goto err_free; + + at91sam9x5_pmc->phws[at91sam9x5_periphck[i].id] = hw; + } + + for (i = 0; extra_pcks[i].id; i++) { + hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock, + extra_pcks[i].n, + "masterck", + extra_pcks[i].id, + &range); + if (IS_ERR(hw)) + goto err_free; + + at91sam9x5_pmc->phws[extra_pcks[i].id] = hw; + } + + of_clk_add_hw_provider(np, of_clk_hw_pmc_get, at91sam9x5_pmc); + + return; + +err_free: + pmc_data_free(at91sam9x5_pmc); +} + +static void __init at91sam9g15_pmc_setup(struct device_node *np) +{ + at91sam9x5_pmc_setup(np, at91sam9g15_periphck, true); +} 
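/*
 * Illustrative aside (not part of the patch): with the whole PMC exported
 * through of_clk_add_hw_provider(np, of_clk_hw_pmc_get, ...) as in the setup
 * above, peripheral drivers keep using the ordinary clk consumer API. A
 * hedged sketch follows; the driver name, the "usart" clock-names entry and
 * the surrounding DT wiring are hypothetical.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_periph_probe(struct platform_device *pdev)
{
	struct clk *pclk;
	int ret;

	/* matched against the consumer node's clocks/clock-names properties */
	pclk = devm_clk_get(&pdev->dev, "usart");
	if (IS_ERR(pclk))
		return PTR_ERR(pclk);

	ret = clk_prepare_enable(pclk);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "peripheral clock running at %lu Hz\n",
		 clk_get_rate(pclk));

	/* a real driver would clk_disable_unprepare() this on remove */
	return 0;
}

static struct platform_driver demo_periph_driver = {
	.probe = demo_periph_probe,
	.driver = { .name = "demo-periph" },
};
module_platform_driver(demo_periph_driver);
MODULE_LICENSE("GPL");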
+CLK_OF_DECLARE_DRIVER(at91sam9g15_pmc, "atmel,at91sam9g15-pmc", + at91sam9g15_pmc_setup); + +static void __init at91sam9g25_pmc_setup(struct device_node *np) +{ + at91sam9x5_pmc_setup(np, at91sam9g25_periphck, false); +} +CLK_OF_DECLARE_DRIVER(at91sam9g25_pmc, "atmel,at91sam9g25-pmc", + at91sam9g25_pmc_setup); + +static void __init at91sam9g35_pmc_setup(struct device_node *np) +{ + at91sam9x5_pmc_setup(np, at91sam9g35_periphck, true); +} +CLK_OF_DECLARE_DRIVER(at91sam9g35_pmc, "atmel,at91sam9g35-pmc", + at91sam9g35_pmc_setup); + +static void __init at91sam9x25_pmc_setup(struct device_node *np) +{ + at91sam9x5_pmc_setup(np, at91sam9x25_periphck, false); +} +CLK_OF_DECLARE_DRIVER(at91sam9x25_pmc, "atmel,at91sam9x25-pmc", + at91sam9x25_pmc_setup); + +static void __init at91sam9x35_pmc_setup(struct device_node *np) +{ + at91sam9x5_pmc_setup(np, at91sam9x35_periphck, true); +} +CLK_OF_DECLARE_DRIVER(at91sam9x35_pmc, "atmel,at91sam9x35-pmc", + at91sam9x35_pmc_setup); diff --git a/drivers/clk/at91/clk-audio-pll.c b/drivers/clk/at91/clk-audio-pll.c index da7bafcfbe70..36d77146a3bd 100644 --- a/drivers/clk/at91/clk-audio-pll.c +++ b/drivers/clk/at91/clk-audio-pll.c @@ -43,6 +43,8 @@ #include <linux/regmap.h> #include <linux/slab.h> +#include "pmc.h" + #define AUDIO_PLL_DIV_FRAC BIT(22) #define AUDIO_PLL_ND_MAX (AT91_PMC_AUDIO_PLL_ND_MASK >> \ AT91_PMC_AUDIO_PLL_ND_OFFSET) @@ -444,93 +446,94 @@ static const struct clk_ops audio_pll_pmc_ops = { .set_rate = clk_audio_pll_pmc_set_rate, }; -static int of_sama5d2_clk_audio_pll_setup(struct device_node *np, - struct clk_init_data *init, - struct clk_hw *hw, - struct regmap **clk_audio_regmap) -{ - struct regmap *regmap; - const char *parent_names[1]; - int ret; - - regmap = syscon_node_to_regmap(of_get_parent(np)); - if (IS_ERR(regmap)) - return PTR_ERR(regmap); - - init->name = np->name; - of_clk_parent_fill(np, parent_names, 1); - init->parent_names = parent_names; - init->num_parents = 1; - - hw->init = init; - *clk_audio_regmap = regmap; - - ret = clk_hw_register(NULL, hw); - if (ret) - return ret; - - return of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); -} - -static void __init of_sama5d2_clk_audio_pll_frac_setup(struct device_node *np) +struct clk_hw * __init +at91_clk_register_audio_pll_frac(struct regmap *regmap, const char *name, + const char *parent_name) { struct clk_audio_frac *frac_ck; struct clk_init_data init = {}; + int ret; frac_ck = kzalloc(sizeof(*frac_ck), GFP_KERNEL); if (!frac_ck) - return; + return ERR_PTR(-ENOMEM); + init.name = name; init.ops = &audio_pll_frac_ops; + init.parent_names = &parent_name; + init.num_parents = 1; init.flags = CLK_SET_RATE_GATE; - if (of_sama5d2_clk_audio_pll_setup(np, &init, &frac_ck->hw, - &frac_ck->regmap)) + frac_ck->hw.init = &init; + frac_ck->regmap = regmap; + + ret = clk_hw_register(NULL, &frac_ck->hw); + if (ret) { kfree(frac_ck); + return ERR_PTR(ret); + } + + return &frac_ck->hw; } -static void __init of_sama5d2_clk_audio_pll_pad_setup(struct device_node *np) +struct clk_hw * __init +at91_clk_register_audio_pll_pad(struct regmap *regmap, const char *name, + const char *parent_name) { struct clk_audio_pad *apad_ck; - struct clk_init_data init = {}; + struct clk_init_data init; + int ret; apad_ck = kzalloc(sizeof(*apad_ck), GFP_KERNEL); if (!apad_ck) - return; + return ERR_PTR(-ENOMEM); + init.name = name; init.ops = &audio_pll_pad_ops; + init.parent_names = &parent_name; + init.num_parents = 1; init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE | CLK_SET_RATE_PARENT; - if 
(of_sama5d2_clk_audio_pll_setup(np, &init, &apad_ck->hw, - &apad_ck->regmap)) + apad_ck->hw.init = &init; + apad_ck->regmap = regmap; + + ret = clk_hw_register(NULL, &apad_ck->hw); + if (ret) { kfree(apad_ck); + return ERR_PTR(ret); + } + + return &apad_ck->hw; } -static void __init of_sama5d2_clk_audio_pll_pmc_setup(struct device_node *np) +struct clk_hw * __init +at91_clk_register_audio_pll_pmc(struct regmap *regmap, const char *name, + const char *parent_name) { - struct clk_audio_pad *apmc_ck; - struct clk_init_data init = {}; + struct clk_audio_pmc *apmc_ck; + struct clk_init_data init; + int ret; apmc_ck = kzalloc(sizeof(*apmc_ck), GFP_KERNEL); if (!apmc_ck) - return; + return ERR_PTR(-ENOMEM); + init.name = name; init.ops = &audio_pll_pmc_ops; + init.parent_names = &parent_name; + init.num_parents = 1; init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE | CLK_SET_RATE_PARENT; - if (of_sama5d2_clk_audio_pll_setup(np, &init, &apmc_ck->hw, - &apmc_ck->regmap)) + apmc_ck->hw.init = &init; + apmc_ck->regmap = regmap; + + ret = clk_hw_register(NULL, &apmc_ck->hw); + if (ret) { kfree(apmc_ck); -} + return ERR_PTR(ret); + } -CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_frac_setup, - "atmel,sama5d2-clk-audio-pll-frac", - of_sama5d2_clk_audio_pll_frac_setup); -CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_pad_setup, - "atmel,sama5d2-clk-audio-pll-pad", - of_sama5d2_clk_audio_pll_pad_setup); -CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_pmc_setup, - "atmel,sama5d2-clk-audio-pll-pmc", - of_sama5d2_clk_audio_pll_pmc_setup); + return &apmc_ck->hw; +} diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c index 33481368740e..66e7f7baf958 100644 --- a/drivers/clk/at91/clk-generated.c +++ b/drivers/clk/at91/clk-generated.c @@ -20,17 +20,8 @@ #include "pmc.h" -#define PERIPHERAL_MAX 64 -#define PERIPHERAL_ID_MIN 2 - -#define GENERATED_SOURCE_MAX 6 #define GENERATED_MAX_DIV 255 -#define GCK_ID_SSC0 43 -#define GCK_ID_SSC1 44 -#define GCK_ID_I2S0 54 -#define GCK_ID_I2S1 55 -#define GCK_ID_CLASSD 59 #define GCK_INDEX_DT_AUDIO_PLL 5 struct clk_generated { @@ -279,10 +270,10 @@ static void clk_generated_startup(struct clk_generated *gck) >> AT91_PMC_PCR_GCKDIV_OFFSET; } -static struct clk_hw * __init +struct clk_hw * __init at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock, const char *name, const char **parent_names, - u8 num_parents, u8 id, + u8 num_parents, u8 id, bool pll_audio, const struct clk_range *range) { struct clk_generated *gck; @@ -306,6 +297,7 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock, gck->regmap = regmap; gck->lock = lock; gck->range = *range; + gck->audio_pll_allowed = pll_audio; clk_generated_startup(gck); hw = &gck->hw; @@ -319,70 +311,3 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock, return hw; } - -static void __init of_sama5d2_clk_generated_setup(struct device_node *np) -{ - int num; - u32 id; - const char *name; - struct clk_hw *hw; - unsigned int num_parents; - const char *parent_names[GENERATED_SOURCE_MAX]; - struct device_node *gcknp; - struct clk_range range = CLK_RANGE(0, 0); - struct regmap *regmap; - struct clk_generated *gck; - - num_parents = of_clk_get_parent_count(np); - if (num_parents == 0 || num_parents > GENERATED_SOURCE_MAX) - return; - - of_clk_parent_fill(np, parent_names, num_parents); - - num = of_get_child_count(np); - if (!num || num > PERIPHERAL_MAX) - return; - - regmap = syscon_node_to_regmap(of_get_parent(np)); - if (IS_ERR(regmap)) - return; - - for_each_child_of_node(np, 
gcknp) { - if (of_property_read_u32(gcknp, "reg", &id)) - continue; - - if (id < PERIPHERAL_ID_MIN || id >= PERIPHERAL_MAX) - continue; - - if (of_property_read_string(np, "clock-output-names", &name)) - name = gcknp->name; - - of_at91_get_clk_range(gcknp, "atmel,clk-output-range", - &range); - - hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, name, - parent_names, num_parents, - id, &range); - - gck = to_clk_generated(hw); - - if (of_device_is_compatible(np, - "atmel,sama5d2-clk-generated")) { - if (gck->id == GCK_ID_SSC0 || gck->id == GCK_ID_SSC1 || - gck->id == GCK_ID_I2S0 || gck->id == GCK_ID_I2S1 || - gck->id == GCK_ID_CLASSD) - gck->audio_pll_allowed = true; - else - gck->audio_pll_allowed = false; - } else { - gck->audio_pll_allowed = false; - } - - if (IS_ERR(hw)) - continue; - - of_clk_add_hw_provider(gcknp, of_clk_hw_simple_get, hw); - } -} -CLK_OF_DECLARE(of_sama5d2_clk_generated_setup, "atmel,sama5d2-clk-generated", - of_sama5d2_clk_generated_setup); diff --git a/drivers/clk/at91/clk-h32mx.c b/drivers/clk/at91/clk-h32mx.c index e0daa4a31f88..f0a2c6baab37 100644 --- a/drivers/clk/at91/clk-h32mx.c +++ b/drivers/clk/at91/clk-h32mx.c @@ -86,25 +86,19 @@ static const struct clk_ops h32mx_ops = { .set_rate = clk_sama5d4_h32mx_set_rate, }; -static void __init of_sama5d4_clk_h32mx_setup(struct device_node *np) +struct clk_hw * __init +at91_clk_register_h32mx(struct regmap *regmap, const char *name, + const char *parent_name) { struct clk_sama5d4_h32mx *h32mxclk; struct clk_init_data init; - const char *parent_name; - struct regmap *regmap; int ret; - regmap = syscon_node_to_regmap(of_get_parent(np)); - if (IS_ERR(regmap)) - return; - h32mxclk = kzalloc(sizeof(*h32mxclk), GFP_KERNEL); if (!h32mxclk) - return; - - parent_name = of_clk_get_parent_name(np, 0); + return ERR_PTR(-ENOMEM); - init.name = np->name; + init.name = name; init.ops = &h32mx_ops; init.parent_names = parent_name ? &parent_name : NULL; init.num_parents = parent_name ? 
1 : 0; @@ -116,10 +110,8 @@ static void __init of_sama5d4_clk_h32mx_setup(struct device_node *np) ret = clk_hw_register(NULL, &h32mxclk->hw); if (ret) { kfree(h32mxclk); - return; + return ERR_PTR(ret); } - of_clk_add_hw_provider(np, of_clk_hw_simple_get, &h32mxclk->hw); + return &h32mxclk->hw; } -CLK_OF_DECLARE(of_sama5d4_clk_h32mx_setup, "atmel,sama5d4-clk-h32mx", - of_sama5d4_clk_h32mx_setup); diff --git a/drivers/clk/at91/clk-i2s-mux.c b/drivers/clk/at91/clk-i2s-mux.c index f0c3c3079f04..fe6ce172b8b0 100644 --- a/drivers/clk/at91/clk-i2s-mux.c +++ b/drivers/clk/at91/clk-i2s-mux.c @@ -14,7 +14,7 @@ #include <soc/at91/atmel-sfr.h> -#define I2S_BUS_NR 2 +#include "pmc.h" struct clk_i2s_mux { struct clk_hw hw; @@ -48,7 +48,7 @@ static const struct clk_ops clk_i2s_mux_ops = { .determine_rate = __clk_mux_determine_rate, }; -static struct clk_hw * __init +struct clk_hw * __init at91_clk_i2s_mux_register(struct regmap *regmap, const char *name, const char * const *parent_names, unsigned int num_parents, u8 bus_id) @@ -78,39 +78,3 @@ at91_clk_i2s_mux_register(struct regmap *regmap, const char *name, return &i2s_ck->hw; } - -static void __init of_sama5d2_clk_i2s_mux_setup(struct device_node *np) -{ - struct regmap *regmap_sfr; - u8 bus_id; - const char *parent_names[2]; - struct device_node *i2s_mux_np; - struct clk_hw *hw; - int ret; - - regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr"); - if (IS_ERR(regmap_sfr)) - return; - - for_each_child_of_node(np, i2s_mux_np) { - if (of_property_read_u8(i2s_mux_np, "reg", &bus_id)) - continue; - - if (bus_id > I2S_BUS_NR) - continue; - - ret = of_clk_parent_fill(i2s_mux_np, parent_names, 2); - if (ret != 2) - continue; - - hw = at91_clk_i2s_mux_register(regmap_sfr, i2s_mux_np->name, - parent_names, 2, bus_id); - if (IS_ERR(hw)) - continue; - - of_clk_add_hw_provider(i2s_mux_np, of_clk_hw_simple_get, hw); - } -} - -CLK_OF_DECLARE(sama5d2_clk_i2s_mux, "atmel,sama5d2-clk-i2s-mux", - of_sama5d2_clk_i2s_mux_setup); diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c index c813c27f2e58..7ac0facdb28b 100644 --- a/drivers/clk/at91/clk-main.c +++ b/drivers/clk/at91/clk-main.c @@ -12,7 +12,6 @@ #include <linux/clkdev.h> #include <linux/clk/at91_pmc.h> #include <linux/delay.h> -#include <linux/of.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> @@ -128,7 +127,7 @@ static const struct clk_ops main_osc_ops = { .is_prepared = clk_main_osc_is_prepared, }; -static struct clk_hw * __init +struct clk_hw * __init at91_clk_register_main_osc(struct regmap *regmap, const char *name, const char *parent_name, @@ -171,31 +170,6 @@ at91_clk_register_main_osc(struct regmap *regmap, return hw; } -static void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np) -{ - struct clk_hw *hw; - const char *name = np->name; - const char *parent_name; - struct regmap *regmap; - bool bypass; - - of_property_read_string(np, "clock-output-names", &name); - bypass = of_property_read_bool(np, "atmel,osc-bypass"); - parent_name = of_clk_get_parent_name(np, 0); - - regmap = syscon_node_to_regmap(of_get_parent(np)); - if (IS_ERR(regmap)) - return; - - hw = at91_clk_register_main_osc(regmap, name, parent_name, bypass); - if (IS_ERR(hw)) - return; - - of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); -} -CLK_OF_DECLARE(at91rm9200_clk_main_osc, "atmel,at91rm9200-clk-main-osc", - of_at91rm9200_clk_main_osc_setup); - static bool clk_main_rc_osc_ready(struct regmap *regmap) { unsigned int status; @@ -275,7 +249,7 @@ static const struct clk_ops 
main_rc_osc_ops = { .recalc_accuracy = clk_main_rc_osc_recalc_accuracy, }; -static struct clk_hw * __init +struct clk_hw * __init at91_clk_register_main_rc_osc(struct regmap *regmap, const char *name, u32 frequency, u32 accuracy) @@ -313,32 +287,6 @@ at91_clk_register_main_rc_osc(struct regmap *regmap, return hw; } -static void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np) -{ - struct clk_hw *hw; - u32 frequency = 0; - u32 accuracy = 0; - const char *name = np->name; - struct regmap *regmap; - - of_property_read_string(np, "clock-output-names", &name); - of_property_read_u32(np, "clock-frequency", &frequency); - of_property_read_u32(np, "clock-accuracy", &accuracy); - - regmap = syscon_node_to_regmap(of_get_parent(np)); - if (IS_ERR(regmap)) - return; - - hw = at91_clk_register_main_rc_osc(regmap, name, frequency, accuracy); - if (IS_ERR(hw)) - return; - - of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); -} -CLK_OF_DECLARE(at91sam9x5_clk_main_rc_osc, "atmel,at91sam9x5-clk-main-rc-osc", - of_at91sam9x5_clk_main_rc_osc_setup); - - static int clk_main_probe_frequency(struct regmap *regmap) { unsigned long prep_time, timeout; @@ -403,7 +351,7 @@ static const struct clk_ops rm9200_main_ops = { .recalc_rate = clk_rm9200_main_recalc_rate, }; -static struct clk_hw * __init +struct clk_hw * __init at91_clk_register_rm9200_main(struct regmap *regmap, const char *name, const char *parent_name) @@ -442,29 +390,6 @@ at91_clk_register_rm9200_main(struct regmap *regmap, return hw; } -static void __init of_at91rm9200_clk_main_setup(struct device_node *np) -{ - struct clk_hw *hw; - const char *parent_name; - const char *name = np->name; - struct regmap *regmap; - - parent_name = of_clk_get_parent_name(np, 0); - of_property_read_string(np, "clock-output-names", &name); - - regmap = syscon_node_to_regmap(of_get_parent(np)); - if (IS_ERR(regmap)) - return; - - hw = at91_clk_register_rm9200_main(regmap, name, parent_name); - if (IS_ERR(hw)) - return; - - of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); -} -CLK_OF_DECLARE(at91rm9200_clk_main, "atmel,at91rm9200-clk-main", - of_at91rm9200_clk_main_setup); - static inline bool clk_sam9x5_main_ready(struct regmap *regmap) { unsigned int status; @@ -541,7 +466,7 @@ static const struct clk_ops sam9x5_main_ops = { .get_parent = clk_sam9x5_main_get_parent, }; -static struct clk_hw * __init +struct clk_hw * __init at91_clk_register_sam9x5_main(struct regmap *regmap, const char *name, const char **parent_names, @@ -583,32 +508,3 @@ at91_clk_register_sam9x5_main(struct regmap *regmap, return hw; } - -static void __init of_at91sam9x5_clk_main_setup(struct device_node *np) -{ - struct clk_hw *hw; - const char *parent_names[2]; - unsigned int num_parents; - const char *name = np->name; - struct regmap *regmap; - - num_parents = of_clk_get_parent_count(np); - if (num_parents == 0 || num_parents > 2) - return; - - of_clk_parent_fill(np, parent_names, num_parents); - regmap = syscon_node_to_regmap(of_get_parent(np)); - if (IS_ERR(regmap)) - return; - - of_property_read_string(np, "clock-output-names", &name); - - hw = at91_clk_register_sam9x5_main(regmap, name, parent_names, - num_parents); - if (IS_ERR(hw)) - return; - - of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); -} -CLK_OF_DECLARE(at91sam9x5_clk_main, "atmel,at91sam9x5-clk-main", - of_at91sam9x5_clk_main_setup); diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c index e9cba9fc26d7..eb53b4a8fab6 100644 --- a/drivers/clk/at91/clk-master.c +++ 
b/drivers/clk/at91/clk-master.c @@ -17,24 +17,11 @@ #include "pmc.h" -#define MASTER_SOURCE_MAX 4 - #define MASTER_PRES_MASK 0x7 #define MASTER_PRES_MAX MASTER_PRES_MASK #define MASTER_DIV_SHIFT 8 #define MASTER_DIV_MASK 0x3 -struct clk_master_characteristics { - struct clk_range output; - u32 divisors[4]; - u8 have_div3_pres; -}; - -struct clk_master_layout { - u32 mask; - u8 pres_shift; -}; - #define to_clk_master(hw) container_of(hw, struct clk_master, hw) struct clk_master { @@ -120,7 +107,7 @@ static const struct clk_ops master_ops = { .get_parent = clk_master_get_parent, }; -static struct clk_hw * __init +struct clk_hw * __init at91_clk_register_master(struct regmap *regmap, const char *name, int num_parents, const char **parent_names, @@ -161,92 +148,12 @@ at91_clk_register_master(struct regmap *regmap, } -static const struct clk_master_layout at91rm9200_master_layout = { +const struct clk_master_layout at91rm9200_master_layout = { .mask = 0x31F, .pres_shift = 2, }; -static const struct clk_master_layout at91sam9x5_master_layout = { +const struct clk_master_layout at91sam9x5_master_layout = { .mask = 0x373, .pres_shift = 4, }; - - -static struct clk_master_characteristics * __init -of_at91_clk_master_get_characteristics(struct device_node *np) -{ - struct clk_master_characteristics *characteristics; - - characteristics = kzalloc(sizeof(*characteristics), GFP_KERNEL); - if (!characteristics) - return NULL; - - if (of_at91_get_clk_range(np, "atmel,clk-output-range", &characteristics->output)) - goto out_free_characteristics; - - of_property_read_u32_array(np, "atmel,clk-divisors", - characteristics->divisors, 4); - - characteristics->have_div3_pres = - of_property_read_bool(np, "atmel,master-clk-have-div3-pres"); - - return characteristics; - -out_free_characteristics: - kfree(characteristics); - return NULL; -} - -static void __init -of_at91_clk_master_setup(struct device_node *np, - const struct clk_master_layout *layout) -{ - struct clk_hw *hw; - unsigned int num_parents; - const char *parent_names[MASTER_SOURCE_MAX]; - const char *name = np->name; - struct clk_master_characteristics *characteristics; - struct regmap *regmap; - - num_parents = of_clk_get_parent_count(np); - if (num_parents == 0 || num_parents > MASTER_SOURCE_MAX) - return; - - of_clk_parent_fill(np, parent_names, num_parents); - - of_property_read_string(np, "clock-output-names", &name); - - characteristics = of_at91_clk_master_get_characteristics(np); - if (!characteristics) - return; - - regmap = syscon_node_to_regmap(of_get_parent(np)); - if (IS_ERR(regmap)) - return; - - hw = at91_clk_register_master(regmap, name, num_parents, - parent_names, layout, - characteristics); - if (IS_ERR(hw)) - goto out_free_characteristics; - - of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); - return; - -out_free_characteristics: - kfree(characteristics); -} - -static void __init of_at91rm9200_clk_master_setup(struct device_node *np) -{ - of_at91_clk_master_setup(np, &at91rm9200_master_layout); -} -CLK_OF_DECLARE(at91rm9200_clk_master, "atmel,at91rm9200-clk-master", - of_at91rm9200_clk_master_setup); - -static void __init of_at91sam9x5_clk_master_setup(struct device_node *np) -{ - of_at91_clk_master_setup(np, &at91sam9x5_master_layout); -} -CLK_OF_DECLARE(at91sam9x5_clk_master, "atmel,at91sam9x5-clk-master", - of_at91sam9x5_clk_master_setup); diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c index 770118369230..65c1defa78e4 100644 --- a/drivers/clk/at91/clk-peripheral.c +++ 
b/drivers/clk/at91/clk-peripheral.c @@ -19,11 +19,6 @@ DEFINE_SPINLOCK(pmc_pcr_lock); -#define PERIPHERAL_MAX 64 - -#define PERIPHERAL_AT91RM9200 0 -#define PERIPHERAL_AT91SAM9X5 1 - #define PERIPHERAL_ID_MIN 2 #define PERIPHERAL_ID_MAX 31 #define PERIPHERAL_MASK(id) (1 << ((id) & PERIPHERAL_ID_MAX)) @@ -104,7 +99,7 @@ static const struct clk_ops peripheral_ops = { .is_enabled = clk_peripheral_is_enabled, }; -static struct clk_hw * __init +struct clk_hw * __init at91_clk_register_peripheral(struct regmap *regmap, const char *name, const char *parent_name, u32 id) { @@ -331,7 +326,7 @@ static const struct clk_ops sam9x5_peripheral_ops = { .set_rate = clk_sam9x5_peripheral_set_rate, }; -static struct clk_hw * __init +struct clk_hw * __init at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock, const char *name, const char *parent_name, u32 id, const struct clk_range *range) @@ -374,75 +369,3 @@ at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock, return hw; } - -static void __init -of_at91_clk_periph_setup(struct device_node *np, u8 type) -{ - int num; - u32 id; - struct clk_hw *hw; - const char *parent_name; - const char *name; - struct device_node *periphclknp; - struct regmap *regmap; - - parent_name = of_clk_get_parent_name(np, 0); - if (!parent_name) - return; - - num = of_get_child_count(np); - if (!num || num > PERIPHERAL_MAX) - return; - - regmap = syscon_node_to_regmap(of_get_parent(np)); - if (IS_ERR(regmap)) - return; - - for_each_child_of_node(np, periphclknp) { - if (of_property_read_u32(periphclknp, "reg", &id)) - continue; - - if (id >= PERIPHERAL_MAX) - continue; - - if (of_property_read_string(np, "clock-output-names", &name)) - name = periphclknp->name; - - if (type == PERIPHERAL_AT91RM9200) { - hw = at91_clk_register_peripheral(regmap, name, - parent_name, id); - } else { - struct clk_range range = CLK_RANGE(0, 0); - - of_at91_get_clk_range(periphclknp, - "atmel,clk-output-range", - &range); - - hw = at91_clk_register_sam9x5_peripheral(regmap, - &pmc_pcr_lock, - name, - parent_name, - id, &range); - } - - if (IS_ERR(hw)) - continue; - - of_clk_add_hw_provider(periphclknp, of_clk_hw_simple_get, hw); - } -} - -static void __init of_at91rm9200_clk_periph_setup(struct device_node *np) -{ - of_at91_clk_periph_setup(np, PERIPHERAL_AT91RM9200); -} -CLK_OF_DECLARE(at91rm9200_clk_periph, "atmel,at91rm9200-clk-peripheral", - of_at91rm9200_clk_periph_setup); - -static void __init of_at91sam9x5_clk_periph_setup(struct device_node *np) -{ - of_at91_clk_periph_setup(np, PERIPHERAL_AT91SAM9X5); -} -CLK_OF_DECLARE(at91sam9x5_clk_periph, "atmel,at91sam9x5-clk-peripheral", - of_at91sam9x5_clk_periph_setup); - diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c index 72b6091eb7b9..b4138fcacf49 100644 --- a/drivers/clk/at91/clk-pll.c +++ b/drivers/clk/at91/clk-pll.c @@ -34,20 +34,6 @@ #define PLL_OUT_SHIFT 14 #define PLL_MAX_ID 1 -struct clk_pll_characteristics { - struct clk_range input; - int num_output; - struct clk_range *output; - u16 *icpll; - u8 *out; -}; - -struct clk_pll_layout { - u32 pllr_mask; - u16 mul_mask; - u8 mul_shift; -}; - #define to_clk_pll(hw) container_of(hw, struct clk_pll, hw) struct clk_pll { @@ -133,6 +119,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, { struct clk_pll *pll = to_clk_pll(hw); + if (!pll->div || !pll->mul) + return 0; + return (parent_rate / pll->div) * (pll->mul + 1); } @@ -285,7 +274,7 @@ static const struct clk_ops pll_ops = { .set_rate = clk_pll_set_rate, }; -static 
struct clk_hw * __init +struct clk_hw * __init at91_clk_register_pll(struct regmap *regmap, const char *name, const char *parent_name, u8 id, const struct clk_pll_layout *layout, @@ -331,189 +320,26 @@ at91_clk_register_pll(struct regmap *regmap, const char *name, } -static const struct clk_pll_layout at91rm9200_pll_layout = { +const struct clk_pll_layout at91rm9200_pll_layout = { .pllr_mask = 0x7FFFFFF, .mul_shift = 16, .mul_mask = 0x7FF, }; -static const struct clk_pll_layout at91sam9g45_pll_layout = { +const struct clk_pll_layout at91sam9g45_pll_layout = { .pllr_mask = 0xFFFFFF, .mul_shift = 16, .mul_mask = 0xFF, }; -static const struct clk_pll_layout at91sam9g20_pllb_layout = { +const struct clk_pll_layout at91sam9g20_pllb_layout = { .pllr_mask = 0x3FFFFF, .mul_shift = 16, .mul_mask = 0x3F, }; -static const struct clk_pll_layout sama5d3_pll_layout = { +const struct clk_pll_layout sama5d3_pll_layout = { .pllr_mask = 0x1FFFFFF, .mul_shift = 18, .mul_mask = 0x7F, }; - - -static struct clk_pll_characteristics * __init -of_at91_clk_pll_get_characteristics(struct device_node *np) -{ - int i; - int offset; - u32 tmp; - int num_output; - u32 num_cells; - struct clk_range input; - struct clk_range *output; - u8 *out = NULL; - u16 *icpll = NULL; - struct clk_pll_characteristics *characteristics; - - if (of_at91_get_clk_range(np, "atmel,clk-input-range", &input)) - return NULL; - - if (of_property_read_u32(np, "#atmel,pll-clk-output-range-cells", - &num_cells)) - return NULL; - - if (num_cells < 2 || num_cells > 4) - return NULL; - - if (!of_get_property(np, "atmel,pll-clk-output-ranges", &tmp)) - return NULL; - num_output = tmp / (sizeof(u32) * num_cells); - - characteristics = kzalloc(sizeof(*characteristics), GFP_KERNEL); - if (!characteristics) - return NULL; - - output = kcalloc(num_output, sizeof(*output), GFP_KERNEL); - if (!output) - goto out_free_characteristics; - - if (num_cells > 2) { - out = kcalloc(num_output, sizeof(*out), GFP_KERNEL); - if (!out) - goto out_free_output; - } - - if (num_cells > 3) { - icpll = kcalloc(num_output, sizeof(*icpll), GFP_KERNEL); - if (!icpll) - goto out_free_output; - } - - for (i = 0; i < num_output; i++) { - offset = i * num_cells; - if (of_property_read_u32_index(np, - "atmel,pll-clk-output-ranges", - offset, &tmp)) - goto out_free_output; - output[i].min = tmp; - if (of_property_read_u32_index(np, - "atmel,pll-clk-output-ranges", - offset + 1, &tmp)) - goto out_free_output; - output[i].max = tmp; - - if (num_cells == 2) - continue; - - if (of_property_read_u32_index(np, - "atmel,pll-clk-output-ranges", - offset + 2, &tmp)) - goto out_free_output; - out[i] = tmp; - - if (num_cells == 3) - continue; - - if (of_property_read_u32_index(np, - "atmel,pll-clk-output-ranges", - offset + 3, &tmp)) - goto out_free_output; - icpll[i] = tmp; - } - - characteristics->input = input; - characteristics->num_output = num_output; - characteristics->output = output; - characteristics->out = out; - characteristics->icpll = icpll; - return characteristics; - -out_free_output: - kfree(icpll); - kfree(out); - kfree(output); -out_free_characteristics: - kfree(characteristics); - return NULL; -} - -static void __init -of_at91_clk_pll_setup(struct device_node *np, - const struct clk_pll_layout *layout) -{ - u32 id; - struct clk_hw *hw; - struct regmap *regmap; - const char *parent_name; - const char *name = np->name; - struct clk_pll_characteristics *characteristics; - - if (of_property_read_u32(np, "reg", &id)) - return; - - parent_name = of_clk_get_parent_name(np, 0); - - 
of_property_read_string(np, "clock-output-names", &name); - - regmap = syscon_node_to_regmap(of_get_parent(np)); - if (IS_ERR(regmap)) - return; - - characteristics = of_at91_clk_pll_get_characteristics(np); - if (!characteristics) - return; - - hw = at91_clk_register_pll(regmap, name, parent_name, id, layout, - characteristics); - if (IS_ERR(hw)) - goto out_free_characteristics; - - of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); - return; - -out_free_characteristics: - kfree(characteristics); -} - -static void __init of_at91rm9200_clk_pll_setup(struct device_node *np) -{ - of_at91_clk_pll_setup(np, &at91rm9200_pll_layout); -} -CLK_OF_DECLARE(at91rm9200_clk_pll, "atmel,at91rm9200-clk-pll", - of_at91rm9200_clk_pll_setup); - -static void __init of_at91sam9g45_clk_pll_setup(struct device_node *np) -{ - of_at91_clk_pll_setup(np, &at91sam9g45_pll_layout); -} -CLK_OF_DECLARE(at91sam9g45_clk_pll, "atmel,at91sam9g45-clk-pll", - of_at91sam9g45_clk_pll_setup); - -static void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np) -{ - of_at91_clk_pll_setup(np, &at91sam9g20_pllb_layout); -} -CLK_OF_DECLARE(at91sam9g20_clk_pllb, "atmel,at91sam9g20-clk-pllb", - of_at91sam9g20_clk_pllb_setup); - -static void __init of_sama5d3_clk_pll_setup(struct device_node *np) -{ - of_at91_clk_pll_setup(np, &sama5d3_pll_layout); -} -CLK_OF_DECLARE(sama5d3_clk_pll, "atmel,sama5d3-clk-pll", - of_sama5d3_clk_pll_setup); diff --git a/drivers/clk/at91/clk-plldiv.c b/drivers/clk/at91/clk-plldiv.c index b4afaf22f3fd..e8c4f8b02f28 100644 --- a/drivers/clk/at91/clk-plldiv.c +++ b/drivers/clk/at91/clk-plldiv.c @@ -75,7 +75,7 @@ static const struct clk_ops plldiv_ops = { .set_rate = clk_plldiv_set_rate, }; -static struct clk_hw * __init +struct clk_hw * __init at91_clk_register_plldiv(struct regmap *regmap, const char *name, const char *parent_name) { @@ -106,28 +106,3 @@ at91_clk_register_plldiv(struct regmap *regmap, const char *name, return hw; } - -static void __init -of_at91sam9x5_clk_plldiv_setup(struct device_node *np) -{ - struct clk_hw *hw; - const char *parent_name; - const char *name = np->name; - struct regmap *regmap; - - parent_name = of_clk_get_parent_name(np, 0); - - of_property_read_string(np, "clock-output-names", &name); - - regmap = syscon_node_to_regmap(of_get_parent(np)); - if (IS_ERR(regmap)) - return; - - hw = at91_clk_register_plldiv(regmap, name, parent_name); - if (IS_ERR(hw)) - return; - - of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); -} -CLK_OF_DECLARE(at91sam9x5_clk_plldiv, "atmel,at91sam9x5-clk-plldiv", - of_at91sam9x5_clk_plldiv_setup); diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c index 0e6aab1252fc..5bc68b9c5498 100644 --- a/drivers/clk/at91/clk-programmable.c +++ b/drivers/clk/at91/clk-programmable.c @@ -17,7 +17,6 @@ #include "pmc.h" -#define PROG_SOURCE_MAX 5 #define PROG_ID_MAX 7 #define PROG_STATUS_MASK(id) (1 << ((id) + 8)) @@ -25,12 +24,6 @@ #define PROG_PRES(layout, pckr) ((pckr >> layout->pres_shift) & PROG_PRES_MASK) #define PROG_MAX_RM9200_CSS 3 -struct clk_programmable_layout { - u8 pres_shift; - u8 css_mask; - u8 have_slck_mck; -}; - struct clk_programmable { struct clk_hw hw; struct regmap *regmap; @@ -170,7 +163,7 @@ static const struct clk_ops programmable_ops = { .set_rate = clk_programmable_set_rate, }; -static struct clk_hw * __init +struct clk_hw * __init at91_clk_register_programmable(struct regmap *regmap, const char *name, const char **parent_names, u8 num_parents, u8 id, @@ -211,86 +204,20 @@ 
at91_clk_register_programmable(struct regmap *regmap, return hw; } -static const struct clk_programmable_layout at91rm9200_programmable_layout = { +const struct clk_programmable_layout at91rm9200_programmable_layout = { .pres_shift = 2, .css_mask = 0x3, .have_slck_mck = 0, }; -static const struct clk_programmable_layout at91sam9g45_programmable_layout = { +const struct clk_programmable_layout at91sam9g45_programmable_layout = { .pres_shift = 2, .css_mask = 0x3, .have_slck_mck = 1, }; -static const struct clk_programmable_layout at91sam9x5_programmable_layout = { +const struct clk_programmable_layout at91sam9x5_programmable_layout = { .pres_shift = 4, .css_mask = 0x7, .have_slck_mck = 0, }; - -static void __init -of_at91_clk_prog_setup(struct device_node *np, - const struct clk_programmable_layout *layout) -{ - int num; - u32 id; - struct clk_hw *hw; - unsigned int num_parents; - const char *parent_names[PROG_SOURCE_MAX]; - const char *name; - struct device_node *progclknp; - struct regmap *regmap; - - num_parents = of_clk_get_parent_count(np); - if (num_parents == 0 || num_parents > PROG_SOURCE_MAX) - return; - - of_clk_parent_fill(np, parent_names, num_parents); - - num = of_get_child_count(np); - if (!num || num > (PROG_ID_MAX + 1)) - return; - - regmap = syscon_node_to_regmap(of_get_parent(np)); - if (IS_ERR(regmap)) - return; - - for_each_child_of_node(np, progclknp) { - if (of_property_read_u32(progclknp, "reg", &id)) - continue; - - if (of_property_read_string(np, "clock-output-names", &name)) - name = progclknp->name; - - hw = at91_clk_register_programmable(regmap, name, - parent_names, num_parents, - id, layout); - if (IS_ERR(hw)) - continue; - - of_clk_add_hw_provider(progclknp, of_clk_hw_simple_get, hw); - } -} - - -static void __init of_at91rm9200_clk_prog_setup(struct device_node *np) -{ - of_at91_clk_prog_setup(np, &at91rm9200_programmable_layout); -} -CLK_OF_DECLARE(at91rm9200_clk_prog, "atmel,at91rm9200-clk-programmable", - of_at91rm9200_clk_prog_setup); - -static void __init of_at91sam9g45_clk_prog_setup(struct device_node *np) -{ - of_at91_clk_prog_setup(np, &at91sam9g45_programmable_layout); -} -CLK_OF_DECLARE(at91sam9g45_clk_prog, "atmel,at91sam9g45-clk-programmable", - of_at91sam9g45_clk_prog_setup); - -static void __init of_at91sam9x5_clk_prog_setup(struct device_node *np) -{ - of_at91_clk_prog_setup(np, &at91sam9x5_programmable_layout); -} -CLK_OF_DECLARE(at91sam9x5_clk_prog, "atmel,at91sam9x5-clk-programmable", - of_at91sam9x5_clk_prog_setup); diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c index 560a8b9abf93..cbb146912f7a 100644 --- a/drivers/clk/at91/clk-slow.c +++ b/drivers/clk/at91/clk-slow.c @@ -40,7 +40,7 @@ static const struct clk_ops sam9260_slow_ops = { .get_parent = clk_sam9260_slow_get_parent, }; -static struct clk_hw * __init +struct clk_hw * __init at91_clk_register_sam9260_slow(struct regmap *regmap, const char *name, const char **parent_names, @@ -79,33 +79,3 @@ at91_clk_register_sam9260_slow(struct regmap *regmap, return hw; } - -static void __init of_at91sam9260_clk_slow_setup(struct device_node *np) -{ - struct clk_hw *hw; - const char *parent_names[2]; - unsigned int num_parents; - const char *name = np->name; - struct regmap *regmap; - - num_parents = of_clk_get_parent_count(np); - if (num_parents != 2) - return; - - of_clk_parent_fill(np, parent_names, num_parents); - regmap = syscon_node_to_regmap(of_get_parent(np)); - if (IS_ERR(regmap)) - return; - - of_property_read_string(np, "clock-output-names", &name); - - hw = 
at91_clk_register_sam9260_slow(regmap, name, parent_names, - num_parents); - if (IS_ERR(hw)) - return; - - of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); -} - -CLK_OF_DECLARE(at91sam9260_clk_slow, "atmel,at91sam9260-clk-slow", - of_at91sam9260_clk_slow_setup); diff --git a/drivers/clk/at91/clk-smd.c b/drivers/clk/at91/clk-smd.c index 965c662b90a5..75679fd8a9c7 100644 --- a/drivers/clk/at91/clk-smd.c +++ b/drivers/clk/at91/clk-smd.c @@ -17,8 +17,6 @@ #include "pmc.h" -#define SMD_SOURCE_MAX 2 - #define SMD_DIV_SHIFT 8 #define SMD_MAX_DIV 0xf @@ -111,7 +109,7 @@ static const struct clk_ops at91sam9x5_smd_ops = { .set_rate = at91sam9x5_clk_smd_set_rate, }; -static struct clk_hw * __init +struct clk_hw * __init at91sam9x5_clk_register_smd(struct regmap *regmap, const char *name, const char **parent_names, u8 num_parents) { @@ -142,33 +140,3 @@ at91sam9x5_clk_register_smd(struct regmap *regmap, const char *name, return hw; } - -static void __init of_at91sam9x5_clk_smd_setup(struct device_node *np) -{ - struct clk_hw *hw; - unsigned int num_parents; - const char *parent_names[SMD_SOURCE_MAX]; - const char *name = np->name; - struct regmap *regmap; - - num_parents = of_clk_get_parent_count(np); - if (num_parents == 0 || num_parents > SMD_SOURCE_MAX) - return; - - of_clk_parent_fill(np, parent_names, num_parents); - - of_property_read_string(np, "clock-output-names", &name); - - regmap = syscon_node_to_regmap(of_get_parent(np)); - if (IS_ERR(regmap)) - return; - - hw = at91sam9x5_clk_register_smd(regmap, name, parent_names, - num_parents); - if (IS_ERR(hw)) - return; - - of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); -} -CLK_OF_DECLARE(at91sam9x5_clk_smd, "atmel,at91sam9x5-clk-smd", - of_at91sam9x5_clk_smd_setup); diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c index 86a36809765d..47bfca933403 100644 --- a/drivers/clk/at91/clk-system.c +++ b/drivers/clk/at91/clk-system.c @@ -88,7 +88,7 @@ static const struct clk_ops system_ops = { .is_prepared = clk_system_is_prepared, }; -static struct clk_hw * __init +struct clk_hw * __init at91_clk_register_system(struct regmap *regmap, const char *name, const char *parent_name, u8 id) { @@ -123,40 +123,3 @@ at91_clk_register_system(struct regmap *regmap, const char *name, return hw; } - -static void __init of_at91rm9200_clk_sys_setup(struct device_node *np) -{ - int num; - u32 id; - struct clk_hw *hw; - const char *name; - struct device_node *sysclknp; - const char *parent_name; - struct regmap *regmap; - - num = of_get_child_count(np); - if (num > (SYSTEM_MAX_ID + 1)) - return; - - regmap = syscon_node_to_regmap(of_get_parent(np)); - if (IS_ERR(regmap)) - return; - - for_each_child_of_node(np, sysclknp) { - if (of_property_read_u32(sysclknp, "reg", &id)) - continue; - - if (of_property_read_string(np, "clock-output-names", &name)) - name = sysclknp->name; - - parent_name = of_clk_get_parent_name(sysclknp, 0); - - hw = at91_clk_register_system(regmap, name, parent_name, id); - if (IS_ERR(hw)) - continue; - - of_clk_add_hw_provider(sysclknp, of_clk_hw_simple_get, hw); - } -} -CLK_OF_DECLARE(at91rm9200_clk_sys, "atmel,at91rm9200-clk-system", - of_at91rm9200_clk_sys_setup); diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c index 791770a563fc..79ee1c760f2a 100644 --- a/drivers/clk/at91/clk-usb.c +++ b/drivers/clk/at91/clk-usb.c @@ -17,8 +17,6 @@ #include "pmc.h" -#define USB_SOURCE_MAX 2 - #define SAM9X5_USB_DIV_SHIFT 8 #define SAM9X5_USB_MAX_DIV 0xf @@ -192,7 +190,7 @@ static const struct clk_ops 
at91sam9n12_usb_ops = { .set_rate = at91sam9x5_clk_usb_set_rate, }; -static struct clk_hw * __init +struct clk_hw * __init at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name, const char **parent_names, u8 num_parents) { @@ -225,7 +223,7 @@ at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name, return hw; } -static struct clk_hw * __init +struct clk_hw * __init at91sam9n12_clk_register_usb(struct regmap *regmap, const char *name, const char *parent_name) { @@ -342,7 +340,7 @@ static const struct clk_ops at91rm9200_usb_ops = { .set_rate = at91rm9200_clk_usb_set_rate, }; -static struct clk_hw * __init +struct clk_hw * __init at91rm9200_clk_register_usb(struct regmap *regmap, const char *name, const char *parent_name, const u32 *divisors) { @@ -374,89 +372,3 @@ at91rm9200_clk_register_usb(struct regmap *regmap, const char *name, return hw; } - -static void __init of_at91sam9x5_clk_usb_setup(struct device_node *np) -{ - struct clk_hw *hw; - unsigned int num_parents; - const char *parent_names[USB_SOURCE_MAX]; - const char *name = np->name; - struct regmap *regmap; - - num_parents = of_clk_get_parent_count(np); - if (num_parents == 0 || num_parents > USB_SOURCE_MAX) - return; - - of_clk_parent_fill(np, parent_names, num_parents); - - of_property_read_string(np, "clock-output-names", &name); - - regmap = syscon_node_to_regmap(of_get_parent(np)); - if (IS_ERR(regmap)) - return; - - hw = at91sam9x5_clk_register_usb(regmap, name, parent_names, - num_parents); - if (IS_ERR(hw)) - return; - - of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); -} -CLK_OF_DECLARE(at91sam9x5_clk_usb, "atmel,at91sam9x5-clk-usb", - of_at91sam9x5_clk_usb_setup); - -static void __init of_at91sam9n12_clk_usb_setup(struct device_node *np) -{ - struct clk_hw *hw; - const char *parent_name; - const char *name = np->name; - struct regmap *regmap; - - parent_name = of_clk_get_parent_name(np, 0); - if (!parent_name) - return; - - of_property_read_string(np, "clock-output-names", &name); - - regmap = syscon_node_to_regmap(of_get_parent(np)); - if (IS_ERR(regmap)) - return; - - hw = at91sam9n12_clk_register_usb(regmap, name, parent_name); - if (IS_ERR(hw)) - return; - - of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); -} -CLK_OF_DECLARE(at91sam9n12_clk_usb, "atmel,at91sam9n12-clk-usb", - of_at91sam9n12_clk_usb_setup); - -static void __init of_at91rm9200_clk_usb_setup(struct device_node *np) -{ - struct clk_hw *hw; - const char *parent_name; - const char *name = np->name; - u32 divisors[4] = {0, 0, 0, 0}; - struct regmap *regmap; - - parent_name = of_clk_get_parent_name(np, 0); - if (!parent_name) - return; - - of_property_read_u32_array(np, "atmel,clk-divisors", divisors, 4); - if (!divisors[0]) - return; - - of_property_read_string(np, "clock-output-names", &name); - - regmap = syscon_node_to_regmap(of_get_parent(np)); - if (IS_ERR(regmap)) - return; - hw = at91rm9200_clk_register_usb(regmap, name, parent_name, divisors); - if (IS_ERR(hw)) - return; - - of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); -} -CLK_OF_DECLARE(at91rm9200_clk_usb, "atmel,at91rm9200-clk-usb", - of_at91rm9200_clk_usb_setup); diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c index cd8d689138ff..9a970abf3489 100644 --- a/drivers/clk/at91/clk-utmi.c +++ b/drivers/clk/at91/clk-utmi.c @@ -125,7 +125,7 @@ static const struct clk_ops utmi_ops = { .recalc_rate = clk_utmi_recalc_rate, }; -static struct clk_hw * __init +struct clk_hw * __init at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap 
*regmap_sfr, const char *name, const char *parent_name) { @@ -157,46 +157,3 @@ at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr, return hw; } - -static void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np) -{ - struct clk_hw *hw; - const char *parent_name; - const char *name = np->name; - struct regmap *regmap_pmc, *regmap_sfr; - - parent_name = of_clk_get_parent_name(np, 0); - - of_property_read_string(np, "clock-output-names", &name); - - regmap_pmc = syscon_node_to_regmap(of_get_parent(np)); - if (IS_ERR(regmap_pmc)) - return; - - /* - * If the device supports different mainck rates, this value has to be - * set in the UTMI Clock Trimming register. - * - 9x5: mainck supports several rates but it is indicated that a - * 12 MHz is needed in case of USB. - * - sama5d3 and sama5d2: mainck supports several rates. Configuring - * the FREQ field of the UTMI Clock Trimming register is mandatory. - * - sama5d4: mainck is at 12 MHz. - * - * We only need to retrieve sama5d3 or sama5d2 sfr regmap. - */ - regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d3-sfr"); - if (IS_ERR(regmap_sfr)) { - regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr"); - if (IS_ERR(regmap_sfr)) - regmap_sfr = NULL; - } - - hw = at91_clk_register_utmi(regmap_pmc, regmap_sfr, name, parent_name); - if (IS_ERR(hw)) - return; - - of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); - return; -} -CLK_OF_DECLARE(at91sam9x5_clk_utmi, "atmel,at91sam9x5-clk-utmi", - of_at91sam9x5_clk_utmi_setup); diff --git a/drivers/clk/at91/dt-compat.c b/drivers/clk/at91/dt-compat.c new file mode 100644 index 000000000000..b95bb4e2a927 --- /dev/null +++ b/drivers/clk/at91/dt-compat.c @@ -0,0 +1,961 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/clk-provider.h> +#include <linux/clk/at91_pmc.h> +#include <linux/of.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +#include "pmc.h" + +#define MASTER_SOURCE_MAX 4 + +#define PERIPHERAL_AT91RM9200 0 +#define PERIPHERAL_AT91SAM9X5 1 + +#define PERIPHERAL_MAX 64 + +#define PERIPHERAL_ID_MIN 2 + +#define PROG_SOURCE_MAX 5 +#define PROG_ID_MAX 7 + +#define SYSTEM_MAX_ID 31 + +#ifdef CONFIG_HAVE_AT91_AUDIO_PLL +static void __init of_sama5d2_clk_audio_pll_frac_setup(struct device_node *np) +{ + struct clk_hw *hw; + const char *name = np->name; + const char *parent_name; + struct regmap *regmap; + + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return; + + parent_name = of_clk_get_parent_name(np, 0); + + hw = at91_clk_register_audio_pll_frac(regmap, name, parent_name); + if (IS_ERR(hw)) + return; + + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); +} +CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_frac_setup, + "atmel,sama5d2-clk-audio-pll-frac", + of_sama5d2_clk_audio_pll_frac_setup); + +static void __init of_sama5d2_clk_audio_pll_pad_setup(struct device_node *np) +{ + struct clk_hw *hw; + const char *name = np->name; + const char *parent_name; + struct regmap *regmap; + + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return; + + parent_name = of_clk_get_parent_name(np, 0); + + hw = at91_clk_register_audio_pll_pad(regmap, name, parent_name); + if (IS_ERR(hw)) + return; + + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); +} +CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_pad_setup, + "atmel,sama5d2-clk-audio-pll-pad", + of_sama5d2_clk_audio_pll_pad_setup); + +static void __init of_sama5d2_clk_audio_pll_pmc_setup(struct device_node *np) +{ + 
struct clk_hw *hw; + const char *name = np->name; + const char *parent_name; + struct regmap *regmap; + + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return; + + parent_name = of_clk_get_parent_name(np, 0); + + hw = at91_clk_register_audio_pll_pmc(regmap, name, parent_name); + if (IS_ERR(hw)) + return; + + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); +} +CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_pmc_setup, + "atmel,sama5d2-clk-audio-pll-pmc", + of_sama5d2_clk_audio_pll_pmc_setup); +#endif /* CONFIG_HAVE_AT91_AUDIO_PLL */ + +#ifdef CONFIG_HAVE_AT91_GENERATED_CLK +#define GENERATED_SOURCE_MAX 6 + +#define GCK_ID_I2S0 54 +#define GCK_ID_I2S1 55 +#define GCK_ID_CLASSD 59 + +static void __init of_sama5d2_clk_generated_setup(struct device_node *np) +{ + int num; + u32 id; + const char *name; + struct clk_hw *hw; + unsigned int num_parents; + const char *parent_names[GENERATED_SOURCE_MAX]; + struct device_node *gcknp; + struct clk_range range = CLK_RANGE(0, 0); + struct regmap *regmap; + + num_parents = of_clk_get_parent_count(np); + if (num_parents == 0 || num_parents > GENERATED_SOURCE_MAX) + return; + + of_clk_parent_fill(np, parent_names, num_parents); + + num = of_get_child_count(np); + if (!num || num > PERIPHERAL_MAX) + return; + + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return; + + for_each_child_of_node(np, gcknp) { + bool pll_audio = false; + + if (of_property_read_u32(gcknp, "reg", &id)) + continue; + + if (id < PERIPHERAL_ID_MIN || id >= PERIPHERAL_MAX) + continue; + + if (of_property_read_string(np, "clock-output-names", &name)) + name = gcknp->name; + + of_at91_get_clk_range(gcknp, "atmel,clk-output-range", + &range); + + if (of_device_is_compatible(np, "atmel,sama5d2-clk-generated") && + (id == GCK_ID_I2S0 || id == GCK_ID_I2S1 || + id == GCK_ID_CLASSD)) + pll_audio = true; + + hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, name, + parent_names, num_parents, + id, pll_audio, &range); + if (IS_ERR(hw)) + continue; + + of_clk_add_hw_provider(gcknp, of_clk_hw_simple_get, hw); + } +} +CLK_OF_DECLARE(of_sama5d2_clk_generated_setup, "atmel,sama5d2-clk-generated", + of_sama5d2_clk_generated_setup); +#endif /* CONFIG_HAVE_AT91_GENERATED_CLK */ + +#ifdef CONFIG_HAVE_AT91_H32MX +static void __init of_sama5d4_clk_h32mx_setup(struct device_node *np) +{ + struct clk_hw *hw; + const char *name = np->name; + const char *parent_name; + struct regmap *regmap; + + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return; + + parent_name = of_clk_get_parent_name(np, 0); + + hw = at91_clk_register_h32mx(regmap, name, parent_name); + if (IS_ERR(hw)) + return; + + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); +} +CLK_OF_DECLARE(of_sama5d4_clk_h32mx_setup, "atmel,sama5d4-clk-h32mx", + of_sama5d4_clk_h32mx_setup); +#endif /* CONFIG_HAVE_AT91_H32MX */ + +#ifdef CONFIG_HAVE_AT91_I2S_MUX_CLK +#define I2S_BUS_NR 2 + +static void __init of_sama5d2_clk_i2s_mux_setup(struct device_node *np) +{ + struct regmap *regmap_sfr; + u8 bus_id; + const char *parent_names[2]; + struct device_node *i2s_mux_np; + struct clk_hw *hw; + int ret; + + regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr"); + if (IS_ERR(regmap_sfr)) + return; + + for_each_child_of_node(np, i2s_mux_np) { + if (of_property_read_u8(i2s_mux_np, "reg", &bus_id)) + continue; + + if (bus_id > I2S_BUS_NR) + continue; + + ret = of_clk_parent_fill(i2s_mux_np, parent_names, 2); + if (ret != 2) + continue; + + hw = 
at91_clk_i2s_mux_register(regmap_sfr, i2s_mux_np->name, + parent_names, 2, bus_id); + if (IS_ERR(hw)) + continue; + + of_clk_add_hw_provider(i2s_mux_np, of_clk_hw_simple_get, hw); + } +} +CLK_OF_DECLARE(sama5d2_clk_i2s_mux, "atmel,sama5d2-clk-i2s-mux", + of_sama5d2_clk_i2s_mux_setup); +#endif /* CONFIG_HAVE_AT91_I2S_MUX_CLK */ + +static void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np) +{ + struct clk_hw *hw; + const char *name = np->name; + const char *parent_name; + struct regmap *regmap; + bool bypass; + + of_property_read_string(np, "clock-output-names", &name); + bypass = of_property_read_bool(np, "atmel,osc-bypass"); + parent_name = of_clk_get_parent_name(np, 0); + + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return; + + hw = at91_clk_register_main_osc(regmap, name, parent_name, bypass); + if (IS_ERR(hw)) + return; + + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); +} +CLK_OF_DECLARE(at91rm9200_clk_main_osc, "atmel,at91rm9200-clk-main-osc", + of_at91rm9200_clk_main_osc_setup); + +static void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np) +{ + struct clk_hw *hw; + u32 frequency = 0; + u32 accuracy = 0; + const char *name = np->name; + struct regmap *regmap; + + of_property_read_string(np, "clock-output-names", &name); + of_property_read_u32(np, "clock-frequency", &frequency); + of_property_read_u32(np, "clock-accuracy", &accuracy); + + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return; + + hw = at91_clk_register_main_rc_osc(regmap, name, frequency, accuracy); + if (IS_ERR(hw)) + return; + + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); +} +CLK_OF_DECLARE(at91sam9x5_clk_main_rc_osc, "atmel,at91sam9x5-clk-main-rc-osc", + of_at91sam9x5_clk_main_rc_osc_setup); + +static void __init of_at91rm9200_clk_main_setup(struct device_node *np) +{ + struct clk_hw *hw; + const char *parent_name; + const char *name = np->name; + struct regmap *regmap; + + parent_name = of_clk_get_parent_name(np, 0); + of_property_read_string(np, "clock-output-names", &name); + + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return; + + hw = at91_clk_register_rm9200_main(regmap, name, parent_name); + if (IS_ERR(hw)) + return; + + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); +} +CLK_OF_DECLARE(at91rm9200_clk_main, "atmel,at91rm9200-clk-main", + of_at91rm9200_clk_main_setup); + +static void __init of_at91sam9x5_clk_main_setup(struct device_node *np) +{ + struct clk_hw *hw; + const char *parent_names[2]; + unsigned int num_parents; + const char *name = np->name; + struct regmap *regmap; + + num_parents = of_clk_get_parent_count(np); + if (num_parents == 0 || num_parents > 2) + return; + + of_clk_parent_fill(np, parent_names, num_parents); + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return; + + of_property_read_string(np, "clock-output-names", &name); + + hw = at91_clk_register_sam9x5_main(regmap, name, parent_names, + num_parents); + if (IS_ERR(hw)) + return; + + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); +} +CLK_OF_DECLARE(at91sam9x5_clk_main, "atmel,at91sam9x5-clk-main", + of_at91sam9x5_clk_main_setup); + +static struct clk_master_characteristics * __init +of_at91_clk_master_get_characteristics(struct device_node *np) +{ + struct clk_master_characteristics *characteristics; + + characteristics = kzalloc(sizeof(*characteristics), GFP_KERNEL); + if (!characteristics) + return NULL; + + if (of_at91_get_clk_range(np, 
"atmel,clk-output-range", &characteristics->output)) + goto out_free_characteristics; + + of_property_read_u32_array(np, "atmel,clk-divisors", + characteristics->divisors, 4); + + characteristics->have_div3_pres = + of_property_read_bool(np, "atmel,master-clk-have-div3-pres"); + + return characteristics; + +out_free_characteristics: + kfree(characteristics); + return NULL; +} + +static void __init +of_at91_clk_master_setup(struct device_node *np, + const struct clk_master_layout *layout) +{ + struct clk_hw *hw; + unsigned int num_parents; + const char *parent_names[MASTER_SOURCE_MAX]; + const char *name = np->name; + struct clk_master_characteristics *characteristics; + struct regmap *regmap; + + num_parents = of_clk_get_parent_count(np); + if (num_parents == 0 || num_parents > MASTER_SOURCE_MAX) + return; + + of_clk_parent_fill(np, parent_names, num_parents); + + of_property_read_string(np, "clock-output-names", &name); + + characteristics = of_at91_clk_master_get_characteristics(np); + if (!characteristics) + return; + + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return; + + hw = at91_clk_register_master(regmap, name, num_parents, + parent_names, layout, + characteristics); + if (IS_ERR(hw)) + goto out_free_characteristics; + + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); + return; + +out_free_characteristics: + kfree(characteristics); +} + +static void __init of_at91rm9200_clk_master_setup(struct device_node *np) +{ + of_at91_clk_master_setup(np, &at91rm9200_master_layout); +} +CLK_OF_DECLARE(at91rm9200_clk_master, "atmel,at91rm9200-clk-master", + of_at91rm9200_clk_master_setup); + +static void __init of_at91sam9x5_clk_master_setup(struct device_node *np) +{ + of_at91_clk_master_setup(np, &at91sam9x5_master_layout); +} +CLK_OF_DECLARE(at91sam9x5_clk_master, "atmel,at91sam9x5-clk-master", + of_at91sam9x5_clk_master_setup); + +static void __init +of_at91_clk_periph_setup(struct device_node *np, u8 type) +{ + int num; + u32 id; + struct clk_hw *hw; + const char *parent_name; + const char *name; + struct device_node *periphclknp; + struct regmap *regmap; + + parent_name = of_clk_get_parent_name(np, 0); + if (!parent_name) + return; + + num = of_get_child_count(np); + if (!num || num > PERIPHERAL_MAX) + return; + + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return; + + for_each_child_of_node(np, periphclknp) { + if (of_property_read_u32(periphclknp, "reg", &id)) + continue; + + if (id >= PERIPHERAL_MAX) + continue; + + if (of_property_read_string(np, "clock-output-names", &name)) + name = periphclknp->name; + + if (type == PERIPHERAL_AT91RM9200) { + hw = at91_clk_register_peripheral(regmap, name, + parent_name, id); + } else { + struct clk_range range = CLK_RANGE(0, 0); + + of_at91_get_clk_range(periphclknp, + "atmel,clk-output-range", + &range); + + hw = at91_clk_register_sam9x5_peripheral(regmap, + &pmc_pcr_lock, + name, + parent_name, + id, &range); + } + + if (IS_ERR(hw)) + continue; + + of_clk_add_hw_provider(periphclknp, of_clk_hw_simple_get, hw); + } +} + +static void __init of_at91rm9200_clk_periph_setup(struct device_node *np) +{ + of_at91_clk_periph_setup(np, PERIPHERAL_AT91RM9200); +} +CLK_OF_DECLARE(at91rm9200_clk_periph, "atmel,at91rm9200-clk-peripheral", + of_at91rm9200_clk_periph_setup); + +static void __init of_at91sam9x5_clk_periph_setup(struct device_node *np) +{ + of_at91_clk_periph_setup(np, PERIPHERAL_AT91SAM9X5); +} +CLK_OF_DECLARE(at91sam9x5_clk_periph, "atmel,at91sam9x5-clk-peripheral", + 
of_at91sam9x5_clk_periph_setup); + +static struct clk_pll_characteristics * __init +of_at91_clk_pll_get_characteristics(struct device_node *np) +{ + int i; + int offset; + u32 tmp; + int num_output; + u32 num_cells; + struct clk_range input; + struct clk_range *output; + u8 *out = NULL; + u16 *icpll = NULL; + struct clk_pll_characteristics *characteristics; + + if (of_at91_get_clk_range(np, "atmel,clk-input-range", &input)) + return NULL; + + if (of_property_read_u32(np, "#atmel,pll-clk-output-range-cells", + &num_cells)) + return NULL; + + if (num_cells < 2 || num_cells > 4) + return NULL; + + if (!of_get_property(np, "atmel,pll-clk-output-ranges", &tmp)) + return NULL; + num_output = tmp / (sizeof(u32) * num_cells); + + characteristics = kzalloc(sizeof(*characteristics), GFP_KERNEL); + if (!characteristics) + return NULL; + + output = kcalloc(num_output, sizeof(*output), GFP_KERNEL); + if (!output) + goto out_free_characteristics; + + if (num_cells > 2) { + out = kcalloc(num_output, sizeof(*out), GFP_KERNEL); + if (!out) + goto out_free_output; + } + + if (num_cells > 3) { + icpll = kcalloc(num_output, sizeof(*icpll), GFP_KERNEL); + if (!icpll) + goto out_free_output; + } + + for (i = 0; i < num_output; i++) { + offset = i * num_cells; + if (of_property_read_u32_index(np, + "atmel,pll-clk-output-ranges", + offset, &tmp)) + goto out_free_output; + output[i].min = tmp; + if (of_property_read_u32_index(np, + "atmel,pll-clk-output-ranges", + offset + 1, &tmp)) + goto out_free_output; + output[i].max = tmp; + + if (num_cells == 2) + continue; + + if (of_property_read_u32_index(np, + "atmel,pll-clk-output-ranges", + offset + 2, &tmp)) + goto out_free_output; + out[i] = tmp; + + if (num_cells == 3) + continue; + + if (of_property_read_u32_index(np, + "atmel,pll-clk-output-ranges", + offset + 3, &tmp)) + goto out_free_output; + icpll[i] = tmp; + } + + characteristics->input = input; + characteristics->num_output = num_output; + characteristics->output = output; + characteristics->out = out; + characteristics->icpll = icpll; + return characteristics; + +out_free_output: + kfree(icpll); + kfree(out); + kfree(output); +out_free_characteristics: + kfree(characteristics); + return NULL; +} + +static void __init +of_at91_clk_pll_setup(struct device_node *np, + const struct clk_pll_layout *layout) +{ + u32 id; + struct clk_hw *hw; + struct regmap *regmap; + const char *parent_name; + const char *name = np->name; + struct clk_pll_characteristics *characteristics; + + if (of_property_read_u32(np, "reg", &id)) + return; + + parent_name = of_clk_get_parent_name(np, 0); + + of_property_read_string(np, "clock-output-names", &name); + + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return; + + characteristics = of_at91_clk_pll_get_characteristics(np); + if (!characteristics) + return; + + hw = at91_clk_register_pll(regmap, name, parent_name, id, layout, + characteristics); + if (IS_ERR(hw)) + goto out_free_characteristics; + + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); + return; + +out_free_characteristics: + kfree(characteristics); +} + +static void __init of_at91rm9200_clk_pll_setup(struct device_node *np) +{ + of_at91_clk_pll_setup(np, &at91rm9200_pll_layout); +} +CLK_OF_DECLARE(at91rm9200_clk_pll, "atmel,at91rm9200-clk-pll", + of_at91rm9200_clk_pll_setup); + +static void __init of_at91sam9g45_clk_pll_setup(struct device_node *np) +{ + of_at91_clk_pll_setup(np, &at91sam9g45_pll_layout); +} +CLK_OF_DECLARE(at91sam9g45_clk_pll, "atmel,at91sam9g45-clk-pll", + 
of_at91sam9g45_clk_pll_setup); + +static void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np) +{ + of_at91_clk_pll_setup(np, &at91sam9g20_pllb_layout); +} +CLK_OF_DECLARE(at91sam9g20_clk_pllb, "atmel,at91sam9g20-clk-pllb", + of_at91sam9g20_clk_pllb_setup); + +static void __init of_sama5d3_clk_pll_setup(struct device_node *np) +{ + of_at91_clk_pll_setup(np, &sama5d3_pll_layout); +} +CLK_OF_DECLARE(sama5d3_clk_pll, "atmel,sama5d3-clk-pll", + of_sama5d3_clk_pll_setup); + +static void __init +of_at91sam9x5_clk_plldiv_setup(struct device_node *np) +{ + struct clk_hw *hw; + const char *parent_name; + const char *name = np->name; + struct regmap *regmap; + + parent_name = of_clk_get_parent_name(np, 0); + + of_property_read_string(np, "clock-output-names", &name); + + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return; + + hw = at91_clk_register_plldiv(regmap, name, parent_name); + if (IS_ERR(hw)) + return; + + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); +} +CLK_OF_DECLARE(at91sam9x5_clk_plldiv, "atmel,at91sam9x5-clk-plldiv", + of_at91sam9x5_clk_plldiv_setup); + +static void __init +of_at91_clk_prog_setup(struct device_node *np, + const struct clk_programmable_layout *layout) +{ + int num; + u32 id; + struct clk_hw *hw; + unsigned int num_parents; + const char *parent_names[PROG_SOURCE_MAX]; + const char *name; + struct device_node *progclknp; + struct regmap *regmap; + + num_parents = of_clk_get_parent_count(np); + if (num_parents == 0 || num_parents > PROG_SOURCE_MAX) + return; + + of_clk_parent_fill(np, parent_names, num_parents); + + num = of_get_child_count(np); + if (!num || num > (PROG_ID_MAX + 1)) + return; + + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return; + + for_each_child_of_node(np, progclknp) { + if (of_property_read_u32(progclknp, "reg", &id)) + continue; + + if (of_property_read_string(np, "clock-output-names", &name)) + name = progclknp->name; + + hw = at91_clk_register_programmable(regmap, name, + parent_names, num_parents, + id, layout); + if (IS_ERR(hw)) + continue; + + of_clk_add_hw_provider(progclknp, of_clk_hw_simple_get, hw); + } +} + +static void __init of_at91rm9200_clk_prog_setup(struct device_node *np) +{ + of_at91_clk_prog_setup(np, &at91rm9200_programmable_layout); +} +CLK_OF_DECLARE(at91rm9200_clk_prog, "atmel,at91rm9200-clk-programmable", + of_at91rm9200_clk_prog_setup); + +static void __init of_at91sam9g45_clk_prog_setup(struct device_node *np) +{ + of_at91_clk_prog_setup(np, &at91sam9g45_programmable_layout); +} +CLK_OF_DECLARE(at91sam9g45_clk_prog, "atmel,at91sam9g45-clk-programmable", + of_at91sam9g45_clk_prog_setup); + +static void __init of_at91sam9x5_clk_prog_setup(struct device_node *np) +{ + of_at91_clk_prog_setup(np, &at91sam9x5_programmable_layout); +} +CLK_OF_DECLARE(at91sam9x5_clk_prog, "atmel,at91sam9x5-clk-programmable", + of_at91sam9x5_clk_prog_setup); + +static void __init of_at91sam9260_clk_slow_setup(struct device_node *np) +{ + struct clk_hw *hw; + const char *parent_names[2]; + unsigned int num_parents; + const char *name = np->name; + struct regmap *regmap; + + num_parents = of_clk_get_parent_count(np); + if (num_parents != 2) + return; + + of_clk_parent_fill(np, parent_names, num_parents); + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return; + + of_property_read_string(np, "clock-output-names", &name); + + hw = at91_clk_register_sam9260_slow(regmap, name, parent_names, + num_parents); + if (IS_ERR(hw)) + return; + + 
of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); +} +CLK_OF_DECLARE(at91sam9260_clk_slow, "atmel,at91sam9260-clk-slow", + of_at91sam9260_clk_slow_setup); + +#ifdef CONFIG_HAVE_AT91_SMD +#define SMD_SOURCE_MAX 2 + +static void __init of_at91sam9x5_clk_smd_setup(struct device_node *np) +{ + struct clk_hw *hw; + unsigned int num_parents; + const char *parent_names[SMD_SOURCE_MAX]; + const char *name = np->name; + struct regmap *regmap; + + num_parents = of_clk_get_parent_count(np); + if (num_parents == 0 || num_parents > SMD_SOURCE_MAX) + return; + + of_clk_parent_fill(np, parent_names, num_parents); + + of_property_read_string(np, "clock-output-names", &name); + + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return; + + hw = at91sam9x5_clk_register_smd(regmap, name, parent_names, + num_parents); + if (IS_ERR(hw)) + return; + + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); +} +CLK_OF_DECLARE(at91sam9x5_clk_smd, "atmel,at91sam9x5-clk-smd", + of_at91sam9x5_clk_smd_setup); +#endif /* CONFIG_HAVE_AT91_SMD */ + +static void __init of_at91rm9200_clk_sys_setup(struct device_node *np) +{ + int num; + u32 id; + struct clk_hw *hw; + const char *name; + struct device_node *sysclknp; + const char *parent_name; + struct regmap *regmap; + + num = of_get_child_count(np); + if (num > (SYSTEM_MAX_ID + 1)) + return; + + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return; + + for_each_child_of_node(np, sysclknp) { + if (of_property_read_u32(sysclknp, "reg", &id)) + continue; + + if (of_property_read_string(np, "clock-output-names", &name)) + name = sysclknp->name; + + parent_name = of_clk_get_parent_name(sysclknp, 0); + + hw = at91_clk_register_system(regmap, name, parent_name, id); + if (IS_ERR(hw)) + continue; + + of_clk_add_hw_provider(sysclknp, of_clk_hw_simple_get, hw); + } +} +CLK_OF_DECLARE(at91rm9200_clk_sys, "atmel,at91rm9200-clk-system", + of_at91rm9200_clk_sys_setup); + +#ifdef CONFIG_HAVE_AT91_USB_CLK +#define USB_SOURCE_MAX 2 + +static void __init of_at91sam9x5_clk_usb_setup(struct device_node *np) +{ + struct clk_hw *hw; + unsigned int num_parents; + const char *parent_names[USB_SOURCE_MAX]; + const char *name = np->name; + struct regmap *regmap; + + num_parents = of_clk_get_parent_count(np); + if (num_parents == 0 || num_parents > USB_SOURCE_MAX) + return; + + of_clk_parent_fill(np, parent_names, num_parents); + + of_property_read_string(np, "clock-output-names", &name); + + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return; + + hw = at91sam9x5_clk_register_usb(regmap, name, parent_names, + num_parents); + if (IS_ERR(hw)) + return; + + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); +} +CLK_OF_DECLARE(at91sam9x5_clk_usb, "atmel,at91sam9x5-clk-usb", + of_at91sam9x5_clk_usb_setup); + +static void __init of_at91sam9n12_clk_usb_setup(struct device_node *np) +{ + struct clk_hw *hw; + const char *parent_name; + const char *name = np->name; + struct regmap *regmap; + + parent_name = of_clk_get_parent_name(np, 0); + if (!parent_name) + return; + + of_property_read_string(np, "clock-output-names", &name); + + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return; + + hw = at91sam9n12_clk_register_usb(regmap, name, parent_name); + if (IS_ERR(hw)) + return; + + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); +} +CLK_OF_DECLARE(at91sam9n12_clk_usb, "atmel,at91sam9n12-clk-usb", + of_at91sam9n12_clk_usb_setup); + +static void __init of_at91rm9200_clk_usb_setup(struct 
device_node *np) +{ + struct clk_hw *hw; + const char *parent_name; + const char *name = np->name; + u32 divisors[4] = {0, 0, 0, 0}; + struct regmap *regmap; + + parent_name = of_clk_get_parent_name(np, 0); + if (!parent_name) + return; + + of_property_read_u32_array(np, "atmel,clk-divisors", divisors, 4); + if (!divisors[0]) + return; + + of_property_read_string(np, "clock-output-names", &name); + + regmap = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap)) + return; + hw = at91rm9200_clk_register_usb(regmap, name, parent_name, divisors); + if (IS_ERR(hw)) + return; + + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); +} +CLK_OF_DECLARE(at91rm9200_clk_usb, "atmel,at91rm9200-clk-usb", + of_at91rm9200_clk_usb_setup); +#endif /* CONFIG_HAVE_AT91_USB_CLK */ + +#ifdef CONFIG_HAVE_AT91_UTMI +static void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np) +{ + struct clk_hw *hw; + const char *parent_name; + const char *name = np->name; + struct regmap *regmap_pmc, *regmap_sfr; + + parent_name = of_clk_get_parent_name(np, 0); + + of_property_read_string(np, "clock-output-names", &name); + + regmap_pmc = syscon_node_to_regmap(of_get_parent(np)); + if (IS_ERR(regmap_pmc)) + return; + + /* + * If the device supports different mainck rates, this value has to be + * set in the UTMI Clock Trimming register. + * - 9x5: mainck supports several rates but it is indicated that a + * 12 MHz is needed in case of USB. + * - sama5d3 and sama5d2: mainck supports several rates. Configuring + * the FREQ field of the UTMI Clock Trimming register is mandatory. + * - sama5d4: mainck is at 12 MHz. + * + * We only need to retrieve sama5d3 or sama5d2 sfr regmap. + */ + regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d3-sfr"); + if (IS_ERR(regmap_sfr)) { + regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr"); + if (IS_ERR(regmap_sfr)) + regmap_sfr = NULL; + } + + hw = at91_clk_register_utmi(regmap_pmc, regmap_sfr, name, parent_name); + if (IS_ERR(hw)) + return; + + of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); +} +CLK_OF_DECLARE(at91sam9x5_clk_utmi, "atmel,at91sam9x5-clk-utmi", + of_at91sam9x5_clk_utmi_setup); +#endif /* CONFIG_HAVE_AT91_UTMI */ diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c index 1fa27f4ea538..db24539d5740 100644 --- a/drivers/clk/at91/pmc.c +++ b/drivers/clk/at91/pmc.c @@ -19,6 +19,8 @@ #include <asm/proc-fns.h> +#include <dt-bindings/clock/at91.h> + #include "pmc.h" #define PMC_MAX_IDS 128 @@ -47,6 +49,82 @@ int of_at91_get_clk_range(struct device_node *np, const char *propname, } EXPORT_SYMBOL_GPL(of_at91_get_clk_range); +struct clk_hw *of_clk_hw_pmc_get(struct of_phandle_args *clkspec, void *data) +{ + unsigned int type = clkspec->args[0]; + unsigned int idx = clkspec->args[1]; + struct pmc_data *pmc_data = data; + + switch (type) { + case PMC_TYPE_CORE: + if (idx < pmc_data->ncore) + return pmc_data->chws[idx]; + break; + case PMC_TYPE_SYSTEM: + if (idx < pmc_data->nsystem) + return pmc_data->shws[idx]; + break; + case PMC_TYPE_PERIPHERAL: + if (idx < pmc_data->nperiph) + return pmc_data->phws[idx]; + break; + case PMC_TYPE_GCK: + if (idx < pmc_data->ngck) + return pmc_data->ghws[idx]; + break; + default: + break; + } + + pr_err("%s: invalid type (%u) or index (%u)\n", __func__, type, idx); + + return ERR_PTR(-EINVAL); +} + +void pmc_data_free(struct pmc_data *pmc_data) +{ + kfree(pmc_data->chws); + kfree(pmc_data->shws); + kfree(pmc_data->phws); + kfree(pmc_data->ghws); +} + +struct pmc_data *pmc_data_allocate(unsigned 
int ncore, unsigned int nsystem, + unsigned int nperiph, unsigned int ngck) +{ + struct pmc_data *pmc_data = kzalloc(sizeof(*pmc_data), GFP_KERNEL); + + if (!pmc_data) + return NULL; + + pmc_data->ncore = ncore; + pmc_data->chws = kcalloc(ncore, sizeof(struct clk_hw *), GFP_KERNEL); + if (!pmc_data->chws) + goto err; + + pmc_data->nsystem = nsystem; + pmc_data->shws = kcalloc(nsystem, sizeof(struct clk_hw *), GFP_KERNEL); + if (!pmc_data->shws) + goto err; + + pmc_data->nperiph = nperiph; + pmc_data->phws = kcalloc(nperiph, sizeof(struct clk_hw *), GFP_KERNEL); + if (!pmc_data->phws) + goto err; + + pmc_data->ngck = ngck; + pmc_data->ghws = kcalloc(ngck, sizeof(struct clk_hw *), GFP_KERNEL); + if (!pmc_data->ghws) + goto err; + + return pmc_data; + +err: + pmc_data_free(pmc_data); + + return NULL; +} + #ifdef CONFIG_PM static struct regmap *pmcreg; diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h index d22b1fa9ecdc..672a79bda88c 100644 --- a/drivers/clk/at91/pmc.h +++ b/drivers/clk/at91/pmc.h @@ -19,6 +19,17 @@ extern spinlock_t pmc_pcr_lock; +struct pmc_data { + unsigned int ncore; + struct clk_hw **chws; + unsigned int nsystem; + struct clk_hw **shws; + unsigned int nperiph; + struct clk_hw **phws; + unsigned int ngck; + struct clk_hw **ghws; +}; + struct clk_range { unsigned long min; unsigned long max; @@ -26,9 +37,157 @@ struct clk_range { #define CLK_RANGE(MIN, MAX) {.min = MIN, .max = MAX,} +struct clk_master_layout { + u32 mask; + u8 pres_shift; +}; + +extern const struct clk_master_layout at91rm9200_master_layout; +extern const struct clk_master_layout at91sam9x5_master_layout; + +struct clk_master_characteristics { + struct clk_range output; + u32 divisors[4]; + u8 have_div3_pres; +}; + +struct clk_pll_layout { + u32 pllr_mask; + u16 mul_mask; + u8 mul_shift; +}; + +extern const struct clk_pll_layout at91rm9200_pll_layout; +extern const struct clk_pll_layout at91sam9g45_pll_layout; +extern const struct clk_pll_layout at91sam9g20_pllb_layout; +extern const struct clk_pll_layout sama5d3_pll_layout; + +struct clk_pll_characteristics { + struct clk_range input; + int num_output; + struct clk_range *output; + u16 *icpll; + u8 *out; +}; + +struct clk_programmable_layout { + u8 pres_shift; + u8 css_mask; + u8 have_slck_mck; +}; + +extern const struct clk_programmable_layout at91rm9200_programmable_layout; +extern const struct clk_programmable_layout at91sam9g45_programmable_layout; +extern const struct clk_programmable_layout at91sam9x5_programmable_layout; + +#define ndck(a, s) (a[s - 1].id + 1) +#define nck(a) (a[ARRAY_SIZE(a) - 1].id + 1) +struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem, + unsigned int nperiph, unsigned int ngck); +void pmc_data_free(struct pmc_data *pmc_data); + int of_at91_get_clk_range(struct device_node *np, const char *propname, struct clk_range *range); +struct clk_hw *of_clk_hw_pmc_get(struct of_phandle_args *clkspec, void *data); + +struct clk_hw * __init +at91_clk_register_audio_pll_frac(struct regmap *regmap, const char *name, + const char *parent_name); + +struct clk_hw * __init +at91_clk_register_audio_pll_pad(struct regmap *regmap, const char *name, + const char *parent_name); + +struct clk_hw * __init +at91_clk_register_audio_pll_pmc(struct regmap *regmap, const char *name, + const char *parent_name); + +struct clk_hw * __init +at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock, + const char *name, const char **parent_names, + u8 num_parents, u8 id, bool pll_audio, + const struct clk_range 
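The pmc.c hunk above adds a PMC-wide clock provider: of_clk_hw_pmc_get() resolves a two-cell specifier made of a PMC_TYPE_* class (from the newly included dt-bindings/clock/at91.h) plus an index into the matching pmc_data table, while pmc_data_allocate()/pmc_data_free() manage those tables. A minimal sketch, assuming the PMC_TYPE_*/PMC_* constants from that binding header, of the lookup a "clocks = <&pmc PMC_TYPE_CORE PMC_MAIN>" consumer would trigger; pmc_lookup_example() is a hypothetical helper, not part of the patch:

#include <dt-bindings/clock/at91.h>
#include <linux/clk-provider.h>
#include <linux/of.h>

#include "pmc.h"

/* Hypothetical helper: resolve the PMC_TYPE_CORE/PMC_MAIN clock the same way the provider does. */
static struct clk_hw *pmc_lookup_example(struct pmc_data *pmc_data)
{
        struct of_phandle_args clkspec = {
                .args_count = 2,
                .args = { PMC_TYPE_CORE, PMC_MAIN },
        };

        /* Dispatches on the type cell, bounds-checks the index, returns the clk_hw. */
        return of_clk_hw_pmc_get(&clkspec, pmc_data);
}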
*range); + +struct clk_hw * __init +at91_clk_register_h32mx(struct regmap *regmap, const char *name, + const char *parent_name); + +struct clk_hw * __init +at91_clk_i2s_mux_register(struct regmap *regmap, const char *name, + const char * const *parent_names, + unsigned int num_parents, u8 bus_id); + +struct clk_hw * __init +at91_clk_register_main_rc_osc(struct regmap *regmap, const char *name, + u32 frequency, u32 accuracy); +struct clk_hw * __init +at91_clk_register_main_osc(struct regmap *regmap, const char *name, + const char *parent_name, bool bypass); +struct clk_hw * __init +at91_clk_register_rm9200_main(struct regmap *regmap, + const char *name, + const char *parent_name); +struct clk_hw * __init +at91_clk_register_sam9x5_main(struct regmap *regmap, const char *name, + const char **parent_names, int num_parents); + +struct clk_hw * __init +at91_clk_register_master(struct regmap *regmap, const char *name, + int num_parents, const char **parent_names, + const struct clk_master_layout *layout, + const struct clk_master_characteristics *characteristics); + +struct clk_hw * __init +at91_clk_register_peripheral(struct regmap *regmap, const char *name, + const char *parent_name, u32 id); +struct clk_hw * __init +at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock, + const char *name, const char *parent_name, + u32 id, const struct clk_range *range); + +struct clk_hw * __init +at91_clk_register_pll(struct regmap *regmap, const char *name, + const char *parent_name, u8 id, + const struct clk_pll_layout *layout, + const struct clk_pll_characteristics *characteristics); +struct clk_hw * __init +at91_clk_register_plldiv(struct regmap *regmap, const char *name, + const char *parent_name); + +struct clk_hw * __init +at91_clk_register_programmable(struct regmap *regmap, const char *name, + const char **parent_names, u8 num_parents, u8 id, + const struct clk_programmable_layout *layout); + +struct clk_hw * __init +at91_clk_register_sam9260_slow(struct regmap *regmap, + const char *name, + const char **parent_names, + int num_parents); + +struct clk_hw * __init +at91sam9x5_clk_register_smd(struct regmap *regmap, const char *name, + const char **parent_names, u8 num_parents); + +struct clk_hw * __init +at91_clk_register_system(struct regmap *regmap, const char *name, + const char *parent_name, u8 id); + +struct clk_hw * __init +at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name, + const char **parent_names, u8 num_parents); +struct clk_hw * __init +at91sam9n12_clk_register_usb(struct regmap *regmap, const char *name, + const char *parent_name); +struct clk_hw * __init +at91rm9200_clk_register_usb(struct regmap *regmap, const char *name, + const char *parent_name, const u32 *divisors); + +struct clk_hw * __init +at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr, + const char *name, const char *parent_name); + #ifdef CONFIG_PM void pmc_register_id(u8 id); void pmc_register_pck(u8 pck); diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c new file mode 100644 index 000000000000..d69ad96fe988 --- /dev/null +++ b/drivers/clk/at91/sama5d2.c @@ -0,0 +1,336 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/clk-provider.h> +#include <linux/mfd/syscon.h> +#include <linux/slab.h> + +#include <dt-bindings/clock/at91.h> + +#include "pmc.h" + +static const struct clk_master_characteristics mck_characteristics = { + .output = { .min = 124000000, .max = 166000000 }, + .divisors = { 1, 2, 4, 3 }, +}; + +static u8 plla_out[] = { 
0 }; + +static u16 plla_icpll[] = { 0 }; + +static struct clk_range plla_outputs[] = { + { .min = 600000000, .max = 1200000000 }, +}; + +static const struct clk_pll_characteristics plla_characteristics = { + .input = { .min = 12000000, .max = 12000000 }, + .num_output = ARRAY_SIZE(plla_outputs), + .output = plla_outputs, + .icpll = plla_icpll, + .out = plla_out, +}; + +static const struct { + char *n; + char *p; + u8 id; +} sama5d2_systemck[] = { + { .n = "ddrck", .p = "masterck", .id = 2 }, + { .n = "lcdck", .p = "masterck", .id = 3 }, + { .n = "uhpck", .p = "usbck", .id = 6 }, + { .n = "udpck", .p = "usbck", .id = 7 }, + { .n = "pck0", .p = "prog0", .id = 8 }, + { .n = "pck1", .p = "prog1", .id = 9 }, + { .n = "pck2", .p = "prog2", .id = 10 }, + { .n = "iscck", .p = "masterck", .id = 18 }, +}; + +static const struct { + char *n; + u8 id; + struct clk_range r; +} sama5d2_periph32ck[] = { + { .n = "macb0_clk", .id = 5, .r = { .min = 0, .max = 83000000 }, }, + { .n = "tdes_clk", .id = 11, .r = { .min = 0, .max = 83000000 }, }, + { .n = "matrix1_clk", .id = 14, }, + { .n = "hsmc_clk", .id = 17, }, + { .n = "pioA_clk", .id = 18, .r = { .min = 0, .max = 83000000 }, }, + { .n = "flx0_clk", .id = 19, .r = { .min = 0, .max = 83000000 }, }, + { .n = "flx1_clk", .id = 20, .r = { .min = 0, .max = 83000000 }, }, + { .n = "flx2_clk", .id = 21, .r = { .min = 0, .max = 83000000 }, }, + { .n = "flx3_clk", .id = 22, .r = { .min = 0, .max = 83000000 }, }, + { .n = "flx4_clk", .id = 23, .r = { .min = 0, .max = 83000000 }, }, + { .n = "uart0_clk", .id = 24, .r = { .min = 0, .max = 83000000 }, }, + { .n = "uart1_clk", .id = 25, .r = { .min = 0, .max = 83000000 }, }, + { .n = "uart2_clk", .id = 26, .r = { .min = 0, .max = 83000000 }, }, + { .n = "uart3_clk", .id = 27, .r = { .min = 0, .max = 83000000 }, }, + { .n = "uart4_clk", .id = 28, .r = { .min = 0, .max = 83000000 }, }, + { .n = "twi0_clk", .id = 29, .r = { .min = 0, .max = 83000000 }, }, + { .n = "twi1_clk", .id = 30, .r = { .min = 0, .max = 83000000 }, }, + { .n = "spi0_clk", .id = 33, .r = { .min = 0, .max = 83000000 }, }, + { .n = "spi1_clk", .id = 34, .r = { .min = 0, .max = 83000000 }, }, + { .n = "tcb0_clk", .id = 35, .r = { .min = 0, .max = 83000000 }, }, + { .n = "tcb1_clk", .id = 36, .r = { .min = 0, .max = 83000000 }, }, + { .n = "pwm_clk", .id = 38, .r = { .min = 0, .max = 83000000 }, }, + { .n = "adc_clk", .id = 40, .r = { .min = 0, .max = 83000000 }, }, + { .n = "uhphs_clk", .id = 41, .r = { .min = 0, .max = 83000000 }, }, + { .n = "udphs_clk", .id = 42, .r = { .min = 0, .max = 83000000 }, }, + { .n = "ssc0_clk", .id = 43, .r = { .min = 0, .max = 83000000 }, }, + { .n = "ssc1_clk", .id = 44, .r = { .min = 0, .max = 83000000 }, }, + { .n = "trng_clk", .id = 47, .r = { .min = 0, .max = 83000000 }, }, + { .n = "pdmic_clk", .id = 48, .r = { .min = 0, .max = 83000000 }, }, + { .n = "securam_clk", .id = 51, }, + { .n = "i2s0_clk", .id = 54, .r = { .min = 0, .max = 83000000 }, }, + { .n = "i2s1_clk", .id = 55, .r = { .min = 0, .max = 83000000 }, }, + { .n = "can0_clk", .id = 56, .r = { .min = 0, .max = 83000000 }, }, + { .n = "can1_clk", .id = 57, .r = { .min = 0, .max = 83000000 }, }, + { .n = "classd_clk", .id = 59, .r = { .min = 0, .max = 83000000 }, }, +}; + +static const struct { + char *n; + u8 id; +} sama5d2_periphck[] = { + { .n = "dma0_clk", .id = 6, }, + { .n = "dma1_clk", .id = 7, }, + { .n = "aes_clk", .id = 9, }, + { .n = "aesb_clk", .id = 10, }, + { .n = "sha_clk", .id = 12, }, + { .n = "mpddr_clk", .id = 13, }, + { .n = 
"matrix0_clk", .id = 15, }, + { .n = "sdmmc0_hclk", .id = 31, }, + { .n = "sdmmc1_hclk", .id = 32, }, + { .n = "lcdc_clk", .id = 45, }, + { .n = "isc_clk", .id = 46, }, + { .n = "qspi0_clk", .id = 52, }, + { .n = "qspi1_clk", .id = 53, }, +}; + +static const struct { + char *n; + u8 id; + struct clk_range r; + bool pll; +} sama5d2_gck[] = { + { .n = "sdmmc0_gclk", .id = 31, }, + { .n = "sdmmc1_gclk", .id = 32, }, + { .n = "tcb0_gclk", .id = 35, .r = { .min = 0, .max = 83000000 }, }, + { .n = "tcb1_gclk", .id = 36, .r = { .min = 0, .max = 83000000 }, }, + { .n = "pwm_gclk", .id = 38, .r = { .min = 0, .max = 83000000 }, }, + { .n = "isc_gclk", .id = 46, }, + { .n = "pdmic_gclk", .id = 48, }, + { .n = "i2s0_gclk", .id = 54, .pll = true }, + { .n = "i2s1_gclk", .id = 55, .pll = true }, + { .n = "can0_gclk", .id = 56, .r = { .min = 0, .max = 80000000 }, }, + { .n = "can1_gclk", .id = 57, .r = { .min = 0, .max = 80000000 }, }, + { .n = "classd_gclk", .id = 59, .r = { .min = 0, .max = 100000000 }, + .pll = true }, +}; + +static void __init sama5d2_pmc_setup(struct device_node *np) +{ + struct clk_range range = CLK_RANGE(0, 0); + const char *slck_name, *mainxtal_name; + struct pmc_data *sama5d2_pmc; + const char *parent_names[6]; + struct regmap *regmap, *regmap_sfr; + struct clk_hw *hw; + int i; + bool bypass; + + i = of_property_match_string(np, "clock-names", "slow_clk"); + if (i < 0) + return; + + slck_name = of_clk_get_parent_name(np, i); + + i = of_property_match_string(np, "clock-names", "main_xtal"); + if (i < 0) + return; + mainxtal_name = of_clk_get_parent_name(np, i); + + regmap = syscon_node_to_regmap(np); + if (IS_ERR(regmap)) + return; + + sama5d2_pmc = pmc_data_allocate(PMC_I2S1_MUX + 1, + nck(sama5d2_systemck), + nck(sama5d2_periph32ck), + nck(sama5d2_gck)); + if (!sama5d2_pmc) + return; + + hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000, + 100000000); + if (IS_ERR(hw)) + goto err_free; + + bypass = of_property_read_bool(np, "atmel,osc-bypass"); + + hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, + bypass); + if (IS_ERR(hw)) + goto err_free; + + parent_names[0] = "main_rc_osc"; + parent_names[1] = "main_osc"; + hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2); + if (IS_ERR(hw)) + goto err_free; + + sama5d2_pmc->chws[PMC_MAIN] = hw; + + hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0, + &sama5d3_pll_layout, &plla_characteristics); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_plldiv(regmap, "plladivck", "pllack"); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_audio_pll_frac(regmap, "audiopll_fracck", + "mainck"); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_audio_pll_pad(regmap, "audiopll_padck", + "audiopll_fracck"); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_audio_pll_pmc(regmap, "audiopll_pmcck", + "audiopll_fracck"); + if (IS_ERR(hw)) + goto err_free; + + regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr"); + if (IS_ERR(regmap_sfr)) + regmap_sfr = NULL; + + hw = at91_clk_register_utmi(regmap, regmap_sfr, "utmick", "mainck"); + if (IS_ERR(hw)) + goto err_free; + + sama5d2_pmc->chws[PMC_UTMI] = hw; + + parent_names[0] = slck_name; + parent_names[1] = "mainck"; + parent_names[2] = "plladivck"; + parent_names[3] = "utmick"; + hw = at91_clk_register_master(regmap, "masterck", 4, parent_names, + &at91sam9x5_master_layout, + &mck_characteristics); + if (IS_ERR(hw)) + goto err_free; + + sama5d2_pmc->chws[PMC_MCK] = hw; + + hw = 
at91_clk_register_h32mx(regmap, "h32mxck", "masterck"); + if (IS_ERR(hw)) + goto err_free; + + sama5d2_pmc->chws[PMC_MCK2] = hw; + + parent_names[0] = "plladivck"; + parent_names[1] = "utmick"; + hw = at91sam9x5_clk_register_usb(regmap, "usbck", parent_names, 2); + if (IS_ERR(hw)) + goto err_free; + + parent_names[0] = slck_name; + parent_names[1] = "mainck"; + parent_names[2] = "plladivck"; + parent_names[3] = "utmick"; + parent_names[4] = "mck"; + for (i = 0; i < 3; i++) { + char name[6]; + + snprintf(name, sizeof(name), "prog%d", i); + + hw = at91_clk_register_programmable(regmap, name, + parent_names, 5, i, + &at91sam9x5_programmable_layout); + if (IS_ERR(hw)) + goto err_free; + } + + for (i = 0; i < ARRAY_SIZE(sama5d2_systemck); i++) { + hw = at91_clk_register_system(regmap, sama5d2_systemck[i].n, + sama5d2_systemck[i].p, + sama5d2_systemck[i].id); + if (IS_ERR(hw)) + goto err_free; + + sama5d2_pmc->shws[sama5d2_systemck[i].id] = hw; + } + + for (i = 0; i < ARRAY_SIZE(sama5d2_periphck); i++) { + hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock, + sama5d2_periphck[i].n, + "masterck", + sama5d2_periphck[i].id, + &range); + if (IS_ERR(hw)) + goto err_free; + + sama5d2_pmc->phws[sama5d2_periphck[i].id] = hw; + } + + for (i = 0; i < ARRAY_SIZE(sama5d2_periph32ck); i++) { + hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock, + sama5d2_periph32ck[i].n, + "h32mxck", + sama5d2_periph32ck[i].id, + &sama5d2_periph32ck[i].r); + if (IS_ERR(hw)) + goto err_free; + + sama5d2_pmc->phws[sama5d2_periph32ck[i].id] = hw; + } + + parent_names[0] = slck_name; + parent_names[1] = "mainck"; + parent_names[2] = "plladivck"; + parent_names[3] = "utmick"; + parent_names[4] = "mck"; + parent_names[5] = "audiopll_pmcck"; + for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) { + hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, + sama5d2_gck[i].n, + parent_names, 6, + sama5d2_gck[i].id, + sama5d2_gck[i].pll, + &sama5d2_gck[i].r); + if (IS_ERR(hw)) + goto err_free; + + sama5d2_pmc->ghws[sama5d2_gck[i].id] = hw; + } + + if (regmap_sfr) { + parent_names[0] = "i2s0_clk"; + parent_names[1] = "i2s0_gclk"; + hw = at91_clk_i2s_mux_register(regmap_sfr, "i2s0_muxclk", + parent_names, 2, 0); + if (IS_ERR(hw)) + goto err_free; + + sama5d2_pmc->chws[PMC_I2S0_MUX] = hw; + + parent_names[0] = "i2s1_clk"; + parent_names[1] = "i2s1_gclk"; + hw = at91_clk_i2s_mux_register(regmap_sfr, "i2s1_muxclk", + parent_names, 2, 1); + if (IS_ERR(hw)) + goto err_free; + + sama5d2_pmc->chws[PMC_I2S1_MUX] = hw; + } + + of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sama5d2_pmc); + + return; + +err_free: + pmc_data_free(sama5d2_pmc); +} +CLK_OF_DECLARE_DRIVER(sama5d2_pmc, "atmel,sama5d2-pmc", sama5d2_pmc_setup); diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c new file mode 100644 index 000000000000..e358be7f6c8d --- /dev/null +++ b/drivers/clk/at91/sama5d4.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/clk-provider.h> +#include <linux/mfd/syscon.h> +#include <linux/slab.h> + +#include <dt-bindings/clock/at91.h> + +#include "pmc.h" + +static const struct clk_master_characteristics mck_characteristics = { + .output = { .min = 125000000, .max = 200000000 }, + .divisors = { 1, 2, 4, 3 }, +}; + +static u8 plla_out[] = { 0 }; + +static u16 plla_icpll[] = { 0 }; + +static struct clk_range plla_outputs[] = { + { .min = 600000000, .max = 1200000000 }, +}; + +static const struct clk_pll_characteristics plla_characteristics = { + .input = { .min = 12000000, .max = 12000000 }, + 
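sama5d2.c registers its setup hook with CLK_OF_DECLARE_DRIVER rather than CLK_OF_DECLARE; the _DRIVER variant clears OF_POPULATED after the early of_clk_init() pass, so the same PMC node can still be turned into a platform device and bound by an ordinary driver later. A hedged sketch of such a second consumer (the foo_* names are hypothetical and not implied by this patch):

#include <linux/of.h>
#include <linux/platform_device.h>

static int foo_pmc_probe(struct platform_device *pdev)
{
        /* Runs in addition to the early sama5d2_pmc_setup() hook. */
        dev_info(&pdev->dev, "PMC platform device bound\n");
        return 0;
}

static const struct of_device_id foo_pmc_ids[] = {
        { .compatible = "atmel,sama5d2-pmc" },
        { /* sentinel */ }
};

static struct platform_driver foo_pmc_driver = {
        .probe = foo_pmc_probe,
        .driver = {
                .name = "foo-sama5d2-pmc",
                .of_match_table = foo_pmc_ids,
        },
};
builtin_platform_driver(foo_pmc_driver);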
.num_output = ARRAY_SIZE(plla_outputs), + .output = plla_outputs, + .icpll = plla_icpll, + .out = plla_out, +}; + +static const struct { + char *n; + char *p; + u8 id; +} sama5d4_systemck[] = { + { .n = "ddrck", .p = "masterck", .id = 2 }, + { .n = "lcdck", .p = "masterck", .id = 3 }, + { .n = "smdck", .p = "smdclk", .id = 4 }, + { .n = "uhpck", .p = "usbck", .id = 6 }, + { .n = "udpck", .p = "usbck", .id = 7 }, + { .n = "pck0", .p = "prog0", .id = 8 }, + { .n = "pck1", .p = "prog1", .id = 9 }, + { .n = "pck2", .p = "prog2", .id = 10 }, +}; + +static const struct { + char *n; + u8 id; +} sama5d4_periph32ck[] = { + { .n = "pioD_clk", .id = 5 }, + { .n = "usart0_clk", .id = 6 }, + { .n = "usart1_clk", .id = 7 }, + { .n = "icm_clk", .id = 9 }, + { .n = "aes_clk", .id = 12 }, + { .n = "tdes_clk", .id = 14 }, + { .n = "sha_clk", .id = 15 }, + { .n = "matrix1_clk", .id = 17 }, + { .n = "hsmc_clk", .id = 22 }, + { .n = "pioA_clk", .id = 23 }, + { .n = "pioB_clk", .id = 24 }, + { .n = "pioC_clk", .id = 25 }, + { .n = "pioE_clk", .id = 26 }, + { .n = "uart0_clk", .id = 27 }, + { .n = "uart1_clk", .id = 28 }, + { .n = "usart2_clk", .id = 29 }, + { .n = "usart3_clk", .id = 30 }, + { .n = "usart4_clk", .id = 31 }, + { .n = "twi0_clk", .id = 32 }, + { .n = "twi1_clk", .id = 33 }, + { .n = "twi2_clk", .id = 34 }, + { .n = "mci0_clk", .id = 35 }, + { .n = "mci1_clk", .id = 36 }, + { .n = "spi0_clk", .id = 37 }, + { .n = "spi1_clk", .id = 38 }, + { .n = "spi2_clk", .id = 39 }, + { .n = "tcb0_clk", .id = 40 }, + { .n = "tcb1_clk", .id = 41 }, + { .n = "tcb2_clk", .id = 42 }, + { .n = "pwm_clk", .id = 43 }, + { .n = "adc_clk", .id = 44 }, + { .n = "dbgu_clk", .id = 45 }, + { .n = "uhphs_clk", .id = 46 }, + { .n = "udphs_clk", .id = 47 }, + { .n = "ssc0_clk", .id = 48 }, + { .n = "ssc1_clk", .id = 49 }, + { .n = "trng_clk", .id = 53 }, + { .n = "macb0_clk", .id = 54 }, + { .n = "macb1_clk", .id = 55 }, + { .n = "fuse_clk", .id = 57 }, + { .n = "securam_clk", .id = 59 }, + { .n = "smd_clk", .id = 61 }, + { .n = "twi3_clk", .id = 62 }, + { .n = "catb_clk", .id = 63 }, +}; + +static const struct { + char *n; + u8 id; +} sama5d4_periphck[] = { + { .n = "dma0_clk", .id = 8 }, + { .n = "cpkcc_clk", .id = 10 }, + { .n = "aesb_clk", .id = 13 }, + { .n = "mpddr_clk", .id = 16 }, + { .n = "matrix0_clk", .id = 18 }, + { .n = "vdec_clk", .id = 19 }, + { .n = "dma1_clk", .id = 50 }, + { .n = "lcdc_clk", .id = 51 }, + { .n = "isi_clk", .id = 52 }, +}; + +static void __init sama5d4_pmc_setup(struct device_node *np) +{ + struct clk_range range = CLK_RANGE(0, 0); + const char *slck_name, *mainxtal_name; + struct pmc_data *sama5d4_pmc; + const char *parent_names[5]; + struct regmap *regmap; + struct clk_hw *hw; + int i; + bool bypass; + + i = of_property_match_string(np, "clock-names", "slow_clk"); + if (i < 0) + return; + + slck_name = of_clk_get_parent_name(np, i); + + i = of_property_match_string(np, "clock-names", "main_xtal"); + if (i < 0) + return; + mainxtal_name = of_clk_get_parent_name(np, i); + + regmap = syscon_node_to_regmap(np); + if (IS_ERR(regmap)) + return; + + sama5d4_pmc = pmc_data_allocate(PMC_MCK2 + 1, + nck(sama5d4_systemck), + nck(sama5d4_periph32ck), 0); + if (!sama5d4_pmc) + return; + + hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000, + 100000000); + if (IS_ERR(hw)) + goto err_free; + + bypass = of_property_read_bool(np, "atmel,osc-bypass"); + + hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, + bypass); + if (IS_ERR(hw)) + goto err_free; + + parent_names[0] = 
"main_rc_osc"; + parent_names[1] = "main_osc"; + hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0, + &sama5d3_pll_layout, &plla_characteristics); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_plldiv(regmap, "plladivck", "pllack"); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck"); + if (IS_ERR(hw)) + goto err_free; + + sama5d4_pmc->chws[PMC_UTMI] = hw; + + parent_names[0] = slck_name; + parent_names[1] = "mainck"; + parent_names[2] = "plladivck"; + parent_names[3] = "utmick"; + hw = at91_clk_register_master(regmap, "masterck", 4, parent_names, + &at91sam9x5_master_layout, + &mck_characteristics); + if (IS_ERR(hw)) + goto err_free; + + sama5d4_pmc->chws[PMC_MCK] = hw; + + hw = at91_clk_register_h32mx(regmap, "h32mxck", "masterck"); + if (IS_ERR(hw)) + goto err_free; + + sama5d4_pmc->chws[PMC_MCK2] = hw; + + parent_names[0] = "plladivck"; + parent_names[1] = "utmick"; + hw = at91sam9x5_clk_register_usb(regmap, "usbck", parent_names, 2); + if (IS_ERR(hw)) + goto err_free; + + parent_names[0] = "plladivck"; + parent_names[1] = "utmick"; + hw = at91sam9x5_clk_register_smd(regmap, "smdclk", parent_names, 2); + if (IS_ERR(hw)) + goto err_free; + + parent_names[0] = slck_name; + parent_names[1] = "mainck"; + parent_names[2] = "plladivck"; + parent_names[3] = "utmick"; + parent_names[4] = "mck"; + for (i = 0; i < 3; i++) { + char name[6]; + + snprintf(name, sizeof(name), "prog%d", i); + + hw = at91_clk_register_programmable(regmap, name, + parent_names, 5, i, + &at91sam9x5_programmable_layout); + if (IS_ERR(hw)) + goto err_free; + } + + for (i = 0; i < ARRAY_SIZE(sama5d4_systemck); i++) { + hw = at91_clk_register_system(regmap, sama5d4_systemck[i].n, + sama5d4_systemck[i].p, + sama5d4_systemck[i].id); + if (IS_ERR(hw)) + goto err_free; + + sama5d4_pmc->shws[sama5d4_systemck[i].id] = hw; + } + + for (i = 0; i < ARRAY_SIZE(sama5d4_periphck); i++) { + hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock, + sama5d4_periphck[i].n, + "masterck", + sama5d4_periphck[i].id, + &range); + if (IS_ERR(hw)) + goto err_free; + + sama5d4_pmc->phws[sama5d4_periphck[i].id] = hw; + } + + for (i = 0; i < ARRAY_SIZE(sama5d4_periph32ck); i++) { + hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock, + sama5d4_periph32ck[i].n, + "h32mxck", + sama5d4_periph32ck[i].id, + &range); + if (IS_ERR(hw)) + goto err_free; + + sama5d4_pmc->phws[sama5d4_periph32ck[i].id] = hw; + } + + of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sama5d4_pmc); + + return; + +err_free: + pmc_data_free(sama5d4_pmc); +} +CLK_OF_DECLARE_DRIVER(sama5d4_pmc, "atmel,sama5d4-pmc", sama5d4_pmc_setup); diff --git a/drivers/clk/axs10x/pll_clock.c b/drivers/clk/axs10x/pll_clock.c index 25d8c240ddfb..c68dada97316 100644 --- a/drivers/clk/axs10x/pll_clock.c +++ b/drivers/clk/axs10x/pll_clock.c @@ -301,13 +301,13 @@ static void __init of_axs10x_pll_clk_setup(struct device_node *node) ret = clk_hw_register(NULL, &pll_clk->hw); if (ret) { - pr_err("failed to register %s clock\n", node->name); + pr_err("failed to register %pOFn clock\n", node); goto err_unmap_lock; } ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw); if (ret) { - pr_err("failed to add hw provider for %s clock\n", node->name); + pr_err("failed to add hw provider for %pOFn clock\n", node); goto err_unregister_clk; } diff --git a/drivers/clk/bcm/clk-kona-setup.c 
b/drivers/clk/bcm/clk-kona-setup.c index 281f4322355c..e65eeef9cbaf 100644 --- a/drivers/clk/bcm/clk-kona-setup.c +++ b/drivers/clk/bcm/clk-kona-setup.c @@ -808,29 +808,29 @@ void __init kona_dt_ccu_setup(struct ccu_data *ccu, ret = of_address_to_resource(node, 0, &res); if (ret) { - pr_err("%s: no valid CCU registers found for %s\n", __func__, - node->name); + pr_err("%s: no valid CCU registers found for %pOFn\n", __func__, + node); goto out_err; } range = resource_size(&res); if (range > (resource_size_t)U32_MAX) { - pr_err("%s: address range too large for %s\n", __func__, - node->name); + pr_err("%s: address range too large for %pOFn\n", __func__, + node); goto out_err; } ccu->range = (u32)range; if (!ccu_data_valid(ccu)) { - pr_err("%s: ccu data not valid for %s\n", __func__, node->name); + pr_err("%s: ccu data not valid for %pOFn\n", __func__, node); goto out_err; } ccu->base = ioremap(res.start, ccu->range); if (!ccu->base) { - pr_err("%s: unable to map CCU registers for %s\n", __func__, - node->name); + pr_err("%s: unable to map CCU registers for %pOFn\n", __func__, + node); goto out_err; } ccu->node = of_node_get(node); @@ -848,16 +848,16 @@ void __init kona_dt_ccu_setup(struct ccu_data *ccu, ret = of_clk_add_hw_provider(node, of_clk_kona_onecell_get, ccu); if (ret) { - pr_err("%s: error adding ccu %s as provider (%d)\n", __func__, - node->name, ret); + pr_err("%s: error adding ccu %pOFn as provider (%d)\n", __func__, + node, ret); goto out_err; } if (!kona_ccu_init(ccu)) - pr_err("Broadcom %s initialization had errors\n", node->name); + pr_err("Broadcom %pOFn initialization had errors\n", node); return; out_err: kona_ccu_teardown(ccu); - pr_err("Broadcom %s setup aborted\n", node->name); + pr_err("Broadcom %pOFn setup aborted\n", node); } diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c index 44b544157121..d571a00b5282 100644 --- a/drivers/clk/clk-asm9260.c +++ b/drivers/clk/clk-asm9260.c @@ -281,7 +281,7 @@ static void __init asm9260_acc_init(struct device_node *np) base = of_io_request_and_map(np, 0, np->name); if (IS_ERR(base)) - panic("%s: unable to map resource", np->name); + panic("%pOFn: unable to map resource", np); /* register pll */ rate = (ioread32(base + HW_SYSPLLCTRL) & 0xffff) * 1000000; @@ -292,7 +292,7 @@ static void __init asm9260_acc_init(struct device_node *np) ref_clk, 0, rate, accuracy); if (IS_ERR(hw)) - panic("%s: can't register REFCLK. Check DT!", np->name); + panic("%pOFn: can't register REFCLK. 
Check DT!", np); for (n = 0; n < ARRAY_SIZE(asm9260_mux_clks); n++) { const struct asm9260_mux_clock *mc = &asm9260_mux_clks[n]; diff --git a/drivers/clk/clk-bulk.c b/drivers/clk/clk-bulk.c index 6904ed6da504..6a7118d4250a 100644 --- a/drivers/clk/clk-bulk.c +++ b/drivers/clk/clk-bulk.c @@ -17,8 +17,65 @@ */ #include <linux/clk.h> +#include <linux/clk-provider.h> #include <linux/device.h> #include <linux/export.h> +#include <linux/of.h> +#include <linux/slab.h> + +static int __must_check of_clk_bulk_get(struct device_node *np, int num_clks, + struct clk_bulk_data *clks) +{ + int ret; + int i; + + for (i = 0; i < num_clks; i++) + clks[i].clk = NULL; + + for (i = 0; i < num_clks; i++) { + clks[i].clk = of_clk_get(np, i); + if (IS_ERR(clks[i].clk)) { + ret = PTR_ERR(clks[i].clk); + pr_err("%pOF: Failed to get clk index: %d ret: %d\n", + np, i, ret); + clks[i].clk = NULL; + goto err; + } + } + + return 0; + +err: + clk_bulk_put(i, clks); + + return ret; +} + +static int __must_check of_clk_bulk_get_all(struct device_node *np, + struct clk_bulk_data **clks) +{ + struct clk_bulk_data *clk_bulk; + int num_clks; + int ret; + + num_clks = of_clk_get_parent_count(np); + if (!num_clks) + return 0; + + clk_bulk = kmalloc_array(num_clks, sizeof(*clk_bulk), GFP_KERNEL); + if (!clk_bulk) + return -ENOMEM; + + ret = of_clk_bulk_get(np, num_clks, clk_bulk); + if (ret) { + kfree(clk_bulk); + return ret; + } + + *clks = clk_bulk; + + return num_clks; +} void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) { @@ -59,6 +116,29 @@ err: } EXPORT_SYMBOL(clk_bulk_get); +void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) +{ + if (IS_ERR_OR_NULL(clks)) + return; + + clk_bulk_put(num_clks, clks); + + kfree(clks); +} +EXPORT_SYMBOL(clk_bulk_put_all); + +int __must_check clk_bulk_get_all(struct device *dev, + struct clk_bulk_data **clks) +{ + struct device_node *np = dev_of_node(dev); + + if (!np) + return 0; + + return of_clk_bulk_get_all(np, clks); +} +EXPORT_SYMBOL(clk_bulk_get_all); + #ifdef CONFIG_HAVE_CLK_PREPARE /** diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c index 0a7e7d5a7506..23c9326ea48c 100644 --- a/drivers/clk/clk-cdce925.c +++ b/drivers/clk/clk-cdce925.c @@ -669,8 +669,8 @@ static int cdce925_probe(struct i2c_client *client, /* Register PLL clocks */ for (i = 0; i < data->chip_info->num_plls; ++i) { - pll_clk_name[i] = kasprintf(GFP_KERNEL, "%s.pll%d", - client->dev.of_node->name, i); + pll_clk_name[i] = kasprintf(GFP_KERNEL, "%pOFn.pll%d", + client->dev.of_node, i); init.name = pll_clk_name[i]; data->pll[i].chip = data; data->pll[i].hw.init = &init; @@ -703,6 +703,7 @@ static int cdce925_probe(struct i2c_client *client, 0x12 + (i*CDCE925_OFFSET_PLL), 0x07, value & 0x07); } + of_node_put(np_output); } /* Register output clock Y1 */ @@ -710,7 +711,7 @@ static int cdce925_probe(struct i2c_client *client, init.flags = 0; init.num_parents = 1; init.parent_names = &parent_name; /* Mux Y1 to input */ - init.name = kasprintf(GFP_KERNEL, "%s.Y1", client->dev.of_node->name); + init.name = kasprintf(GFP_KERNEL, "%pOFn.Y1", client->dev.of_node); data->clk[0].chip = data; data->clk[0].hw.init = &init; data->clk[0].index = 0; @@ -727,8 +728,8 @@ static int cdce925_probe(struct i2c_client *client, init.flags = CLK_SET_RATE_PARENT; init.num_parents = 1; for (i = 1; i < data->chip_info->num_outputs; ++i) { - init.name = kasprintf(GFP_KERNEL, "%s.Y%d", - client->dev.of_node->name, i+1); + init.name = kasprintf(GFP_KERNEL, "%pOFn.Y%d", + client->dev.of_node, i+1); data->clk[i].chip 
= data; data->clk[i].hw.init = &init; data->clk[i].index = i; diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c index d854e26a8ddb..12c87457eca1 100644 --- a/drivers/clk/clk-devres.c +++ b/drivers/clk/clk-devres.c @@ -70,6 +70,30 @@ int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, } EXPORT_SYMBOL_GPL(devm_clk_bulk_get); +int __must_check devm_clk_bulk_get_all(struct device *dev, + struct clk_bulk_data **clks) +{ + struct clk_bulk_devres *devres; + int ret; + + devres = devres_alloc(devm_clk_bulk_release, + sizeof(*devres), GFP_KERNEL); + if (!devres) + return -ENOMEM; + + ret = clk_bulk_get_all(dev, &devres->clks); + if (ret > 0) { + *clks = devres->clks; + devres->num_clks = ret; + devres_add(dev, devres); + } else { + devres_free(devres); + } + + return ret; +} +EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all); + static int devm_clk_match(struct device *dev, void *res, void *data) { struct clk **c = res; diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c index 20724abd38bd..ef0ca9414f37 100644 --- a/drivers/clk/clk-fixed-factor.c +++ b/drivers/clk/clk-fixed-factor.c @@ -158,14 +158,14 @@ static struct clk *_of_fixed_factor_clk_setup(struct device_node *node) int ret; if (of_property_read_u32(node, "clock-div", &div)) { - pr_err("%s Fixed factor clock <%s> must have a clock-div property\n", - __func__, node->name); + pr_err("%s Fixed factor clock <%pOFn> must have a clock-div property\n", + __func__, node); return ERR_PTR(-EIO); } if (of_property_read_u32(node, "clock-mult", &mult)) { - pr_err("%s Fixed factor clock <%s> must have a clock-mult property\n", - __func__, node->name); + pr_err("%s Fixed factor clock <%pOFn> must have a clock-mult property\n", + __func__, node); return ERR_PTR(-EIO); } diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c index b5c46b3f8764..6d6475c32ee5 100644 --- a/drivers/clk/clk-fixed-rate.c +++ b/drivers/clk/clk-fixed-rate.c @@ -200,6 +200,7 @@ static int of_fixed_clk_remove(struct platform_device *pdev) { struct clk *clk = platform_get_drvdata(pdev); + of_clk_del_provider(pdev->dev.of_node); clk_unregister_fixed_rate(clk); return 0; diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c index 40af4fbab4d2..6a43ce420492 100644 --- a/drivers/clk/clk-gpio.c +++ b/drivers/clk/clk-gpio.c @@ -233,11 +233,11 @@ static int gpio_clk_driver_probe(struct platform_device *pdev) if (IS_ERR(gpiod)) { ret = PTR_ERR(gpiod); if (ret == -EPROBE_DEFER) - pr_debug("%s: %s: GPIOs not yet available, retry later\n", - node->name, __func__); + pr_debug("%pOFn: %s: GPIOs not yet available, retry later\n", + node, __func__); else - pr_err("%s: %s: Can't get '%s' named GPIO property\n", - node->name, __func__, + pr_err("%pOFn: %s: Can't get '%s' named GPIO property\n", + node, __func__, gpio_name); return ret; } diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c index c4ee280f454d..a47c2b600f20 100644 --- a/drivers/clk/clk-hsdk-pll.c +++ b/drivers/clk/clk-hsdk-pll.c @@ -390,13 +390,13 @@ static void __init of_hsdk_pll_clk_setup(struct device_node *node) ret = clk_hw_register(NULL, &pll_clk->hw); if (ret) { - pr_err("failed to register %s clock\n", node->name); + pr_err("failed to register %pOFn clock\n", node); goto err_unmap_spec_regs; } ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw); if (ret) { - pr_err("failed to add hw provider for %s clock\n", node->name); + pr_err("failed to add hw provider for %pOFn clock\n", node); goto err_unmap_spec_regs; } diff --git 
a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c index eb953d3b0b69..02551fe4b87c 100644 --- a/drivers/clk/clk-max77686.c +++ b/drivers/clk/clk-max77686.c @@ -1,24 +1,9 @@ -/* - * clk-max77686.c - Clock driver for Maxim 77686/MAX77802 - * - * Copyright (C) 2012 Samsung Electornics - * Jonghwa Lee <jonghwa3.lee@samsung.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ +// SPDX-License-Identifier: GPL-2.0+ +// +// clk-max77686.c - Clock driver for Maxim 77686/MAX77802 +// +// Copyright (C) 2012 Samsung Electornics +// Jonghwa Lee <jonghwa3.lee@samsung.com> #include <linux/kernel.h> #include <linux/slab.h> diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c index 13ad6d1e5090..84a24875c629 100644 --- a/drivers/clk/clk-nomadik.c +++ b/drivers/clk/clk-nomadik.c @@ -97,8 +97,8 @@ static void __init nomadik_src_init(void) } src_base = of_iomap(np, 0); if (!src_base) { - pr_err("%s: must have src parent node with REGS (%s)\n", - __func__, np->name); + pr_err("%s: must have src parent node with REGS (%pOFn)\n", + __func__, np); return; } diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c index c5edf8f2fd19..27a86b7a34db 100644 --- a/drivers/clk/clk-npcm7xx.c +++ b/drivers/clk/clk-npcm7xx.c @@ -549,7 +549,7 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np) ret = of_address_to_resource(clk_np, 0, &res); if (ret) { - pr_err("%s: failed to get resource, ret %d\n", clk_np->name, + pr_err("%pOFn: failed to get resource, ret %d\n", clk_np, ret); return; } diff --git a/drivers/clk/clk-palmas.c b/drivers/clk/clk-palmas.c index 7f51c01085ab..e9612e7068e9 100644 --- a/drivers/clk/clk-palmas.c +++ b/drivers/clk/clk-palmas.c @@ -195,8 +195,8 @@ static void palmas_clks_get_clk_data(struct platform_device *pdev, prop = PALMAS_EXT_CONTROL_NSLEEP; break; default: - dev_warn(&pdev->dev, "%s: Invalid ext control option: %u\n", - node->name, prop); + dev_warn(&pdev->dev, "%pOFn: Invalid ext control option: %u\n", + node, prop); prop = 0; break; } diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index 3a1812f65e5d..4c30b6e799ed 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -945,8 +945,8 @@ static void __init core_mux_init(struct device_node *np) rc = of_clk_add_provider(np, of_clk_src_simple_get, clk); if (rc) { - pr_err("%s: Couldn't register clk provider for node %s: %d\n", - __func__, np->name, rc); + pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n", + __func__, np, rc); return; } } @@ -1199,8 +1199,8 @@ static void __init legacy_pll_init(struct device_node *np, int idx) rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data); if (rc) { - pr_err("%s: Couldn't register clk provider for node %s: %d\n", - __func__, np->name, rc); + pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n", + __func__, np, rc); goto 
err_cell; } @@ -1360,7 +1360,7 @@ static void __init clockgen_init(struct device_node *np) is_old_ls1021a = true; } if (!clockgen.regs) { - pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name); + pr_err("%s(): %pOFn: of_iomap() failed\n", __func__, np); return; } @@ -1406,8 +1406,8 @@ static void __init clockgen_init(struct device_node *np) ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen); if (ret) { - pr_err("%s: Couldn't register clk provider for node %s: %d\n", - __func__, np->name, ret); + pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n", + __func__, np, ret); } return; diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c index d44e0eea31ec..5b419b82f7ca 100644 --- a/drivers/clk/clk-s2mps11.c +++ b/drivers/clk/clk-s2mps11.c @@ -1,19 +1,8 @@ -/* - * clk-s2mps11.c - Clock driver for S2MPS11. - * - * Copyright (C) 2013,2014 Samsung Electornics - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +// +// clk-s2mps11.c - Clock driver for S2MPS11. +// +// Copyright (C) 2013,2014 Samsung Electornics #include <linux/module.h> #include <linux/err.h> @@ -28,12 +17,7 @@ #include <linux/mfd/samsung/s5m8767.h> #include <linux/mfd/samsung/core.h> -enum { - S2MPS11_CLK_AP = 0, - S2MPS11_CLK_CP, - S2MPS11_CLK_BT, - S2MPS11_CLKS_NUM, -}; +#include <dt-bindings/clock/samsung,s2mps11.h> struct s2mps11_clk { struct sec_pmic_dev *iodev; @@ -245,6 +229,36 @@ static const struct platform_device_id s2mps11_clk_id[] = { }; MODULE_DEVICE_TABLE(platform, s2mps11_clk_id); +#ifdef CONFIG_OF +/* + * Device is instantiated through parent MFD device and device matching is done + * through platform_device_id. + * + * However if device's DT node contains proper clock compatible and driver is + * built as a module, then the *module* matching will be done trough DT aliases. + * This requires of_device_id table. In the same time this will not change the + * actual *device* matching so do not add .of_match_table. 
+ */ +static const struct of_device_id s2mps11_dt_match[] __used = { + { + .compatible = "samsung,s2mps11-clk", + .data = (void *)S2MPS11X, + }, { + .compatible = "samsung,s2mps13-clk", + .data = (void *)S2MPS13X, + }, { + .compatible = "samsung,s2mps14-clk", + .data = (void *)S2MPS14X, + }, { + .compatible = "samsung,s5m8767-clk", + .data = (void *)S5M8767X, + }, { + /* Sentinel */ + }, +}; +MODULE_DEVICE_TABLE(of, s2mps11_dt_match); +#endif + static struct platform_driver s2mps11_clk_driver = { .driver = { .name = "s2mps11-clk", diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c index a985bf5e1ac6..a2287c770d5c 100644 --- a/drivers/clk/clk-scmi.c +++ b/drivers/clk/clk-scmi.c @@ -132,7 +132,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev) count = handle->clk_ops->count_get(handle); if (count < 0) { - dev_err(dev, "%s: invalid clock output count\n", np->name); + dev_err(dev, "%pOFn: invalid clock output count\n", np); return -EINVAL; } diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c index 25854722810e..d3ccc1cfccd5 100644 --- a/drivers/clk/clk-scpi.c +++ b/drivers/clk/clk-scpi.c @@ -207,7 +207,7 @@ static int scpi_clk_add(struct device *dev, struct device_node *np, count = of_property_count_strings(np, "clock-output-names"); if (count < 0) { - dev_err(dev, "%s: invalid clock output count\n", np->name); + dev_err(dev, "%pOFn: invalid clock output count\n", np); return -EINVAL; } @@ -232,13 +232,13 @@ static int scpi_clk_add(struct device *dev, struct device_node *np, if (of_property_read_string_index(np, "clock-output-names", idx, &name)) { - dev_err(dev, "invalid clock name @ %s\n", np->name); + dev_err(dev, "invalid clock name @ %pOFn\n", np); return -EINVAL; } if (of_property_read_u32_index(np, "clock-indices", idx, &val)) { - dev_err(dev, "invalid clock index @ %s\n", np->name); + dev_err(dev, "invalid clock index @ %pOFn\n", np); return -EINVAL; } diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c index 50e7c341e97e..8bdf91b56012 100644 --- a/drivers/clk/clk-si5351.c +++ b/drivers/clk/clk-si5351.c @@ -1215,8 +1215,8 @@ static int si5351_dt_parse(struct i2c_client *client, /* per clkout properties */ for_each_child_of_node(np, child) { if (of_property_read_u32(child, "reg", &num)) { - dev_err(&client->dev, "missing reg property of %s\n", - child->name); + dev_err(&client->dev, "missing reg property of %pOFn\n", + child); goto put_child; } diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c index 294850bdc195..cdaa567c8042 100644 --- a/drivers/clk/clk-stm32f4.c +++ b/drivers/clk/clk-stm32f4.c @@ -1433,7 +1433,7 @@ static void __init stm32f4_rcc_init(struct device_node *np) base = of_iomap(np, 0); if (!base) { - pr_err("%s: unable to map resource\n", np->name); + pr_err("%pOFn: unable to map resource\n", np); return; } diff --git a/drivers/clk/clk-stm32h7.c b/drivers/clk/clk-stm32h7.c index d3271eca3779..0ea7261d15e0 100644 --- a/drivers/clk/clk-stm32h7.c +++ b/drivers/clk/clk-stm32h7.c @@ -1216,7 +1216,7 @@ static void __init stm32h7_rcc_init(struct device_node *np) /* get RCC base @ from DT */ base = of_iomap(np, 0); if (!base) { - pr_err("%s: unable to map resource", np->name); + pr_err("%pOFn: unable to map resource", np); goto err_free_clks; } diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c index a907555b2a3d..4f48342bc280 100644 --- a/drivers/clk/clk-stm32mp1.c +++ b/drivers/clk/clk-stm32mp1.c @@ -2088,7 +2088,7 @@ static void stm32mp1_rcc_init(struct device_node *np) base = of_iomap(np, 0); if 
(!base) { - pr_err("%s: unable to map resource", np->name); + pr_err("%pOFn: unable to map resource", np); of_node_put(np); return; } diff --git a/drivers/clk/clk-tango4.c b/drivers/clk/clk-tango4.c index 34b22b7930fb..fe12a43f7a40 100644 --- a/drivers/clk/clk-tango4.c +++ b/drivers/clk/clk-tango4.c @@ -54,13 +54,13 @@ static void __init tango4_clkgen_setup(struct device_node *np) const char *parent = of_clk_get_parent_name(np, 0); if (!base) - panic("%s: invalid address\n", np->name); + panic("%pOFn: invalid address\n", np); if (readl(base + CPUCLK_DIV) & DIV_BYPASS) - panic("%s: unsupported cpuclk setup\n", np->name); + panic("%pOFn: unsupported cpuclk setup\n", np); if (readl(base + SYSCLK_DIV) & DIV_BYPASS) - panic("%s: unsupported sysclk setup\n", np->name); + panic("%pOFn: unsupported sysclk setup\n", np); writel(0x100, base + CPUCLK_DIV); /* disable frequency ramping */ @@ -77,9 +77,9 @@ static void __init tango4_clkgen_setup(struct device_node *np) pp[3] = clk_register_fixed_factor(NULL, "sdio_clk", "cd6", 0, 1, 2); if (IS_ERR(pp[0]) || IS_ERR(pp[1]) || IS_ERR(pp[2]) || IS_ERR(pp[3])) - panic("%s: clk registration failed\n", np->name); + panic("%pOFn: clk registration failed\n", np); if (of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data)) - panic("%s: clk provider registration failed\n", np->name); + panic("%pOFn: clk provider registration failed\n", np); } CLK_OF_DECLARE(tango4_clkgen, "sigma,tango4-clkgen", tango4_clkgen_setup); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index d31055ae6ec6..af011974d4ec 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -924,6 +924,101 @@ static int clk_core_enable_lock(struct clk_core *core) } /** + * clk_gate_restore_context - restore context for poweroff + * @hw: the clk_hw pointer of clock whose state is to be restored + * + * The clock gate restore context function enables or disables + * the gate clocks based on the enable_count. This is done in cases + * where the clock context is lost and based on the enable_count + * the clock either needs to be enabled/disabled. This + * helps restore the state of gate clocks. + */ +void clk_gate_restore_context(struct clk_hw *hw) +{ + struct clk_core *core = hw->core; + + if (core->enable_count) + core->ops->enable(hw); + else + core->ops->disable(hw); +} +EXPORT_SYMBOL_GPL(clk_gate_restore_context); + +static int clk_core_save_context(struct clk_core *core) +{ + struct clk_core *child; + int ret = 0; + + hlist_for_each_entry(child, &core->children, child_node) { + ret = clk_core_save_context(child); + if (ret < 0) + return ret; + } + + if (core->ops && core->ops->save_context) + ret = core->ops->save_context(core->hw); + + return ret; +} + +static void clk_core_restore_context(struct clk_core *core) +{ + struct clk_core *child; + + if (core->ops && core->ops->restore_context) + core->ops->restore_context(core->hw); + + hlist_for_each_entry(child, &core->children, child_node) + clk_core_restore_context(child); +} + +/** + * clk_save_context - save clock context for poweroff + * + * Saves the context of the clock register for powerstates in which the + * contents of the registers will be lost. Occurs deep within the suspend + * code. Returns 0 on success. 
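The clk_save_context()/clk_restore_context() helpers introduced in this clk.c hunk are meant to be driven from platform power-off code, with individual clock drivers opting in through the new save_context/restore_context callbacks in clk_ops (and gates able to reuse clk_gate_restore_context()). The sketch below is illustrative only and is not part of this series: the my_* names and the syscore_ops hookup are assumptions showing one plausible way a platform could wire the helpers into its deepest suspend path.

/*
 * Illustrative sketch (assumed example, not from this patch set): a gate
 * clock delegating restore to the generic helper, plus platform syscore
 * hooks that walk the whole clock tree around a context-losing power-off.
 */
#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/syscore_ops.h>

/* Gate clocks can simply re-apply their state from the core enable_count. */
static void my_gate_restore_context(struct clk_hw *hw)
{
	clk_gate_restore_context(hw);
}

static const struct clk_ops my_gate_ops = {
	/* .enable/.disable/.is_enabled as required by the hardware ... */
	.restore_context = my_gate_restore_context,
};

/* Platform code saves/restores every registered clock via the new helpers. */
static int my_platform_clk_suspend(void)
{
	return clk_save_context();	/* invokes each clock's ->save_context */
}

static void my_platform_clk_resume(void)
{
	clk_restore_context();		/* invokes each clock's ->restore_context */
}

static struct syscore_ops my_platform_clk_syscore_ops = {
	.suspend = my_platform_clk_suspend,
	.resume  = my_platform_clk_resume,
};

static int __init my_platform_clk_pm_init(void)
{
	register_syscore_ops(&my_platform_clk_syscore_ops);
	return 0;
}
late_initcall(my_platform_clk_pm_init);

Whether the calls live in syscore_ops, in a power-domain driver, or directly in SoC suspend code is a platform choice; the helpers themselves only require that they run after the clock tree has been registered.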
+ */ +int clk_save_context(void) +{ + struct clk_core *clk; + int ret; + + hlist_for_each_entry(clk, &clk_root_list, child_node) { + ret = clk_core_save_context(clk); + if (ret < 0) + return ret; + } + + hlist_for_each_entry(clk, &clk_orphan_list, child_node) { + ret = clk_core_save_context(clk); + if (ret < 0) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(clk_save_context); + +/** + * clk_restore_context - restore clock context after poweroff + * + * Restore the saved clock context upon resume. + * + */ +void clk_restore_context(void) +{ + struct clk_core *core; + + hlist_for_each_entry(core, &clk_root_list, child_node) + clk_core_restore_context(core); + + hlist_for_each_entry(core, &clk_orphan_list, child_node) + clk_core_restore_context(core); +} +EXPORT_SYMBOL_GPL(clk_restore_context); + +/** * clk_enable - ungate a clock * @clk: the clk being ungated * diff --git a/drivers/clk/davinci/psc.c b/drivers/clk/davinci/psc.c index fffbed5e263b..5b69e24a224f 100644 --- a/drivers/clk/davinci/psc.c +++ b/drivers/clk/davinci/psc.c @@ -303,24 +303,6 @@ static int davinci_lpsc_clk_reset(struct clk *clk, bool reset) return 0; } -/* - * REVISIT: These exported functions can be removed after a non-DT lookup is - * added to the reset controller framework and the davinci-rproc driver is - * updated to use the generic reset controller framework. - */ - -int davinci_clk_reset_assert(struct clk *clk) -{ - return davinci_lpsc_clk_reset(clk, true); -} -EXPORT_SYMBOL(davinci_clk_reset_assert); - -int davinci_clk_reset_deassert(struct clk *clk) -{ - return davinci_lpsc_clk_reset(clk, false); -} -EXPORT_SYMBOL(davinci_clk_reset_deassert); - static int davinci_psc_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) { diff --git a/drivers/clk/hisilicon/Kconfig b/drivers/clk/hisilicon/Kconfig index becdb1dd21b5..30fad7ab0d88 100644 --- a/drivers/clk/hisilicon/Kconfig +++ b/drivers/clk/hisilicon/Kconfig @@ -21,6 +21,13 @@ config COMMON_CLK_HI3660 help Build the clock driver for hi3660. +config COMMON_CLK_HI3670 + bool "Hi3670 Clock Driver" + depends on ARCH_HISI || COMPILE_TEST + default ARCH_HISI + help + Build the clock driver for hi3670. + config COMMON_CLK_HI3798CV200 tristate "Hi3798CV200 Clock Driver" depends on ARCH_HISI || COMPILE_TEST diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile index 2a714c0f9657..b2441b99f3d5 100644 --- a/drivers/clk/hisilicon/Makefile +++ b/drivers/clk/hisilicon/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_ARCH_HIX5HD2) += clk-hix5hd2.o obj-$(CONFIG_COMMON_CLK_HI3516CV300) += crg-hi3516cv300.o obj-$(CONFIG_COMMON_CLK_HI3519) += clk-hi3519.o obj-$(CONFIG_COMMON_CLK_HI3660) += clk-hi3660.o +obj-$(CONFIG_COMMON_CLK_HI3670) += clk-hi3670.o obj-$(CONFIG_COMMON_CLK_HI3798CV200) += crg-hi3798cv200.o obj-$(CONFIG_COMMON_CLK_HI6220) += clk-hi6220.o obj-$(CONFIG_RESET_HISI) += reset.o diff --git a/drivers/clk/hisilicon/clk-hi3670.c b/drivers/clk/hisilicon/clk-hi3670.c new file mode 100644 index 000000000000..fd8c837a6ea3 --- /dev/null +++ b/drivers/clk/hisilicon/clk-hi3670.c @@ -0,0 +1,1016 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2001-2021, Huawei Tech. Co., Ltd. + * Author: chenjun <chenjun14@huawei.com> + * + * Copyright (c) 2018, Linaro Ltd. 
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + */ + +#include <dt-bindings/clock/hi3670-clock.h> +#include <linux/clk-provider.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include "clk.h" + +static const struct hisi_fixed_rate_clock hi3670_fixed_rate_clks[] = { + { HI3670_CLKIN_SYS, "clkin_sys", NULL, 0, 19200000, }, + { HI3670_CLKIN_REF, "clkin_ref", NULL, 0, 32764, }, + { HI3670_CLK_FLL_SRC, "clk_fll_src", NULL, 0, 134400000, }, + { HI3670_CLK_PPLL0, "clk_ppll0", NULL, 0, 1660000000, }, + { HI3670_CLK_PPLL1, "clk_ppll1", NULL, 0, 1866000000, }, + { HI3670_CLK_PPLL2, "clk_ppll2", NULL, 0, 1920000000, }, + { HI3670_CLK_PPLL3, "clk_ppll3", NULL, 0, 1200000000, }, + { HI3670_CLK_PPLL4, "clk_ppll4", NULL, 0, 900000000, }, + { HI3670_CLK_PPLL6, "clk_ppll6", NULL, 0, 393216000, }, + { HI3670_CLK_PPLL7, "clk_ppll7", NULL, 0, 1008000000, }, + { HI3670_CLK_PPLL_PCIE, "clk_ppll_pcie", NULL, 0, 100000000, }, + { HI3670_CLK_PCIEPLL_REV, "clk_pciepll_rev", NULL, 0, 100000000, }, + { HI3670_CLK_SCPLL, "clk_scpll", NULL, 0, 245760000, }, + { HI3670_PCLK, "pclk", NULL, 0, 20000000, }, + { HI3670_CLK_UART0_DBG, "clk_uart0_dbg", NULL, 0, 19200000, }, + { HI3670_CLK_UART6, "clk_uart6", NULL, 0, 19200000, }, + { HI3670_OSC32K, "osc32k", NULL, 0, 32764, }, + { HI3670_OSC19M, "osc19m", NULL, 0, 19200000, }, + { HI3670_CLK_480M, "clk_480m", NULL, 0, 480000000, }, + { HI3670_CLK_INVALID, "clk_invalid", NULL, 0, 10000000, }, +}; + +/* crgctrl */ +static const struct hisi_fixed_factor_clock hi3670_crg_fixed_factor_clks[] = { + { HI3670_CLK_DIV_SYSBUS, "clk_div_sysbus", "clk_mux_sysbus", + 1, 7, 0, }, + { HI3670_CLK_FACTOR_MMC, "clk_factor_mmc", "clkin_sys", + 1, 6, 0, }, + { HI3670_CLK_SD_SYS, "clk_sd_sys", "clk_sd_sys_gt", + 1, 6, 0, }, + { HI3670_CLK_SDIO_SYS, "clk_sdio_sys", "clk_sdio_sys_gt", + 1, 6, 0, }, + { HI3670_CLK_DIV_A53HPM, "clk_div_a53hpm", "clk_a53hpm_andgt", + 1, 4, 0, }, + { HI3670_CLK_DIV_320M, "clk_div_320m", "clk_320m_pll_gt", + 1, 5, 0, }, + { HI3670_PCLK_GATE_UART0, "pclk_gate_uart0", "clk_mux_uartl", + 1, 1, 0, }, + { HI3670_CLK_FACTOR_UART0, "clk_factor_uart0", "clk_mux_uart0", + 1, 1, 0, }, + { HI3670_CLK_FACTOR_USB3PHY_PLL, "clk_factor_usb3phy_pll", "clk_ppll0", + 1, 60, 0, }, + { HI3670_CLK_GATE_ABB_USB, "clk_gate_abb_usb", "clk_gate_usb_tcxo_en", + 1, 1, 0, }, + { HI3670_CLK_GATE_UFSPHY_REF, "clk_gate_ufsphy_ref", "clkin_sys", + 1, 1, 0, }, + { HI3670_ICS_VOLT_HIGH, "ics_volt_high", "peri_volt_hold", + 1, 1, 0, }, + { HI3670_ICS_VOLT_MIDDLE, "ics_volt_middle", "peri_volt_middle", + 1, 1, 0, }, + { HI3670_VENC_VOLT_HOLD, "venc_volt_hold", "peri_volt_hold", + 1, 1, 0, }, + { HI3670_VDEC_VOLT_HOLD, "vdec_volt_hold", "peri_volt_hold", + 1, 1, 0, }, + { HI3670_EDC_VOLT_HOLD, "edc_volt_hold", "peri_volt_hold", + 1, 1, 0, }, + { HI3670_CLK_ISP_SNCLK_FAC, "clk_isp_snclk_fac", "clk_isp_snclk_angt", + 1, 10, 0, }, + { HI3670_CLK_FACTOR_RXDPHY, "clk_factor_rxdphy", "clk_andgt_rxdphy", + 1, 6, 0, }, +}; + +static const struct hisi_gate_clock hi3670_crgctrl_gate_sep_clks[] = { + { HI3670_PPLL1_EN_ACPU, "ppll1_en_acpu", "clk_ppll1", + CLK_SET_RATE_PARENT, 0x0, 0, 0, }, + { HI3670_PPLL2_EN_ACPU, "ppll2_en_acpu", "clk_ppll2", + CLK_SET_RATE_PARENT, 0x0, 3, 0, }, + { HI3670_PPLL3_EN_ACPU, "ppll3_en_acpu", "clk_ppll3", + CLK_SET_RATE_PARENT, 0x0, 27, 0, }, + { HI3670_PPLL1_GT_CPU, "ppll1_gt_cpu", "clk_ppll1", + CLK_SET_RATE_PARENT, 0x460, 16, 0, }, + { HI3670_PPLL2_GT_CPU, "ppll2_gt_cpu", "clk_ppll2", + CLK_SET_RATE_PARENT, 0x460, 18, 0, }, + { 
HI3670_PPLL3_GT_CPU, "ppll3_gt_cpu", "clk_ppll3", + CLK_SET_RATE_PARENT, 0x460, 20, 0, }, + { HI3670_CLK_GATE_PPLL2_MEDIA, "clk_gate_ppll2_media", "clk_ppll2", + CLK_SET_RATE_PARENT, 0x410, 27, 0, }, + { HI3670_CLK_GATE_PPLL3_MEDIA, "clk_gate_ppll3_media", "clk_ppll3", + CLK_SET_RATE_PARENT, 0x410, 28, 0, }, + { HI3670_CLK_GATE_PPLL4_MEDIA, "clk_gate_ppll4_media", "clk_ppll4", + CLK_SET_RATE_PARENT, 0x410, 26, 0, }, + { HI3670_CLK_GATE_PPLL6_MEDIA, "clk_gate_ppll6_media", "clk_ppll6", + CLK_SET_RATE_PARENT, 0x410, 30, 0, }, + { HI3670_CLK_GATE_PPLL7_MEDIA, "clk_gate_ppll7_media", "clk_ppll7", + CLK_SET_RATE_PARENT, 0x410, 29, 0, }, + { HI3670_PCLK_GPIO0, "pclk_gpio0", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 0, 0, }, + { HI3670_PCLK_GPIO1, "pclk_gpio1", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 1, 0, }, + { HI3670_PCLK_GPIO2, "pclk_gpio2", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 2, 0, }, + { HI3670_PCLK_GPIO3, "pclk_gpio3", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 3, 0, }, + { HI3670_PCLK_GPIO4, "pclk_gpio4", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 4, 0, }, + { HI3670_PCLK_GPIO5, "pclk_gpio5", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 5, 0, }, + { HI3670_PCLK_GPIO6, "pclk_gpio6", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 6, 0, }, + { HI3670_PCLK_GPIO7, "pclk_gpio7", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 7, 0, }, + { HI3670_PCLK_GPIO8, "pclk_gpio8", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 8, 0, }, + { HI3670_PCLK_GPIO9, "pclk_gpio9", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 9, 0, }, + { HI3670_PCLK_GPIO10, "pclk_gpio10", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 10, 0, }, + { HI3670_PCLK_GPIO11, "pclk_gpio11", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 11, 0, }, + { HI3670_PCLK_GPIO12, "pclk_gpio12", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 12, 0, }, + { HI3670_PCLK_GPIO13, "pclk_gpio13", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 13, 0, }, + { HI3670_PCLK_GPIO14, "pclk_gpio14", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 14, 0, }, + { HI3670_PCLK_GPIO15, "pclk_gpio15", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 15, 0, }, + { HI3670_PCLK_GPIO16, "pclk_gpio16", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 16, 0, }, + { HI3670_PCLK_GPIO17, "pclk_gpio17", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 17, 0, }, + { HI3670_PCLK_GPIO20, "pclk_gpio20", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 20, 0, }, + { HI3670_PCLK_GPIO21, "pclk_gpio21", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x10, 21, 0, }, + { HI3670_PCLK_GATE_DSI0, "pclk_gate_dsi0", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x50, 28, 0, }, + { HI3670_PCLK_GATE_DSI1, "pclk_gate_dsi1", "clk_div_cfgbus", + CLK_SET_RATE_PARENT, 0x50, 29, 0, }, + { HI3670_HCLK_GATE_USB3OTG, "hclk_gate_usb3otg", "clk_div_sysbus", + CLK_SET_RATE_PARENT, 0x0, 25, 0, }, + { HI3670_ACLK_GATE_USB3DVFS, "aclk_gate_usb3dvfs", "autodiv_emmc0bus", + CLK_SET_RATE_PARENT, 0x40, 1, 0, }, + { HI3670_HCLK_GATE_SDIO, "hclk_gate_sdio", "clk_div_sysbus", + CLK_SET_RATE_PARENT, 0x0, 21, 0, }, + { HI3670_PCLK_GATE_PCIE_SYS, "pclk_gate_pcie_sys", "clk_div_mmc1bus", + CLK_SET_RATE_PARENT, 0x420, 7, 0, }, + { HI3670_PCLK_GATE_PCIE_PHY, "pclk_gate_pcie_phy", "pclk_gate_mmc1_pcie", + CLK_SET_RATE_PARENT, 0x420, 9, 0, }, + { HI3670_PCLK_GATE_MMC1_PCIE, "pclk_gate_mmc1_pcie", "pclk_div_mmc1_pcie", + CLK_SET_RATE_PARENT, 0x30, 12, 0, }, + { HI3670_PCLK_GATE_MMC0_IOC, "pclk_gate_mmc0_ioc", "clk_div_mmc0bus", + CLK_SET_RATE_PARENT, 0x40, 13, 0, }, + { HI3670_PCLK_GATE_MMC1_IOC, 
"pclk_gate_mmc1_ioc", "clk_div_mmc1bus", + CLK_SET_RATE_PARENT, 0x420, 21, 0, }, + { HI3670_CLK_GATE_DMAC, "clk_gate_dmac", "clk_div_sysbus", + CLK_SET_RATE_PARENT, 0x30, 1, 0, }, + { HI3670_CLK_GATE_VCODECBUS2DDR, "clk_gate_vcodecbus2ddr", "clk_div_vcodecbus", + CLK_SET_RATE_PARENT, 0x0, 5, 0, }, + { HI3670_CLK_CCI400_BYPASS, "clk_cci400_bypass", "clk_ddrc_freq", + CLK_SET_RATE_PARENT, 0x22C, 28, 0, }, + { HI3670_CLK_GATE_CCI400, "clk_gate_cci400", "clk_ddrc_freq", + CLK_SET_RATE_PARENT, 0x50, 14, 0, }, + { HI3670_CLK_GATE_SD, "clk_gate_sd", "clk_mux_sd_sys", + CLK_SET_RATE_PARENT, 0x40, 17, 0, }, + { HI3670_HCLK_GATE_SD, "hclk_gate_sd", "clk_div_sysbus", + CLK_SET_RATE_PARENT, 0x0, 30, 0, }, + { HI3670_CLK_GATE_SDIO, "clk_gate_sdio", "clk_mux_sdio_sys", + CLK_SET_RATE_PARENT, 0x40, 19, 0, }, + { HI3670_CLK_GATE_A57HPM, "clk_gate_a57hpm", "clk_div_a53hpm", + CLK_SET_RATE_PARENT, 0x050, 9, 0, }, + { HI3670_CLK_GATE_A53HPM, "clk_gate_a53hpm", "clk_div_a53hpm", + CLK_SET_RATE_PARENT, 0x050, 13, 0, }, + { HI3670_CLK_GATE_PA_A53, "clk_gate_pa_a53", "clk_div_a53hpm", + CLK_SET_RATE_PARENT, 0x480, 10, 0, }, + { HI3670_CLK_GATE_PA_A57, "clk_gate_pa_a57", "clk_div_a53hpm", + CLK_SET_RATE_PARENT, 0x480, 9, 0, }, + { HI3670_CLK_GATE_PA_G3D, "clk_gate_pa_g3d", "clk_div_a53hpm", + CLK_SET_RATE_PARENT, 0x480, 15, 0, }, + { HI3670_CLK_GATE_GPUHPM, "clk_gate_gpuhpm", "clk_div_a53hpm", + CLK_SET_RATE_PARENT, 0x050, 15, 0, }, + { HI3670_CLK_GATE_PERIHPM, "clk_gate_perihpm", "clk_div_a53hpm", + CLK_SET_RATE_PARENT, 0x050, 12, 0, }, + { HI3670_CLK_GATE_AOHPM, "clk_gate_aohpm", "clk_div_a53hpm", + CLK_SET_RATE_PARENT, 0x050, 11, 0, }, + { HI3670_CLK_GATE_UART1, "clk_gate_uart1", "clk_mux_uarth", + CLK_SET_RATE_PARENT, 0x20, 11, 0, }, + { HI3670_CLK_GATE_UART4, "clk_gate_uart4", "clk_mux_uarth", + CLK_SET_RATE_PARENT, 0x20, 14, 0, }, + { HI3670_PCLK_GATE_UART1, "pclk_gate_uart1", "clk_mux_uarth", + CLK_SET_RATE_PARENT, 0x20, 11, 0, }, + { HI3670_PCLK_GATE_UART4, "pclk_gate_uart4", "clk_mux_uarth", + CLK_SET_RATE_PARENT, 0x20, 14, 0, }, + { HI3670_CLK_GATE_UART2, "clk_gate_uart2", "clk_mux_uartl", + CLK_SET_RATE_PARENT, 0x20, 12, 0, }, + { HI3670_CLK_GATE_UART5, "clk_gate_uart5", "clk_mux_uartl", + CLK_SET_RATE_PARENT, 0x20, 15, 0, }, + { HI3670_PCLK_GATE_UART2, "pclk_gate_uart2", "clk_mux_uartl", + CLK_SET_RATE_PARENT, 0x20, 12, 0, }, + { HI3670_PCLK_GATE_UART5, "pclk_gate_uart5", "clk_mux_uartl", + CLK_SET_RATE_PARENT, 0x20, 15, 0, }, + { HI3670_CLK_GATE_UART0, "clk_gate_uart0", "clk_mux_uart0", + CLK_SET_RATE_PARENT, 0x20, 10, 0, }, + { HI3670_CLK_GATE_I2C3, "clk_gate_i2c3", "clk_mux_i2c", + CLK_SET_RATE_PARENT, 0x20, 7, 0, }, + { HI3670_CLK_GATE_I2C4, "clk_gate_i2c4", "clk_mux_i2c", + CLK_SET_RATE_PARENT, 0x20, 27, 0, }, + { HI3670_CLK_GATE_I2C7, "clk_gate_i2c7", "clk_mux_i2c", + CLK_SET_RATE_PARENT, 0x10, 31, 0, }, + { HI3670_PCLK_GATE_I2C3, "pclk_gate_i2c3", "clk_mux_i2c", + CLK_SET_RATE_PARENT, 0x20, 7, 0, }, + { HI3670_PCLK_GATE_I2C4, "pclk_gate_i2c4", "clk_mux_i2c", + CLK_SET_RATE_PARENT, 0x20, 27, 0, }, + { HI3670_PCLK_GATE_I2C7, "pclk_gate_i2c7", "clk_mux_i2c", + CLK_SET_RATE_PARENT, 0x10, 31, 0, }, + { HI3670_CLK_GATE_SPI1, "clk_gate_spi1", "clk_mux_spi", + CLK_SET_RATE_PARENT, 0x20, 9, 0, }, + { HI3670_CLK_GATE_SPI4, "clk_gate_spi4", "clk_mux_spi", + CLK_SET_RATE_PARENT, 0x40, 4, 0, }, + { HI3670_PCLK_GATE_SPI1, "pclk_gate_spi1", "clk_mux_spi", + CLK_SET_RATE_PARENT, 0x20, 9, 0, }, + { HI3670_PCLK_GATE_SPI4, "pclk_gate_spi4", "clk_mux_spi", + CLK_SET_RATE_PARENT, 0x40, 4, 0, }, + { 
HI3670_CLK_GATE_USB3OTG_REF, "clk_gate_usb3otg_ref", "clkin_sys", + CLK_SET_RATE_PARENT, 0x40, 0, 0, }, + { HI3670_CLK_GATE_USB2PHY_REF, "clk_gate_usb2phy_ref", "clkin_sys", + CLK_SET_RATE_PARENT, 0x410, 19, 0, }, + { HI3670_CLK_GATE_PCIEAUX, "clk_gate_pcieaux", "clkin_sys", + CLK_SET_RATE_PARENT, 0x420, 8, 0, }, + { HI3670_ACLK_GATE_PCIE, "aclk_gate_pcie", "clk_gate_mmc1_pcieaxi", + CLK_SET_RATE_PARENT, 0x420, 5, 0, }, + { HI3670_CLK_GATE_MMC1_PCIEAXI, "clk_gate_mmc1_pcieaxi", "clk_div_pcieaxi", + CLK_SET_RATE_PARENT, 0x050, 4, 0, }, + { HI3670_CLK_GATE_PCIEPHY_REF, "clk_gate_pciephy_ref", "clk_ppll_pcie", + CLK_SET_RATE_PARENT, 0x470, 14, 0, }, + { HI3670_CLK_GATE_PCIE_DEBOUNCE, "clk_gate_pcie_debounce", "clk_ppll_pcie", + CLK_SET_RATE_PARENT, 0x470, 12, 0, }, + { HI3670_CLK_GATE_PCIEIO, "clk_gate_pcieio", "clk_ppll_pcie", + CLK_SET_RATE_PARENT, 0x470, 13, 0, }, + { HI3670_CLK_GATE_PCIE_HP, "clk_gate_pcie_hp", "clk_ppll_pcie", + CLK_SET_RATE_PARENT, 0x470, 15, 0, }, + { HI3670_CLK_GATE_AO_ASP, "clk_gate_ao_asp", "clk_div_ao_asp", + CLK_SET_RATE_PARENT, 0x0, 26, 0, }, + { HI3670_PCLK_GATE_PCTRL, "pclk_gate_pctrl", "clk_div_ptp", + CLK_SET_RATE_PARENT, 0x20, 31, 0, }, + { HI3670_CLK_CSI_TRANS_GT, "clk_csi_trans_gt", "clk_div_csi_trans", + CLK_SET_RATE_PARENT, 0x30, 24, 0, }, + { HI3670_CLK_DSI_TRANS_GT, "clk_dsi_trans_gt", "clk_div_dsi_trans", + CLK_SET_RATE_PARENT, 0x30, 25, 0, }, + { HI3670_CLK_GATE_PWM, "clk_gate_pwm", "clk_div_ptp", + CLK_SET_RATE_PARENT, 0x20, 0, 0, }, + { HI3670_ABB_AUDIO_EN0, "abb_audio_en0", "clk_gate_abb_192", + CLK_SET_RATE_PARENT, 0x30, 8, 0, }, + { HI3670_ABB_AUDIO_EN1, "abb_audio_en1", "clk_gate_abb_192", + CLK_SET_RATE_PARENT, 0x30, 9, 0, }, + { HI3670_ABB_AUDIO_GT_EN0, "abb_audio_gt_en0", "abb_audio_en0", + CLK_SET_RATE_PARENT, 0x30, 19, 0, }, + { HI3670_ABB_AUDIO_GT_EN1, "abb_audio_gt_en1", "abb_audio_en1", + CLK_SET_RATE_PARENT, 0x40, 20, 0, }, + { HI3670_CLK_GATE_DP_AUDIO_PLL_AO, "clk_gate_dp_audio_pll_ao", "clkdiv_dp_audio_pll_ao", + CLK_SET_RATE_PARENT, 0x00, 13, 0, }, + { HI3670_PERI_VOLT_HOLD, "peri_volt_hold", "clkin_sys", + CLK_SET_RATE_PARENT, 0, 1, 0, }, + { HI3670_PERI_VOLT_MIDDLE, "peri_volt_middle", "clkin_sys", + CLK_SET_RATE_PARENT, 0, 1, 0, }, + { HI3670_CLK_GATE_ISP_SNCLK0, "clk_gate_isp_snclk0", "clk_isp_snclk_mux0", + CLK_SET_RATE_PARENT, 0x50, 16, 0, }, + { HI3670_CLK_GATE_ISP_SNCLK1, "clk_gate_isp_snclk1", "clk_isp_snclk_mux1", + CLK_SET_RATE_PARENT, 0x50, 17, 0, }, + { HI3670_CLK_GATE_ISP_SNCLK2, "clk_gate_isp_snclk2", "clk_isp_snclk_mux2", + CLK_SET_RATE_PARENT, 0x50, 18, 0, }, + { HI3670_CLK_GATE_RXDPHY0_CFG, "clk_gate_rxdphy0_cfg", "clk_mux_rxdphy_cfg", + CLK_SET_RATE_PARENT, 0x030, 20, 0, }, + { HI3670_CLK_GATE_RXDPHY1_CFG, "clk_gate_rxdphy1_cfg", "clk_mux_rxdphy_cfg", + CLK_SET_RATE_PARENT, 0x030, 21, 0, }, + { HI3670_CLK_GATE_RXDPHY2_CFG, "clk_gate_rxdphy2_cfg", "clk_mux_rxdphy_cfg", + CLK_SET_RATE_PARENT, 0x030, 22, 0, }, + { HI3670_CLK_GATE_TXDPHY0_CFG, "clk_gate_txdphy0_cfg", "clkin_sys", + CLK_SET_RATE_PARENT, 0x030, 28, 0, }, + { HI3670_CLK_GATE_TXDPHY0_REF, "clk_gate_txdphy0_ref", "clkin_sys", + CLK_SET_RATE_PARENT, 0x030, 29, 0, }, + { HI3670_CLK_GATE_TXDPHY1_CFG, "clk_gate_txdphy1_cfg", "clkin_sys", + CLK_SET_RATE_PARENT, 0x030, 30, 0, }, + { HI3670_CLK_GATE_TXDPHY1_REF, "clk_gate_txdphy1_ref", "clkin_sys", + CLK_SET_RATE_PARENT, 0x030, 31, 0, }, + { HI3670_CLK_GATE_MEDIA_TCXO, "clk_gate_media_tcxo", "clkin_sys", + CLK_SET_RATE_PARENT, 0x40, 6, 0, }, +}; + +static const struct hisi_gate_clock hi3670_crgctrl_gate_clks[] = 
{ + { HI3670_AUTODIV_SYSBUS, "autodiv_sysbus", "clk_div_sysbus", + CLK_SET_RATE_PARENT, 0x404, 5, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_AUTODIV_EMMC0BUS, "autodiv_emmc0bus", "autodiv_sysbus", + CLK_SET_RATE_PARENT, 0x404, 1, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_PCLK_ANDGT_MMC1_PCIE, "pclk_andgt_mmc1_pcie", "clk_div_320m", + CLK_SET_RATE_PARENT, 0xf8, 13, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_GATE_VCODECBUS_GT, "clk_gate_vcodecbus_gt", "clk_mux_vcodecbus", + CLK_SET_RATE_PARENT, 0x0F0, 8, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_ANDGT_SD, "clk_andgt_sd", "clk_mux_sd_pll", + CLK_SET_RATE_PARENT, 0xF4, 3, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_SD_SYS_GT, "clk_sd_sys_gt", "clkin_sys", + CLK_SET_RATE_PARENT, 0xF4, 5, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_ANDGT_SDIO, "clk_andgt_sdio", "clk_mux_sdio_pll", + CLK_SET_RATE_PARENT, 0xF4, 8, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_SDIO_SYS_GT, "clk_sdio_sys_gt", "clkin_sys", + CLK_SET_RATE_PARENT, 0xF4, 6, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_A53HPM_ANDGT, "clk_a53hpm_andgt", "clk_mux_a53hpm", + CLK_SET_RATE_PARENT, 0x0F4, 7, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_320M_PLL_GT, "clk_320m_pll_gt", "clk_mux_320m", + CLK_SET_RATE_PARENT, 0xF8, 10, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_ANDGT_UARTH, "clk_andgt_uarth", "clk_div_320m", + CLK_SET_RATE_PARENT, 0xF4, 11, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_ANDGT_UARTL, "clk_andgt_uartl", "clk_div_320m", + CLK_SET_RATE_PARENT, 0xF4, 10, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_ANDGT_UART0, "clk_andgt_uart0", "clk_div_320m", + CLK_SET_RATE_PARENT, 0xF4, 9, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_ANDGT_SPI, "clk_andgt_spi", "clk_div_320m", + CLK_SET_RATE_PARENT, 0xF4, 13, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_ANDGT_PCIEAXI, "clk_andgt_pcieaxi", "clk_mux_pcieaxi", + CLK_SET_RATE_PARENT, 0xfc, 15, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_AO_ASP_GT, "clk_div_ao_asp_gt", "clk_mux_ao_asp", + CLK_SET_RATE_PARENT, 0xF4, 4, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_GATE_CSI_TRANS, "clk_gate_csi_trans", "clk_ppll2", + CLK_SET_RATE_PARENT, 0xF4, 14, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_GATE_DSI_TRANS, "clk_gate_dsi_trans", "clk_ppll2", + CLK_SET_RATE_PARENT, 0xF4, 1, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_ANDGT_PTP, "clk_andgt_ptp", "clk_div_320m", + CLK_SET_RATE_PARENT, 0xF8, 5, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_ANDGT_OUT0, "clk_andgt_out0", "clk_ppll0", + CLK_SET_RATE_PARENT, 0xF0, 10, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_ANDGT_OUT1, "clk_andgt_out1", "clk_ppll0", + CLK_SET_RATE_PARENT, 0xF0, 11, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLKGT_DP_AUDIO_PLL_AO, "clkgt_dp_audio_pll_ao", "clk_ppll6", + CLK_SET_RATE_PARENT, 0xF8, 15, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_ANDGT_VDEC, "clk_andgt_vdec", "clk_mux_vdec", + CLK_SET_RATE_PARENT, 0xF0, 13, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_ANDGT_VENC, "clk_andgt_venc", "clk_mux_venc", + CLK_SET_RATE_PARENT, 0xF0, 9, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_ISP_SNCLK_ANGT, "clk_isp_snclk_angt", "clk_div_a53hpm", + CLK_SET_RATE_PARENT, 0x108, 2, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_ANDGT_RXDPHY, "clk_andgt_rxdphy", "clk_div_a53hpm", + CLK_SET_RATE_PARENT, 0x0F0, 12, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_ANDGT_ICS, "clk_andgt_ics", "clk_mux_ics", + CLK_SET_RATE_PARENT, 0xf0, 14, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_AUTODIV_DMABUS, "autodiv_dmabus", "autodiv_sysbus", + CLK_SET_RATE_PARENT, 0x404, 3, CLK_GATE_HIWORD_MASK, 0, }, +}; + +static const char *const +clk_mux_sysbus_p[] 
= { "clk_ppll1", "clk_ppll0", }; +static const char *const +clk_mux_vcodecbus_p[] = { "clk_invalid", "clk_ppll4", "clk_ppll0", + "clk_invalid", "clk_ppll2", "clk_invalid", + "clk_invalid", "clk_invalid", "clk_ppll3", + "clk_invalid", "clk_invalid", "clk_invalid", + "clk_invalid", "clk_invalid", "clk_invalid", + "clk_invalid", }; +static const char *const +clk_mux_sd_sys_p[] = { "clk_sd_sys", "clk_div_sd", }; +static const char *const +clk_mux_sd_pll_p[] = { "clk_ppll0", "clk_ppll3", "clk_ppll2", "clk_ppll2", }; +static const char *const +clk_mux_sdio_sys_p[] = { "clk_sdio_sys", "clk_div_sdio", }; +static const char *const +clk_mux_sdio_pll_p[] = { "clk_ppll0", "clk_ppll3", "clk_ppll2", "clk_ppll2", }; +static const char *const +clk_mux_a53hpm_p[] = { "clk_ppll0", "clk_ppll2", }; +static const char *const +clk_mux_320m_p[] = { "clk_ppll2", "clk_ppll0", }; +static const char *const +clk_mux_uarth_p[] = { "clkin_sys", "clk_div_uarth", }; +static const char *const +clk_mux_uartl_p[] = { "clkin_sys", "clk_div_uartl", }; +static const char *const +clk_mux_uart0_p[] = { "clkin_sys", "clk_div_uart0", }; +static const char *const +clk_mux_i2c_p[] = { "clkin_sys", "clk_div_i2c", }; +static const char *const +clk_mux_spi_p[] = { "clkin_sys", "clk_div_spi", }; +static const char *const +clk_mux_pcieaxi_p[] = { "clkin_sys", "clk_ppll0", }; +static const char *const +clk_mux_ao_asp_p[] = { "clk_ppll2", "clk_ppll3", }; +static const char *const +clk_mux_vdec_p[] = { "clk_invalid", "clk_ppll4", "clk_ppll0", "clk_invalid", + "clk_invalid", "clk_invalid", "clk_invalid", "clk_invalid", + "clk_invalid", "clk_invalid", "clk_invalid", "clk_invalid", + "clk_invalid", "clk_invalid", "clk_invalid", + "clk_invalid", }; +static const char *const +clk_mux_venc_p[] = { "clk_invalid", "clk_ppll4", "clk_ppll0", "clk_invalid", + "clk_invalid", "clk_invalid", "clk_invalid", "clk_invalid", + "clk_invalid", "clk_invalid", "clk_invalid", "clk_invalid", + "clk_invalid", "clk_invalid", "clk_invalid", + "clk_invalid", }; +static const char *const +clk_isp_snclk_mux0_p[] = { "clkin_sys", "clk_isp_snclk_div0", }; +static const char *const +clk_isp_snclk_mux1_p[] = { "clkin_sys", "clk_isp_snclk_div1", }; +static const char *const +clk_isp_snclk_mux2_p[] = { "clkin_sys", "clk_isp_snclk_div2", }; +static const char *const +clk_mux_rxdphy_cfg_p[] = { "clk_factor_rxdphy", "clkin_sys", }; +static const char *const +clk_mux_ics_p[] = { "clk_invalid", "clk_ppll4", "clk_ppll0", "clk_invalid", + "clk_ppll2", "clk_invalid", "clk_invalid", "clk_invalid", + "clk_ppll3", "clk_invalid", "clk_invalid", "clk_invalid", + "clk_invalid", "clk_invalid", "clk_invalid", + "clk_invalid", }; + +static const struct hisi_mux_clock hi3670_crgctrl_mux_clks[] = { + { HI3670_CLK_MUX_SYSBUS, "clk_mux_sysbus", clk_mux_sysbus_p, + ARRAY_SIZE(clk_mux_sysbus_p), CLK_SET_RATE_PARENT, + 0xAC, 0, 1, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_VCODECBUS, "clk_mux_vcodecbus", clk_mux_vcodecbus_p, + ARRAY_SIZE(clk_mux_vcodecbus_p), CLK_SET_RATE_PARENT, + 0x0C8, 0, 4, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_SD_SYS, "clk_mux_sd_sys", clk_mux_sd_sys_p, + ARRAY_SIZE(clk_mux_sd_sys_p), CLK_SET_RATE_PARENT, + 0x0B8, 6, 1, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_SD_PLL, "clk_mux_sd_pll", clk_mux_sd_pll_p, + ARRAY_SIZE(clk_mux_sd_pll_p), CLK_SET_RATE_PARENT, + 0x0B8, 4, 2, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_SDIO_SYS, "clk_mux_sdio_sys", clk_mux_sdio_sys_p, + ARRAY_SIZE(clk_mux_sdio_sys_p), CLK_SET_RATE_PARENT, + 0x0C0, 6, 1, CLK_MUX_HIWORD_MASK, }, + { 
HI3670_CLK_MUX_SDIO_PLL, "clk_mux_sdio_pll", clk_mux_sdio_pll_p, + ARRAY_SIZE(clk_mux_sdio_pll_p), CLK_SET_RATE_PARENT, + 0x0C0, 4, 2, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_A53HPM, "clk_mux_a53hpm", clk_mux_a53hpm_p, + ARRAY_SIZE(clk_mux_a53hpm_p), CLK_SET_RATE_PARENT, + 0x0D4, 9, 1, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_320M, "clk_mux_320m", clk_mux_320m_p, + ARRAY_SIZE(clk_mux_320m_p), CLK_SET_RATE_PARENT, + 0x100, 0, 1, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_UARTH, "clk_mux_uarth", clk_mux_uarth_p, + ARRAY_SIZE(clk_mux_uarth_p), CLK_SET_RATE_PARENT, + 0xAC, 4, 1, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_UARTL, "clk_mux_uartl", clk_mux_uartl_p, + ARRAY_SIZE(clk_mux_uartl_p), CLK_SET_RATE_PARENT, + 0xAC, 3, 1, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_UART0, "clk_mux_uart0", clk_mux_uart0_p, + ARRAY_SIZE(clk_mux_uart0_p), CLK_SET_RATE_PARENT, + 0xAC, 2, 1, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_I2C, "clk_mux_i2c", clk_mux_i2c_p, + ARRAY_SIZE(clk_mux_i2c_p), CLK_SET_RATE_PARENT, + 0xAC, 13, 1, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_SPI, "clk_mux_spi", clk_mux_spi_p, + ARRAY_SIZE(clk_mux_spi_p), CLK_SET_RATE_PARENT, + 0xAC, 8, 1, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_PCIEAXI, "clk_mux_pcieaxi", clk_mux_pcieaxi_p, + ARRAY_SIZE(clk_mux_pcieaxi_p), CLK_SET_RATE_PARENT, + 0xb4, 5, 1, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_AO_ASP, "clk_mux_ao_asp", clk_mux_ao_asp_p, + ARRAY_SIZE(clk_mux_ao_asp_p), CLK_SET_RATE_PARENT, + 0x100, 6, 1, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_VDEC, "clk_mux_vdec", clk_mux_vdec_p, + ARRAY_SIZE(clk_mux_vdec_p), CLK_SET_RATE_PARENT, + 0xC8, 8, 4, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_VENC, "clk_mux_venc", clk_mux_venc_p, + ARRAY_SIZE(clk_mux_venc_p), CLK_SET_RATE_PARENT, + 0xC8, 4, 4, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_ISP_SNCLK_MUX0, "clk_isp_snclk_mux0", clk_isp_snclk_mux0_p, + ARRAY_SIZE(clk_isp_snclk_mux0_p), CLK_SET_RATE_PARENT, + 0x108, 3, 1, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_ISP_SNCLK_MUX1, "clk_isp_snclk_mux1", clk_isp_snclk_mux1_p, + ARRAY_SIZE(clk_isp_snclk_mux1_p), CLK_SET_RATE_PARENT, + 0x10C, 13, 1, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_ISP_SNCLK_MUX2, "clk_isp_snclk_mux2", clk_isp_snclk_mux2_p, + ARRAY_SIZE(clk_isp_snclk_mux2_p), CLK_SET_RATE_PARENT, + 0x10C, 10, 1, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_RXDPHY_CFG, "clk_mux_rxdphy_cfg", clk_mux_rxdphy_cfg_p, + ARRAY_SIZE(clk_mux_rxdphy_cfg_p), CLK_SET_RATE_PARENT, + 0x0C4, 8, 1, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_ICS, "clk_mux_ics", clk_mux_ics_p, + ARRAY_SIZE(clk_mux_ics_p), CLK_SET_RATE_PARENT, + 0xc8, 12, 4, CLK_MUX_HIWORD_MASK, }, +}; + +static const struct hisi_divider_clock hi3670_crgctrl_divider_clks[] = { + { HI3670_CLK_DIV_CFGBUS, "clk_div_cfgbus", "clk_div_sysbus", + CLK_SET_RATE_PARENT, 0xEC, 0, 2, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_MMC0BUS, "clk_div_mmc0bus", "autodiv_emmc0bus", + CLK_SET_RATE_PARENT, 0x0EC, 2, 1, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_MMC1BUS, "clk_div_mmc1bus", "clk_div_sysbus", + CLK_SET_RATE_PARENT, 0x0EC, 3, 1, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_PCLK_DIV_MMC1_PCIE, "pclk_div_mmc1_pcie", "pclk_andgt_mmc1_pcie", + CLK_SET_RATE_PARENT, 0xb4, 6, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_VCODECBUS, "clk_div_vcodecbus", "clk_gate_vcodecbus_gt", + CLK_SET_RATE_PARENT, 0x0BC, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_SD, "clk_div_sd", "clk_andgt_sd", + CLK_SET_RATE_PARENT, 0xB8, 0, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_SDIO, "clk_div_sdio", 
"clk_andgt_sdio", + CLK_SET_RATE_PARENT, 0xC0, 0, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_UARTH, "clk_div_uarth", "clk_andgt_uarth", + CLK_SET_RATE_PARENT, 0xB0, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_UARTL, "clk_div_uartl", "clk_andgt_uartl", + CLK_SET_RATE_PARENT, 0xB0, 8, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_UART0, "clk_div_uart0", "clk_andgt_uart0", + CLK_SET_RATE_PARENT, 0xB0, 4, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_I2C, "clk_div_i2c", "clk_div_320m", + CLK_SET_RATE_PARENT, 0xE8, 4, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_SPI, "clk_div_spi", "clk_andgt_spi", + CLK_SET_RATE_PARENT, 0xC4, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_PCIEAXI, "clk_div_pcieaxi", "clk_andgt_pcieaxi", + CLK_SET_RATE_PARENT, 0xb4, 0, 5, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_AO_ASP, "clk_div_ao_asp", "clk_div_ao_asp_gt", + CLK_SET_RATE_PARENT, 0x108, 6, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_CSI_TRANS, "clk_div_csi_trans", "clk_gate_csi_trans", + CLK_SET_RATE_PARENT, 0xD4, 0, 5, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_DSI_TRANS, "clk_div_dsi_trans", "clk_gate_dsi_trans", + CLK_SET_RATE_PARENT, 0xD4, 10, 5, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_PTP, "clk_div_ptp", "clk_andgt_ptp", + CLK_SET_RATE_PARENT, 0xD8, 0, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_CLKOUT0_PLL, "clk_div_clkout0_pll", "clk_andgt_out0", + CLK_SET_RATE_PARENT, 0xe0, 4, 6, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_CLKOUT1_PLL, "clk_div_clkout1_pll", "clk_andgt_out1", + CLK_SET_RATE_PARENT, 0xe0, 10, 6, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLKDIV_DP_AUDIO_PLL_AO, "clkdiv_dp_audio_pll_ao", "clkgt_dp_audio_pll_ao", + CLK_SET_RATE_PARENT, 0xBC, 11, 4, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_VDEC, "clk_div_vdec", "clk_andgt_vdec", + CLK_SET_RATE_PARENT, 0xC4, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_VENC, "clk_div_venc", "clk_andgt_venc", + CLK_SET_RATE_PARENT, 0xC0, 8, 6, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_ISP_SNCLK_DIV0, "clk_isp_snclk_div0", "clk_isp_snclk_fac", + CLK_SET_RATE_PARENT, 0x108, 0, 2, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_ISP_SNCLK_DIV1, "clk_isp_snclk_div1", "clk_isp_snclk_fac", + CLK_SET_RATE_PARENT, 0x10C, 14, 2, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_ISP_SNCLK_DIV2, "clk_isp_snclk_div2", "clk_isp_snclk_fac", + CLK_SET_RATE_PARENT, 0x10C, 11, 2, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_ICS, "clk_div_ics", "clk_andgt_ics", + CLK_SET_RATE_PARENT, 0xE4, 9, 6, CLK_DIVIDER_HIWORD_MASK, 0, }, +}; + +/* clk_pmuctrl */ +static const struct hisi_gate_clock hi3670_pmu_gate_clks[] = { + { HI3670_GATE_ABB_192, "clk_gate_abb_192", "clkin_sys", + CLK_SET_RATE_PARENT, (0x037 << 2), 0, 0, }, +}; + +/* clk_pctrl */ +static const struct hisi_gate_clock hi3670_pctrl_gate_clks[] = { + { HI3670_GATE_UFS_TCXO_EN, "clk_gate_ufs_tcxo_en", "clk_gate_abb_192", + CLK_SET_RATE_PARENT, 0x10, 0, CLK_GATE_HIWORD_MASK, }, + { HI3670_GATE_USB_TCXO_EN, "clk_gate_usb_tcxo_en", "clk_gate_abb_192", + CLK_SET_RATE_PARENT, 0x10, 1, CLK_GATE_HIWORD_MASK, }, +}; + +/* clk_sctrl */ +static const struct hisi_gate_clock hi3670_sctrl_gate_sep_clks[] = { + { HI3670_PPLL0_EN_ACPU, "ppll0_en_acpu", "clk_ppll0", + CLK_SET_RATE_PARENT, 0x190, 26, 0, }, + { HI3670_PPLL0_GT_CPU, "ppll0_gt_cpu", "clk_ppll0", + CLK_SET_RATE_PARENT, 0x190, 15, 0, }, + { HI3670_CLK_GATE_PPLL0_MEDIA, "clk_gate_ppll0_media", "clk_ppll0", + CLK_SET_RATE_PARENT, 0x1b0, 6, 0, }, + { 
HI3670_PCLK_GPIO18, "pclk_gpio18", "clk_div_aobus", + CLK_SET_RATE_PARENT, 0x1B0, 9, 0, }, + { HI3670_PCLK_GPIO19, "pclk_gpio19", "clk_div_aobus", + CLK_SET_RATE_PARENT, 0x1B0, 8, 0, }, + { HI3670_CLK_GATE_SPI, "clk_gate_spi", "clk_div_ioperi", + CLK_SET_RATE_PARENT, 0x1B0, 10, 0, }, + { HI3670_PCLK_GATE_SPI, "pclk_gate_spi", "clk_div_ioperi", + CLK_SET_RATE_PARENT, 0x1B0, 10, 0, }, + { HI3670_CLK_GATE_UFS_SUBSYS, "clk_gate_ufs_subsys", "clk_div_ufs_subsys", + CLK_SET_RATE_PARENT, 0x1B0, 14, 0, }, + { HI3670_CLK_GATE_UFSIO_REF, "clk_gate_ufsio_ref", "clkin_sys", + CLK_SET_RATE_PARENT, 0x1b0, 12, 0, }, + { HI3670_PCLK_AO_GPIO0, "pclk_ao_gpio0", "clk_div_aobus", + CLK_SET_RATE_PARENT, 0x160, 11, 0, }, + { HI3670_PCLK_AO_GPIO1, "pclk_ao_gpio1", "clk_div_aobus", + CLK_SET_RATE_PARENT, 0x160, 12, 0, }, + { HI3670_PCLK_AO_GPIO2, "pclk_ao_gpio2", "clk_div_aobus", + CLK_SET_RATE_PARENT, 0x160, 13, 0, }, + { HI3670_PCLK_AO_GPIO3, "pclk_ao_gpio3", "clk_div_aobus", + CLK_SET_RATE_PARENT, 0x160, 14, 0, }, + { HI3670_PCLK_AO_GPIO4, "pclk_ao_gpio4", "clk_div_aobus", + CLK_SET_RATE_PARENT, 0x160, 21, 0, }, + { HI3670_PCLK_AO_GPIO5, "pclk_ao_gpio5", "clk_div_aobus", + CLK_SET_RATE_PARENT, 0x160, 22, 0, }, + { HI3670_PCLK_AO_GPIO6, "pclk_ao_gpio6", "clk_div_aobus", + CLK_SET_RATE_PARENT, 0x160, 25, 0, }, + { HI3670_CLK_GATE_OUT0, "clk_gate_out0", "clk_mux_clkout0", + CLK_SET_RATE_PARENT, 0x160, 16, 0, }, + { HI3670_CLK_GATE_OUT1, "clk_gate_out1", "clk_mux_clkout1", + CLK_SET_RATE_PARENT, 0x160, 17, 0, }, + { HI3670_PCLK_GATE_SYSCNT, "pclk_gate_syscnt", "clk_div_aobus", + CLK_SET_RATE_PARENT, 0x160, 19, 0, }, + { HI3670_CLK_GATE_SYSCNT, "clk_gate_syscnt", "clkin_sys", + CLK_SET_RATE_PARENT, 0x160, 20, 0, }, + { HI3670_CLK_GATE_ASP_SUBSYS_PERI, "clk_gate_asp_subsys_peri", + "clk_mux_asp_subsys_peri", + CLK_SET_RATE_PARENT, 0x170, 6, 0, }, + { HI3670_CLK_GATE_ASP_SUBSYS, "clk_gate_asp_subsys", "clk_mux_asp_pll", + CLK_SET_RATE_PARENT, 0x170, 4, 0, }, + { HI3670_CLK_GATE_ASP_TCXO, "clk_gate_asp_tcxo", "clkin_sys", + CLK_SET_RATE_PARENT, 0x160, 27, 0, }, + { HI3670_CLK_GATE_DP_AUDIO_PLL, "clk_gate_dp_audio_pll", + "clk_gate_dp_audio_pll_ao", + CLK_SET_RATE_PARENT, 0x1B0, 7, 0, }, +}; + +static const struct hisi_gate_clock hi3670_sctrl_gate_clks[] = { + { HI3670_CLK_ANDGT_IOPERI, "clk_andgt_ioperi", "clk_ppll0", + CLK_SET_RATE_PARENT, 0x270, 6, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLKANDGT_ASP_SUBSYS_PERI, "clkandgt_asp_subsys_peri", + "clk_ppll0", + CLK_SET_RATE_PARENT, 0x268, 3, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_ANGT_ASP_SUBSYS, "clk_angt_asp_subsys", "clk_ppll0", + CLK_SET_RATE_PARENT, 0x258, 0, CLK_GATE_HIWORD_MASK, 0, }, +}; + +static const char *const +clk_mux_ufs_subsys_p[] = { "clkin_sys", "clk_ppll0", }; +static const char *const +clk_mux_clkout0_p[] = { "clkin_ref", "clk_div_clkout0_tcxo", + "clk_div_clkout0_pll", "clk_div_clkout0_pll", }; +static const char *const +clk_mux_clkout1_p[] = { "clkin_ref", "clk_div_clkout1_tcxo", + "clk_div_clkout1_pll", "clk_div_clkout1_pll", }; +static const char *const +clk_mux_asp_subsys_peri_p[] = { "clk_ppll0", "clk_fll_src", }; +static const char *const +clk_mux_asp_pll_p[] = { "clk_ppll0", "clk_fll_src", "clk_gate_ao_asp", + "clk_pciepll_rev", }; + +static const struct hisi_mux_clock hi3670_sctrl_mux_clks[] = { + { HI3670_CLK_MUX_UFS_SUBSYS, "clk_mux_ufs_subsys", clk_mux_ufs_subsys_p, + ARRAY_SIZE(clk_mux_ufs_subsys_p), CLK_SET_RATE_PARENT, + 0x274, 8, 1, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_CLKOUT0, "clk_mux_clkout0", clk_mux_clkout0_p, + 
ARRAY_SIZE(clk_mux_clkout0_p), CLK_SET_RATE_PARENT, + 0x254, 12, 2, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_CLKOUT1, "clk_mux_clkout1", clk_mux_clkout1_p, + ARRAY_SIZE(clk_mux_clkout1_p), CLK_SET_RATE_PARENT, + 0x254, 14, 2, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_ASP_SUBSYS_PERI, "clk_mux_asp_subsys_peri", + clk_mux_asp_subsys_peri_p, ARRAY_SIZE(clk_mux_asp_subsys_peri_p), + CLK_SET_RATE_PARENT, 0x268, 8, 1, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_ASP_PLL, "clk_mux_asp_pll", clk_mux_asp_pll_p, + ARRAY_SIZE(clk_mux_asp_pll_p), CLK_SET_RATE_PARENT, + 0x268, 9, 2, CLK_MUX_HIWORD_MASK, }, +}; + +static const struct hisi_divider_clock hi3670_sctrl_divider_clks[] = { + { HI3670_CLK_DIV_AOBUS, "clk_div_aobus", "clk_ppll0", + CLK_SET_RATE_PARENT, 0x254, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_UFS_SUBSYS, "clk_div_ufs_subsys", "clk_mux_ufs_subsys", + CLK_SET_RATE_PARENT, 0x274, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_IOPERI, "clk_div_ioperi", "clk_andgt_ioperi", + CLK_SET_RATE_PARENT, 0x270, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_CLKOUT0_TCXO, "clk_div_clkout0_tcxo", "clkin_sys", + CLK_SET_RATE_PARENT, 0x254, 6, 3, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_CLKOUT1_TCXO, "clk_div_clkout1_tcxo", "clkin_sys", + CLK_SET_RATE_PARENT, 0x254, 9, 3, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_ASP_SUBSYS_PERI_DIV, "clk_asp_subsys_peri_div", "clkandgt_asp_subsys_peri", + CLK_SET_RATE_PARENT, 0x268, 0, 3, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_ASP_SUBSYS, "clk_div_asp_subsys", "clk_angt_asp_subsys", + CLK_SET_RATE_PARENT, 0x250, 0, 3, CLK_DIVIDER_HIWORD_MASK, 0, }, +}; + +/* clk_iomcu */ +static const struct hisi_fixed_factor_clock hi3670_iomcu_fixed_factor_clks[] = { + { HI3670_CLK_GATE_I2C0, "clk_gate_i2c0", "clk_i2c0_gate_iomcu", 1, 4, 0, }, + { HI3670_CLK_GATE_I2C1, "clk_gate_i2c1", "clk_i2c1_gate_iomcu", 1, 4, 0, }, + { HI3670_CLK_GATE_I2C2, "clk_gate_i2c2", "clk_i2c2_gate_iomcu", 1, 4, 0, }, + { HI3670_CLK_GATE_SPI0, "clk_gate_spi0", "clk_spi0_gate_iomcu", 1, 1, 0, }, + { HI3670_CLK_GATE_SPI2, "clk_gate_spi2", "clk_spi2_gate_iomcu", 1, 1, 0, }, + { HI3670_CLK_GATE_UART3, "clk_gate_uart3", "clk_uart3_gate_iomcu", 1, 16, 0, }, +}; + +static const struct hisi_gate_clock hi3670_iomcu_gate_sep_clks[] = { + { HI3670_CLK_I2C0_GATE_IOMCU, "clk_i2c0_gate_iomcu", "clk_fll_src", + CLK_SET_RATE_PARENT, 0x10, 3, 0, }, + { HI3670_CLK_I2C1_GATE_IOMCU, "clk_i2c1_gate_iomcu", "clk_fll_src", + CLK_SET_RATE_PARENT, 0x10, 4, 0, }, + { HI3670_CLK_I2C2_GATE_IOMCU, "clk_i2c2_gate_iomcu", "clk_fll_src", + CLK_SET_RATE_PARENT, 0x10, 5, 0, }, + { HI3670_CLK_SPI0_GATE_IOMCU, "clk_spi0_gate_iomcu", "clk_fll_src", + CLK_SET_RATE_PARENT, 0x10, 10, 0, }, + { HI3670_CLK_SPI2_GATE_IOMCU, "clk_spi2_gate_iomcu", "clk_fll_src", + CLK_SET_RATE_PARENT, 0x10, 30, 0, }, + { HI3670_CLK_UART3_GATE_IOMCU, "clk_uart3_gate_iomcu", "clk_gate_iomcu_peri0", + CLK_SET_RATE_PARENT, 0x10, 11, 0, }, + { HI3670_CLK_GATE_PERI0_IOMCU, "clk_gate_iomcu_peri0", "clk_ppll0", + CLK_SET_RATE_PARENT, 0x90, 0, 0, }, +}; + +/* clk_media1 */ +static const struct hisi_gate_clock hi3670_media1_gate_sep_clks[] = { + { HI3670_ACLK_GATE_NOC_DSS, "aclk_gate_noc_dss", "aclk_gate_disp_noc_subsys", + CLK_SET_RATE_PARENT, 0x10, 21, 0, }, + { HI3670_PCLK_GATE_NOC_DSS_CFG, "pclk_gate_noc_dss_cfg", "pclk_gate_disp_noc_subsys", + CLK_SET_RATE_PARENT, 0x10, 22, 0, }, + { HI3670_PCLK_GATE_MMBUF_CFG, "pclk_gate_mmbuf_cfg", "pclk_gate_disp_noc_subsys", + CLK_SET_RATE_PARENT, 0x20, 5, 0, }, + { 
HI3670_PCLK_GATE_DISP_NOC_SUBSYS, "pclk_gate_disp_noc_subsys", "clk_div_sysbus", + CLK_SET_RATE_PARENT, 0x10, 18, 0, }, + { HI3670_ACLK_GATE_DISP_NOC_SUBSYS, "aclk_gate_disp_noc_subsys", "clk_gate_vivobusfreq", + CLK_SET_RATE_PARENT, 0x10, 17, 0, }, + { HI3670_PCLK_GATE_DSS, "pclk_gate_dss", "pclk_gate_disp_noc_subsys", + CLK_SET_RATE_PARENT, 0x00, 14, 0, }, + { HI3670_ACLK_GATE_DSS, "aclk_gate_dss", "aclk_gate_disp_noc_subsys", + CLK_SET_RATE_PARENT, 0x00, 19, 0, }, + { HI3670_CLK_GATE_VIVOBUSFREQ, "clk_gate_vivobusfreq", "clk_div_vivobus", + CLK_SET_RATE_PARENT, 0x00, 18, 0, }, + { HI3670_CLK_GATE_EDC0, "clk_gate_edc0", "clk_div_edc0", + CLK_SET_RATE_PARENT, 0x00, 15, 0, }, + { HI3670_CLK_GATE_LDI0, "clk_gate_ldi0", "clk_div_ldi0", + CLK_SET_RATE_PARENT, 0x00, 16, 0, }, + { HI3670_CLK_GATE_LDI1FREQ, "clk_gate_ldi1freq", "clk_div_ldi1", + CLK_SET_RATE_PARENT, 0x00, 17, 0, }, + { HI3670_CLK_GATE_BRG, "clk_gate_brg", "clk_media_common_div", + CLK_SET_RATE_PARENT, 0x00, 29, 0, }, + { HI3670_ACLK_GATE_ASC, "aclk_gate_asc", "clk_gate_mmbuf", + CLK_SET_RATE_PARENT, 0x20, 3, 0, }, + { HI3670_CLK_GATE_DSS_AXI_MM, "clk_gate_dss_axi_mm", "clk_gate_mmbuf", + CLK_SET_RATE_PARENT, 0x20, 4, 0, }, + { HI3670_CLK_GATE_MMBUF, "clk_gate_mmbuf", "aclk_div_mmbuf", + CLK_SET_RATE_PARENT, 0x20, 0, 0, }, + { HI3670_PCLK_GATE_MMBUF, "pclk_gate_mmbuf", "pclk_div_mmbuf", + CLK_SET_RATE_PARENT, 0x20, 1, 0, }, + { HI3670_CLK_GATE_ATDIV_VIVO, "clk_gate_atdiv_vivo", "clk_div_vivobus", + CLK_SET_RATE_PARENT, 0x010, 1, 0, }, +}; + +static const struct hisi_gate_clock hi3670_media1_gate_clks[] = { + { HI3670_CLK_GATE_VIVOBUS_ANDGT, "clk_gate_vivobus_andgt", "clk_mux_vivobus", + CLK_SET_RATE_PARENT, 0x84, 3, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_ANDGT_EDC0, "clk_andgt_edc0", "clk_mux_edc0", + CLK_SET_RATE_PARENT, 0x84, 7, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_ANDGT_LDI0, "clk_andgt_ldi0", "clk_mux_ldi0", + CLK_SET_RATE_PARENT, 0x84, 9, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_ANDGT_LDI1, "clk_andgt_ldi1", "clk_mux_ldi1", + CLK_SET_RATE_PARENT, 0x84, 8, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_CLK_MMBUF_PLL_ANDGT, "clk_mmbuf_pll_andgt", "clk_sw_mmbuf", + CLK_SET_RATE_PARENT, 0x84, 14, CLK_GATE_HIWORD_MASK, 0, }, + { HI3670_PCLK_MMBUF_ANDGT, "pclk_mmbuf_andgt", "aclk_div_mmbuf", + CLK_SET_RATE_PARENT, 0x84, 15, CLK_GATE_HIWORD_MASK, 0, }, +}; + +static const char *const +clk_mux_vivobus_p[] = { "clk_invalid", "clk_invalid", "clk_gate_ppll0_media", + "clk_invalid", "clk_gate_ppll2_media", "clk_invalid", + "clk_invalid", "clk_invalid", "clk_gate_ppll3_media", + "clk_invalid", "clk_invalid", "clk_invalid", + "clk_invalid", "clk_invalid", "clk_invalid", + "clk_invalid", }; +static const char *const +clk_mux_edc0_p[] = { "clk_invalid", "clk_invalid", "clk_gate_ppll0_media", + "clk_invalid", "clk_gate_ppll2_media", "clk_invalid", + "clk_invalid", "clk_invalid", "clk_gate_ppll3_media", + "clk_invalid", "clk_invalid", "clk_invalid", "clk_invalid", + "clk_invalid", "clk_invalid", "clk_invalid", }; +static const char *const +clk_mux_ldi0_p[] = { "clk_invalid", "clk_gate_ppll7_media", + "clk_gate_ppll0_media", "clk_invalid", + "clk_gate_ppll2_media", "clk_invalid", "clk_invalid", + "clk_invalid", "clk_gate_ppll3_media", "clk_invalid", + "clk_invalid", "clk_invalid", "clk_invalid", "clk_invalid", + "clk_invalid", "clk_invalid", }; +static const char *const +clk_mux_ldi1_p[] = { "clk_invalid", "clk_gate_ppll7_media", + "clk_gate_ppll0_media", "clk_invalid", + "clk_gate_ppll2_media", "clk_invalid", "clk_invalid", + 
"clk_invalid", "clk_gate_ppll3_media", "clk_invalid", + "clk_invalid", "clk_invalid", "clk_invalid", "clk_invalid", + "clk_invalid", "clk_invalid", }; +static const char *const +clk_sw_mmbuf_p[] = { "clk_invalid", "clk_invalid", "clk_gate_ppll0_media", + "clk_invalid", "clk_gate_ppll2_media", "clk_invalid", + "clk_invalid", "clk_invalid", "clk_gate_ppll3_media", + "clk_invalid", "clk_invalid", "clk_invalid", "clk_invalid", + "clk_invalid", "clk_invalid", "clk_invalid", }; + +static const struct hisi_mux_clock hi3670_media1_mux_clks[] = { + { HI3670_CLK_MUX_VIVOBUS, "clk_mux_vivobus", clk_mux_vivobus_p, + ARRAY_SIZE(clk_mux_vivobus_p), CLK_SET_RATE_PARENT, + 0x74, 6, 4, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_EDC0, "clk_mux_edc0", clk_mux_edc0_p, + ARRAY_SIZE(clk_mux_edc0_p), CLK_SET_RATE_PARENT, + 0x68, 6, 4, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_LDI0, "clk_mux_ldi0", clk_mux_ldi0_p, + ARRAY_SIZE(clk_mux_ldi0_p), CLK_SET_RATE_PARENT, + 0x60, 6, 4, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_MUX_LDI1, "clk_mux_ldi1", clk_mux_ldi1_p, + ARRAY_SIZE(clk_mux_ldi1_p), CLK_SET_RATE_PARENT, + 0x64, 6, 4, CLK_MUX_HIWORD_MASK, }, + { HI3670_CLK_SW_MMBUF, "clk_sw_mmbuf", clk_sw_mmbuf_p, + ARRAY_SIZE(clk_sw_mmbuf_p), CLK_SET_RATE_PARENT, + 0x88, 0, 4, CLK_MUX_HIWORD_MASK, }, +}; + +static const struct hisi_divider_clock hi3670_media1_divider_clks[] = { + { HI3670_CLK_DIV_VIVOBUS, "clk_div_vivobus", "clk_gate_vivobus_andgt", + CLK_SET_RATE_PARENT, 0x74, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_EDC0, "clk_div_edc0", "clk_andgt_edc0", + CLK_SET_RATE_PARENT, 0x68, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_LDI0, "clk_div_ldi0", "clk_andgt_ldi0", + CLK_SET_RATE_PARENT, 0x60, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_CLK_DIV_LDI1, "clk_div_ldi1", "clk_andgt_ldi1", + CLK_SET_RATE_PARENT, 0x64, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_ACLK_DIV_MMBUF, "aclk_div_mmbuf", "clk_mmbuf_pll_andgt", + CLK_SET_RATE_PARENT, 0x7C, 10, 6, CLK_DIVIDER_HIWORD_MASK, 0, }, + { HI3670_PCLK_DIV_MMBUF, "pclk_div_mmbuf", "pclk_mmbuf_andgt", + CLK_SET_RATE_PARENT, 0x78, 0, 2, CLK_DIVIDER_HIWORD_MASK, 0, }, +}; + +/* clk_media2 */ +static const struct hisi_gate_clock hi3670_media2_gate_sep_clks[] = { + { HI3670_CLK_GATE_VDECFREQ, "clk_gate_vdecfreq", "clk_div_vdec", + CLK_SET_RATE_PARENT, 0x00, 8, 0, }, + { HI3670_CLK_GATE_VENCFREQ, "clk_gate_vencfreq", "clk_div_venc", + CLK_SET_RATE_PARENT, 0x00, 5, 0, }, + { HI3670_CLK_GATE_ICSFREQ, "clk_gate_icsfreq", "clk_div_ics", + CLK_SET_RATE_PARENT, 0x00, 2, 0, }, +}; + +static void hi3670_clk_crgctrl_init(struct device_node *np) +{ + struct hisi_clock_data *clk_data; + + int nr = ARRAY_SIZE(hi3670_fixed_rate_clks) + + ARRAY_SIZE(hi3670_crgctrl_gate_sep_clks) + + ARRAY_SIZE(hi3670_crgctrl_gate_clks) + + ARRAY_SIZE(hi3670_crgctrl_mux_clks) + + ARRAY_SIZE(hi3670_crg_fixed_factor_clks) + + ARRAY_SIZE(hi3670_crgctrl_divider_clks); + + clk_data = hisi_clk_init(np, nr); + if (!clk_data) + return; + + hisi_clk_register_fixed_rate(hi3670_fixed_rate_clks, + ARRAY_SIZE(hi3670_fixed_rate_clks), + clk_data); + hisi_clk_register_gate_sep(hi3670_crgctrl_gate_sep_clks, + ARRAY_SIZE(hi3670_crgctrl_gate_sep_clks), + clk_data); + hisi_clk_register_gate(hi3670_crgctrl_gate_clks, + ARRAY_SIZE(hi3670_crgctrl_gate_clks), + clk_data); + hisi_clk_register_mux(hi3670_crgctrl_mux_clks, + ARRAY_SIZE(hi3670_crgctrl_mux_clks), + clk_data); + hisi_clk_register_fixed_factor(hi3670_crg_fixed_factor_clks, + ARRAY_SIZE(hi3670_crg_fixed_factor_clks), + clk_data); + 
hisi_clk_register_divider(hi3670_crgctrl_divider_clks, + ARRAY_SIZE(hi3670_crgctrl_divider_clks), + clk_data); +} + +static void hi3670_clk_pctrl_init(struct device_node *np) +{ + struct hisi_clock_data *clk_data; + int nr = ARRAY_SIZE(hi3670_pctrl_gate_clks); + + clk_data = hisi_clk_init(np, nr); + if (!clk_data) + return; + hisi_clk_register_gate(hi3670_pctrl_gate_clks, + ARRAY_SIZE(hi3670_pctrl_gate_clks), clk_data); +} + +static void hi3670_clk_pmuctrl_init(struct device_node *np) +{ + struct hisi_clock_data *clk_data; + int nr = ARRAY_SIZE(hi3670_pmu_gate_clks); + + clk_data = hisi_clk_init(np, nr); + if (!clk_data) + return; + + hisi_clk_register_gate(hi3670_pmu_gate_clks, + ARRAY_SIZE(hi3670_pmu_gate_clks), clk_data); +} + +static void hi3670_clk_sctrl_init(struct device_node *np) +{ + struct hisi_clock_data *clk_data; + int nr = ARRAY_SIZE(hi3670_sctrl_gate_sep_clks) + + ARRAY_SIZE(hi3670_sctrl_gate_clks) + + ARRAY_SIZE(hi3670_sctrl_mux_clks) + + ARRAY_SIZE(hi3670_sctrl_divider_clks); + + clk_data = hisi_clk_init(np, nr); + if (!clk_data) + return; + + hisi_clk_register_gate_sep(hi3670_sctrl_gate_sep_clks, + ARRAY_SIZE(hi3670_sctrl_gate_sep_clks), + clk_data); + hisi_clk_register_gate(hi3670_sctrl_gate_clks, + ARRAY_SIZE(hi3670_sctrl_gate_clks), + clk_data); + hisi_clk_register_mux(hi3670_sctrl_mux_clks, + ARRAY_SIZE(hi3670_sctrl_mux_clks), + clk_data); + hisi_clk_register_divider(hi3670_sctrl_divider_clks, + ARRAY_SIZE(hi3670_sctrl_divider_clks), + clk_data); +} + +static void hi3670_clk_iomcu_init(struct device_node *np) +{ + struct hisi_clock_data *clk_data; + int nr = ARRAY_SIZE(hi3670_iomcu_gate_sep_clks) + + ARRAY_SIZE(hi3670_iomcu_fixed_factor_clks); + + clk_data = hisi_clk_init(np, nr); + if (!clk_data) + return; + + hisi_clk_register_gate(hi3670_iomcu_gate_sep_clks, + ARRAY_SIZE(hi3670_iomcu_gate_sep_clks), clk_data); + + hisi_clk_register_fixed_factor(hi3670_iomcu_fixed_factor_clks, + ARRAY_SIZE(hi3670_iomcu_fixed_factor_clks), + clk_data); +} + +static void hi3670_clk_media1_init(struct device_node *np) +{ + struct hisi_clock_data *clk_data; + + int nr = ARRAY_SIZE(hi3670_media1_gate_sep_clks) + + ARRAY_SIZE(hi3670_media1_gate_clks) + + ARRAY_SIZE(hi3670_media1_mux_clks) + + ARRAY_SIZE(hi3670_media1_divider_clks); + + clk_data = hisi_clk_init(np, nr); + if (!clk_data) + return; + + hisi_clk_register_gate_sep(hi3670_media1_gate_sep_clks, + ARRAY_SIZE(hi3670_media1_gate_sep_clks), + clk_data); + hisi_clk_register_gate(hi3670_media1_gate_clks, + ARRAY_SIZE(hi3670_media1_gate_clks), + clk_data); + hisi_clk_register_mux(hi3670_media1_mux_clks, + ARRAY_SIZE(hi3670_media1_mux_clks), + clk_data); + hisi_clk_register_divider(hi3670_media1_divider_clks, + ARRAY_SIZE(hi3670_media1_divider_clks), + clk_data); +} + +static void hi3670_clk_media2_init(struct device_node *np) +{ + struct hisi_clock_data *clk_data; + + int nr = ARRAY_SIZE(hi3670_media2_gate_sep_clks); + + clk_data = hisi_clk_init(np, nr); + if (!clk_data) + return; + + hisi_clk_register_gate_sep(hi3670_media2_gate_sep_clks, + ARRAY_SIZE(hi3670_media2_gate_sep_clks), + clk_data); +} + +static const struct of_device_id hi3670_clk_match_table[] = { + { .compatible = "hisilicon,hi3670-crgctrl", + .data = hi3670_clk_crgctrl_init }, + { .compatible = "hisilicon,hi3670-pctrl", + .data = hi3670_clk_pctrl_init }, + { .compatible = "hisilicon,hi3670-pmuctrl", + .data = hi3670_clk_pmuctrl_init }, + { .compatible = "hisilicon,hi3670-sctrl", + .data = hi3670_clk_sctrl_init }, + { .compatible = "hisilicon,hi3670-iomcu", + .data = 
hi3670_clk_iomcu_init }, + { .compatible = "hisilicon,hi3670-media1-crg", + .data = hi3670_clk_media1_init }, + { .compatible = "hisilicon,hi3670-media2-crg", + .data = hi3670_clk_media2_init }, + { } +}; + +static int hi3670_clk_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = pdev->dev.of_node; + void (*init_func)(struct device_node *np); + + init_func = of_device_get_match_data(dev); + if (!init_func) + return -ENODEV; + + init_func(np); + + return 0; +} + +static struct platform_driver hi3670_clk_driver = { + .probe = hi3670_clk_probe, + .driver = { + .name = "hi3670-clk", + .of_match_table = hi3670_clk_match_table, + }, +}; + +static int __init hi3670_clk_init(void) +{ + return platform_driver_register(&hi3670_clk_driver); +} +core_initcall(hi3670_clk_init); diff --git a/drivers/clk/hisilicon/reset.c b/drivers/clk/hisilicon/reset.c index 2a5015c736ce..43e82fa64422 100644 --- a/drivers/clk/hisilicon/reset.c +++ b/drivers/clk/hisilicon/reset.c @@ -109,9 +109,8 @@ struct hisi_reset_controller *hisi_reset_init(struct platform_device *pdev) return NULL; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - rstc->membase = devm_ioremap(&pdev->dev, - res->start, resource_size(res)); - if (!rstc->membase) + rstc->membase = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(rstc->membase)) return NULL; spin_lock_init(&rstc->lock); diff --git a/drivers/clk/imx/clk-cpu.c b/drivers/clk/imx/clk-cpu.c index 9d46eac87f45..ed1b7e97a0d3 100644 --- a/drivers/clk/imx/clk-cpu.c +++ b/drivers/clk/imx/clk-cpu.c @@ -94,7 +94,7 @@ struct clk *imx_clk_cpu(const char *name, const char *parent_name, init.name = name; init.ops = &clk_cpu_ops; - init.flags = 0; + init.flags = CLK_IS_CRITICAL; init.parent_names = &parent_name; init.num_parents = 1; diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c index 8c7c2fcb8d94..bbe0c60f4d09 100644 --- a/drivers/clk/imx/clk-imx6q.c +++ b/drivers/clk/imx/clk-imx6q.c @@ -789,6 +789,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) clk[IMX6QDL_CLK_MLB] = imx_clk_gate2("mlb", "axi", base + 0x74, 18); clk[IMX6QDL_CLK_MMDC_CH0_AXI] = imx_clk_gate2_flags("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20, CLK_IS_CRITICAL); clk[IMX6QDL_CLK_MMDC_CH1_AXI] = imx_clk_gate2("mmdc_ch1_axi", "mmdc_ch1_axi_podf", base + 0x74, 22); + clk[IMX6QDL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL); clk[IMX6QDL_CLK_OCRAM] = imx_clk_gate2("ocram", "ahb", base + 0x74, 28); clk[IMX6QDL_CLK_OPENVG_AXI] = imx_clk_gate2("openvg_axi", "axi", base + 0x74, 30); clk[IMX6QDL_CLK_PCIE_AXI] = imx_clk_gate2("pcie_axi", "pcie_axi_sel", base + 0x78, 0); diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c index eb6bcbf345a3..6fcfbbd907a5 100644 --- a/drivers/clk/imx/clk-imx6sl.c +++ b/drivers/clk/imx/clk-imx6sl.c @@ -386,6 +386,8 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node) clks[IMX6SL_CLK_LCDIF_AXI] = imx_clk_gate2("lcdif_axi", "lcdif_axi_podf", base + 0x74, 6); clks[IMX6SL_CLK_LCDIF_PIX] = imx_clk_gate2("lcdif_pix", "lcdif_pix_podf", base + 0x74, 8); clks[IMX6SL_CLK_EPDC_PIX] = imx_clk_gate2("epdc_pix", "epdc_pix_podf", base + 0x74, 10); + clks[IMX6SL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL); + clks[IMX6SL_CLK_MMDC_P1_IPG] = imx_clk_gate2("mmdc_p1_ipg", "ipg", base + 0x74, 26); clks[IMX6SL_CLK_OCRAM] = imx_clk_gate2("ocram", "ocram_podf", base + 0x74, 28); 
clks[IMX6SL_CLK_PWM1] = imx_clk_gate2("pwm1", "perclk", base + 0x78, 16); clks[IMX6SL_CLK_PWM2] = imx_clk_gate2("pwm2", "perclk", base + 0x78, 18); diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c index 52379ee49aec..3bd2044cf25c 100644 --- a/drivers/clk/imx/clk-imx6sll.c +++ b/drivers/clk/imx/clk-imx6sll.c @@ -293,6 +293,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node) clks[IMX6SLL_CLK_WDOG1] = imx_clk_gate2("wdog1", "ipg", base + 0x74, 16); clks[IMX6SLL_CLK_MMDC_P0_FAST] = imx_clk_gate_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL); clks[IMX6SLL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL); + clks[IMX6SLL_CLK_MMDC_P1_IPG] = imx_clk_gate2("mmdc_p1_ipg", "ipg", base + 0x74, 26); clks[IMX6SLL_CLK_OCRAM] = imx_clk_gate_flags("ocram","ahb", base + 0x74, 28, CLK_IS_CRITICAL); /* CCGR4 */ diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c index d9f2890ffe62..18527a335ace 100644 --- a/drivers/clk/imx/clk-imx6sx.c +++ b/drivers/clk/imx/clk-imx6sx.c @@ -431,6 +431,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node) clks[IMX6SX_CLK_MLB] = imx_clk_gate2("mlb", "ahb", base + 0x74, 18); clks[IMX6SX_CLK_MMDC_P0_FAST] = imx_clk_gate2_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL); clks[IMX6SX_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL); + clks[IMX6SX_CLK_MMDC_P1_IPG] = imx_clk_gate2("mmdc_p1_ipg", "ipg", base + 0x74, 26); clks[IMX6SX_CLK_OCRAM] = imx_clk_gate2_flags("ocram", "ocram_podf", base + 0x74, 28, CLK_IS_CRITICAL); /* CCGR4 */ diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c index 361b43f9742e..fd60d1549f71 100644 --- a/drivers/clk/imx/clk-imx6ul.c +++ b/drivers/clk/imx/clk-imx6ul.c @@ -408,6 +408,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node) clks[IMX6UL_CLK_WDOG1] = imx_clk_gate2("wdog1", "ipg", base + 0x74, 16); clks[IMX6UL_CLK_MMDC_P0_FAST] = imx_clk_gate_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL); clks[IMX6UL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL); + clks[IMX6UL_CLK_MMDC_P1_IPG] = imx_clk_gate2("mmdc_p1_ipg", "ipg", base + 0x74, 26); clks[IMX6UL_CLK_AXI] = imx_clk_gate_flags("axi", "axi_podf", base + 0x74, 28, CLK_IS_CRITICAL); /* CCGR4 */ diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c index 881b772c4ac9..adb08f64c691 100644 --- a/drivers/clk/imx/clk-imx7d.c +++ b/drivers/clk/imx/clk-imx7d.c @@ -379,14 +379,6 @@ static const char *pll_enet_bypass_sel[] = { "pll_enet_main", "pll_enet_main_src static const char *pll_audio_bypass_sel[] = { "pll_audio_main", "pll_audio_main_src", }; static const char *pll_video_bypass_sel[] = { "pll_video_main", "pll_video_main_src", }; -static int const clks_init_on[] __initconst = { - IMX7D_ARM_A7_ROOT_CLK, IMX7D_MAIN_AXI_ROOT_CLK, - IMX7D_PLL_SYS_MAIN_480M_CLK, IMX7D_NAND_USDHC_BUS_ROOT_CLK, - IMX7D_DRAM_PHYM_ROOT_CLK, IMX7D_DRAM_ROOT_CLK, - IMX7D_DRAM_PHYM_ALT_ROOT_CLK, IMX7D_DRAM_ALT_ROOT_CLK, - IMX7D_AHB_CHANNEL_ROOT_CLK, IMX7D_IPG_ROOT_CLK, -}; - static struct clk_onecell_data clk_data; static struct clk ** const uart_clks[] __initconst = { @@ -404,7 +396,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) { struct device_node *np; void __iomem *base; - int i; clks[IMX7D_CLK_DUMMY] = imx_clk_fixed("dummy", 0); clks[IMX7D_OSC_24M_CLK] = 
of_clk_get_by_name(ccm_node, "osc"); @@ -467,7 +458,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) clks[IMX7D_PLL_SYS_MAIN_120M] = imx_clk_fixed_factor("pll_sys_main_120m", "pll_sys_main_clk", 1, 4); clks[IMX7D_PLL_DRAM_MAIN_533M] = imx_clk_fixed_factor("pll_dram_533m", "pll_dram_main_clk", 1, 2); - clks[IMX7D_PLL_SYS_MAIN_480M_CLK] = imx_clk_gate_dis("pll_sys_main_480m_clk", "pll_sys_main_480m", base + 0xb0, 4); + clks[IMX7D_PLL_SYS_MAIN_480M_CLK] = imx_clk_gate_dis_flags("pll_sys_main_480m_clk", "pll_sys_main_480m", base + 0xb0, 4, CLK_IS_CRITICAL); clks[IMX7D_PLL_SYS_MAIN_240M_CLK] = imx_clk_gate_dis("pll_sys_main_240m_clk", "pll_sys_main_240m", base + 0xb0, 5); clks[IMX7D_PLL_SYS_MAIN_120M_CLK] = imx_clk_gate_dis("pll_sys_main_120m_clk", "pll_sys_main_120m", base + 0xb0, 6); clks[IMX7D_PLL_DRAM_MAIN_533M_CLK] = imx_clk_gate("pll_dram_533m_clk", "pll_dram_533m", base + 0x70, 12); @@ -720,7 +711,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) clks[IMX7D_ENET_AXI_ROOT_DIV] = imx_clk_divider2("enet_axi_post_div", "enet_axi_pre_div", base + 0x8900, 0, 6); clks[IMX7D_NAND_USDHC_BUS_ROOT_CLK] = imx_clk_divider2("nand_usdhc_root_clk", "nand_usdhc_pre_div", base + 0x8980, 0, 6); clks[IMX7D_AHB_CHANNEL_ROOT_DIV] = imx_clk_divider2("ahb_root_clk", "ahb_pre_div", base + 0x9000, 0, 6); - clks[IMX7D_IPG_ROOT_CLK] = imx_clk_divider2("ipg_root_clk", "ahb_root_clk", base + 0x9080, 0, 2); + clks[IMX7D_IPG_ROOT_CLK] = imx_clk_divider_flags("ipg_root_clk", "ahb_root_clk", base + 0x9080, 0, 2, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_PARENT); clks[IMX7D_DRAM_ROOT_DIV] = imx_clk_divider2("dram_post_div", "dram_cg", base + 0x9880, 0, 3); clks[IMX7D_DRAM_PHYM_ALT_ROOT_DIV] = imx_clk_divider2("dram_phym_alt_post_div", "dram_phym_alt_pre_div", base + 0xa000, 0, 3); clks[IMX7D_DRAM_ALT_ROOT_DIV] = imx_clk_divider2("dram_alt_post_div", "dram_alt_pre_div", base + 0xa080, 0, 3); @@ -784,17 +775,17 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) clks[IMX7D_CLKO1_ROOT_DIV] = imx_clk_divider2("clko1_post_div", "clko1_pre_div", base + 0xbd80, 0, 6); clks[IMX7D_CLKO2_ROOT_DIV] = imx_clk_divider2("clko2_post_div", "clko2_pre_div", base + 0xbe00, 0, 6); - clks[IMX7D_ARM_A7_ROOT_CLK] = imx_clk_gate4("arm_a7_root_clk", "arm_a7_div", base + 0x4000, 0); + clks[IMX7D_ARM_A7_ROOT_CLK] = imx_clk_gate2_flags("arm_a7_root_clk", "arm_a7_div", base + 0x4000, 0, CLK_OPS_PARENT_ENABLE); clks[IMX7D_ARM_M4_ROOT_CLK] = imx_clk_gate4("arm_m4_root_clk", "arm_m4_div", base + 0x4010, 0); - clks[IMX7D_MAIN_AXI_ROOT_CLK] = imx_clk_gate4("main_axi_root_clk", "axi_post_div", base + 0x4040, 0); + clks[IMX7D_MAIN_AXI_ROOT_CLK] = imx_clk_gate2_flags("main_axi_root_clk", "axi_post_div", base + 0x4040, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE); clks[IMX7D_DISP_AXI_ROOT_CLK] = imx_clk_gate4("disp_axi_root_clk", "disp_axi_post_div", base + 0x4050, 0); clks[IMX7D_ENET_AXI_ROOT_CLK] = imx_clk_gate4("enet_axi_root_clk", "enet_axi_post_div", base + 0x4060, 0); clks[IMX7D_OCRAM_CLK] = imx_clk_gate4("ocram_clk", "main_axi_root_clk", base + 0x4110, 0); clks[IMX7D_OCRAM_S_CLK] = imx_clk_gate4("ocram_s_clk", "ahb_root_clk", base + 0x4120, 0); - clks[IMX7D_DRAM_ROOT_CLK] = imx_clk_gate4("dram_root_clk", "dram_post_div", base + 0x4130, 0); - clks[IMX7D_DRAM_PHYM_ROOT_CLK] = imx_clk_gate4("dram_phym_root_clk", "dram_phym_cg", base + 0x4130, 0); - clks[IMX7D_DRAM_PHYM_ALT_ROOT_CLK] = imx_clk_gate4("dram_phym_alt_root_clk", "dram_phym_alt_post_div", base + 0x4130, 0); - 
clks[IMX7D_DRAM_ALT_ROOT_CLK] = imx_clk_gate4("dram_alt_root_clk", "dram_alt_post_div", base + 0x4130, 0); + clks[IMX7D_DRAM_ROOT_CLK] = imx_clk_gate2_flags("dram_root_clk", "dram_post_div", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE); + clks[IMX7D_DRAM_PHYM_ROOT_CLK] = imx_clk_gate2_flags("dram_phym_root_clk", "dram_phym_cg", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE); + clks[IMX7D_DRAM_PHYM_ALT_ROOT_CLK] = imx_clk_gate2_flags("dram_phym_alt_root_clk", "dram_phym_alt_post_div", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE); + clks[IMX7D_DRAM_ALT_ROOT_CLK] = imx_clk_gate2_flags("dram_alt_root_clk", "dram_alt_post_div", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE); clks[IMX7D_OCOTP_CLK] = imx_clk_gate4("ocotp_clk", "ipg_root_clk", base + 0x4230, 0); clks[IMX7D_SNVS_CLK] = imx_clk_gate4("snvs_clk", "ipg_root_clk", base + 0x4250, 0); clks[IMX7D_MU_ROOT_CLK] = imx_clk_gate4("mu_root_clk", "ipg_root_clk", base + 0x4270, 0); @@ -883,9 +874,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) clk_data.clk_num = ARRAY_SIZE(clks); of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); - for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) - clk_prepare_enable(clks[clks_init_on[i]]); - clk_set_parent(clks[IMX7D_PLL_ARM_MAIN_BYPASS], clks[IMX7D_PLL_ARM_MAIN]); clk_set_parent(clks[IMX7D_PLL_DRAM_MAIN_BYPASS], clks[IMX7D_PLL_DRAM_MAIN]); clk_set_parent(clks[IMX7D_PLL_SYS_MAIN_BYPASS], clks[IMX7D_PLL_SYS_MAIN]); diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h index 8076ec040f37..5895e2237b6c 100644 --- a/drivers/clk/imx/clk.h +++ b/drivers/clk/imx/clk.h @@ -137,6 +137,13 @@ static inline struct clk *imx_clk_gate_dis(const char *name, const char *parent, shift, CLK_GATE_SET_TO_DISABLE, &imx_ccm_lock); } +static inline struct clk *imx_clk_gate_dis_flags(const char *name, const char *parent, + void __iomem *reg, u8 shift, unsigned long flags) +{ + return clk_register_gate(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg, + shift, CLK_GATE_SET_TO_DISABLE, &imx_ccm_lock); +} + static inline struct clk *imx_clk_gate2(const char *name, const char *parent, void __iomem *reg, u8 shift) { diff --git a/drivers/clk/ingenic/Kconfig b/drivers/clk/ingenic/Kconfig new file mode 100644 index 000000000000..34dc0da79c39 --- /dev/null +++ b/drivers/clk/ingenic/Kconfig @@ -0,0 +1,47 @@ +menu "Ingenic JZ47xx CGU drivers" + depends on MIPS + +config INGENIC_CGU_COMMON + bool + +config INGENIC_CGU_JZ4740 + bool "Ingenic JZ4740 CGU driver" + default MACH_JZ4740 + select INGENIC_CGU_COMMON + help + Support the clocks provided by the CGU hardware on Ingenic JZ4740 + and compatible SoCs. + + If building for a JZ4740 SoC, you want to say Y here. + +config INGENIC_CGU_JZ4725B + bool "Ingenic JZ4725B CGU driver" + default MACH_JZ4725B + select INGENIC_CGU_COMMON + help + Support the clocks provided by the CGU hardware on Ingenic JZ4725B + and compatible SoCs. + + If building for a JZ4725B SoC, you want to say Y here. + +config INGENIC_CGU_JZ4770 + bool "Ingenic JZ4770 CGU driver" + default MACH_JZ4770 + select INGENIC_CGU_COMMON + help + Support the clocks provided by the CGU hardware on Ingenic JZ4770 + and compatible SoCs. + + If building for a JZ4770 SoC, you want to say Y here. + +config INGENIC_CGU_JZ4780 + bool "Ingenic JZ4780 CGU driver" + default MACH_JZ4780 + select INGENIC_CGU_COMMON + help + Support the clocks provided by the CGU hardware on Ingenic JZ4780 + and compatible SoCs. 
+ + If building for a JZ4780 SoC, you want to say Y here. + +endmenu diff --git a/drivers/clk/ingenic/Makefile b/drivers/clk/ingenic/Makefile index 1456e4cdb562..00a79b2fba10 100644 --- a/drivers/clk/ingenic/Makefile +++ b/drivers/clk/ingenic/Makefile @@ -1,4 +1,5 @@ -obj-y += cgu.o -obj-$(CONFIG_MACH_JZ4740) += jz4740-cgu.o -obj-$(CONFIG_MACH_JZ4770) += jz4770-cgu.o -obj-$(CONFIG_MACH_JZ4780) += jz4780-cgu.o +obj-$(CONFIG_INGENIC_CGU_COMMON) += cgu.o +obj-$(CONFIG_INGENIC_CGU_JZ4740) += jz4740-cgu.o +obj-$(CONFIG_INGENIC_CGU_JZ4725B) += jz4725b-cgu.o +obj-$(CONFIG_INGENIC_CGU_JZ4770) += jz4770-cgu.o +obj-$(CONFIG_INGENIC_CGU_JZ4780) += jz4780-cgu.o diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c new file mode 100644 index 000000000000..584ff4ff81c7 --- /dev/null +++ b/drivers/clk/ingenic/jz4725b-cgu.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Ingenic JZ4725B SoC CGU driver + * + * Copyright (C) 2018 Paul Cercueil + * Author: Paul Cercueil <paul@crapouillou.net> + */ + +#include <linux/clk-provider.h> +#include <linux/delay.h> +#include <linux/of.h> +#include <dt-bindings/clock/jz4725b-cgu.h> +#include "cgu.h" + +/* CGU register offsets */ +#define CGU_REG_CPCCR 0x00 +#define CGU_REG_LCR 0x04 +#define CGU_REG_CPPCR 0x10 +#define CGU_REG_CLKGR 0x20 +#define CGU_REG_OPCR 0x24 +#define CGU_REG_I2SCDR 0x60 +#define CGU_REG_LPCDR 0x64 +#define CGU_REG_MSCCDR 0x68 +#define CGU_REG_SSICDR 0x74 +#define CGU_REG_CIMCDR 0x78 + +/* bits within the LCR register */ +#define LCR_SLEEP BIT(0) + +static struct ingenic_cgu *cgu; + +static const s8 pll_od_encoding[4] = { + 0x0, 0x1, -1, 0x3, +}; + +static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = { + + /* External clocks */ + + [JZ4725B_CLK_EXT] = { "ext", CGU_CLK_EXT }, + [JZ4725B_CLK_OSC32K] = { "osc32k", CGU_CLK_EXT }, + + [JZ4725B_CLK_PLL] = { + "pll", CGU_CLK_PLL, + .parents = { JZ4725B_CLK_EXT, -1, -1, -1 }, + .pll = { + .reg = CGU_REG_CPPCR, + .m_shift = 23, + .m_bits = 9, + .m_offset = 2, + .n_shift = 18, + .n_bits = 5, + .n_offset = 2, + .od_shift = 16, + .od_bits = 2, + .od_max = 4, + .od_encoding = pll_od_encoding, + .stable_bit = 10, + .bypass_bit = 9, + .enable_bit = 8, + }, + }, + + /* Muxes & dividers */ + + [JZ4725B_CLK_PLL_HALF] = { + "pll half", CGU_CLK_DIV, + .parents = { JZ4725B_CLK_PLL, -1, -1, -1 }, + .div = { CGU_REG_CPCCR, 21, 1, 1, -1, -1, -1 }, + }, + + [JZ4725B_CLK_CCLK] = { + "cclk", CGU_CLK_DIV, + .parents = { JZ4725B_CLK_PLL, -1, -1, -1 }, + .div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1 }, + }, + + [JZ4725B_CLK_HCLK] = { + "hclk", CGU_CLK_DIV, + .parents = { JZ4725B_CLK_PLL, -1, -1, -1 }, + .div = { CGU_REG_CPCCR, 4, 1, 4, 22, -1, -1 }, + }, + + [JZ4725B_CLK_PCLK] = { + "pclk", CGU_CLK_DIV, + .parents = { JZ4725B_CLK_PLL, -1, -1, -1 }, + .div = { CGU_REG_CPCCR, 8, 1, 4, 22, -1, -1 }, + }, + + [JZ4725B_CLK_MCLK] = { + "mclk", CGU_CLK_DIV, + .parents = { JZ4725B_CLK_PLL, -1, -1, -1 }, + .div = { CGU_REG_CPCCR, 12, 1, 4, 22, -1, -1 }, + }, + + [JZ4725B_CLK_IPU] = { + "ipu", CGU_CLK_DIV | CGU_CLK_GATE, + .parents = { JZ4725B_CLK_PLL, -1, -1, -1 }, + .div = { CGU_REG_CPCCR, 16, 1, 4, 22, -1, -1 }, + .gate = { CGU_REG_CLKGR, 13 }, + }, + + [JZ4725B_CLK_LCD] = { + "lcd", CGU_CLK_DIV | CGU_CLK_GATE, + .parents = { JZ4725B_CLK_PLL_HALF, -1, -1, -1 }, + .div = { CGU_REG_LPCDR, 0, 1, 11, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR, 9 }, + }, + + [JZ4725B_CLK_I2S] = { + "i2s", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE, + .parents = { JZ4725B_CLK_EXT, JZ4725B_CLK_PLL_HALF, -1, 
-1 }, + .mux = { CGU_REG_CPCCR, 31, 1 }, + .div = { CGU_REG_I2SCDR, 0, 1, 9, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR, 6 }, + }, + + [JZ4725B_CLK_SPI] = { + "spi", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE, + .parents = { JZ4725B_CLK_EXT, JZ4725B_CLK_PLL, -1, -1 }, + .mux = { CGU_REG_SSICDR, 31, 1 }, + .div = { CGU_REG_SSICDR, 0, 1, 4, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR, 4 }, + }, + + [JZ4725B_CLK_MMC_MUX] = { + "mmc_mux", CGU_CLK_DIV, + .parents = { JZ4725B_CLK_PLL_HALF, -1, -1, -1 }, + .div = { CGU_REG_MSCCDR, 0, 1, 5, -1, -1, -1 }, + }, + + [JZ4725B_CLK_UDC] = { + "udc", CGU_CLK_MUX | CGU_CLK_DIV, + .parents = { JZ4725B_CLK_EXT, JZ4725B_CLK_PLL_HALF, -1, -1 }, + .mux = { CGU_REG_CPCCR, 29, 1 }, + .div = { CGU_REG_CPCCR, 23, 1, 6, -1, -1, -1 }, + }, + + /* Gate-only clocks */ + + [JZ4725B_CLK_UART] = { + "uart", CGU_CLK_GATE, + .parents = { JZ4725B_CLK_EXT, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR, 0 }, + }, + + [JZ4725B_CLK_DMA] = { + "dma", CGU_CLK_GATE, + .parents = { JZ4725B_CLK_PCLK, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR, 12 }, + }, + + [JZ4725B_CLK_ADC] = { + "adc", CGU_CLK_GATE, + .parents = { JZ4725B_CLK_EXT, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR, 7 }, + }, + + [JZ4725B_CLK_I2C] = { + "i2c", CGU_CLK_GATE, + .parents = { JZ4725B_CLK_EXT, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR, 3 }, + }, + + [JZ4725B_CLK_AIC] = { + "aic", CGU_CLK_GATE, + .parents = { JZ4725B_CLK_EXT, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR, 5 }, + }, + + [JZ4725B_CLK_MMC0] = { + "mmc0", CGU_CLK_GATE, + .parents = { JZ4725B_CLK_MMC_MUX, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR, 6 }, + }, + + [JZ4725B_CLK_MMC1] = { + "mmc1", CGU_CLK_GATE, + .parents = { JZ4725B_CLK_MMC_MUX, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR, 16 }, + }, + + [JZ4725B_CLK_BCH] = { + "bch", CGU_CLK_GATE, + .parents = { JZ4725B_CLK_MCLK/* not sure */, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR, 11 }, + }, + + [JZ4725B_CLK_TCU] = { + "tcu", CGU_CLK_GATE, + .parents = { JZ4725B_CLK_EXT/* not sure */, -1, -1, -1 }, + .gate = { CGU_REG_CLKGR, 1 }, + }, + + [JZ4725B_CLK_EXT512] = { + "ext/512", CGU_CLK_FIXDIV, + .parents = { JZ4725B_CLK_EXT }, + + /* Doc calls it EXT512, but it seems to be /256... 
*/ + .fixdiv = { 256 }, + }, + + [JZ4725B_CLK_RTC] = { + "rtc", CGU_CLK_MUX, + .parents = { JZ4725B_CLK_EXT512, JZ4725B_CLK_OSC32K, -1, -1 }, + .mux = { CGU_REG_OPCR, 2, 1}, + }, +}; + +static void __init jz4725b_cgu_init(struct device_node *np) +{ + int retval; + + cgu = ingenic_cgu_new(jz4725b_cgu_clocks, + ARRAY_SIZE(jz4725b_cgu_clocks), np); + if (!cgu) { + pr_err("%s: failed to initialise CGU\n", __func__); + return; + } + + retval = ingenic_cgu_register_clocks(cgu); + if (retval) + pr_err("%s: failed to register CGU Clocks\n", __func__); +} +CLK_OF_DECLARE(jz4725b_cgu, "ingenic,jz4725b-cgu", jz4725b_cgu_init); diff --git a/drivers/clk/keystone/Kconfig b/drivers/clk/keystone/Kconfig index 7e9f0176578a..b04927d06cd1 100644 --- a/drivers/clk/keystone/Kconfig +++ b/drivers/clk/keystone/Kconfig @@ -7,7 +7,7 @@ config COMMON_CLK_KEYSTONE config TI_SCI_CLK tristate "TI System Control Interface clock drivers" - depends on (ARCH_KEYSTONE || COMPILE_TEST) && OF + depends on (ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST) && OF depends on TI_SCI_PROTOCOL default ARCH_KEYSTONE ---help--- diff --git a/drivers/clk/keystone/gate.c b/drivers/clk/keystone/gate.c index aed5af23895b..4ed9b29ba438 100644 --- a/drivers/clk/keystone/gate.c +++ b/drivers/clk/keystone/gate.c @@ -245,7 +245,7 @@ static void __init of_psc_clk_init(struct device_node *node, spinlock_t *lock) return; } - pr_err("%s: error registering clk %s\n", __func__, node->name); + pr_err("%s: error registering clk %pOFn\n", __func__, node); unmap_domain: iounmap(data->domain_base); @@ -266,3 +266,8 @@ static void __init of_keystone_psc_clk_init(struct device_node *node) } CLK_OF_DECLARE(keystone_gate_clk, "ti,keystone,psc-clock", of_keystone_psc_clk_init); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Clock driver for Keystone 2 based devices"); +MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>"); +MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>"); diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c index e7e840fb74ea..349540469fc0 100644 --- a/drivers/clk/keystone/pll.c +++ b/drivers/clk/keystone/pll.c @@ -219,7 +219,7 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl) } out: - pr_err("%s: error initializing pll %s\n", __func__, node->name); + pr_err("%s: error initializing pll %pOFn\n", __func__, node); kfree(pll_data); } @@ -338,3 +338,8 @@ static void __init of_pll_mux_clk_init(struct device_node *node) pr_err("%s: error registering mux %s\n", __func__, clk_name); } CLK_OF_DECLARE(pll_mux_clock, "ti,keystone,pll-mux-clock", of_pll_mux_clk_init); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("PLL clock driver for Keystone devices"); +MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>"); +MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>"); diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c index 4dda8988b2f0..ab6ab07f53e6 100644 --- a/drivers/clk/mediatek/clk-mt2701.c +++ b/drivers/clk/mediatek/clk-mt2701.c @@ -249,11 +249,6 @@ static const char * const msdc30_parents[] = { "univpll2_d4" }; -static const char * const audio_parents[] = { - "clk26m", - "syspll1_d16" -}; - static const char * const aud_intbus_parents[] = { "clk26m", "syspll1_d4", diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c index a0ed41e73bde..5f6c860aa122 100644 --- a/drivers/clk/meson/axg-audio.c +++ b/drivers/clk/meson/axg-audio.c @@ -101,10 +101,16 @@ static const char * const mst_mux_parent_names[] = { "axg_mst_in4", "axg_mst_in5", 
"axg_mst_in6", "axg_mst_in7", }; -#define AXG_MST_MCLK_MUX(_name, _reg) \ - AXG_AUD_MUX(_name##_sel, _reg, 0x7, 24, CLK_MUX_ROUND_CLOSEST, \ +#define AXG_MST_MUX(_name, _reg, _flag) \ + AXG_AUD_MUX(_name##_sel, _reg, 0x7, 24, _flag, \ mst_mux_parent_names, CLK_SET_RATE_PARENT) +#define AXG_MST_MCLK_MUX(_name, _reg) \ + AXG_MST_MUX(_name, _reg, CLK_MUX_ROUND_CLOSEST) + +#define AXG_MST_SYS_MUX(_name, _reg) \ + AXG_MST_MUX(_name, _reg, 0) + static AXG_MST_MCLK_MUX(mst_a_mclk, AUDIO_MCLK_A_CTRL); static AXG_MST_MCLK_MUX(mst_b_mclk, AUDIO_MCLK_B_CTRL); static AXG_MST_MCLK_MUX(mst_c_mclk, AUDIO_MCLK_C_CTRL); @@ -112,13 +118,19 @@ static AXG_MST_MCLK_MUX(mst_d_mclk, AUDIO_MCLK_D_CTRL); static AXG_MST_MCLK_MUX(mst_e_mclk, AUDIO_MCLK_E_CTRL); static AXG_MST_MCLK_MUX(mst_f_mclk, AUDIO_MCLK_F_CTRL); static AXG_MST_MCLK_MUX(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL); -static AXG_MST_MCLK_MUX(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL); static AXG_MST_MCLK_MUX(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0); -static AXG_MST_MCLK_MUX(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1); +static AXG_MST_SYS_MUX(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL); +static AXG_MST_SYS_MUX(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1); + +#define AXG_MST_DIV(_name, _reg, _flag) \ + AXG_AUD_DIV(_name##_div, _reg, 0, 16, _flag, \ + "axg_"#_name"_sel", CLK_SET_RATE_PARENT) \ + +#define AXG_MST_MCLK_DIV(_name, _reg) \ + AXG_MST_DIV(_name, _reg, CLK_DIVIDER_ROUND_CLOSEST) -#define AXG_MST_MCLK_DIV(_name, _reg) \ - AXG_AUD_DIV(_name##_div, _reg, 0, 16, CLK_DIVIDER_ROUND_CLOSEST, \ - "axg_"#_name"_sel", CLK_SET_RATE_PARENT) \ +#define AXG_MST_SYS_DIV(_name, _reg) \ + AXG_MST_DIV(_name, _reg, 0) static AXG_MST_MCLK_DIV(mst_a_mclk, AUDIO_MCLK_A_CTRL); static AXG_MST_MCLK_DIV(mst_b_mclk, AUDIO_MCLK_B_CTRL); @@ -127,12 +139,12 @@ static AXG_MST_MCLK_DIV(mst_d_mclk, AUDIO_MCLK_D_CTRL); static AXG_MST_MCLK_DIV(mst_e_mclk, AUDIO_MCLK_E_CTRL); static AXG_MST_MCLK_DIV(mst_f_mclk, AUDIO_MCLK_F_CTRL); static AXG_MST_MCLK_DIV(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL); -static AXG_MST_MCLK_DIV(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL); static AXG_MST_MCLK_DIV(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0); -static AXG_MST_MCLK_DIV(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1); +static AXG_MST_SYS_DIV(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL); +static AXG_MST_SYS_DIV(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1); -#define AXG_MST_MCLK_GATE(_name, _reg) \ - AXG_AUD_GATE(_name, _reg, 31, "axg_"#_name"_div", \ +#define AXG_MST_MCLK_GATE(_name, _reg) \ + AXG_AUD_GATE(_name, _reg, 31, "axg_"#_name"_div", \ CLK_SET_RATE_PARENT) static AXG_MST_MCLK_GATE(mst_a_mclk, AUDIO_MCLK_A_CTRL); diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c index 00ce62ad6416..c981159b02c0 100644 --- a/drivers/clk/meson/axg.c +++ b/drivers/clk/meson/axg.c @@ -22,8 +22,13 @@ static DEFINE_SPINLOCK(meson_clk_lock); -static struct clk_regmap axg_fixed_pll = { +static struct clk_regmap axg_fixed_pll_dco = { .data = &(struct meson_clk_pll_data){ + .en = { + .reg_off = HHI_MPLL_CNTL, + .shift = 30, + .width = 1, + }, .m = { .reg_off = HHI_MPLL_CNTL, .shift = 0, @@ -34,11 +39,6 @@ static struct clk_regmap axg_fixed_pll = { .shift = 9, .width = 5, }, - .od = { - .reg_off = HHI_MPLL_CNTL, - .shift = 16, - .width = 2, - }, .frac = { .reg_off = HHI_MPLL_CNTL2, .shift = 0, @@ -56,15 +56,39 @@ static struct clk_regmap axg_fixed_pll = { }, }, .hw.init = &(struct clk_init_data){ - .name = "fixed_pll", + .name = "fixed_pll_dco", .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, }, }; -static struct clk_regmap axg_sys_pll = { +static 
struct clk_regmap axg_fixed_pll = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_MPLL_CNTL, + .shift = 16, + .width = 2, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "fixed_pll", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "fixed_pll_dco" }, + .num_parents = 1, + /* + * This clock won't ever change at runtime so + * CLK_SET_RATE_PARENT is not required + */ + }, +}; + +static struct clk_regmap axg_sys_pll_dco = { .data = &(struct meson_clk_pll_data){ + .en = { + .reg_off = HHI_SYS_PLL_CNTL, + .shift = 30, + .width = 1, + }, .m = { .reg_off = HHI_SYS_PLL_CNTL, .shift = 0, @@ -75,11 +99,6 @@ static struct clk_regmap axg_sys_pll = { .shift = 9, .width = 5, }, - .od = { - .reg_off = HHI_SYS_PLL_CNTL, - .shift = 16, - .width = 2, - }, .l = { .reg_off = HHI_SYS_PLL_CNTL, .shift = 31, @@ -92,102 +111,59 @@ static struct clk_regmap axg_sys_pll = { }, }, .hw.init = &(struct clk_init_data){ - .name = "sys_pll", + .name = "sys_pll_dco", .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, }; -static const struct pll_rate_table axg_gp0_pll_rate_table[] = { - PLL_RATE(240000000, 40, 1, 2), - PLL_RATE(246000000, 41, 1, 2), - PLL_RATE(252000000, 42, 1, 2), - PLL_RATE(258000000, 43, 1, 2), - PLL_RATE(264000000, 44, 1, 2), - PLL_RATE(270000000, 45, 1, 2), - PLL_RATE(276000000, 46, 1, 2), - PLL_RATE(282000000, 47, 1, 2), - PLL_RATE(288000000, 48, 1, 2), - PLL_RATE(294000000, 49, 1, 2), - PLL_RATE(300000000, 50, 1, 2), - PLL_RATE(306000000, 51, 1, 2), - PLL_RATE(312000000, 52, 1, 2), - PLL_RATE(318000000, 53, 1, 2), - PLL_RATE(324000000, 54, 1, 2), - PLL_RATE(330000000, 55, 1, 2), - PLL_RATE(336000000, 56, 1, 2), - PLL_RATE(342000000, 57, 1, 2), - PLL_RATE(348000000, 58, 1, 2), - PLL_RATE(354000000, 59, 1, 2), - PLL_RATE(360000000, 60, 1, 2), - PLL_RATE(366000000, 61, 1, 2), - PLL_RATE(372000000, 62, 1, 2), - PLL_RATE(378000000, 63, 1, 2), - PLL_RATE(384000000, 64, 1, 2), - PLL_RATE(390000000, 65, 1, 3), - PLL_RATE(396000000, 66, 1, 3), - PLL_RATE(402000000, 67, 1, 3), - PLL_RATE(408000000, 68, 1, 3), - PLL_RATE(480000000, 40, 1, 1), - PLL_RATE(492000000, 41, 1, 1), - PLL_RATE(504000000, 42, 1, 1), - PLL_RATE(516000000, 43, 1, 1), - PLL_RATE(528000000, 44, 1, 1), - PLL_RATE(540000000, 45, 1, 1), - PLL_RATE(552000000, 46, 1, 1), - PLL_RATE(564000000, 47, 1, 1), - PLL_RATE(576000000, 48, 1, 1), - PLL_RATE(588000000, 49, 1, 1), - PLL_RATE(600000000, 50, 1, 1), - PLL_RATE(612000000, 51, 1, 1), - PLL_RATE(624000000, 52, 1, 1), - PLL_RATE(636000000, 53, 1, 1), - PLL_RATE(648000000, 54, 1, 1), - PLL_RATE(660000000, 55, 1, 1), - PLL_RATE(672000000, 56, 1, 1), - PLL_RATE(684000000, 57, 1, 1), - PLL_RATE(696000000, 58, 1, 1), - PLL_RATE(708000000, 59, 1, 1), - PLL_RATE(720000000, 60, 1, 1), - PLL_RATE(732000000, 61, 1, 1), - PLL_RATE(744000000, 62, 1, 1), - PLL_RATE(756000000, 63, 1, 1), - PLL_RATE(768000000, 64, 1, 1), - PLL_RATE(780000000, 65, 1, 1), - PLL_RATE(792000000, 66, 1, 1), - PLL_RATE(804000000, 67, 1, 1), - PLL_RATE(816000000, 68, 1, 1), - PLL_RATE(960000000, 40, 1, 0), - PLL_RATE(984000000, 41, 1, 0), - PLL_RATE(1008000000, 42, 1, 0), - PLL_RATE(1032000000, 43, 1, 0), - PLL_RATE(1056000000, 44, 1, 0), - PLL_RATE(1080000000, 45, 1, 0), - PLL_RATE(1104000000, 46, 1, 0), - PLL_RATE(1128000000, 47, 1, 0), - PLL_RATE(1152000000, 48, 1, 0), - PLL_RATE(1176000000, 49, 1, 0), - PLL_RATE(1200000000, 50, 1, 0), - PLL_RATE(1224000000, 51, 1, 0), - 
PLL_RATE(1248000000, 52, 1, 0), - PLL_RATE(1272000000, 53, 1, 0), - PLL_RATE(1296000000, 54, 1, 0), - PLL_RATE(1320000000, 55, 1, 0), - PLL_RATE(1344000000, 56, 1, 0), - PLL_RATE(1368000000, 57, 1, 0), - PLL_RATE(1392000000, 58, 1, 0), - PLL_RATE(1416000000, 59, 1, 0), - PLL_RATE(1440000000, 60, 1, 0), - PLL_RATE(1464000000, 61, 1, 0), - PLL_RATE(1488000000, 62, 1, 0), - PLL_RATE(1512000000, 63, 1, 0), - PLL_RATE(1536000000, 64, 1, 0), - PLL_RATE(1560000000, 65, 1, 0), - PLL_RATE(1584000000, 66, 1, 0), - PLL_RATE(1608000000, 67, 1, 0), - PLL_RATE(1632000000, 68, 1, 0), +static struct clk_regmap axg_sys_pll = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_SYS_PLL_CNTL, + .shift = 16, + .width = 2, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "sys_pll", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "sys_pll_dco" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static const struct pll_params_table axg_gp0_pll_params_table[] = { + PLL_PARAMS(40, 1), + PLL_PARAMS(41, 1), + PLL_PARAMS(42, 1), + PLL_PARAMS(43, 1), + PLL_PARAMS(44, 1), + PLL_PARAMS(45, 1), + PLL_PARAMS(46, 1), + PLL_PARAMS(47, 1), + PLL_PARAMS(48, 1), + PLL_PARAMS(49, 1), + PLL_PARAMS(50, 1), + PLL_PARAMS(51, 1), + PLL_PARAMS(52, 1), + PLL_PARAMS(53, 1), + PLL_PARAMS(54, 1), + PLL_PARAMS(55, 1), + PLL_PARAMS(56, 1), + PLL_PARAMS(57, 1), + PLL_PARAMS(58, 1), + PLL_PARAMS(59, 1), + PLL_PARAMS(60, 1), + PLL_PARAMS(61, 1), + PLL_PARAMS(62, 1), + PLL_PARAMS(63, 1), + PLL_PARAMS(64, 1), + PLL_PARAMS(65, 1), + PLL_PARAMS(66, 1), + PLL_PARAMS(67, 1), + PLL_PARAMS(68, 1), { /* sentinel */ }, }; @@ -197,11 +173,15 @@ static const struct reg_sequence axg_gp0_init_regs[] = { { .reg = HHI_GP0_PLL_CNTL3, .def = 0x0a59a288 }, { .reg = HHI_GP0_PLL_CNTL4, .def = 0xc000004d }, { .reg = HHI_GP0_PLL_CNTL5, .def = 0x00078000 }, - { .reg = HHI_GP0_PLL_CNTL, .def = 0x40010250 }, }; -static struct clk_regmap axg_gp0_pll = { +static struct clk_regmap axg_gp0_pll_dco = { .data = &(struct meson_clk_pll_data){ + .en = { + .reg_off = HHI_GP0_PLL_CNTL, + .shift = 30, + .width = 1, + }, .m = { .reg_off = HHI_GP0_PLL_CNTL, .shift = 0, @@ -212,11 +192,6 @@ static struct clk_regmap axg_gp0_pll = { .shift = 9, .width = 5, }, - .od = { - .reg_off = HHI_GP0_PLL_CNTL, - .shift = 16, - .width = 2, - }, .frac = { .reg_off = HHI_GP0_PLL_CNTL1, .shift = 0, @@ -232,29 +207,49 @@ static struct clk_regmap axg_gp0_pll = { .shift = 29, .width = 1, }, - .table = axg_gp0_pll_rate_table, + .table = axg_gp0_pll_params_table, .init_regs = axg_gp0_init_regs, .init_count = ARRAY_SIZE(axg_gp0_init_regs), }, .hw.init = &(struct clk_init_data){ - .name = "gp0_pll", + .name = "gp0_pll_dco", .ops = &meson_clk_pll_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, }, }; +static struct clk_regmap axg_gp0_pll = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_GP0_PLL_CNTL, + .shift = 16, + .width = 2, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "gp0_pll", + .ops = &clk_regmap_divider_ops, + .parent_names = (const char *[]){ "gp0_pll_dco" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + static const struct reg_sequence axg_hifi_init_regs[] = { { .reg = HHI_HIFI_PLL_CNTL1, .def = 0xc084b000 }, { .reg = HHI_HIFI_PLL_CNTL2, .def = 0xb75020be }, { .reg = HHI_HIFI_PLL_CNTL3, .def = 0x0a6a3a88 }, { .reg = HHI_HIFI_PLL_CNTL4, .def = 0xc000004d }, { .reg = HHI_HIFI_PLL_CNTL5, .def = 0x00058000 }, - { .reg = 
HHI_HIFI_PLL_CNTL, .def = 0x40010250 }, }; -static struct clk_regmap axg_hifi_pll = { +static struct clk_regmap axg_hifi_pll_dco = { .data = &(struct meson_clk_pll_data){ + .en = { + .reg_off = HHI_HIFI_PLL_CNTL, + .shift = 30, + .width = 1, + }, .m = { .reg_off = HHI_HIFI_PLL_CNTL, .shift = 0, @@ -265,11 +260,6 @@ static struct clk_regmap axg_hifi_pll = { .shift = 9, .width = 5, }, - .od = { - .reg_off = HHI_HIFI_PLL_CNTL, - .shift = 16, - .width = 2, - }, .frac = { .reg_off = HHI_HIFI_PLL_CNTL5, .shift = 0, @@ -285,19 +275,35 @@ static struct clk_regmap axg_hifi_pll = { .shift = 29, .width = 1, }, - .table = axg_gp0_pll_rate_table, + .table = axg_gp0_pll_params_table, .init_regs = axg_hifi_init_regs, .init_count = ARRAY_SIZE(axg_hifi_init_regs), .flags = CLK_MESON_PLL_ROUND_CLOSEST, }, .hw.init = &(struct clk_init_data){ - .name = "hifi_pll", + .name = "hifi_pll_dco", .ops = &meson_clk_pll_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, }, }; +static struct clk_regmap axg_hifi_pll = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_HIFI_PLL_CNTL, + .shift = 16, + .width = 2, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "hifi_pll", + .ops = &clk_regmap_divider_ops, + .parent_names = (const char *[]){ "hifi_pll_dco" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + static struct clk_fixed_factor axg_fclk_div2_div = { .mult = 1, .div = 2, @@ -625,29 +631,31 @@ static struct clk_regmap axg_mpll3 = { }, }; -static const struct pll_rate_table axg_pcie_pll_rate_table[] = { +static const struct pll_params_table axg_pcie_pll_params_table[] = { { - .rate = 100000000, - .m = 200, - .n = 3, - .od = 1, - .od2 = 3, + .m = 200, + .n = 3, }, { /* sentinel */ }, }; static const struct reg_sequence axg_pcie_init_regs[] = { - { .reg = HHI_PCIE_PLL_CNTL, .def = 0x400106c8 }, { .reg = HHI_PCIE_PLL_CNTL1, .def = 0x0084a2aa }, { .reg = HHI_PCIE_PLL_CNTL2, .def = 0xb75020be }, { .reg = HHI_PCIE_PLL_CNTL3, .def = 0x0a47488e }, { .reg = HHI_PCIE_PLL_CNTL4, .def = 0xc000004d }, { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x00078000 }, { .reg = HHI_PCIE_PLL_CNTL6, .def = 0x002323c6 }, + { .reg = HHI_PCIE_PLL_CNTL, .def = 0x400106c8 }, }; -static struct clk_regmap axg_pcie_pll = { +static struct clk_regmap axg_pcie_pll_dco = { .data = &(struct meson_clk_pll_data){ + .en = { + .reg_off = HHI_PCIE_PLL_CNTL, + .shift = 30, + .width = 1, + }, .m = { .reg_off = HHI_PCIE_PLL_CNTL, .shift = 0, @@ -658,16 +666,6 @@ static struct clk_regmap axg_pcie_pll = { .shift = 9, .width = 5, }, - .od = { - .reg_off = HHI_PCIE_PLL_CNTL, - .shift = 16, - .width = 2, - }, - .od2 = { - .reg_off = HHI_PCIE_PLL_CNTL6, - .shift = 6, - .width = 2, - }, .frac = { .reg_off = HHI_PCIE_PLL_CNTL1, .shift = 0, @@ -683,29 +681,63 @@ static struct clk_regmap axg_pcie_pll = { .shift = 29, .width = 1, }, - .table = axg_pcie_pll_rate_table, + .table = axg_pcie_pll_params_table, .init_regs = axg_pcie_init_regs, .init_count = ARRAY_SIZE(axg_pcie_init_regs), }, .hw.init = &(struct clk_init_data){ - .name = "pcie_pll", + .name = "pcie_pll_dco", .ops = &meson_clk_pll_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, }, }; +static struct clk_regmap axg_pcie_pll_od = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_PCIE_PLL_CNTL, + .shift = 16, + .width = 2, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "pcie_pll_od", + .ops = &clk_regmap_divider_ops, + .parent_names = (const char *[]){ "pcie_pll_dco" }, + 
.num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap axg_pcie_pll = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_PCIE_PLL_CNTL6, + .shift = 6, + .width = 2, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "pcie_pll", + .ops = &clk_regmap_divider_ops, + .parent_names = (const char *[]){ "pcie_pll_od" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + static struct clk_regmap axg_pcie_mux = { .data = &(struct clk_regmap_mux_data){ .offset = HHI_PCIE_PLL_CNTL6, .mask = 0x1, .shift = 2, + /* skip the parent mpll3, reserved for debug */ + .table = (u32[]){ 1 }, }, .hw.init = &(struct clk_init_data){ .name = "pcie_mux", .ops = &clk_regmap_mux_ops, - .parent_names = (const char *[]){ "mpll3", "pcie_pll" }, - .num_parents = 2, + .parent_names = (const char *[]){ "pcie_pll" }, + .num_parents = 1, .flags = CLK_SET_RATE_PARENT, }, }; @@ -1107,6 +1139,12 @@ static struct clk_hw_onecell_data axg_hw_onecell_data = { [CLKID_GEN_CLK_SEL] = &axg_gen_clk_sel.hw, [CLKID_GEN_CLK_DIV] = &axg_gen_clk_div.hw, [CLKID_GEN_CLK] = &axg_gen_clk.hw, + [CLKID_SYS_PLL_DCO] = &axg_sys_pll_dco.hw, + [CLKID_FIXED_PLL_DCO] = &axg_fixed_pll_dco.hw, + [CLKID_GP0_PLL_DCO] = &axg_gp0_pll_dco.hw, + [CLKID_HIFI_PLL_DCO] = &axg_hifi_pll_dco.hw, + [CLKID_PCIE_PLL_DCO] = &axg_pcie_pll_dco.hw, + [CLKID_PCIE_PLL_OD] = &axg_pcie_pll_od.hw, [NR_CLKS] = NULL, }, .num = NR_CLKS, @@ -1185,6 +1223,8 @@ static struct clk_regmap *const axg_clk_regmaps[] = { &axg_fclk_div4, &axg_fclk_div5, &axg_fclk_div7, + &axg_pcie_pll_dco, + &axg_pcie_pll_od, &axg_pcie_pll, &axg_pcie_mux, &axg_pcie_ref, @@ -1194,6 +1234,12 @@ static struct clk_regmap *const axg_clk_regmaps[] = { &axg_gen_clk_sel, &axg_gen_clk_div, &axg_gen_clk, + &axg_fixed_pll_dco, + &axg_sys_pll_dco, + &axg_gp0_pll_dco, + &axg_hifi_pll_dco, + &axg_pcie_pll_dco, + &axg_pcie_pll_od, }; static const struct of_device_id clkc_match_table[] = { diff --git a/drivers/clk/meson/axg.h b/drivers/clk/meson/axg.h index 1d04144a1b2c..0431dabac629 100644 --- a/drivers/clk/meson/axg.h +++ b/drivers/clk/meson/axg.h @@ -133,8 +133,14 @@ #define CLKID_PCIE_REF 78 #define CLKID_GEN_CLK_SEL 82 #define CLKID_GEN_CLK_DIV 83 +#define CLKID_SYS_PLL_DCO 85 +#define CLKID_FIXED_PLL_DCO 86 +#define CLKID_GP0_PLL_DCO 87 +#define CLKID_HIFI_PLL_DCO 88 +#define CLKID_PCIE_PLL_DCO 89 +#define CLKID_PCIE_PLL_OD 90 -#define NR_CLKS 85 +#define NR_CLKS 91 /* include the CLKIDs that have been made part of the DT binding */ #include <dt-bindings/clock/axg-clkc.h> diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c index 3e04617ac47f..f5b5b3fabe3c 100644 --- a/drivers/clk/meson/clk-pll.c +++ b/drivers/clk/meson/clk-pll.c @@ -11,15 +11,19 @@ * In the most basic form, a Meson PLL is composed as follows: * * PLL - * +------------------------------+ - * | | - * in -----[ /N ]---[ *M ]---[ >>OD ]----->> out - * | ^ ^ | - * +------------------------------+ - * | | - * FREF VCO + * +--------------------------------+ + * | | + * | +--+ | + * in >>-----[ /N ]--->| | +-----+ | + * | | |------| DCO |---->> out + * | +--------->| | +--v--+ | + * | | +--+ | | + * | | | | + * | +--[ *(M + (F/Fmax) ]<--+ | + * | | + * +--------------------------------+ * - * out = in * (m + frac / frac_max) / (n << sum(ods)) + * out = in * (m + frac / frac_max) / n */ #include <linux/clk-provider.h> @@ -41,12 +45,11 @@ meson_clk_pll_data(struct clk_regmap *clk) } static unsigned long __pll_params_to_rate(unsigned long parent_rate, - 
const struct pll_rate_table *pllt, + const struct pll_params_table *pllt, u16 frac, struct meson_clk_pll_data *pll) { u64 rate = (u64)parent_rate * pllt->m; - unsigned int od = pllt->od + pllt->od2 + pllt->od3; if (frac && MESON_PARM_APPLICABLE(&pll->frac)) { u64 frac_rate = (u64)parent_rate * frac; @@ -55,7 +58,7 @@ static unsigned long __pll_params_to_rate(unsigned long parent_rate, (1 << pll->frac.width)); } - return DIV_ROUND_UP_ULL(rate, pllt->n << od); + return DIV_ROUND_UP_ULL(rate, pllt->n); } static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw, @@ -63,20 +66,11 @@ static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw, { struct clk_regmap *clk = to_clk_regmap(hw); struct meson_clk_pll_data *pll = meson_clk_pll_data(clk); - struct pll_rate_table pllt; + struct pll_params_table pllt; u16 frac; pllt.n = meson_parm_read(clk->map, &pll->n); pllt.m = meson_parm_read(clk->map, &pll->m); - pllt.od = meson_parm_read(clk->map, &pll->od); - - pllt.od2 = MESON_PARM_APPLICABLE(&pll->od2) ? - meson_parm_read(clk->map, &pll->od2) : - 0; - - pllt.od3 = MESON_PARM_APPLICABLE(&pll->od3) ? - meson_parm_read(clk->map, &pll->od3) : - 0; frac = MESON_PARM_APPLICABLE(&pll->frac) ? meson_parm_read(clk->map, &pll->frac) : @@ -87,14 +81,12 @@ static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw, static u16 __pll_params_with_frac(unsigned long rate, unsigned long parent_rate, - const struct pll_rate_table *pllt, + const struct pll_params_table *pllt, struct meson_clk_pll_data *pll) { u16 frac_max = (1 << pll->frac.width); u64 val = (u64)rate * pllt->n; - val <<= pllt->od + pllt->od2 + pllt->od3; - if (pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) val = DIV_ROUND_CLOSEST_ULL(val * frac_max, parent_rate); else @@ -105,29 +97,50 @@ static u16 __pll_params_with_frac(unsigned long rate, return min((u16)val, (u16)(frac_max - 1)); } -static const struct pll_rate_table * +static bool meson_clk_pll_is_better(unsigned long rate, + unsigned long best, + unsigned long now, + struct meson_clk_pll_data *pll) +{ + if (!(pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) || + MESON_PARM_APPLICABLE(&pll->frac)) { + /* Round down */ + if (now < rate && best < now) + return true; + } else { + /* Round Closest */ + if (abs(now - rate) < abs(best - rate)) + return true; + } + + return false; +} + +static const struct pll_params_table * meson_clk_get_pll_settings(unsigned long rate, + unsigned long parent_rate, struct meson_clk_pll_data *pll) { - const struct pll_rate_table *table = pll->table; - unsigned int i = 0; + const struct pll_params_table *table = pll->table; + unsigned long best = 0, now = 0; + unsigned int i, best_i = 0; if (!table) return NULL; - /* Find the first table element exceeding rate */ - while (table[i].rate && table[i].rate <= rate) - i++; + for (i = 0; table[i].n; i++) { + now = __pll_params_to_rate(parent_rate, &table[i], 0, pll); - if (i != 0) { - if (MESON_PARM_APPLICABLE(&pll->frac) || - !(pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) || - (abs(rate - table[i - 1].rate) < - abs(rate - table[i].rate))) - i--; + /* If we get an exact match, don't bother any further */ + if (now == rate) { + return &table[i]; + } else if (meson_clk_pll_is_better(rate, best, now, pll)) { + best = now; + best_i = i; + } } - return (struct pll_rate_table *)&table[i]; + return (struct pll_params_table *)&table[best_i]; } static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate, @@ -135,16 +148,18 @@ static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate, { struct 
clk_regmap *clk = to_clk_regmap(hw); struct meson_clk_pll_data *pll = meson_clk_pll_data(clk); - const struct pll_rate_table *pllt = - meson_clk_get_pll_settings(rate, pll); + const struct pll_params_table *pllt = + meson_clk_get_pll_settings(rate, *parent_rate, pll); + unsigned long round; u16 frac; if (!pllt) return meson_clk_pll_recalc_rate(hw, *parent_rate); - if (!MESON_PARM_APPLICABLE(&pll->frac) - || rate == pllt->rate) - return pllt->rate; + round = __pll_params_to_rate(*parent_rate, pllt, 0, pll); + + if (!MESON_PARM_APPLICABLE(&pll->frac) || rate == round) + return round; /* * The rate provided by the setting is not an exact match, let's @@ -185,12 +200,45 @@ static void meson_clk_pll_init(struct clk_hw *hw) } } +static int meson_clk_pll_enable(struct clk_hw *hw) +{ + struct clk_regmap *clk = to_clk_regmap(hw); + struct meson_clk_pll_data *pll = meson_clk_pll_data(clk); + + /* Make sure the pll is in reset */ + meson_parm_write(clk->map, &pll->rst, 1); + + /* Enable the pll */ + meson_parm_write(clk->map, &pll->en, 1); + + /* Take the pll out reset */ + meson_parm_write(clk->map, &pll->rst, 0); + + if (meson_clk_pll_wait_lock(hw)) + return -EIO; + + return 0; +} + +static void meson_clk_pll_disable(struct clk_hw *hw) +{ + struct clk_regmap *clk = to_clk_regmap(hw); + struct meson_clk_pll_data *pll = meson_clk_pll_data(clk); + + /* Put the pll is in reset */ + meson_parm_write(clk->map, &pll->rst, 1); + + /* Disable the pll */ + meson_parm_write(clk->map, &pll->en, 0); +} + static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct clk_regmap *clk = to_clk_regmap(hw); struct meson_clk_pll_data *pll = meson_clk_pll_data(clk); - const struct pll_rate_table *pllt; + const struct pll_params_table *pllt; + unsigned int enabled; unsigned long old_rate; u16 frac = 0; @@ -199,32 +247,28 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, old_rate = rate; - pllt = meson_clk_get_pll_settings(rate, pll); + pllt = meson_clk_get_pll_settings(rate, parent_rate, pll); if (!pllt) return -EINVAL; - /* Put the pll in reset to write the params */ - meson_parm_write(clk->map, &pll->rst, 1); + enabled = meson_parm_read(clk->map, &pll->en); + if (enabled) + meson_clk_pll_disable(hw); meson_parm_write(clk->map, &pll->n, pllt->n); meson_parm_write(clk->map, &pll->m, pllt->m); - meson_parm_write(clk->map, &pll->od, pllt->od); - if (MESON_PARM_APPLICABLE(&pll->od2)) - meson_parm_write(clk->map, &pll->od2, pllt->od2); - - if (MESON_PARM_APPLICABLE(&pll->od3)) - meson_parm_write(clk->map, &pll->od3, pllt->od3); if (MESON_PARM_APPLICABLE(&pll->frac)) { frac = __pll_params_with_frac(rate, parent_rate, pllt, pll); meson_parm_write(clk->map, &pll->frac, frac); } - /* make sure the reset is cleared at this point */ - meson_parm_write(clk->map, &pll->rst, 0); + /* If the pll is stopped, bail out now */ + if (!enabled) + return 0; - if (meson_clk_pll_wait_lock(hw)) { + if (meson_clk_pll_enable(hw)) { pr_warn("%s: pll did not lock, trying to restore old rate %lu\n", __func__, old_rate); /* @@ -244,6 +288,8 @@ const struct clk_ops meson_clk_pll_ops = { .recalc_rate = meson_clk_pll_recalc_rate, .round_rate = meson_clk_pll_round_rate, .set_rate = meson_clk_pll_set_rate, + .enable = meson_clk_pll_enable, + .disable = meson_clk_pll_disable }; const struct clk_ops meson_clk_pll_ro_ops = { diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h index 24cec16b6038..6b96d55c047d 100644 --- a/drivers/clk/meson/clkc.h +++ 
b/drivers/clk/meson/clkc.h @@ -43,37 +43,29 @@ static inline void meson_parm_write(struct regmap *map, struct parm *p, } -struct pll_rate_table { - unsigned long rate; +struct pll_params_table { u16 m; u16 n; - u16 od; - u16 od2; - u16 od3; }; -#define PLL_RATE(_r, _m, _n, _od) \ +#define PLL_PARAMS(_m, _n) \ { \ - .rate = (_r), \ .m = (_m), \ .n = (_n), \ - .od = (_od), \ } #define CLK_MESON_PLL_ROUND_CLOSEST BIT(0) struct meson_clk_pll_data { + struct parm en; struct parm m; struct parm n; struct parm frac; - struct parm od; - struct parm od2; - struct parm od3; struct parm l; struct parm rst; const struct reg_sequence *init_regs; unsigned int init_count; - const struct pll_rate_table *table; + const struct pll_params_table *table; u8 flags; }; diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 86d3ae58e84c..9309cfaaa464 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -18,165 +18,77 @@ static DEFINE_SPINLOCK(meson_clk_lock); -static const struct pll_rate_table gxbb_gp0_pll_rate_table[] = { - PLL_RATE(96000000, 32, 1, 3), - PLL_RATE(99000000, 33, 1, 3), - PLL_RATE(102000000, 34, 1, 3), - PLL_RATE(105000000, 35, 1, 3), - PLL_RATE(108000000, 36, 1, 3), - PLL_RATE(111000000, 37, 1, 3), - PLL_RATE(114000000, 38, 1, 3), - PLL_RATE(117000000, 39, 1, 3), - PLL_RATE(120000000, 40, 1, 3), - PLL_RATE(123000000, 41, 1, 3), - PLL_RATE(126000000, 42, 1, 3), - PLL_RATE(129000000, 43, 1, 3), - PLL_RATE(132000000, 44, 1, 3), - PLL_RATE(135000000, 45, 1, 3), - PLL_RATE(138000000, 46, 1, 3), - PLL_RATE(141000000, 47, 1, 3), - PLL_RATE(144000000, 48, 1, 3), - PLL_RATE(147000000, 49, 1, 3), - PLL_RATE(150000000, 50, 1, 3), - PLL_RATE(153000000, 51, 1, 3), - PLL_RATE(156000000, 52, 1, 3), - PLL_RATE(159000000, 53, 1, 3), - PLL_RATE(162000000, 54, 1, 3), - PLL_RATE(165000000, 55, 1, 3), - PLL_RATE(168000000, 56, 1, 3), - PLL_RATE(171000000, 57, 1, 3), - PLL_RATE(174000000, 58, 1, 3), - PLL_RATE(177000000, 59, 1, 3), - PLL_RATE(180000000, 60, 1, 3), - PLL_RATE(183000000, 61, 1, 3), - PLL_RATE(186000000, 62, 1, 3), - PLL_RATE(192000000, 32, 1, 2), - PLL_RATE(198000000, 33, 1, 2), - PLL_RATE(204000000, 34, 1, 2), - PLL_RATE(210000000, 35, 1, 2), - PLL_RATE(216000000, 36, 1, 2), - PLL_RATE(222000000, 37, 1, 2), - PLL_RATE(228000000, 38, 1, 2), - PLL_RATE(234000000, 39, 1, 2), - PLL_RATE(240000000, 40, 1, 2), - PLL_RATE(246000000, 41, 1, 2), - PLL_RATE(252000000, 42, 1, 2), - PLL_RATE(258000000, 43, 1, 2), - PLL_RATE(264000000, 44, 1, 2), - PLL_RATE(270000000, 45, 1, 2), - PLL_RATE(276000000, 46, 1, 2), - PLL_RATE(282000000, 47, 1, 2), - PLL_RATE(288000000, 48, 1, 2), - PLL_RATE(294000000, 49, 1, 2), - PLL_RATE(300000000, 50, 1, 2), - PLL_RATE(306000000, 51, 1, 2), - PLL_RATE(312000000, 52, 1, 2), - PLL_RATE(318000000, 53, 1, 2), - PLL_RATE(324000000, 54, 1, 2), - PLL_RATE(330000000, 55, 1, 2), - PLL_RATE(336000000, 56, 1, 2), - PLL_RATE(342000000, 57, 1, 2), - PLL_RATE(348000000, 58, 1, 2), - PLL_RATE(354000000, 59, 1, 2), - PLL_RATE(360000000, 60, 1, 2), - PLL_RATE(366000000, 61, 1, 2), - PLL_RATE(372000000, 62, 1, 2), - PLL_RATE(384000000, 32, 1, 1), - PLL_RATE(396000000, 33, 1, 1), - PLL_RATE(408000000, 34, 1, 1), - PLL_RATE(420000000, 35, 1, 1), - PLL_RATE(432000000, 36, 1, 1), - PLL_RATE(444000000, 37, 1, 1), - PLL_RATE(456000000, 38, 1, 1), - PLL_RATE(468000000, 39, 1, 1), - PLL_RATE(480000000, 40, 1, 1), - PLL_RATE(492000000, 41, 1, 1), - PLL_RATE(504000000, 42, 1, 1), - PLL_RATE(516000000, 43, 1, 1), - PLL_RATE(528000000, 44, 1, 1), - PLL_RATE(540000000, 45, 1, 
1), - PLL_RATE(552000000, 46, 1, 1), - PLL_RATE(564000000, 47, 1, 1), - PLL_RATE(576000000, 48, 1, 1), - PLL_RATE(588000000, 49, 1, 1), - PLL_RATE(600000000, 50, 1, 1), - PLL_RATE(612000000, 51, 1, 1), - PLL_RATE(624000000, 52, 1, 1), - PLL_RATE(636000000, 53, 1, 1), - PLL_RATE(648000000, 54, 1, 1), - PLL_RATE(660000000, 55, 1, 1), - PLL_RATE(672000000, 56, 1, 1), - PLL_RATE(684000000, 57, 1, 1), - PLL_RATE(696000000, 58, 1, 1), - PLL_RATE(708000000, 59, 1, 1), - PLL_RATE(720000000, 60, 1, 1), - PLL_RATE(732000000, 61, 1, 1), - PLL_RATE(744000000, 62, 1, 1), - PLL_RATE(768000000, 32, 1, 0), - PLL_RATE(792000000, 33, 1, 0), - PLL_RATE(816000000, 34, 1, 0), - PLL_RATE(840000000, 35, 1, 0), - PLL_RATE(864000000, 36, 1, 0), - PLL_RATE(888000000, 37, 1, 0), - PLL_RATE(912000000, 38, 1, 0), - PLL_RATE(936000000, 39, 1, 0), - PLL_RATE(960000000, 40, 1, 0), - PLL_RATE(984000000, 41, 1, 0), - PLL_RATE(1008000000, 42, 1, 0), - PLL_RATE(1032000000, 43, 1, 0), - PLL_RATE(1056000000, 44, 1, 0), - PLL_RATE(1080000000, 45, 1, 0), - PLL_RATE(1104000000, 46, 1, 0), - PLL_RATE(1128000000, 47, 1, 0), - PLL_RATE(1152000000, 48, 1, 0), - PLL_RATE(1176000000, 49, 1, 0), - PLL_RATE(1200000000, 50, 1, 0), - PLL_RATE(1224000000, 51, 1, 0), - PLL_RATE(1248000000, 52, 1, 0), - PLL_RATE(1272000000, 53, 1, 0), - PLL_RATE(1296000000, 54, 1, 0), - PLL_RATE(1320000000, 55, 1, 0), - PLL_RATE(1344000000, 56, 1, 0), - PLL_RATE(1368000000, 57, 1, 0), - PLL_RATE(1392000000, 58, 1, 0), - PLL_RATE(1416000000, 59, 1, 0), - PLL_RATE(1440000000, 60, 1, 0), - PLL_RATE(1464000000, 61, 1, 0), - PLL_RATE(1488000000, 62, 1, 0), +static const struct pll_params_table gxbb_gp0_pll_params_table[] = { + PLL_PARAMS(32, 1), + PLL_PARAMS(33, 1), + PLL_PARAMS(34, 1), + PLL_PARAMS(35, 1), + PLL_PARAMS(36, 1), + PLL_PARAMS(37, 1), + PLL_PARAMS(38, 1), + PLL_PARAMS(39, 1), + PLL_PARAMS(40, 1), + PLL_PARAMS(41, 1), + PLL_PARAMS(42, 1), + PLL_PARAMS(43, 1), + PLL_PARAMS(44, 1), + PLL_PARAMS(45, 1), + PLL_PARAMS(46, 1), + PLL_PARAMS(47, 1), + PLL_PARAMS(48, 1), + PLL_PARAMS(49, 1), + PLL_PARAMS(50, 1), + PLL_PARAMS(51, 1), + PLL_PARAMS(52, 1), + PLL_PARAMS(53, 1), + PLL_PARAMS(54, 1), + PLL_PARAMS(55, 1), + PLL_PARAMS(56, 1), + PLL_PARAMS(57, 1), + PLL_PARAMS(58, 1), + PLL_PARAMS(59, 1), + PLL_PARAMS(60, 1), + PLL_PARAMS(61, 1), + PLL_PARAMS(62, 1), { /* sentinel */ }, }; -static const struct pll_rate_table gxl_gp0_pll_rate_table[] = { - PLL_RATE(504000000, 42, 1, 1), - PLL_RATE(516000000, 43, 1, 1), - PLL_RATE(528000000, 44, 1, 1), - PLL_RATE(540000000, 45, 1, 1), - PLL_RATE(552000000, 46, 1, 1), - PLL_RATE(564000000, 47, 1, 1), - PLL_RATE(576000000, 48, 1, 1), - PLL_RATE(588000000, 49, 1, 1), - PLL_RATE(600000000, 50, 1, 1), - PLL_RATE(612000000, 51, 1, 1), - PLL_RATE(624000000, 52, 1, 1), - PLL_RATE(636000000, 53, 1, 1), - PLL_RATE(648000000, 54, 1, 1), - PLL_RATE(660000000, 55, 1, 1), - PLL_RATE(672000000, 56, 1, 1), - PLL_RATE(684000000, 57, 1, 1), - PLL_RATE(696000000, 58, 1, 1), - PLL_RATE(708000000, 59, 1, 1), - PLL_RATE(720000000, 60, 1, 1), - PLL_RATE(732000000, 61, 1, 1), - PLL_RATE(744000000, 62, 1, 1), - PLL_RATE(756000000, 63, 1, 1), - PLL_RATE(768000000, 64, 1, 1), - PLL_RATE(780000000, 65, 1, 1), - PLL_RATE(792000000, 66, 1, 1), +static const struct pll_params_table gxl_gp0_pll_params_table[] = { + PLL_PARAMS(42, 1), + PLL_PARAMS(43, 1), + PLL_PARAMS(44, 1), + PLL_PARAMS(45, 1), + PLL_PARAMS(46, 1), + PLL_PARAMS(47, 1), + PLL_PARAMS(48, 1), + PLL_PARAMS(49, 1), + PLL_PARAMS(50, 1), + PLL_PARAMS(51, 1), + PLL_PARAMS(52, 1), + 
PLL_PARAMS(53, 1), + PLL_PARAMS(54, 1), + PLL_PARAMS(55, 1), + PLL_PARAMS(56, 1), + PLL_PARAMS(57, 1), + PLL_PARAMS(58, 1), + PLL_PARAMS(59, 1), + PLL_PARAMS(60, 1), + PLL_PARAMS(61, 1), + PLL_PARAMS(62, 1), + PLL_PARAMS(63, 1), + PLL_PARAMS(64, 1), + PLL_PARAMS(65, 1), + PLL_PARAMS(66, 1), { /* sentinel */ }, }; -static struct clk_regmap gxbb_fixed_pll = { +static struct clk_regmap gxbb_fixed_pll_dco = { .data = &(struct meson_clk_pll_data){ + .en = { + .reg_off = HHI_MPLL_CNTL, + .shift = 30, + .width = 1, + }, .m = { .reg_off = HHI_MPLL_CNTL, .shift = 0, @@ -187,11 +99,6 @@ static struct clk_regmap gxbb_fixed_pll = { .shift = 9, .width = 5, }, - .od = { - .reg_off = HHI_MPLL_CNTL, - .shift = 16, - .width = 2, - }, .frac = { .reg_off = HHI_MPLL_CNTL2, .shift = 0, @@ -209,11 +116,29 @@ static struct clk_regmap gxbb_fixed_pll = { }, }, .hw.init = &(struct clk_init_data){ - .name = "fixed_pll", + .name = "fixed_pll_dco", .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, + }, +}; + +static struct clk_regmap gxbb_fixed_pll = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_MPLL_CNTL, + .shift = 16, + .width = 2, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "fixed_pll", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "fixed_pll_dco" }, + .num_parents = 1, + /* + * This clock won't ever change at runtime so + * CLK_SET_RATE_PARENT is not required + */ }, }; @@ -228,8 +153,13 @@ static struct clk_fixed_factor gxbb_hdmi_pll_pre_mult = { }, }; -static struct clk_regmap gxbb_hdmi_pll = { +static struct clk_regmap gxbb_hdmi_pll_dco = { .data = &(struct meson_clk_pll_data){ + .en = { + .reg_off = HHI_HDMI_PLL_CNTL, + .shift = 30, + .width = 1, + }, .m = { .reg_off = HHI_HDMI_PLL_CNTL, .shift = 0, @@ -245,21 +175,6 @@ static struct clk_regmap gxbb_hdmi_pll = { .shift = 0, .width = 12, }, - .od = { - .reg_off = HHI_HDMI_PLL_CNTL2, - .shift = 16, - .width = 2, - }, - .od2 = { - .reg_off = HHI_HDMI_PLL_CNTL2, - .shift = 22, - .width = 2, - }, - .od3 = { - .reg_off = HHI_HDMI_PLL_CNTL2, - .shift = 18, - .width = 2, - }, .l = { .reg_off = HHI_HDMI_PLL_CNTL, .shift = 31, @@ -272,74 +187,121 @@ static struct clk_regmap gxbb_hdmi_pll = { }, }, .hw.init = &(struct clk_init_data){ - .name = "hdmi_pll", + .name = "hdmi_pll_dco", .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "hdmi_pll_pre_mult" }, .num_parents = 1, + /* + * Display directly handle hdmi pll registers ATM, we need + * NOCACHE to keep our view of the clock as accurate as possible + */ .flags = CLK_GET_RATE_NOCACHE, }, }; +static struct clk_regmap gxbb_hdmi_pll_od = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_HDMI_PLL_CNTL2, + .shift = 16, + .width = 2, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "hdmi_pll_od", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "hdmi_pll_dco" }, + .num_parents = 1, + .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap gxbb_hdmi_pll_od2 = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_HDMI_PLL_CNTL2, + .shift = 22, + .width = 2, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "hdmi_pll_od2", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "hdmi_pll_od" }, + .num_parents = 1, + .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT, + }, +}; + +static 
struct clk_regmap gxbb_hdmi_pll = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_HDMI_PLL_CNTL2, + .shift = 18, + .width = 2, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "hdmi_pll", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "hdmi_pll_od2" }, + .num_parents = 1, + .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap gxl_hdmi_pll_od = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_HDMI_PLL_CNTL + 8, + .shift = 21, + .width = 2, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "hdmi_pll_od", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "hdmi_pll_dco" }, + .num_parents = 1, + .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap gxl_hdmi_pll_od2 = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_HDMI_PLL_CNTL + 8, + .shift = 23, + .width = 2, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "hdmi_pll_od2", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "hdmi_pll_od" }, + .num_parents = 1, + .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT, + }, +}; + static struct clk_regmap gxl_hdmi_pll = { - .data = &(struct meson_clk_pll_data){ - .m = { - .reg_off = HHI_HDMI_PLL_CNTL, - .shift = 0, - .width = 9, - }, - .n = { - .reg_off = HHI_HDMI_PLL_CNTL, - .shift = 9, - .width = 5, - }, - .frac = { - /* - * On gxl, there is a register shift due to - * HHI_HDMI_PLL_CNTL1 which does not exist on gxbb, - * so we compute the register offset based on the PLL - * base to get it right - */ - .reg_off = HHI_HDMI_PLL_CNTL + 4, - .shift = 0, - .width = 12, - }, - .od = { - .reg_off = HHI_HDMI_PLL_CNTL + 8, - .shift = 21, - .width = 2, - }, - .od2 = { - .reg_off = HHI_HDMI_PLL_CNTL + 8, - .shift = 23, - .width = 2, - }, - .od3 = { - .reg_off = HHI_HDMI_PLL_CNTL + 8, - .shift = 19, - .width = 2, - }, - .l = { - .reg_off = HHI_HDMI_PLL_CNTL, - .shift = 31, - .width = 1, - }, - .rst = { - .reg_off = HHI_HDMI_PLL_CNTL, - .shift = 29, - .width = 1, - }, + .data = &(struct clk_regmap_div_data){ + .offset = HHI_HDMI_PLL_CNTL + 8, + .shift = 19, + .width = 2, + .flags = CLK_DIVIDER_POWER_OF_TWO, }, .hw.init = &(struct clk_init_data){ .name = "hdmi_pll", - .ops = &meson_clk_pll_ro_ops, - .parent_names = (const char *[]){ "xtal" }, + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "hdmi_pll_od2" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, + .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT, }, }; -static struct clk_regmap gxbb_sys_pll = { +static struct clk_regmap gxbb_sys_pll_dco = { .data = &(struct meson_clk_pll_data){ + .en = { + .reg_off = HHI_SYS_PLL_CNTL, + .shift = 30, + .width = 1, + }, .m = { .reg_off = HHI_SYS_PLL_CNTL, .shift = 0, @@ -350,11 +312,6 @@ static struct clk_regmap gxbb_sys_pll = { .shift = 9, .width = 5, }, - .od = { - .reg_off = HHI_SYS_PLL_CNTL, - .shift = 10, - .width = 2, - }, .l = { .reg_off = HHI_SYS_PLL_CNTL, .shift = 31, @@ -367,11 +324,26 @@ static struct clk_regmap gxbb_sys_pll = { }, }, .hw.init = &(struct clk_init_data){ - .name = "sys_pll", + .name = "sys_pll_dco", .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, + }, +}; + +static struct clk_regmap gxbb_sys_pll = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_SYS_PLL_CNTL, + .shift = 10, + .width = 
2, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "sys_pll", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "sys_pll_dco" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -379,11 +351,15 @@ static const struct reg_sequence gxbb_gp0_init_regs[] = { { .reg = HHI_GP0_PLL_CNTL2, .def = 0x69c80000 }, { .reg = HHI_GP0_PLL_CNTL3, .def = 0x0a5590c4 }, { .reg = HHI_GP0_PLL_CNTL4, .def = 0x0000500d }, - { .reg = HHI_GP0_PLL_CNTL, .def = 0x4a000228 }, }; -static struct clk_regmap gxbb_gp0_pll = { +static struct clk_regmap gxbb_gp0_pll_dco = { .data = &(struct meson_clk_pll_data){ + .en = { + .reg_off = HHI_GP0_PLL_CNTL, + .shift = 30, + .width = 1, + }, .m = { .reg_off = HHI_GP0_PLL_CNTL, .shift = 0, @@ -394,11 +370,6 @@ static struct clk_regmap gxbb_gp0_pll = { .shift = 9, .width = 5, }, - .od = { - .reg_off = HHI_GP0_PLL_CNTL, - .shift = 16, - .width = 2, - }, .l = { .reg_off = HHI_GP0_PLL_CNTL, .shift = 31, @@ -409,16 +380,15 @@ static struct clk_regmap gxbb_gp0_pll = { .shift = 29, .width = 1, }, - .table = gxbb_gp0_pll_rate_table, + .table = gxbb_gp0_pll_params_table, .init_regs = gxbb_gp0_init_regs, .init_count = ARRAY_SIZE(gxbb_gp0_init_regs), }, .hw.init = &(struct clk_init_data){ - .name = "gp0_pll", + .name = "gp0_pll_dco", .ops = &meson_clk_pll_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, }; @@ -428,11 +398,15 @@ static const struct reg_sequence gxl_gp0_init_regs[] = { { .reg = HHI_GP0_PLL_CNTL3, .def = 0x0a59a288 }, { .reg = HHI_GP0_PLL_CNTL4, .def = 0xc000004d }, { .reg = HHI_GP0_PLL_CNTL5, .def = 0x00078000 }, - { .reg = HHI_GP0_PLL_CNTL, .def = 0x40010250 }, }; -static struct clk_regmap gxl_gp0_pll = { +static struct clk_regmap gxl_gp0_pll_dco = { .data = &(struct meson_clk_pll_data){ + .en = { + .reg_off = HHI_GP0_PLL_CNTL, + .shift = 30, + .width = 1, + }, .m = { .reg_off = HHI_GP0_PLL_CNTL, .shift = 0, @@ -443,11 +417,6 @@ static struct clk_regmap gxl_gp0_pll = { .shift = 9, .width = 5, }, - .od = { - .reg_off = HHI_GP0_PLL_CNTL, - .shift = 16, - .width = 2, - }, .frac = { .reg_off = HHI_GP0_PLL_CNTL1, .shift = 0, @@ -463,16 +432,31 @@ static struct clk_regmap gxl_gp0_pll = { .shift = 29, .width = 1, }, - .table = gxl_gp0_pll_rate_table, + .table = gxl_gp0_pll_params_table, .init_regs = gxl_gp0_init_regs, .init_count = ARRAY_SIZE(gxl_gp0_init_regs), }, .hw.init = &(struct clk_init_data){ - .name = "gp0_pll", + .name = "gp0_pll_dco", .ops = &meson_clk_pll_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, + }, +}; + +static struct clk_regmap gxbb_gp0_pll = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_GP0_PLL_CNTL, + .shift = 16, + .width = 2, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "gp0_pll", + .ops = &clk_regmap_divider_ops, + .parent_names = (const char *[]){ "gp0_pll_dco" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -1933,6 +1917,12 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = { [CLKID_GEN_CLK_SEL] = &gxbb_gen_clk_sel.hw, [CLKID_GEN_CLK_DIV] = &gxbb_gen_clk_div.hw, [CLKID_GEN_CLK] = &gxbb_gen_clk.hw, + [CLKID_FIXED_PLL_DCO] = &gxbb_fixed_pll_dco.hw, + [CLKID_HDMI_PLL_DCO] = &gxbb_hdmi_pll_dco.hw, + [CLKID_HDMI_PLL_OD] = &gxbb_hdmi_pll_od.hw, + [CLKID_HDMI_PLL_OD2] = &gxbb_hdmi_pll_od2.hw, + [CLKID_SYS_PLL_DCO] = &gxbb_sys_pll_dco.hw, + [CLKID_GP0_PLL_DCO] = &gxbb_gp0_pll_dco.hw, [NR_CLKS] = NULL, 
}, .num = NR_CLKS, @@ -1948,7 +1938,7 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = { [CLKID_FCLK_DIV4] = &gxbb_fclk_div4.hw, [CLKID_FCLK_DIV5] = &gxbb_fclk_div5.hw, [CLKID_FCLK_DIV7] = &gxbb_fclk_div7.hw, - [CLKID_GP0_PLL] = &gxl_gp0_pll.hw, + [CLKID_GP0_PLL] = &gxbb_gp0_pll.hw, [CLKID_MPEG_SEL] = &gxbb_mpeg_clk_sel.hw, [CLKID_MPEG_DIV] = &gxbb_mpeg_clk_div.hw, [CLKID_CLK81] = &gxbb_clk81.hw, @@ -2098,19 +2088,29 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = { [CLKID_GEN_CLK_SEL] = &gxbb_gen_clk_sel.hw, [CLKID_GEN_CLK_DIV] = &gxbb_gen_clk_div.hw, [CLKID_GEN_CLK] = &gxbb_gen_clk.hw, + [CLKID_FIXED_PLL_DCO] = &gxbb_fixed_pll_dco.hw, + [CLKID_HDMI_PLL_DCO] = &gxbb_hdmi_pll_dco.hw, + [CLKID_HDMI_PLL_OD] = &gxl_hdmi_pll_od.hw, + [CLKID_HDMI_PLL_OD2] = &gxl_hdmi_pll_od2.hw, + [CLKID_SYS_PLL_DCO] = &gxbb_sys_pll_dco.hw, + [CLKID_GP0_PLL_DCO] = &gxl_gp0_pll_dco.hw, [NR_CLKS] = NULL, }, .num = NR_CLKS, }; static struct clk_regmap *const gxbb_clk_regmaps[] = { - &gxbb_gp0_pll, + &gxbb_gp0_pll_dco, &gxbb_hdmi_pll, + &gxbb_hdmi_pll_od, + &gxbb_hdmi_pll_od2, }; static struct clk_regmap *const gxl_clk_regmaps[] = { - &gxl_gp0_pll, + &gxl_gp0_pll_dco, &gxl_hdmi_pll, + &gxl_hdmi_pll_od, + &gxl_hdmi_pll_od2, }; static struct clk_regmap *const gx_clk_regmaps[] = { @@ -2265,6 +2265,10 @@ static struct clk_regmap *const gx_clk_regmaps[] = { &gxbb_gen_clk_sel, &gxbb_gen_clk_div, &gxbb_gen_clk, + &gxbb_fixed_pll_dco, + &gxbb_hdmi_pll_dco, + &gxbb_sys_pll_dco, + &gxbb_gp0_pll, }; struct clkc_data { diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h index 20dfb1daf5b8..72bc077d9663 100644 --- a/drivers/clk/meson/gxbb.h +++ b/drivers/clk/meson/gxbb.h @@ -159,8 +159,14 @@ #define CLKID_VDEC_HEVC_DIV 155 #define CLKID_GEN_CLK_SEL 157 #define CLKID_GEN_CLK_DIV 158 - -#define NR_CLKS 160 +#define CLKID_FIXED_PLL_DCO 160 +#define CLKID_HDMI_PLL_DCO 161 +#define CLKID_HDMI_PLL_OD 162 +#define CLKID_HDMI_PLL_OD2 163 +#define CLKID_SYS_PLL_DCO 164 +#define CLKID_GP0_PLL_DCO 165 + +#define NR_CLKS 166 /* include the CLKIDs that have been made part of the DT binding */ #include <dt-bindings/clock/gxbb-clkc.h> diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 7447d96a265f..346b9e165b7a 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -11,7 +11,6 @@ #include <linux/clk-provider.h> #include <linux/init.h> #include <linux/of_address.h> -#include <linux/platform_device.h> #include <linux/reset-controller.h> #include <linux/slab.h> #include <linux/regmap.h> @@ -22,66 +21,27 @@ static DEFINE_SPINLOCK(meson_clk_lock); -static void __iomem *clk_base; - struct meson8b_clk_reset { struct reset_controller_dev reset; - void __iomem *base; + struct regmap *regmap; }; -static const struct pll_rate_table sys_pll_rate_table[] = { - PLL_RATE(312000000, 52, 1, 2), - PLL_RATE(336000000, 56, 1, 2), - PLL_RATE(360000000, 60, 1, 2), - PLL_RATE(384000000, 64, 1, 2), - PLL_RATE(408000000, 68, 1, 2), - PLL_RATE(432000000, 72, 1, 2), - PLL_RATE(456000000, 76, 1, 2), - PLL_RATE(480000000, 80, 1, 2), - PLL_RATE(504000000, 84, 1, 2), - PLL_RATE(528000000, 88, 1, 2), - PLL_RATE(552000000, 92, 1, 2), - PLL_RATE(576000000, 96, 1, 2), - PLL_RATE(600000000, 50, 1, 1), - PLL_RATE(624000000, 52, 1, 1), - PLL_RATE(648000000, 54, 1, 1), - PLL_RATE(672000000, 56, 1, 1), - PLL_RATE(696000000, 58, 1, 1), - PLL_RATE(720000000, 60, 1, 1), - PLL_RATE(744000000, 62, 1, 1), - PLL_RATE(768000000, 64, 1, 1), - PLL_RATE(792000000, 66, 1, 1), - PLL_RATE(816000000, 68, 1, 1), - 
PLL_RATE(840000000, 70, 1, 1), - PLL_RATE(864000000, 72, 1, 1), - PLL_RATE(888000000, 74, 1, 1), - PLL_RATE(912000000, 76, 1, 1), - PLL_RATE(936000000, 78, 1, 1), - PLL_RATE(960000000, 80, 1, 1), - PLL_RATE(984000000, 82, 1, 1), - PLL_RATE(1008000000, 84, 1, 1), - PLL_RATE(1032000000, 86, 1, 1), - PLL_RATE(1056000000, 88, 1, 1), - PLL_RATE(1080000000, 90, 1, 1), - PLL_RATE(1104000000, 92, 1, 1), - PLL_RATE(1128000000, 94, 1, 1), - PLL_RATE(1152000000, 96, 1, 1), - PLL_RATE(1176000000, 98, 1, 1), - PLL_RATE(1200000000, 50, 1, 0), - PLL_RATE(1224000000, 51, 1, 0), - PLL_RATE(1248000000, 52, 1, 0), - PLL_RATE(1272000000, 53, 1, 0), - PLL_RATE(1296000000, 54, 1, 0), - PLL_RATE(1320000000, 55, 1, 0), - PLL_RATE(1344000000, 56, 1, 0), - PLL_RATE(1368000000, 57, 1, 0), - PLL_RATE(1392000000, 58, 1, 0), - PLL_RATE(1416000000, 59, 1, 0), - PLL_RATE(1440000000, 60, 1, 0), - PLL_RATE(1464000000, 61, 1, 0), - PLL_RATE(1488000000, 62, 1, 0), - PLL_RATE(1512000000, 63, 1, 0), - PLL_RATE(1536000000, 64, 1, 0), +static const struct pll_params_table sys_pll_params_table[] = { + PLL_PARAMS(50, 1), + PLL_PARAMS(51, 1), + PLL_PARAMS(52, 1), + PLL_PARAMS(53, 1), + PLL_PARAMS(54, 1), + PLL_PARAMS(55, 1), + PLL_PARAMS(56, 1), + PLL_PARAMS(57, 1), + PLL_PARAMS(58, 1), + PLL_PARAMS(59, 1), + PLL_PARAMS(60, 1), + PLL_PARAMS(61, 1), + PLL_PARAMS(62, 1), + PLL_PARAMS(63, 1), + PLL_PARAMS(64, 1), { /* sentinel */ }, }; @@ -94,8 +54,13 @@ static struct clk_fixed_rate meson8b_xtal = { }, }; -static struct clk_regmap meson8b_fixed_pll = { +static struct clk_regmap meson8b_fixed_pll_dco = { .data = &(struct meson_clk_pll_data){ + .en = { + .reg_off = HHI_MPLL_CNTL, + .shift = 30, + .width = 1, + }, .m = { .reg_off = HHI_MPLL_CNTL, .shift = 0, @@ -106,11 +71,6 @@ static struct clk_regmap meson8b_fixed_pll = { .shift = 9, .width = 5, }, - .od = { - .reg_off = HHI_MPLL_CNTL, - .shift = 16, - .width = 2, - }, .frac = { .reg_off = HHI_MPLL_CNTL2, .shift = 0, @@ -128,16 +88,39 @@ static struct clk_regmap meson8b_fixed_pll = { }, }, .hw.init = &(struct clk_init_data){ - .name = "fixed_pll", + .name = "fixed_pll_dco", .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, }; -static struct clk_regmap meson8b_vid_pll = { +static struct clk_regmap meson8b_fixed_pll = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_MPLL_CNTL, + .shift = 16, + .width = 2, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "fixed_pll", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "fixed_pll_dco" }, + .num_parents = 1, + /* + * This clock won't ever change at runtime so + * CLK_SET_RATE_PARENT is not required + */ + }, +}; + +static struct clk_regmap meson8b_vid_pll_dco = { .data = &(struct meson_clk_pll_data){ + .en = { + .reg_off = HHI_VID_PLL_CNTL, + .shift = 30, + .width = 1, + }, .m = { .reg_off = HHI_VID_PLL_CNTL, .shift = 0, @@ -148,11 +131,6 @@ static struct clk_regmap meson8b_vid_pll = { .shift = 9, .width = 5, }, - .od = { - .reg_off = HHI_VID_PLL_CNTL, - .shift = 16, - .width = 2, - }, .l = { .reg_off = HHI_VID_PLL_CNTL, .shift = 31, @@ -165,16 +143,36 @@ static struct clk_regmap meson8b_vid_pll = { }, }, .hw.init = &(struct clk_init_data){ - .name = "vid_pll", + .name = "vid_pll_dco", .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, }, }; -static struct clk_regmap meson8b_sys_pll = { +static struct clk_regmap 
meson8b_vid_pll = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_VID_PLL_CNTL, + .shift = 16, + .width = 2, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "vid_pll", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "vid_pll_dco" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_sys_pll_dco = { .data = &(struct meson_clk_pll_data){ + .en = { + .reg_off = HHI_SYS_PLL_CNTL, + .shift = 30, + .width = 1, + }, .m = { .reg_off = HHI_SYS_PLL_CNTL, .shift = 0, @@ -185,11 +183,6 @@ static struct clk_regmap meson8b_sys_pll = { .shift = 9, .width = 5, }, - .od = { - .reg_off = HHI_SYS_PLL_CNTL, - .shift = 16, - .width = 2, - }, .l = { .reg_off = HHI_SYS_PLL_CNTL, .shift = 31, @@ -200,14 +193,29 @@ static struct clk_regmap meson8b_sys_pll = { .shift = 29, .width = 1, }, - .table = sys_pll_rate_table, + .table = sys_pll_params_table, }, .hw.init = &(struct clk_init_data){ - .name = "sys_pll", + .name = "sys_pll_dco", .ops = &meson_clk_pll_ro_ops, .parent_names = (const char *[]){ "xtal" }, .num_parents = 1, - .flags = CLK_GET_RATE_NOCACHE, + }, +}; + +static struct clk_regmap meson8b_sys_pll = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_SYS_PLL_CNTL, + .shift = 16, + .width = 2, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "sys_pll", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "sys_pll_dco" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -879,6 +887,9 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = { [CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw, [CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw, [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw, + [CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw, + [CLKID_PLL_VID_DCO] = &meson8b_vid_pll_dco.hw, + [CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw, [CLK_NR_CLKS] = NULL, }, .num = CLK_NR_CLKS, @@ -987,6 +998,9 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = { &meson8b_nand_clk_sel, &meson8b_nand_clk_div, &meson8b_nand_clk_gate, + &meson8b_fixed_pll_dco, + &meson8b_vid_pll_dco, + &meson8b_sys_pll_dco, }; static const struct meson8b_clk_reset_line { @@ -1050,7 +1064,6 @@ static int meson8b_clk_reset_update(struct reset_controller_dev *rcdev, container_of(rcdev, struct meson8b_clk_reset, reset); unsigned long flags; const struct meson8b_clk_reset_line *reset; - u32 val; if (id >= ARRAY_SIZE(meson8b_clk_reset_bits)) return -EINVAL; @@ -1059,12 +1072,12 @@ static int meson8b_clk_reset_update(struct reset_controller_dev *rcdev, spin_lock_irqsave(&meson_clk_lock, flags); - val = readl(meson8b_clk_reset->base + reset->reg); if (assert) - val |= BIT(reset->bit_idx); + regmap_update_bits(meson8b_clk_reset->regmap, reset->reg, + BIT(reset->bit_idx), BIT(reset->bit_idx)); else - val &= ~BIT(reset->bit_idx); - writel(val, meson8b_clk_reset->base + reset->reg); + regmap_update_bits(meson8b_clk_reset->regmap, reset->reg, + BIT(reset->bit_idx), 0); spin_unlock_irqrestore(&meson_clk_lock, flags); @@ -1094,62 +1107,12 @@ static const struct regmap_config clkc_regmap_config = { .reg_stride = 4, }; -static int meson8b_clkc_probe(struct platform_device *pdev) -{ - int ret, i; - struct device *dev = &pdev->dev; - struct regmap *map; - - if (!clk_base) - return -ENXIO; - - map = devm_regmap_init_mmio(dev, clk_base, &clkc_regmap_config); - if (IS_ERR(map)) - return PTR_ERR(map); - - /* Populate regmap for the regmap backed clocks */ - for 
(i = 0; i < ARRAY_SIZE(meson8b_clk_regmaps); i++) - meson8b_clk_regmaps[i]->map = map; - - /* - * register all clks - * CLKID_UNUSED = 0, so skip it and start with CLKID_XTAL = 1 - */ - for (i = CLKID_XTAL; i < CLK_NR_CLKS; i++) { - /* array might be sparse */ - if (!meson8b_hw_onecell_data.hws[i]) - continue; - - ret = devm_clk_hw_register(dev, meson8b_hw_onecell_data.hws[i]); - if (ret) - return ret; - } - - return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, - &meson8b_hw_onecell_data); -} - -static const struct of_device_id meson8b_clkc_match_table[] = { - { .compatible = "amlogic,meson8-clkc" }, - { .compatible = "amlogic,meson8b-clkc" }, - { .compatible = "amlogic,meson8m2-clkc" }, - { } -}; - -static struct platform_driver meson8b_driver = { - .probe = meson8b_clkc_probe, - .driver = { - .name = "meson8b-clkc", - .of_match_table = meson8b_clkc_match_table, - }, -}; - -builtin_platform_driver(meson8b_driver); - -static void __init meson8b_clkc_reset_init(struct device_node *np) +static void __init meson8b_clkc_init(struct device_node *np) { struct meson8b_clk_reset *rstc; - int ret; + void __iomem *clk_base; + struct regmap *map; + int i, ret; /* Generic clocks, PLLs and some of the reset-bits */ clk_base = of_iomap(np, 1); @@ -1158,12 +1121,16 @@ static void __init meson8b_clkc_reset_init(struct device_node *np) return; } + map = regmap_init_mmio(NULL, clk_base, &clkc_regmap_config); + if (IS_ERR(map)) + return; + rstc = kzalloc(sizeof(*rstc), GFP_KERNEL); if (!rstc) return; /* Reset Controller */ - rstc->base = clk_base; + rstc->regmap = map; rstc->reset.ops = &meson8b_clk_reset_ops; rstc->reset.nr_resets = ARRAY_SIZE(meson8b_clk_reset_bits); rstc->reset.of_node = np; @@ -1173,11 +1140,34 @@ static void __init meson8b_clkc_reset_init(struct device_node *np) __func__, ret); return; } + + /* Populate regmap for the regmap backed clocks */ + for (i = 0; i < ARRAY_SIZE(meson8b_clk_regmaps); i++) + meson8b_clk_regmaps[i]->map = map; + + /* + * register all clks + * CLKID_UNUSED = 0, so skip it and start with CLKID_XTAL = 1 + */ + for (i = CLKID_XTAL; i < CLK_NR_CLKS; i++) { + /* array might be sparse */ + if (!meson8b_hw_onecell_data.hws[i]) + continue; + + ret = clk_hw_register(NULL, meson8b_hw_onecell_data.hws[i]); + if (ret) + return; + } + + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, + &meson8b_hw_onecell_data); + if (ret) + pr_err("%s: failed to register clock provider\n", __func__); } CLK_OF_DECLARE_DRIVER(meson8_clkc, "amlogic,meson8-clkc", - meson8b_clkc_reset_init); + meson8b_clkc_init); CLK_OF_DECLARE_DRIVER(meson8b_clkc, "amlogic,meson8b-clkc", - meson8b_clkc_reset_init); + meson8b_clkc_init); CLK_OF_DECLARE_DRIVER(meson8m2_clkc, "amlogic,meson8m2-clkc", - meson8b_clkc_reset_init); + meson8b_clkc_init); diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h index 5d09412b5084..1c6fb180e6a2 100644 --- a/drivers/clk/meson/meson8b.h +++ b/drivers/clk/meson/meson8b.h @@ -75,8 +75,11 @@ #define CLKID_FCLK_DIV7_DIV 109 #define CLKID_NAND_SEL 110 #define CLKID_NAND_DIV 111 +#define CLKID_PLL_FIXED_DCO 113 +#define CLKID_PLL_VID_DCO 114 +#define CLKID_PLL_SYS_DCO 115 -#define CLK_NR_CLKS 113 +#define CLK_NR_CLKS 116 /* * include the CLKID and RESETID that have diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index 0fc75c395957..d083b860f083 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -227,8 +227,8 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = { /* The gate clocks has mux 
parent. */ {MMP2_CLK_SDH0, "sdh0_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, {MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, - {MMP2_CLK_SDH1, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, - {MMP2_CLK_SDH1, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, + {MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, + {MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock}, {MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock}, {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x1b, 0x1b, 0x0, 0, &disp1_lock}, diff --git a/drivers/clk/mvebu/ap806-system-controller.c b/drivers/clk/mvebu/ap806-system-controller.c index fa2fbd2cef4a..ea54a874bbda 100644 --- a/drivers/clk/mvebu/ap806-system-controller.c +++ b/drivers/clk/mvebu/ap806-system-controller.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Marvell Armada AP806 System Controller * @@ -5,9 +6,6 @@ * * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #define pr_fmt(fmt) "ap806-system-controller: " fmt @@ -155,7 +153,6 @@ static int ap806_syscon_common_probe(struct platform_device *pdev, goto fail4; } - of_clk_add_provider(np, of_clk_src_onecell_get, &ap806_clk_data); ret = of_clk_add_provider(np, of_clk_src_onecell_get, &ap806_clk_data); if (ret) goto fail_clk_add; diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c index 2c7c1085f883..7dedfaa6e152 100644 --- a/drivers/clk/mvebu/armada-370.c +++ b/drivers/clk/mvebu/armada-370.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Marvell Armada 370 SoC clocks * @@ -7,9 +8,6 @@ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> * Andrew Lunn <andrew@lunn.ch> * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> diff --git a/drivers/clk/mvebu/armada-375.c b/drivers/clk/mvebu/armada-375.c index c7af2242b796..a7157c690238 100644 --- a/drivers/clk/mvebu/armada-375.c +++ b/drivers/clk/mvebu/armada-375.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Marvell Armada 375 SoC clocks * @@ -7,9 +8,6 @@ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> * Andrew Lunn <andrew@lunn.ch> * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. 
*/ #include <linux/kernel.h> diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c index 499f5962c8b0..1f1cff428d78 100644 --- a/drivers/clk/mvebu/armada-37xx-periph.c +++ b/drivers/clk/mvebu/armada-37xx-periph.c @@ -56,6 +56,15 @@ struct clk_periph_driver_data { struct clk_hw_onecell_data *hw_data; spinlock_t lock; + void __iomem *reg; + + /* Storage registers for suspend/resume operations */ + u32 tbg_sel; + u32 div_sel0; + u32 div_sel1; + u32 div_sel2; + u32 clk_sel; + u32 clk_dis; }; struct clk_double_div { @@ -672,6 +681,40 @@ static int armada_3700_add_composite_clk(const struct clk_periph_data *data, return PTR_ERR_OR_ZERO(*hw); } +static int __maybe_unused armada_3700_periph_clock_suspend(struct device *dev) +{ + struct clk_periph_driver_data *data = dev_get_drvdata(dev); + + data->tbg_sel = readl(data->reg + TBG_SEL); + data->div_sel0 = readl(data->reg + DIV_SEL0); + data->div_sel1 = readl(data->reg + DIV_SEL1); + data->div_sel2 = readl(data->reg + DIV_SEL2); + data->clk_sel = readl(data->reg + CLK_SEL); + data->clk_dis = readl(data->reg + CLK_DIS); + + return 0; +} + +static int __maybe_unused armada_3700_periph_clock_resume(struct device *dev) +{ + struct clk_periph_driver_data *data = dev_get_drvdata(dev); + + /* Follow the same order than what the Cortex-M3 does (ATF code) */ + writel(data->clk_dis, data->reg + CLK_DIS); + writel(data->div_sel0, data->reg + DIV_SEL0); + writel(data->div_sel1, data->reg + DIV_SEL1); + writel(data->div_sel2, data->reg + DIV_SEL2); + writel(data->tbg_sel, data->reg + TBG_SEL); + writel(data->clk_sel, data->reg + CLK_SEL); + + return 0; +} + +static const struct dev_pm_ops armada_3700_periph_clock_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(armada_3700_periph_clock_suspend, + armada_3700_periph_clock_resume) +}; + static int armada_3700_periph_clock_probe(struct platform_device *pdev) { struct clk_periph_driver_data *driver_data; @@ -680,7 +723,6 @@ static int armada_3700_periph_clock_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; int num_periph = 0, i, ret; struct resource *res; - void __iomem *reg; data = of_device_get_match_data(dev); if (!data) @@ -689,11 +731,6 @@ static int armada_3700_periph_clock_probe(struct platform_device *pdev) while (data[num_periph].name) num_periph++; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - reg = devm_ioremap_resource(dev, res); - if (IS_ERR(reg)) - return PTR_ERR(reg); - driver_data = devm_kzalloc(dev, sizeof(*driver_data), GFP_KERNEL); if (!driver_data) return -ENOMEM; @@ -706,12 +743,16 @@ static int armada_3700_periph_clock_probe(struct platform_device *pdev) return -ENOMEM; driver_data->hw_data->num = num_periph; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + driver_data->reg = devm_ioremap_resource(dev, res); + if (IS_ERR(driver_data->reg)) + return PTR_ERR(driver_data->reg); + spin_lock_init(&driver_data->lock); for (i = 0; i < num_periph; i++) { struct clk_hw **hw = &driver_data->hw_data->hws[i]; - - if (armada_3700_add_composite_clk(&data[i], reg, + if (armada_3700_add_composite_clk(&data[i], driver_data->reg, &driver_data->lock, dev, hw)) dev_err(dev, "Can't register periph clock %s\n", data[i].name); @@ -749,6 +790,7 @@ static struct platform_driver armada_3700_periph_clock_driver = { .driver = { .name = "marvell-armada-3700-periph-clock", .of_match_table = armada_3700_periph_clock_of_match, + .pm = &armada_3700_periph_clock_pm_ops, }, }; diff --git a/drivers/clk/mvebu/armada-37xx-tbg.c b/drivers/clk/mvebu/armada-37xx-tbg.c 
index 7ff041f73b55..ee272d4d8c24 100644 --- a/drivers/clk/mvebu/armada-37xx-tbg.c +++ b/drivers/clk/mvebu/armada-37xx-tbg.c @@ -1,13 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Marvell Armada 37xx SoC Time Base Generator clocks * * Copyright (C) 2016 Marvell * * Gregory CLEMENT <gregory.clement@free-electrons.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2 or later. This program is licensed "as is" - * without any warranty of any kind, whether express or implied. */ #include <linux/clk-provider.h> @@ -99,12 +96,13 @@ static int armada_3700_tbg_clock_probe(struct platform_device *pdev) hw_tbg_data->num = NUM_TBG; platform_set_drvdata(pdev, hw_tbg_data); - parent = devm_clk_get(dev, NULL); + parent = clk_get(dev, NULL); if (IS_ERR(parent)) { dev_err(dev, "Could get the clock parent\n"); return -EINVAL; } parent_name = __clk_get_name(parent); + clk_put(parent); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); reg = devm_ioremap_resource(dev, res); diff --git a/drivers/clk/mvebu/armada-37xx-xtal.c b/drivers/clk/mvebu/armada-37xx-xtal.c index 612d65ede10a..e9e306d4e9af 100644 --- a/drivers/clk/mvebu/armada-37xx-xtal.c +++ b/drivers/clk/mvebu/armada-37xx-xtal.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Marvell Armada 37xx SoC xtal clocks * @@ -5,9 +6,6 @@ * * Gregory CLEMENT <gregory.clement@free-electrons.com> * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include <linux/clk-provider.h> diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c index 9ff4ea63932d..ef2ab81f087d 100644 --- a/drivers/clk/mvebu/armada-38x.c +++ b/drivers/clk/mvebu/armada-38x.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Marvell Armada 380/385 SoC clocks * @@ -7,9 +8,6 @@ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> * Andrew Lunn <andrew@lunn.ch> * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> diff --git a/drivers/clk/mvebu/armada-39x.c b/drivers/clk/mvebu/armada-39x.c index 4fdfd32247a9..674ccfd6236e 100644 --- a/drivers/clk/mvebu/armada-39x.c +++ b/drivers/clk/mvebu/armada-39x.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Marvell Armada 39x SoC clocks * @@ -8,9 +9,6 @@ * Andrew Lunn <andrew@lunn.ch> * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c index 0ec44ae9a2a2..e8f03293ec83 100644 --- a/drivers/clk/mvebu/armada-xp.c +++ b/drivers/clk/mvebu/armada-xp.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Marvell Armada XP SoC clocks * @@ -7,9 +8,6 @@ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> * Andrew Lunn <andrew@lunn.ch> * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. 
*/ #include <linux/kernel.h> diff --git a/drivers/clk/mvebu/clk-corediv.c b/drivers/clk/mvebu/clk-corediv.c index 68f05c53d40e..1fc84b0e72ee 100644 --- a/drivers/clk/mvebu/clk-corediv.c +++ b/drivers/clk/mvebu/clk-corediv.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * MVEBU Core divider clock * @@ -5,9 +6,6 @@ * * Ezequiel Garcia <ezequiel.garcia@free-electrons.com> * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c index 3045067448fb..c2af3395cf13 100644 --- a/drivers/clk/mvebu/clk-cpu.c +++ b/drivers/clk/mvebu/clk-cpu.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Marvell MVEBU CPU clock handling. * @@ -5,9 +6,6 @@ * * Gregory CLEMENT <gregory.clement@free-electrons.com> * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/slab.h> diff --git a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c index 472c88b90256..6ab3c2e627c7 100644 --- a/drivers/clk/mvebu/common.c +++ b/drivers/clk/mvebu/common.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Marvell EBU SoC common clock handling * @@ -7,9 +8,6 @@ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> * Andrew Lunn <andrew@lunn.ch> * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> diff --git a/drivers/clk/mvebu/common.h b/drivers/clk/mvebu/common.h index f0de6c8a494a..d1ab79b43105 100644 --- a/drivers/clk/mvebu/common.h +++ b/drivers/clk/mvebu/common.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Marvell EBU SoC common clock handling * @@ -7,9 +8,6 @@ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> * Andrew Lunn <andrew@lunn.ch> * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #ifndef __CLK_MVEBU_COMMON_H_ diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c index 75bf7b8f282f..9781b1bf5998 100644 --- a/drivers/clk/mvebu/cp110-system-controller.c +++ b/drivers/clk/mvebu/cp110-system-controller.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Marvell Armada CP110 System Controller * @@ -5,9 +6,6 @@ * * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ /* diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c index 59fad9546c84..e0dd99f36bf4 100644 --- a/drivers/clk/mvebu/dove.c +++ b/drivers/clk/mvebu/dove.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Marvell Dove SoC clocks * @@ -7,9 +8,6 @@ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> * Andrew Lunn <andrew@lunn.ch> * - * This file is licensed under the terms of the GNU General Public - * License version 2. 
This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c index a2a8d614039d..6f784167bda4 100644 --- a/drivers/clk/mvebu/kirkwood.c +++ b/drivers/clk/mvebu/kirkwood.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Marvell Kirkwood SoC clocks * @@ -7,9 +8,6 @@ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> * Andrew Lunn <andrew@lunn.ch> * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> diff --git a/drivers/clk/mvebu/mv98dx3236.c b/drivers/clk/mvebu/mv98dx3236.c index 6e203af73cac..0a74cf7a7725 100644 --- a/drivers/clk/mvebu/mv98dx3236.c +++ b/drivers/clk/mvebu/mv98dx3236.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Marvell MV98DX3236 SoC clocks * @@ -7,9 +8,6 @@ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> * Andrew Lunn <andrew@lunn.ch> * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> diff --git a/drivers/clk/mvebu/orion.c b/drivers/clk/mvebu/orion.c index a6e5bee23385..f681a65be20a 100644 --- a/drivers/clk/mvebu/orion.c +++ b/drivers/clk/mvebu/orion.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Marvell Orion SoC clocks * @@ -5,9 +6,6 @@ * * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 064768699fe7..a611531df115 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -1,3 +1,7 @@ +config KRAIT_CLOCKS + bool + select KRAIT_L2_ACCESSORS + config QCOM_GDSC bool select PM_GENERIC_DOMAINS if PM @@ -235,6 +239,31 @@ config MSM_GCC_8998 Say Y if you want to use peripheral devices such as UART, SPI, i2c, USB, UFS, SD/eMMC, PCIe, etc. +config QCS_GCC_404 + tristate "QCS404 Global Clock Controller" + depends on COMMON_CLK_QCOM + help + Support for the global clock controller on QCS404 devices. + Say Y if you want to use multimedia devices or peripheral + devices such as UART, SPI, I2C, USB, SD/eMMC, PCIe etc. + +config SDM_CAMCC_845 + tristate "SDM845 Camera Clock Controller" + depends on COMMON_CLK_QCOM + select SDM_GCC_845 + help + Support for the camera clock controller on SDM845 devices. + Say Y if you want to support camera devices and camera functionality. + +config SDM_GCC_660 + tristate "SDM660 Global Clock Controller" + select QCOM_GDSC + depends on COMMON_CLK_QCOM + help + Support for the global clock controller on SDM660 devices. + Say Y if you want to use peripheral devices such as UART, SPI, + i2C, USB, UFS, SDDC, PCIe, etc. + config SDM_GCC_845 tristate "SDM845 Global Clock Controller" select QCOM_GDSC @@ -272,3 +301,27 @@ config SPMI_PMIC_CLKDIV Technologies, Inc. SPMI PMIC. It configures the frequency of clkdiv outputs of the PMIC. These clocks are typically wired through alternate functions on GPIO pins. 
+ +config QCOM_HFPLL + tristate "High-Frequency PLL (HFPLL) Clock Controller" + depends on COMMON_CLK_QCOM + help + Support for the high-frequency PLLs present on Qualcomm devices. + Say Y if you want to support CPU frequency scaling on devices + such as MSM8974, APQ8084, etc. + +config KPSS_XCC + tristate "KPSS Clock Controller" + depends on COMMON_CLK_QCOM + help + Support for the Krait ACC and GCC clock controllers. Say Y + if you want to support CPU frequency scaling on devices such + as MSM8960, APQ8064, etc. + +config KRAITCC + tristate "Krait Clock Controller" + depends on COMMON_CLK_QCOM && ARM + select KRAIT_CLOCKS + help + Support for the Krait CPU clocks on Qualcomm devices. + Say Y if you want to support CPU frequency scaling. diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index 21a45035930d..981882e16189 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -11,6 +11,8 @@ clk-qcom-y += clk-branch.o clk-qcom-y += clk-regmap-divider.o clk-qcom-y += clk-regmap-mux.o clk-qcom-y += clk-regmap-mux-div.o +clk-qcom-$(CONFIG_KRAIT_CLOCKS) += clk-krait.o +clk-qcom-y += clk-hfpll.o clk-qcom-y += reset.o clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o @@ -39,7 +41,13 @@ obj-$(CONFIG_QCOM_CLK_APCS_MSM8916) += apcs-msm8916.o obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o obj-$(CONFIG_QCOM_CLK_RPMH) += clk-rpmh.o obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o +obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o +obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o +obj-$(CONFIG_SDM_GCC_660) += gcc-sdm660.o obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o +obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o +obj-$(CONFIG_QCOM_HFPLL) += hfpll.o +obj-$(CONFIG_KRAITCC) += krait-cc.o diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c new file mode 100644 index 000000000000..1b2cefef7431 --- /dev/null +++ b/drivers/clk/qcom/camcc-sdm845.c @@ -0,0 +1,1745 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/qcom,camcc-sdm845.h> + +#include "common.h" +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "gdsc.h" + +enum { + P_BI_TCXO, + P_CAM_CC_PLL0_OUT_EVEN, + P_CAM_CC_PLL1_OUT_EVEN, + P_CAM_CC_PLL2_OUT_EVEN, + P_CAM_CC_PLL3_OUT_EVEN, + P_CORE_BI_PLL_TEST_SE, +}; + +static const struct parent_map cam_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_CAM_CC_PLL2_OUT_EVEN, 1 }, + { P_CAM_CC_PLL1_OUT_EVEN, 2 }, + { P_CAM_CC_PLL3_OUT_EVEN, 5 }, + { P_CAM_CC_PLL0_OUT_EVEN, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const cam_cc_parent_names_0[] = { + "bi_tcxo", + "cam_cc_pll2_out_even", + "cam_cc_pll1_out_even", + "cam_cc_pll3_out_even", + "cam_cc_pll0_out_even", + "core_bi_pll_test_se", +}; + +static struct clk_alpha_pll cam_cc_pll0 = { + .offset = 0x0, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll0", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_fabia_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_fabia_even[] = { + { 0x0, 1 }, + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = { + .offset = 0x0, + .post_div_shift = 8, + .post_div_table = post_div_table_fabia_even, + .num_post_div = ARRAY_SIZE(post_div_table_fabia_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll0_out_even", + .parent_names = (const char *[]){ "cam_cc_pll0" }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_fabia_ops, + }, +}; + +static struct clk_alpha_pll cam_cc_pll1 = { + .offset = 0x1000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll1", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_fabia_ops, + }, + }, +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = { + .offset = 0x1000, + .post_div_shift = 8, + .post_div_table = post_div_table_fabia_even, + .num_post_div = ARRAY_SIZE(post_div_table_fabia_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll1_out_even", + .parent_names = (const char *[]){ "cam_cc_pll1" }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_fabia_ops, + }, +}; + +static struct clk_alpha_pll cam_cc_pll2 = { + .offset = 0x2000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll2", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_fabia_ops, + }, + }, +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll2_out_even = { + .offset = 0x2000, + .post_div_shift = 8, + .post_div_table = post_div_table_fabia_even, + .num_post_div = ARRAY_SIZE(post_div_table_fabia_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll2_out_even", + .parent_names = (const char *[]){ "cam_cc_pll2" }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_fabia_ops, + }, +}; + +static struct clk_alpha_pll cam_cc_pll3 = { + .offset = 0x3000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], + 
.clkr = { + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll3", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_fabia_ops, + }, + }, +}; + +static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = { + .offset = 0x3000, + .post_div_shift = 8, + .post_div_table = post_div_table_fabia_even, + .num_post_div = ARRAY_SIZE(post_div_table_fabia_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_pll3_out_even", + .parent_names = (const char *[]){ "cam_cc_pll3" }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_fabia_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0), + F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0), + F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0), + F(480000000, P_CAM_CC_PLL2_OUT_EVEN, 1, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0), + { } +}; + +/* + * As per HW design, some of the CAMCC RCGs needs to + * move to XO clock during their clock disable so using + * clk_rcg2_shared_ops for such RCGs. This is required + * to power down the camera memories gracefully. + * Also, use CLK_SET_RATE_PARENT flag for the RCGs which + * have CAM_CC_PLL2_OUT_EVEN PLL as parent in frequency + * table and requires reconfiguration of the PLL frequency. + */ +static struct clk_rcg2 cam_cc_bps_clk_src = { + .cmd_rcgr = 0x600c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_bps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_bps_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_cci_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0), + F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0), + F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_cci_clk_src = { + .cmd_rcgr = 0xb0d8, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cci_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_cci_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_cphy_rx_clk_src = { + .cmd_rcgr = 0x9060, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_cphy_rx_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(240000000, P_CAM_CC_PLL2_OUT_EVEN, 2, 0, 0), + F(269333333, P_CAM_CC_PLL1_OUT_EVEN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = { + .cmd_rcgr = 0x5004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi0phytimer_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .flags = CLK_SET_RATE_PARENT, + .ops = 
&clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = { + .cmd_rcgr = 0x5028, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi1phytimer_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = { + .cmd_rcgr = 0x504c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi2phytimer_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = { + .cmd_rcgr = 0x5070, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi3phytimer_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0), + F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0), + F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0), + F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_fast_ahb_clk_src = { + .cmd_rcgr = 0x6038, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_fast_ahb_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_fd_core_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0), + F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_fd_core_clk_src = { + .cmd_rcgr = 0xb0b0, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_fd_core_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_fd_core_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0), + F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_icp_clk_src = { + .cmd_rcgr = 0xb088, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_icp_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_icp_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0), + F(320000000, P_CAM_CC_PLL2_OUT_EVEN, 1.5, 0, 0), + F(404000000, 
P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0), + F(480000000, P_CAM_CC_PLL2_OUT_EVEN, 1, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ife_0_clk_src = { + .cmd_rcgr = 0x900c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_ife_0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_0_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0), + F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = { + .cmd_rcgr = 0x9038, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_0_csid_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_ife_1_clk_src = { + .cmd_rcgr = 0xa00c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_ife_0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_1_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = { + .cmd_rcgr = 0xa030, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_1_csid_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_ife_lite_clk_src = { + .cmd_rcgr = 0xb004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_ife_0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_lite_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = { + .cmd_rcgr = 0xb024, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_lite_csid_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0), + F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0), + F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0), + F(480000000, P_CAM_CC_PLL2_OUT_EVEN, 1, 0, 0), + F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0), + F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_ipe_0_clk_src = { + .cmd_rcgr = 0x700c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_ipe_0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_0_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + 
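Editor's note: the RCGs defined above and below are ordinary clk_rcg2 instances registered through the common clock framework, so a camera consumer driver selects one of the rates listed in the corresponding freq_tbl with the standard CCF consumer calls. The sketch that follows is illustrative only and is not part of this patch; the device pointer, the "ipe_0" clock-name string and the 404 MHz target are assumptions, while devm_clk_get(), clk_set_rate() and clk_prepare_enable() are the normal consumer API.

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/*
	 * Illustrative consumer sketch (not part of this patch): request one of
	 * the ftbl_cam_cc_ipe_0_clk_src rates and enable the clock. The "ipe_0"
	 * name and the device pointer are hypothetical and would come from the
	 * consumer's DT binding.
	 */
	static int camcc_consumer_example(struct device *dev)
	{
		struct clk *ipe_clk;
		int ret;

		ipe_clk = devm_clk_get(dev, "ipe_0");
		if (IS_ERR(ipe_clk))
			return PTR_ERR(ipe_clk);

		/* 404 MHz is one of the entries in ftbl_cam_cc_ipe_0_clk_src */
		ret = clk_set_rate(ipe_clk, 404000000);
		if (ret)
			return ret;

		return clk_prepare_enable(ipe_clk);
	}

The diff resumes below with the remaining RCG definitions.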
+static struct clk_rcg2 cam_cc_ipe_1_clk_src = { + .cmd_rcgr = 0x800c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_ipe_0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_1_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 cam_cc_jpeg_clk_src = { + .cmd_rcgr = 0xb04c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_bps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_jpeg_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0), + F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0), + F(269333333, P_CAM_CC_PLL1_OUT_EVEN, 3, 0, 0), + F(320000000, P_CAM_CC_PLL2_OUT_EVEN, 1.5, 0, 0), + F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_lrme_clk_src = { + .cmd_rcgr = 0xb0f8, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_lrme_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_lrme_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(24000000, P_CAM_CC_PLL2_OUT_EVEN, 10, 1, 2), + F(33333333, P_CAM_CC_PLL0_OUT_EVEN, 2, 1, 9), + F(34285714, P_CAM_CC_PLL2_OUT_EVEN, 14, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_mclk0_clk_src = { + .cmd_rcgr = 0x4004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk0_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 cam_cc_mclk1_clk_src = { + .cmd_rcgr = 0x4024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk1_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 cam_cc_mclk2_clk_src = { + .cmd_rcgr = 0x4044, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk2_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 cam_cc_mclk3_clk_src = { + .cmd_rcgr = 0x4064, + .mnd_width = 8, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_mclk0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk3_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(60000000, P_CAM_CC_PLL0_OUT_EVEN, 10, 0, 0), + F(66666667, P_CAM_CC_PLL0_OUT_EVEN, 9, 0, 0), + 
F(73846154, P_CAM_CC_PLL2_OUT_EVEN, 6.5, 0, 0), + F(80000000, P_CAM_CC_PLL2_OUT_EVEN, 6, 0, 0), + { } +}; + +static struct clk_rcg2 cam_cc_slow_ahb_clk_src = { + .cmd_rcgr = 0x6054, + .mnd_width = 0, + .hid_width = 5, + .parent_map = cam_cc_parent_map_0, + .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "cam_cc_slow_ahb_clk_src", + .parent_names = cam_cc_parent_names_0, + .num_parents = 6, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_branch cam_cc_bps_ahb_clk = { + .halt_reg = 0x606c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x606c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_bps_ahb_clk", + .parent_names = (const char *[]){ + "cam_cc_slow_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_bps_areg_clk = { + .halt_reg = 0x6050, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6050, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_bps_areg_clk", + .parent_names = (const char *[]){ + "cam_cc_fast_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_bps_axi_clk = { + .halt_reg = 0x6034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6034, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_bps_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_bps_clk = { + .halt_reg = 0x6024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6024, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_bps_clk", + .parent_names = (const char *[]){ + "cam_cc_bps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_camnoc_atb_clk = { + .halt_reg = 0xb12c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb12c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_camnoc_atb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_camnoc_axi_clk = { + .halt_reg = 0xb124, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb124, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_camnoc_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cci_clk = { + .halt_reg = 0xb0f0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb0f0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_cci_clk", + .parent_names = (const char *[]){ + "cam_cc_cci_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_cpas_ahb_clk = { + .halt_reg = 0xb11c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb11c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_cpas_ahb_clk", + .parent_names = (const char *[]){ + "cam_cc_slow_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi0phytimer_clk = { + .halt_reg = 0x501c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x501c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi0phytimer_clk", + 
.parent_names = (const char *[]){ + "cam_cc_csi0phytimer_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi1phytimer_clk = { + .halt_reg = 0x5040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5040, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi1phytimer_clk", + .parent_names = (const char *[]){ + "cam_cc_csi1phytimer_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi2phytimer_clk = { + .halt_reg = 0x5064, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5064, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi2phytimer_clk", + .parent_names = (const char *[]){ + "cam_cc_csi2phytimer_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csi3phytimer_clk = { + .halt_reg = 0x5088, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5088, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csi3phytimer_clk", + .parent_names = (const char *[]){ + "cam_cc_csi3phytimer_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy0_clk = { + .halt_reg = 0x5020, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csiphy0_clk", + .parent_names = (const char *[]){ + "cam_cc_cphy_rx_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy1_clk = { + .halt_reg = 0x5044, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5044, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csiphy1_clk", + .parent_names = (const char *[]){ + "cam_cc_cphy_rx_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy2_clk = { + .halt_reg = 0x5068, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5068, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csiphy2_clk", + .parent_names = (const char *[]){ + "cam_cc_cphy_rx_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_csiphy3_clk = { + .halt_reg = 0x508c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x508c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_csiphy3_clk", + .parent_names = (const char *[]){ + "cam_cc_cphy_rx_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_fd_core_clk = { + .halt_reg = 0xb0c8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb0c8, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_fd_core_clk", + .parent_names = (const char *[]){ + "cam_cc_fd_core_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_fd_core_uar_clk = { + .halt_reg = 0xb0d0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb0d0, + .enable_mask = BIT(0), + .hw.init = 
&(struct clk_init_data){ + .name = "cam_cc_fd_core_uar_clk", + .parent_names = (const char *[]){ + "cam_cc_fd_core_clk_src", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_icp_apb_clk = { + .halt_reg = 0xb084, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb084, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_icp_apb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_icp_atb_clk = { + .halt_reg = 0xb078, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb078, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_icp_atb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_icp_clk = { + .halt_reg = 0xb0a0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb0a0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_icp_clk", + .parent_names = (const char *[]){ + "cam_cc_icp_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_icp_cti_clk = { + .halt_reg = 0xb07c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb07c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_icp_cti_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_icp_ts_clk = { + .halt_reg = 0xb080, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb080, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_icp_ts_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_axi_clk = { + .halt_reg = 0x907c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x907c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_0_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_clk = { + .halt_reg = 0x9024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9024, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_0_clk", + .parent_names = (const char *[]){ + "cam_cc_ife_0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_cphy_rx_clk = { + .halt_reg = 0x9078, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9078, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_0_cphy_rx_clk", + .parent_names = (const char *[]){ + "cam_cc_cphy_rx_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_csid_clk = { + .halt_reg = 0x9050, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9050, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_0_csid_clk", + .parent_names = (const char *[]){ + "cam_cc_ife_0_csid_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_0_dsp_clk = { + .halt_reg = 0x9034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9034, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_0_dsp_clk", + .parent_names = (const char *[]){ + "cam_cc_ife_0_clk_src", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch 
cam_cc_ife_1_axi_clk = { + .halt_reg = 0xa054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa054, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_1_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_clk = { + .halt_reg = 0xa024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa024, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_1_clk", + .parent_names = (const char *[]){ + "cam_cc_ife_1_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_cphy_rx_clk = { + .halt_reg = 0xa050, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa050, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_1_cphy_rx_clk", + .parent_names = (const char *[]){ + "cam_cc_cphy_rx_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_csid_clk = { + .halt_reg = 0xa048, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa048, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_1_csid_clk", + .parent_names = (const char *[]){ + "cam_cc_ife_1_csid_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_1_dsp_clk = { + .halt_reg = 0xa02c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa02c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_1_dsp_clk", + .parent_names = (const char *[]){ + "cam_cc_ife_1_clk_src", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_clk = { + .halt_reg = 0xb01c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb01c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_lite_clk", + .parent_names = (const char *[]){ + "cam_cc_ife_lite_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = { + .halt_reg = 0xb044, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb044, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_lite_cphy_rx_clk", + .parent_names = (const char *[]){ + "cam_cc_cphy_rx_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ife_lite_csid_clk = { + .halt_reg = 0xb03c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb03c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ife_lite_csid_clk", + .parent_names = (const char *[]){ + "cam_cc_ife_lite_csid_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_0_ahb_clk = { + .halt_reg = 0x703c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x703c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_0_ahb_clk", + .parent_names = (const char *[]){ + "cam_cc_slow_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_0_areg_clk = { + .halt_reg = 0x7038, + .halt_check = BRANCH_HALT, + .clkr = 
{ + .enable_reg = 0x7038, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_0_areg_clk", + .parent_names = (const char *[]){ + "cam_cc_fast_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_0_axi_clk = { + .halt_reg = 0x7034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x7034, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_0_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_0_clk = { + .halt_reg = 0x7024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x7024, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_0_clk", + .parent_names = (const char *[]){ + "cam_cc_ipe_0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_1_ahb_clk = { + .halt_reg = 0x803c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x803c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_1_ahb_clk", + .parent_names = (const char *[]){ + "cam_cc_slow_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_1_areg_clk = { + .halt_reg = 0x8038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8038, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_1_areg_clk", + .parent_names = (const char *[]){ + "cam_cc_fast_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_1_axi_clk = { + .halt_reg = 0x8034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8034, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_1_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_ipe_1_clk = { + .halt_reg = 0x8024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8024, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_ipe_1_clk", + .parent_names = (const char *[]){ + "cam_cc_ipe_1_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_jpeg_clk = { + .halt_reg = 0xb064, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb064, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_jpeg_clk", + .parent_names = (const char *[]){ + "cam_cc_jpeg_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_lrme_clk = { + .halt_reg = 0xb110, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb110, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_lrme_clk", + .parent_names = (const char *[]){ + "cam_cc_lrme_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk0_clk = { + .halt_reg = 0x401c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x401c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk0_clk", + .parent_names = (const char *[]){ + "cam_cc_mclk0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = 
&clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk1_clk = { + .halt_reg = 0x403c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x403c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk1_clk", + .parent_names = (const char *[]){ + "cam_cc_mclk1_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk2_clk = { + .halt_reg = 0x405c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x405c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk2_clk", + .parent_names = (const char *[]){ + "cam_cc_mclk2_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_mclk3_clk = { + .halt_reg = 0x407c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x407c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_mclk3_clk", + .parent_names = (const char *[]){ + "cam_cc_mclk3_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_soc_ahb_clk = { + .halt_reg = 0xb13c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb13c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_soc_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch cam_cc_sys_tmr_clk = { + .halt_reg = 0xb0a8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb0a8, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "cam_cc_sys_tmr_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc bps_gdsc = { + .gdscr = 0x6004, + .pd = { + .name = "bps_gdsc", + }, + .flags = HW_CTRL | POLL_CFG_GDSCR, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc ipe_0_gdsc = { + .gdscr = 0x7004, + .pd = { + .name = "ipe_0_gdsc", + }, + .flags = HW_CTRL | POLL_CFG_GDSCR, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc ipe_1_gdsc = { + .gdscr = 0x8004, + .pd = { + .name = "ipe_1_gdsc", + }, + .flags = HW_CTRL | POLL_CFG_GDSCR, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc ife_0_gdsc = { + .gdscr = 0x9004, + .pd = { + .name = "ife_0_gdsc", + }, + .flags = POLL_CFG_GDSCR, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc ife_1_gdsc = { + .gdscr = 0xa004, + .pd = { + .name = "ife_1_gdsc", + }, + .flags = POLL_CFG_GDSCR, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc titan_top_gdsc = { + .gdscr = 0xb134, + .pd = { + .name = "titan_top_gdsc", + }, + .flags = POLL_CFG_GDSCR, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct clk_regmap *cam_cc_sdm845_clocks[] = { + [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr, + [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr, + [CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr, + [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr, + [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr, + [CAM_CC_CAMNOC_ATB_CLK] = &cam_cc_camnoc_atb_clk.clkr, + [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr, + [CAM_CC_CCI_CLK] = &cam_cc_cci_clk.clkr, + [CAM_CC_CCI_CLK_SRC] = &cam_cc_cci_clk_src.clkr, + [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr, + [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr, + [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr, + [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr, + [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr, + [CAM_CC_CSI1PHYTIMER_CLK_SRC] = 
&cam_cc_csi1phytimer_clk_src.clkr, + [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr, + [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr, + [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr, + [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr, + [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr, + [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr, + [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr, + [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr, + [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr, + [CAM_CC_FD_CORE_CLK] = &cam_cc_fd_core_clk.clkr, + [CAM_CC_FD_CORE_CLK_SRC] = &cam_cc_fd_core_clk_src.clkr, + [CAM_CC_FD_CORE_UAR_CLK] = &cam_cc_fd_core_uar_clk.clkr, + [CAM_CC_ICP_APB_CLK] = &cam_cc_icp_apb_clk.clkr, + [CAM_CC_ICP_ATB_CLK] = &cam_cc_icp_atb_clk.clkr, + [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr, + [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr, + [CAM_CC_ICP_CTI_CLK] = &cam_cc_icp_cti_clk.clkr, + [CAM_CC_ICP_TS_CLK] = &cam_cc_icp_ts_clk.clkr, + [CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr, + [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr, + [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr, + [CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr, + [CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr, + [CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr, + [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr, + [CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr, + [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr, + [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr, + [CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr, + [CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr, + [CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr, + [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr, + [CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr, + [CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr, + [CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr, + [CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr, + [CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr, + [CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr, + [CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr, + [CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr, + [CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr, + [CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr, + [CAM_CC_IPE_1_AHB_CLK] = &cam_cc_ipe_1_ahb_clk.clkr, + [CAM_CC_IPE_1_AREG_CLK] = &cam_cc_ipe_1_areg_clk.clkr, + [CAM_CC_IPE_1_AXI_CLK] = &cam_cc_ipe_1_axi_clk.clkr, + [CAM_CC_IPE_1_CLK] = &cam_cc_ipe_1_clk.clkr, + [CAM_CC_IPE_1_CLK_SRC] = &cam_cc_ipe_1_clk_src.clkr, + [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr, + [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr, + [CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr, + [CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr, + [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr, + [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr, + [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr, + [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr, + [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr, + [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr, + [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr, + [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr, + [CAM_CC_PLL0] = &cam_cc_pll0.clkr, + [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr, + [CAM_CC_PLL1] = &cam_cc_pll1.clkr, + [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr, + [CAM_CC_PLL2] = &cam_cc_pll2.clkr, + [CAM_CC_PLL2_OUT_EVEN] = &cam_cc_pll2_out_even.clkr, 
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr, + [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr, + [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr, + [CAM_CC_SOC_AHB_CLK] = &cam_cc_soc_ahb_clk.clkr, + [CAM_CC_SYS_TMR_CLK] = &cam_cc_sys_tmr_clk.clkr, +}; + +static struct gdsc *cam_cc_sdm845_gdscs[] = { + [BPS_GDSC] = &bps_gdsc, + [IPE_0_GDSC] = &ipe_0_gdsc, + [IPE_1_GDSC] = &ipe_1_gdsc, + [IFE_0_GDSC] = &ife_0_gdsc, + [IFE_1_GDSC] = &ife_1_gdsc, + [TITAN_TOP_GDSC] = &titan_top_gdsc, +}; + +static const struct regmap_config cam_cc_sdm845_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0xd004, + .fast_io = true, +}; + +static const struct qcom_cc_desc cam_cc_sdm845_desc = { + .config = &cam_cc_sdm845_regmap_config, + .clks = cam_cc_sdm845_clocks, + .num_clks = ARRAY_SIZE(cam_cc_sdm845_clocks), + .gdscs = cam_cc_sdm845_gdscs, + .num_gdscs = ARRAY_SIZE(cam_cc_sdm845_gdscs), +}; + +static const struct of_device_id cam_cc_sdm845_match_table[] = { + { .compatible = "qcom,sdm845-camcc" }, + { } +}; +MODULE_DEVICE_TABLE(of, cam_cc_sdm845_match_table); + +static int cam_cc_sdm845_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + struct alpha_pll_config cam_cc_pll_config = { }; + + regmap = qcom_cc_map(pdev, &cam_cc_sdm845_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + cam_cc_pll_config.l = 0x1f; + cam_cc_pll_config.alpha = 0x4000; + clk_fabia_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll_config); + + cam_cc_pll_config.l = 0x2a; + cam_cc_pll_config.alpha = 0x1556; + clk_fabia_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll_config); + + cam_cc_pll_config.l = 0x32; + cam_cc_pll_config.alpha = 0x0; + clk_fabia_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll_config); + + cam_cc_pll_config.l = 0x14; + clk_fabia_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll_config); + + return qcom_cc_really_probe(pdev, &cam_cc_sdm845_desc, regmap); +} + +static struct platform_driver cam_cc_sdm845_driver = { + .probe = cam_cc_sdm845_probe, + .driver = { + .name = "sdm845-camcc", + .of_match_table = cam_cc_sdm845_match_table, + }, +}; + +static int __init cam_cc_sdm845_init(void) +{ + return platform_driver_register(&cam_cc_sdm845_driver); +} +subsys_initcall(cam_cc_sdm845_init); + +static void __exit cam_cc_sdm845_exit(void) +{ + platform_driver_unregister(&cam_cc_sdm845_driver); +} +module_exit(cam_cc_sdm845_exit); + +MODULE_DESCRIPTION("QTI CAM_CC SDM845 Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index a91d97cecbad..0ced4a5a9a17 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -220,6 +220,7 @@ void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, if (pll->flags & SUPPORTS_FSM_MODE) qcom_pll_set_fsm_mode(regmap, PLL_MODE(pll), 6, 0); } +EXPORT_SYMBOL_GPL(clk_alpha_pll_configure); static int clk_alpha_pll_hwfsm_enable(struct clk_hw *hw) { diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c index bc2205c450b6..99446bf630aa 100644 --- a/drivers/clk/qcom/clk-branch.c +++ b/drivers/clk/qcom/clk-branch.c @@ -18,7 +18,7 @@ static bool clk_branch_in_hwcg_mode(const struct clk_branch *br) u32 val; if (!br->hwcg_reg) - return 0; + return false; regmap_read(br->clkr.regmap, br->hwcg_reg, &val); diff --git a/drivers/clk/qcom/clk-hfpll.c b/drivers/clk/qcom/clk-hfpll.c new file mode 100644 index 000000000000..3c04805f2a55 --- /dev/null +++ b/drivers/clk/qcom/clk-hfpll.c @@ -0,0 +1,244 @@ +// 
SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018, The Linux Foundation. All rights reserved. + +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/regmap.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/clk-provider.h> +#include <linux/spinlock.h> + +#include "clk-regmap.h" +#include "clk-hfpll.h" + +#define PLL_OUTCTRL BIT(0) +#define PLL_BYPASSNL BIT(1) +#define PLL_RESET_N BIT(2) + +/* Initialize a HFPLL at a given rate and enable it. */ +static void __clk_hfpll_init_once(struct clk_hw *hw) +{ + struct clk_hfpll *h = to_clk_hfpll(hw); + struct hfpll_data const *hd = h->d; + struct regmap *regmap = h->clkr.regmap; + + if (likely(h->init_done)) + return; + + /* Configure PLL parameters for integer mode. */ + if (hd->config_val) + regmap_write(regmap, hd->config_reg, hd->config_val); + regmap_write(regmap, hd->m_reg, 0); + regmap_write(regmap, hd->n_reg, 1); + + if (hd->user_reg) { + u32 regval = hd->user_val; + unsigned long rate; + + rate = clk_hw_get_rate(hw); + + /* Pick the right VCO. */ + if (hd->user_vco_mask && rate > hd->low_vco_max_rate) + regval |= hd->user_vco_mask; + regmap_write(regmap, hd->user_reg, regval); + } + + if (hd->droop_reg) + regmap_write(regmap, hd->droop_reg, hd->droop_val); + + h->init_done = true; +} + +static void __clk_hfpll_enable(struct clk_hw *hw) +{ + struct clk_hfpll *h = to_clk_hfpll(hw); + struct hfpll_data const *hd = h->d; + struct regmap *regmap = h->clkr.regmap; + u32 val; + + __clk_hfpll_init_once(hw); + + /* Disable PLL bypass mode. */ + regmap_update_bits(regmap, hd->mode_reg, PLL_BYPASSNL, PLL_BYPASSNL); + + /* + * H/W requires a 5us delay between disabling the bypass and + * de-asserting the reset. Delay 10us just to be safe. + */ + udelay(10); + + /* De-assert active-low PLL reset. */ + regmap_update_bits(regmap, hd->mode_reg, PLL_RESET_N, PLL_RESET_N); + + /* Wait for PLL to lock. */ + if (hd->status_reg) { + do { + regmap_read(regmap, hd->status_reg, &val); + } while (!(val & BIT(hd->lock_bit))); + } else { + udelay(60); + } + + /* Enable PLL output. */ + regmap_update_bits(regmap, hd->mode_reg, PLL_OUTCTRL, PLL_OUTCTRL); +} + +/* Enable an already-configured HFPLL. */ +static int clk_hfpll_enable(struct clk_hw *hw) +{ + unsigned long flags; + struct clk_hfpll *h = to_clk_hfpll(hw); + struct hfpll_data const *hd = h->d; + struct regmap *regmap = h->clkr.regmap; + u32 mode; + + spin_lock_irqsave(&h->lock, flags); + regmap_read(regmap, hd->mode_reg, &mode); + if (!(mode & (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL))) + __clk_hfpll_enable(hw); + spin_unlock_irqrestore(&h->lock, flags); + + return 0; +} + +static void __clk_hfpll_disable(struct clk_hfpll *h) +{ + struct hfpll_data const *hd = h->d; + struct regmap *regmap = h->clkr.regmap; + + /* + * Disable the PLL output, disable test mode, enable the bypass mode, + * and assert the reset. 
+ */ + regmap_update_bits(regmap, hd->mode_reg, + PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL, 0); +} + +static void clk_hfpll_disable(struct clk_hw *hw) +{ + struct clk_hfpll *h = to_clk_hfpll(hw); + unsigned long flags; + + spin_lock_irqsave(&h->lock, flags); + __clk_hfpll_disable(h); + spin_unlock_irqrestore(&h->lock, flags); +} + +static long clk_hfpll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct clk_hfpll *h = to_clk_hfpll(hw); + struct hfpll_data const *hd = h->d; + unsigned long rrate; + + rate = clamp(rate, hd->min_rate, hd->max_rate); + + rrate = DIV_ROUND_UP(rate, *parent_rate) * *parent_rate; + if (rrate > hd->max_rate) + rrate -= *parent_rate; + + return rrate; +} + +/* + * For optimization reasons, assumes no downstream clocks are actively using + * it. + */ +static int clk_hfpll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_hfpll *h = to_clk_hfpll(hw); + struct hfpll_data const *hd = h->d; + struct regmap *regmap = h->clkr.regmap; + unsigned long flags; + u32 l_val, val; + bool enabled; + + l_val = rate / parent_rate; + + spin_lock_irqsave(&h->lock, flags); + + enabled = __clk_is_enabled(hw->clk); + if (enabled) + __clk_hfpll_disable(h); + + /* Pick the right VCO. */ + if (hd->user_reg && hd->user_vco_mask) { + regmap_read(regmap, hd->user_reg, &val); + if (rate <= hd->low_vco_max_rate) + val &= ~hd->user_vco_mask; + else + val |= hd->user_vco_mask; + regmap_write(regmap, hd->user_reg, val); + } + + regmap_write(regmap, hd->l_reg, l_val); + + if (enabled) + __clk_hfpll_enable(hw); + + spin_unlock_irqrestore(&h->lock, flags); + + return 0; +} + +static unsigned long clk_hfpll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_hfpll *h = to_clk_hfpll(hw); + struct hfpll_data const *hd = h->d; + struct regmap *regmap = h->clkr.regmap; + u32 l_val; + + regmap_read(regmap, hd->l_reg, &l_val); + + return l_val * parent_rate; +} + +static void clk_hfpll_init(struct clk_hw *hw) +{ + struct clk_hfpll *h = to_clk_hfpll(hw); + struct hfpll_data const *hd = h->d; + struct regmap *regmap = h->clkr.regmap; + u32 mode, status; + + regmap_read(regmap, hd->mode_reg, &mode); + if (mode != (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL)) { + __clk_hfpll_init_once(hw); + return; + } + + if (hd->status_reg) { + regmap_read(regmap, hd->status_reg, &status); + if (!(status & BIT(hd->lock_bit))) { + WARN(1, "HFPLL %s is ON, but not locked!\n", + __clk_get_name(hw->clk)); + clk_hfpll_disable(hw); + __clk_hfpll_init_once(hw); + } + } +} + +static int hfpll_is_enabled(struct clk_hw *hw) +{ + struct clk_hfpll *h = to_clk_hfpll(hw); + struct hfpll_data const *hd = h->d; + struct regmap *regmap = h->clkr.regmap; + u32 mode; + + regmap_read(regmap, hd->mode_reg, &mode); + mode &= 0x7; + return mode == (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL); +} + +const struct clk_ops clk_ops_hfpll = { + .enable = clk_hfpll_enable, + .disable = clk_hfpll_disable, + .is_enabled = hfpll_is_enabled, + .round_rate = clk_hfpll_round_rate, + .set_rate = clk_hfpll_set_rate, + .recalc_rate = clk_hfpll_recalc_rate, + .init = clk_hfpll_init, +}; +EXPORT_SYMBOL_GPL(clk_ops_hfpll); diff --git a/drivers/clk/qcom/clk-hfpll.h b/drivers/clk/qcom/clk-hfpll.h new file mode 100644 index 000000000000..2a57b2fb2f2f --- /dev/null +++ b/drivers/clk/qcom/clk-hfpll.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __QCOM_CLK_HFPLL_H__ +#define __QCOM_CLK_HFPLL_H__ + +#include <linux/clk-provider.h> +#include 
<linux/spinlock.h> +#include "clk-regmap.h" + +struct hfpll_data { + u32 mode_reg; + u32 l_reg; + u32 m_reg; + u32 n_reg; + u32 user_reg; + u32 droop_reg; + u32 config_reg; + u32 status_reg; + u8 lock_bit; + + u32 droop_val; + u32 config_val; + u32 user_val; + u32 user_vco_mask; + unsigned long low_vco_max_rate; + + unsigned long min_rate; + unsigned long max_rate; +}; + +struct clk_hfpll { + struct hfpll_data const *d; + int init_done; + + struct clk_regmap clkr; + spinlock_t lock; +}; + +#define to_clk_hfpll(_hw) \ + container_of(to_clk_regmap(_hw), struct clk_hfpll, clkr) + +extern const struct clk_ops clk_ops_hfpll; + +#endif diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c new file mode 100644 index 000000000000..59f1af415b58 --- /dev/null +++ b/drivers/clk/qcom/clk-krait.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018, The Linux Foundation. All rights reserved. + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/clk-provider.h> +#include <linux/spinlock.h> + +#include <asm/krait-l2-accessors.h> + +#include "clk-krait.h" + +/* Secondary and primary muxes share the same cp15 register */ +static DEFINE_SPINLOCK(krait_clock_reg_lock); + +#define LPL_SHIFT 8 +static void __krait_mux_set_sel(struct krait_mux_clk *mux, int sel) +{ + unsigned long flags; + u32 regval; + + spin_lock_irqsave(&krait_clock_reg_lock, flags); + regval = krait_get_l2_indirect_reg(mux->offset); + regval &= ~(mux->mask << mux->shift); + regval |= (sel & mux->mask) << mux->shift; + if (mux->lpl) { + regval &= ~(mux->mask << (mux->shift + LPL_SHIFT)); + regval |= (sel & mux->mask) << (mux->shift + LPL_SHIFT); + } + krait_set_l2_indirect_reg(mux->offset, regval); + spin_unlock_irqrestore(&krait_clock_reg_lock, flags); + + /* Wait for switch to complete. */ + mb(); + udelay(1); +} + +static int krait_mux_set_parent(struct clk_hw *hw, u8 index) +{ + struct krait_mux_clk *mux = to_krait_mux_clk(hw); + u32 sel; + + sel = clk_mux_index_to_val(mux->parent_map, 0, index); + mux->en_mask = sel; + /* Don't touch mux if CPU is off as it won't work */ + if (__clk_is_enabled(hw->clk)) + __krait_mux_set_sel(mux, sel); + + mux->reparent = true; + + return 0; +} + +static u8 krait_mux_get_parent(struct clk_hw *hw) +{ + struct krait_mux_clk *mux = to_krait_mux_clk(hw); + u32 sel; + + sel = krait_get_l2_indirect_reg(mux->offset); + sel >>= mux->shift; + sel &= mux->mask; + mux->en_mask = sel; + + return clk_mux_val_to_index(hw, mux->parent_map, 0, sel); +} + +const struct clk_ops krait_mux_clk_ops = { + .set_parent = krait_mux_set_parent, + .get_parent = krait_mux_get_parent, + .determine_rate = __clk_mux_determine_rate_closest, +}; +EXPORT_SYMBOL_GPL(krait_mux_clk_ops); + +/* The divider can divide by 2, 4, 6 and 8. But we only really need div-2. 
*/ +static long krait_div2_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + *parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), rate * 2); + return DIV_ROUND_UP(*parent_rate, 2); +} + +static int krait_div2_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct krait_div2_clk *d = to_krait_div2_clk(hw); + unsigned long flags; + u32 val; + u32 mask = BIT(d->width) - 1; + + if (d->lpl) + mask = mask << (d->shift + LPL_SHIFT) | mask << d->shift; + + spin_lock_irqsave(&krait_clock_reg_lock, flags); + val = krait_get_l2_indirect_reg(d->offset); + val &= ~mask; + krait_set_l2_indirect_reg(d->offset, val); + spin_unlock_irqrestore(&krait_clock_reg_lock, flags); + + return 0; +} + +static unsigned long +krait_div2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +{ + struct krait_div2_clk *d = to_krait_div2_clk(hw); + u32 mask = BIT(d->width) - 1; + u32 div; + + div = krait_get_l2_indirect_reg(d->offset); + div >>= d->shift; + div &= mask; + div = (div + 1) * 2; + + return DIV_ROUND_UP(parent_rate, div); +} + +const struct clk_ops krait_div2_clk_ops = { + .round_rate = krait_div2_round_rate, + .set_rate = krait_div2_set_rate, + .recalc_rate = krait_div2_recalc_rate, +}; +EXPORT_SYMBOL_GPL(krait_div2_clk_ops); diff --git a/drivers/clk/qcom/clk-krait.h b/drivers/clk/qcom/clk-krait.h new file mode 100644 index 000000000000..9120bd2f5297 --- /dev/null +++ b/drivers/clk/qcom/clk-krait.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __QCOM_CLK_KRAIT_H +#define __QCOM_CLK_KRAIT_H + +#include <linux/clk-provider.h> + +struct krait_mux_clk { + unsigned int *parent_map; + u32 offset; + u32 mask; + u32 shift; + u32 en_mask; + bool lpl; + u8 safe_sel; + u8 old_index; + bool reparent; + + struct clk_hw hw; + struct notifier_block clk_nb; +}; + +#define to_krait_mux_clk(_hw) container_of(_hw, struct krait_mux_clk, hw) + +extern const struct clk_ops krait_mux_clk_ops; + +struct krait_div2_clk { + u32 offset; + u8 width; + u32 shift; + bool lpl; + + struct clk_hw hw; +}; + +#define to_krait_div2_clk(_hw) container_of(_hw, struct krait_div2_clk, hw) + +extern const struct clk_ops krait_div2_clk_ops; + +#endif diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h index dbd5a9e83554..e5eca8a1abe4 100644 --- a/drivers/clk/qcom/clk-rcg.h +++ b/drivers/clk/qcom/clk-rcg.h @@ -163,4 +163,15 @@ extern const struct clk_ops clk_pixel_ops; extern const struct clk_ops clk_gfx3d_ops; extern const struct clk_ops clk_rcg2_shared_ops; +struct clk_rcg_dfs_data { + struct clk_rcg2 *rcg; + struct clk_init_data *init; +}; + +#define DEFINE_RCG_DFS(r) \ + { .rcg = &r##_src, .init = &r##_init } + +extern int qcom_cc_register_rcg_dfs(struct regmap *regmap, + const struct clk_rcg_dfs_data *rcgs, + size_t len); #endif diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index 52208d4165f4..6e3bd195d012 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -12,6 +12,7 @@ #include <linux/delay.h> #include <linux/regmap.h> #include <linux/math64.h> +#include <linux/slab.h> #include <asm/div64.h> @@ -40,6 +41,14 @@ #define N_REG 0xc #define D_REG 0x10 +/* Dynamic Frequency Scaling */ +#define MAX_PERF_LEVEL 8 +#define SE_CMD_DFSR_OFFSET 0x14 +#define SE_CMD_DFS_EN BIT(0) +#define SE_PERF_DFSR(level) (0x1c + 0x4 * (level)) +#define SE_PERF_M_DFSR(level) (0x5c + 0x4 * (level)) +#define SE_PERF_N_DFSR(level) (0x9c + 0x4 * (level)) + enum freq_policy { FLOOR, CEIL, @@ -929,3 +938,189 @@ 
const struct clk_ops clk_rcg2_shared_ops = { .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent, }; EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops); + +/* Common APIs to be used for DFS based RCGR */ +static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l, + struct freq_tbl *f) +{ + struct clk_rcg2 *rcg = to_clk_rcg2(hw); + struct clk_hw *p; + unsigned long prate = 0; + u32 val, mask, cfg, mode; + int i, num_parents; + + regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg); + + mask = BIT(rcg->hid_width) - 1; + f->pre_div = 1; + if (cfg & mask) + f->pre_div = cfg & mask; + + cfg &= CFG_SRC_SEL_MASK; + cfg >>= CFG_SRC_SEL_SHIFT; + + num_parents = clk_hw_get_num_parents(hw); + for (i = 0; i < num_parents; i++) { + if (cfg == rcg->parent_map[i].cfg) { + f->src = rcg->parent_map[i].src; + p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i); + prate = clk_hw_get_rate(p); + } + } + + mode = cfg & CFG_MODE_MASK; + mode >>= CFG_MODE_SHIFT; + if (mode) { + mask = BIT(rcg->mnd_width) - 1; + regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l), + &val); + val &= mask; + f->m = val; + + regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l), + &val); + val = ~val; + val &= mask; + val += f->m; + f->n = val; + } + + f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div); +} + +static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg) +{ + struct freq_tbl *freq_tbl; + int i; + + /* Allocate space for 1 extra since table is NULL terminated */ + freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL); + if (!freq_tbl) + return -ENOMEM; + rcg->freq_tbl = freq_tbl; + + for (i = 0; i < MAX_PERF_LEVEL; i++) + clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i); + + return 0; +} + +static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_rcg2 *rcg = to_clk_rcg2(hw); + int ret; + + if (!rcg->freq_tbl) { + ret = clk_rcg2_dfs_populate_freq_table(rcg); + if (ret) { + pr_err("Failed to update DFS tables for %s\n", + clk_hw_get_name(hw)); + return ret; + } + } + + return clk_rcg2_determine_rate(hw, req); +} + +static unsigned long +clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +{ + struct clk_rcg2 *rcg = to_clk_rcg2(hw); + u32 level, mask, cfg, m = 0, n = 0, mode, pre_div; + + regmap_read(rcg->clkr.regmap, + rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level); + level &= GENMASK(4, 1); + level >>= 1; + + if (rcg->freq_tbl) + return rcg->freq_tbl[level].freq; + + /* + * Assume that parent_rate is actually the parent because + * we can't do any better at figuring it out when the table + * hasn't been populated yet. We only populate the table + * in determine_rate because we can't guarantee the parents + * will be registered with the framework until then. 
+ */ + regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level), + &cfg); + + mask = BIT(rcg->hid_width) - 1; + pre_div = 1; + if (cfg & mask) + pre_div = cfg & mask; + + mode = cfg & CFG_MODE_MASK; + mode >>= CFG_MODE_SHIFT; + if (mode) { + mask = BIT(rcg->mnd_width) - 1; + regmap_read(rcg->clkr.regmap, + rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m); + m &= mask; + + regmap_read(rcg->clkr.regmap, + rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n); + n = ~n; + n &= mask; + n += m; + } + + return calc_rate(parent_rate, m, n, mode, pre_div); +} + +static const struct clk_ops clk_rcg2_dfs_ops = { + .is_enabled = clk_rcg2_is_enabled, + .get_parent = clk_rcg2_get_parent, + .determine_rate = clk_rcg2_dfs_determine_rate, + .recalc_rate = clk_rcg2_dfs_recalc_rate, +}; + +static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data, + struct regmap *regmap) +{ + struct clk_rcg2 *rcg = data->rcg; + struct clk_init_data *init = data->init; + u32 val; + int ret; + + ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val); + if (ret) + return -EINVAL; + + if (!(val & SE_CMD_DFS_EN)) + return 0; + + /* + * Rate changes with consumer writing a register in + * their own I/O region + */ + init->flags |= CLK_GET_RATE_NOCACHE; + init->ops = &clk_rcg2_dfs_ops; + + rcg->freq_tbl = NULL; + + pr_debug("DFS registered for clk %s\n", init->name); + + return 0; +} + +int qcom_cc_register_rcg_dfs(struct regmap *regmap, + const struct clk_rcg_dfs_data *rcgs, size_t len) +{ + int i, ret; + + for (i = 0; i < len; i++) { + ret = clk_rcg2_enable_dfs(&rcgs[i], regmap); + if (ret) { + const char *name = rcgs[i].init->name; + + pr_err("DFS register failed for clk %s\n", name); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs); diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c index 5f61225657ab..33d1bc5c6a46 100644 --- a/drivers/clk/qcom/gcc-ipq806x.c +++ b/drivers/clk/qcom/gcc-ipq806x.c @@ -30,6 +30,7 @@ #include "clk-pll.h" #include "clk-rcg.h" #include "clk-branch.h" +#include "clk-hfpll.h" #include "reset.h" static struct clk_pll pll0 = { @@ -113,6 +114,84 @@ static struct clk_regmap pll8_vote = { }, }; +static struct hfpll_data hfpll0_data = { + .mode_reg = 0x3200, + .l_reg = 0x3208, + .m_reg = 0x320c, + .n_reg = 0x3210, + .config_reg = 0x3204, + .status_reg = 0x321c, + .config_val = 0x7845c665, + .droop_reg = 0x3214, + .droop_val = 0x0108c000, + .min_rate = 600000000UL, + .max_rate = 1800000000UL, +}; + +static struct clk_hfpll hfpll0 = { + .d = &hfpll0_data, + .clkr.hw.init = &(struct clk_init_data){ + .parent_names = (const char *[]){ "pxo" }, + .num_parents = 1, + .name = "hfpll0", + .ops = &clk_ops_hfpll, + .flags = CLK_IGNORE_UNUSED, + }, + .lock = __SPIN_LOCK_UNLOCKED(hfpll0.lock), +}; + +static struct hfpll_data hfpll1_data = { + .mode_reg = 0x3240, + .l_reg = 0x3248, + .m_reg = 0x324c, + .n_reg = 0x3250, + .config_reg = 0x3244, + .status_reg = 0x325c, + .config_val = 0x7845c665, + .droop_reg = 0x3314, + .droop_val = 0x0108c000, + .min_rate = 600000000UL, + .max_rate = 1800000000UL, +}; + +static struct clk_hfpll hfpll1 = { + .d = &hfpll1_data, + .clkr.hw.init = &(struct clk_init_data){ + .parent_names = (const char *[]){ "pxo" }, + .num_parents = 1, + .name = "hfpll1", + .ops = &clk_ops_hfpll, + .flags = CLK_IGNORE_UNUSED, + }, + .lock = __SPIN_LOCK_UNLOCKED(hfpll1.lock), +}; + +static struct hfpll_data hfpll_l2_data = { + .mode_reg = 0x3300, + .l_reg = 0x3308, + .m_reg = 0x330c, + .n_reg = 0x3310, + .config_reg = 0x3304, + 
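/*
 * The gcc-ipq806x additions around this point register the Krait CPU
 * HFPLLs (one per core plus one for the L2 cache) using the new clk-hfpll
 * support: each hfpll_data table simply records the MODE/L/M/N/CONFIG/
 * STATUS/DROOP register offsets and the allowed 600 MHz - 1.8 GHz range,
 * and the matching clk_hfpll gets clk_ops_hfpll plus its own spinlock.
 * Roughly speaking, the output rate is the programmed L value times the
 * "pxo" reference these PLLs are parented to, which is why only the
 * register offsets differ between the instances here.
 */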
.status_reg = 0x331c, + .config_val = 0x7845c665, + .droop_reg = 0x3314, + .droop_val = 0x0108c000, + .min_rate = 600000000UL, + .max_rate = 1800000000UL, +}; + +static struct clk_hfpll hfpll_l2 = { + .d = &hfpll_l2_data, + .clkr.hw.init = &(struct clk_init_data){ + .parent_names = (const char *[]){ "pxo" }, + .num_parents = 1, + .name = "hfpll_l2", + .ops = &clk_ops_hfpll, + .flags = CLK_IGNORE_UNUSED, + }, + .lock = __SPIN_LOCK_UNLOCKED(hfpll_l2.lock), +}; + static struct clk_pll pll14 = { .l_reg = 0x31c4, .m_reg = 0x31c8, @@ -2797,6 +2876,9 @@ static struct clk_regmap *gcc_ipq806x_clks[] = { [UBI32_CORE2_CLK_SRC] = &ubi32_core2_src_clk.clkr, [NSSTCM_CLK_SRC] = &nss_tcm_src.clkr, [NSSTCM_CLK] = &nss_tcm_clk.clkr, + [PLL9] = &hfpll0.clkr, + [PLL10] = &hfpll1.clkr, + [PLL12] = &hfpll_l2.clkr, }; static const struct qcom_reset_map gcc_ipq806x_resets[] = { diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c index fd495e0471bb..399474755654 100644 --- a/drivers/clk/qcom/gcc-msm8960.c +++ b/drivers/clk/qcom/gcc-msm8960.c @@ -30,6 +30,7 @@ #include "clk-pll.h" #include "clk-rcg.h" #include "clk-branch.h" +#include "clk-hfpll.h" #include "reset.h" static struct clk_pll pll3 = { @@ -86,6 +87,164 @@ static struct clk_regmap pll8_vote = { }, }; +static struct hfpll_data hfpll0_data = { + .mode_reg = 0x3200, + .l_reg = 0x3208, + .m_reg = 0x320c, + .n_reg = 0x3210, + .config_reg = 0x3204, + .status_reg = 0x321c, + .config_val = 0x7845c665, + .droop_reg = 0x3214, + .droop_val = 0x0108c000, + .min_rate = 600000000UL, + .max_rate = 1800000000UL, +}; + +static struct clk_hfpll hfpll0 = { + .d = &hfpll0_data, + .clkr.hw.init = &(struct clk_init_data){ + .parent_names = (const char *[]){ "pxo" }, + .num_parents = 1, + .name = "hfpll0", + .ops = &clk_ops_hfpll, + .flags = CLK_IGNORE_UNUSED, + }, + .lock = __SPIN_LOCK_UNLOCKED(hfpll0.lock), +}; + +static struct hfpll_data hfpll1_8064_data = { + .mode_reg = 0x3240, + .l_reg = 0x3248, + .m_reg = 0x324c, + .n_reg = 0x3250, + .config_reg = 0x3244, + .status_reg = 0x325c, + .config_val = 0x7845c665, + .droop_reg = 0x3254, + .droop_val = 0x0108c000, + .min_rate = 600000000UL, + .max_rate = 1800000000UL, +}; + +static struct hfpll_data hfpll1_data = { + .mode_reg = 0x3300, + .l_reg = 0x3308, + .m_reg = 0x330c, + .n_reg = 0x3310, + .config_reg = 0x3304, + .status_reg = 0x331c, + .config_val = 0x7845c665, + .droop_reg = 0x3314, + .droop_val = 0x0108c000, + .min_rate = 600000000UL, + .max_rate = 1800000000UL, +}; + +static struct clk_hfpll hfpll1 = { + .d = &hfpll1_data, + .clkr.hw.init = &(struct clk_init_data){ + .parent_names = (const char *[]){ "pxo" }, + .num_parents = 1, + .name = "hfpll1", + .ops = &clk_ops_hfpll, + .flags = CLK_IGNORE_UNUSED, + }, + .lock = __SPIN_LOCK_UNLOCKED(hfpll1.lock), +}; + +static struct hfpll_data hfpll2_data = { + .mode_reg = 0x3280, + .l_reg = 0x3288, + .m_reg = 0x328c, + .n_reg = 0x3290, + .config_reg = 0x3284, + .status_reg = 0x329c, + .config_val = 0x7845c665, + .droop_reg = 0x3294, + .droop_val = 0x0108c000, + .min_rate = 600000000UL, + .max_rate = 1800000000UL, +}; + +static struct clk_hfpll hfpll2 = { + .d = &hfpll2_data, + .clkr.hw.init = &(struct clk_init_data){ + .parent_names = (const char *[]){ "pxo" }, + .num_parents = 1, + .name = "hfpll2", + .ops = &clk_ops_hfpll, + .flags = CLK_IGNORE_UNUSED, + }, + .lock = __SPIN_LOCK_UNLOCKED(hfpll2.lock), +}; + +static struct hfpll_data hfpll3_data = { + .mode_reg = 0x32c0, + .l_reg = 0x32c8, + .m_reg = 0x32cc, + .n_reg = 0x32d0, + .config_reg = 0x32c4, + 
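/*
 * Note on the msm8960 tables here: MSM8960 and APQ8064 share this GCC
 * driver but place some of the Krait HFPLLs at different offsets.  Per the
 * data in this file, HFPLL1 sits at 0x3300 and the L2 HFPLL at 0x3400 on
 * MSM8960, while APQ8064 uses 0x3240 and 0x3300 (and additionally has
 * HFPLL2/HFPLL3 for its extra cores).  That is why hfpll1 and hfpll_l2
 * default to the MSM8960 data and the probe function further down swaps
 * their .d pointers to the *_8064_data variants when it matches the
 * APQ8064 descriptor.
 */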
.status_reg = 0x32dc, + .config_val = 0x7845c665, + .droop_reg = 0x32d4, + .droop_val = 0x0108c000, + .min_rate = 600000000UL, + .max_rate = 1800000000UL, +}; + +static struct clk_hfpll hfpll3 = { + .d = &hfpll3_data, + .clkr.hw.init = &(struct clk_init_data){ + .parent_names = (const char *[]){ "pxo" }, + .num_parents = 1, + .name = "hfpll3", + .ops = &clk_ops_hfpll, + .flags = CLK_IGNORE_UNUSED, + }, + .lock = __SPIN_LOCK_UNLOCKED(hfpll3.lock), +}; + +static struct hfpll_data hfpll_l2_8064_data = { + .mode_reg = 0x3300, + .l_reg = 0x3308, + .m_reg = 0x330c, + .n_reg = 0x3310, + .config_reg = 0x3304, + .status_reg = 0x331c, + .config_val = 0x7845c665, + .droop_reg = 0x3314, + .droop_val = 0x0108c000, + .min_rate = 600000000UL, + .max_rate = 1800000000UL, +}; + +static struct hfpll_data hfpll_l2_data = { + .mode_reg = 0x3400, + .l_reg = 0x3408, + .m_reg = 0x340c, + .n_reg = 0x3410, + .config_reg = 0x3404, + .status_reg = 0x341c, + .config_val = 0x7845c665, + .droop_reg = 0x3414, + .droop_val = 0x0108c000, + .min_rate = 600000000UL, + .max_rate = 1800000000UL, +}; + +static struct clk_hfpll hfpll_l2 = { + .d = &hfpll_l2_data, + .clkr.hw.init = &(struct clk_init_data){ + .parent_names = (const char *[]){ "pxo" }, + .num_parents = 1, + .name = "hfpll_l2", + .ops = &clk_ops_hfpll, + .flags = CLK_IGNORE_UNUSED, + }, + .lock = __SPIN_LOCK_UNLOCKED(hfpll_l2.lock), +}; + static struct clk_pll pll14 = { .l_reg = 0x31c4, .m_reg = 0x31c8, @@ -3107,6 +3266,9 @@ static struct clk_regmap *gcc_msm8960_clks[] = { [PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr, [PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr, [RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr, + [PLL9] = &hfpll0.clkr, + [PLL10] = &hfpll1.clkr, + [PLL12] = &hfpll_l2.clkr, }; static const struct qcom_reset_map gcc_msm8960_resets[] = { @@ -3318,6 +3480,11 @@ static struct clk_regmap *gcc_apq8064_clks[] = { [PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr, [PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr, [RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr, + [PLL9] = &hfpll0.clkr, + [PLL10] = &hfpll1.clkr, + [PLL12] = &hfpll_l2.clkr, + [PLL16] = &hfpll2.clkr, + [PLL17] = &hfpll3.clkr, }; static const struct qcom_reset_map gcc_apq8064_resets[] = { @@ -3477,6 +3644,11 @@ static int gcc_msm8960_probe(struct platform_device *pdev) if (ret) return ret; + if (match->data == &gcc_apq8064_desc) { + hfpll1.d = &hfpll1_8064_data; + hfpll_l2.d = &hfpll_l2_8064_data; + } + tsens = platform_device_register_data(&pdev->dev, "qcom-tsens", -1, NULL, 0); if (IS_ERR(tsens)) diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c index 9a3290fdd01b..9d136172c27c 100644 --- a/drivers/clk/qcom/gcc-msm8996.c +++ b/drivers/clk/qcom/gcc-msm8996.c @@ -260,6 +260,36 @@ static struct clk_alpha_pll_postdiv gpll0 = { }, }; +static struct clk_branch gcc_mmss_gpll0_div_clk = { + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mmss_gpll0_div_clk", + .parent_names = (const char *[]){ "gpll0" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_gpll0_div_clk = { + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(2), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_gpll0_div_clk", + .parent_names = (const char *[]){ "gpll0" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops + }, + }, +}; + static struct clk_alpha_pll gpll4_early = { .offset 
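/*
 * The two gcc-msm8996 branches added above (gcc_mmss_gpll0_div_clk and
 * gcc_mss_gpll0_div_clk) follow a slightly different pattern from most
 * branches in this file: they share the enable register at 0x5200c
 * (bits 0 and 2), use BRANCH_HALT_DELAY (no halt bit is polled, the driver
 * just delays) and carry CLK_SET_RATE_PARENT so rate requests fall through
 * to gpll0.  They presumably hand a gpll0-derived clock to the multimedia
 * (MMSS) and modem (MSS) subsystem clock controllers, which is why only an
 * on/off gate is modelled here.
 */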
= 0x77000, .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], @@ -2951,6 +2981,20 @@ static struct clk_branch gcc_smmu_aggre0_ahb_clk = { }, }; +static struct clk_branch gcc_aggre1_pnoc_ahb_clk = { + .halt_reg = 0x82014, + .clkr = { + .enable_reg = 0x82014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre1_pnoc_ahb_clk", + .parent_names = (const char *[]){ "periph_noc_clk_src" }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_aggre2_ufs_axi_clk = { .halt_reg = 0x83014, .clkr = { @@ -2981,6 +3025,34 @@ static struct clk_branch gcc_aggre2_usb3_axi_clk = { }, }; +static struct clk_branch gcc_dcc_ahb_clk = { + .halt_reg = 0x84004, + .clkr = { + .enable_reg = 0x84004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_dcc_ahb_clk", + .parent_names = (const char *[]){ "config_noc_clk_src" }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre0_noc_mpu_cfg_ahb_clk = { + .halt_reg = 0x85000, + .clkr = { + .enable_reg = 0x85000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre0_noc_mpu_cfg_ahb_clk", + .parent_names = (const char *[]){ "config_noc_clk_src" }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_qspi_ahb_clk = { .halt_reg = 0x8b004, .clkr = { @@ -3039,6 +3111,20 @@ static struct clk_branch gcc_hdmi_clkref_clk = { }, }; +static struct clk_branch gcc_edp_clkref_clk = { + .halt_reg = 0x88004, + .clkr = { + .enable_reg = 0x88004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_edp_clkref_clk", + .parent_names = (const char *[]){ "xo" }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_ufs_clkref_clk = { .halt_reg = 0x88008, .clkr = { @@ -3095,6 +3181,62 @@ static struct clk_branch gcc_rx1_usb2_clkref_clk = { }, }; +static struct clk_branch gcc_mss_cfg_ahb_clk = { + .halt_reg = 0x8a000, + .clkr = { + .enable_reg = 0x8a000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_cfg_ahb_clk", + .parent_names = (const char *[]){ "config_noc_clk_src" }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = { + .halt_reg = 0x8a004, + .clkr = { + .enable_reg = 0x8a004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_mnoc_bimc_axi_clk", + .parent_names = (const char *[]){ "system_noc_clk_src" }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_snoc_axi_clk = { + .halt_reg = 0x8a024, + .clkr = { + .enable_reg = 0x8a024, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_snoc_axi_clk", + .parent_names = (const char *[]){ "system_noc_clk_src" }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_q6_bimc_axi_clk = { + .halt_reg = 0x8a028, + .clkr = { + .enable_reg = 0x8a028, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_q6_bimc_axi_clk", + .parent_names = (const char *[]){ "system_noc_clk_src" }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_hw *gcc_msm8996_hws[] = { &xo.hw, &gpll0_early_div.hw, @@ -3355,6 +3497,7 @@ static struct clk_regmap *gcc_msm8996_clocks[] = { [GCC_AGGRE0_CNOC_AHB_CLK] = &gcc_aggre0_cnoc_ahb_clk.clkr, [GCC_SMMU_AGGRE0_AXI_CLK] = &gcc_smmu_aggre0_axi_clk.clkr, 
[GCC_SMMU_AGGRE0_AHB_CLK] = &gcc_smmu_aggre0_ahb_clk.clkr, + [GCC_AGGRE1_PNOC_AHB_CLK] = &gcc_aggre1_pnoc_ahb_clk.clkr, [GCC_AGGRE2_UFS_AXI_CLK] = &gcc_aggre2_ufs_axi_clk.clkr, [GCC_AGGRE2_USB3_AXI_CLK] = &gcc_aggre2_usb3_axi_clk.clkr, [GCC_QSPI_AHB_CLK] = &gcc_qspi_ahb_clk.clkr, @@ -3365,6 +3508,15 @@ static struct clk_regmap *gcc_msm8996_clocks[] = { [GCC_PCIE_CLKREF_CLK] = &gcc_pcie_clkref_clk.clkr, [GCC_RX2_USB2_CLKREF_CLK] = &gcc_rx2_usb2_clkref_clk.clkr, [GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr, + [GCC_EDP_CLKREF_CLK] = &gcc_edp_clkref_clk.clkr, + [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr, + [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr, + [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr, + [GCC_MSS_MNOC_BIMC_AXI_CLK] = &gcc_mss_mnoc_bimc_axi_clk.clkr, + [GCC_DCC_AHB_CLK] = &gcc_dcc_ahb_clk.clkr, + [GCC_AGGRE0_NOC_MPU_CFG_AHB_CLK] = &gcc_aggre0_noc_mpu_cfg_ahb_clk.clkr, + [GCC_MMSS_GPLL0_DIV_CLK] = &gcc_mmss_gpll0_div_clk.clkr, + [GCC_MSS_GPLL0_DIV_CLK] = &gcc_mss_gpll0_div_clk.clkr, }; static struct gdsc *gcc_msm8996_gdscs[] = { diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c new file mode 100644 index 000000000000..e4ca6a45f313 --- /dev/null +++ b/drivers/clk/qcom/gcc-qcs404.c @@ -0,0 +1,2744 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/clk-provider.h> +#include <linux/regmap.h> +#include <linux/reset-controller.h> + +#include <dt-bindings/clock/qcom,gcc-qcs404.h> + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-pll.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "common.h" +#include "reset.h" + +enum { + P_CORE_BI_PLL_TEST_SE, + P_DSI0_PHY_PLL_OUT_BYTECLK, + P_DSI0_PHY_PLL_OUT_DSICLK, + P_GPLL0_OUT_AUX, + P_GPLL0_OUT_MAIN, + P_GPLL1_OUT_MAIN, + P_GPLL3_OUT_MAIN, + P_GPLL4_OUT_AUX, + P_GPLL4_OUT_MAIN, + P_GPLL6_OUT_AUX, + P_HDMI_PHY_PLL_CLK, + P_PCIE_0_PIPE_CLK, + P_SLEEP_CLK, + P_XO, +}; + +static const struct parent_map gcc_parent_map_0[] = { + { P_XO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_0[] = { + "cxo", + "gpll0_out_main", + "core_bi_pll_test_se", +}; + +static const char * const gcc_parent_names_ao_0[] = { + "cxo", + "gpll0_ao_out_main", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_1[] = { + { P_XO, 0 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_1[] = { + "cxo", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_2[] = { + { P_XO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL6_OUT_AUX, 2 }, + { P_SLEEP_CLK, 6 }, +}; + +static const char * const gcc_parent_names_2[] = { + "cxo", + "gpll0_out_main", + "gpll6_out_aux", + "sleep_clk", +}; + +static const struct parent_map gcc_parent_map_3[] = { + { P_XO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL6_OUT_AUX, 2 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_3[] = { + "cxo", + "gpll0_out_main", + "gpll6_out_aux", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_4[] = { + { P_XO, 0 }, + { P_GPLL1_OUT_MAIN, 1 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_4[] = { + "cxo", + "gpll1_out_main", + "core_bi_pll_test_se", +}; + +static const struct parent_map 
gcc_parent_map_5[] = { + { P_XO, 0 }, + { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 }, + { P_GPLL0_OUT_AUX, 2 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_5[] = { + "cxo", + "dsi0pll_byteclk_src", + "gpll0_out_aux", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_6[] = { + { P_XO, 0 }, + { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, + { P_GPLL0_OUT_AUX, 3 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_6[] = { + "cxo", + "dsi0_phy_pll_out_byteclk", + "gpll0_out_aux", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_7[] = { + { P_XO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL3_OUT_MAIN, 2 }, + { P_GPLL6_OUT_AUX, 3 }, + { P_GPLL4_OUT_AUX, 4 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_7[] = { + "cxo", + "gpll0_out_main", + "gpll3_out_main", + "gpll6_out_aux", + "gpll4_out_aux", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_8[] = { + { P_XO, 0 }, + { P_HDMI_PHY_PLL_CLK, 1 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_8[] = { + "cxo", + "hdmi_phy_pll_clk", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_9[] = { + { P_XO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_DSI0_PHY_PLL_OUT_DSICLK, 2 }, + { P_GPLL6_OUT_AUX, 3 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_9[] = { + "cxo", + "gpll0_out_main", + "dsi0_phy_pll_out_dsiclk", + "gpll6_out_aux", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_10[] = { + { P_XO, 0 }, + { P_SLEEP_CLK, 1 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_10[] = { + "cxo", + "sleep_clk", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_11[] = { + { P_XO, 0 }, + { P_PCIE_0_PIPE_CLK, 1 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_11[] = { + "cxo", + "pcie_0_pipe_clk", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_12[] = { + { P_XO, 0 }, + { P_DSI0_PHY_PLL_OUT_DSICLK, 1 }, + { P_GPLL0_OUT_AUX, 2 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_12[] = { + "cxo", + "dsi0pll_pclk_src", + "gpll0_out_aux", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_13[] = { + { P_XO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL4_OUT_MAIN, 2 }, + { P_GPLL6_OUT_AUX, 3 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_13[] = { + "cxo", + "gpll0_out_main", + "gpll4_out_main", + "gpll6_out_aux", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_14[] = { + { P_XO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL4_OUT_AUX, 2 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_14[] = { + "cxo", + "gpll0_out_main", + "gpll4_out_aux", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_15[] = { + { P_XO, 0 }, + { P_GPLL0_OUT_AUX, 2 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_15[] = { + "cxo", + "gpll0_out_aux", + "core_bi_pll_test_se", +}; + +static struct clk_fixed_factor cxo = { + .mult = 1, + .div = 1, + .hw.init = &(struct clk_init_data){ + .name = "cxo", + .parent_names = (const char *[]){ "xo_board" }, + .num_parents = 1, + .ops = &clk_fixed_factor_ops, + }, +}; + +static struct clk_alpha_pll gpll0_sleep_clk_src = { + .offset = 
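/*
 * How the qcs404 parent tables above fit together: each gcc_parent_map_N
 * entry pairs an abstract P_* source with the value the RCG mux select
 * field must be programmed to, and the matching gcc_parent_names_N array
 * lists the parent clock names in the same index order.  For example, in
 * gcc_parent_map_0 the select value 1 is P_GPLL0_OUT_MAIN, and index 1 of
 * gcc_parent_names_0 is "gpll0_out_main"; clk_rcg2's get_parent relies on
 * this pairing when it translates the CFG source field back into a parent
 * index.  The "_ao_" name table is the same mux described in terms of the
 * always-on gpll0_ao_out_main, used by the APSS AHB clock below.
 */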
0x21000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .clkr = { + .enable_reg = 0x45008, + .enable_mask = BIT(23), + .enable_is_inverted = true, + .hw.init = &(struct clk_init_data){ + .name = "gpll0_sleep_clk_src", + .parent_names = (const char *[]){ "cxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + }, + }, +}; + +static struct clk_alpha_pll gpll0_out_main = { + .offset = 0x21000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .flags = SUPPORTS_FSM_MODE, + .clkr = { + .enable_reg = 0x45000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpll0_out_main", + .parent_names = (const char *[]) + { "gpll0_sleep_clk_src" }, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + }, + }, +}; + +static struct clk_alpha_pll gpll0_ao_out_main = { + .offset = 0x21000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .flags = SUPPORTS_FSM_MODE, + .clkr = { + .enable_reg = 0x45000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpll0_ao_out_main", + .parent_names = (const char *[]){ "cxo" }, + .num_parents = 1, + .flags = CLK_IS_CRITICAL, + .ops = &clk_alpha_pll_ops, + }, + }, +}; + +static struct clk_alpha_pll gpll1_out_main = { + .offset = 0x20000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .clkr = { + .enable_reg = 0x45000, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gpll1_out_main", + .parent_names = (const char *[]){ "cxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + }, + }, +}; + +/* 930MHz configuration */ +static const struct alpha_pll_config gpll3_config = { + .l = 48, + .alpha = 0x0, + .alpha_en_mask = BIT(24), + .post_div_mask = 0xf << 8, + .post_div_val = 0x1 << 8, + .vco_mask = 0x3 << 20, + .main_output_mask = 0x1, + .config_ctl_val = 0x4001055b, +}; + +static const struct pll_vco gpll3_vco[] = { + { 700000000, 1400000000, 0 }, +}; + +static struct clk_alpha_pll gpll3_out_main = { + .offset = 0x22000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .vco_table = gpll3_vco, + .num_vco = ARRAY_SIZE(gpll3_vco), + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "gpll3_out_main", + .parent_names = (const char *[]){ "cxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + }, + }, +}; + +static struct clk_alpha_pll gpll4_out_main = { + .offset = 0x24000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .clkr = { + .enable_reg = 0x45000, + .enable_mask = BIT(5), + .hw.init = &(struct clk_init_data){ + .name = "gpll4_out_main", + .parent_names = (const char *[]){ "cxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + }, + }, +}; + +static struct clk_pll gpll6 = { + .l_reg = 0x37004, + .m_reg = 0x37008, + .n_reg = 0x3700C, + .config_reg = 0x37014, + .mode_reg = 0x37000, + .status_reg = 0x3701C, + .status_bit = 17, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpll6", + .parent_names = (const char *[]){ "cxo" }, + .num_parents = 1, + .ops = &clk_pll_ops, + }, +}; + +static struct clk_regmap gpll6_out_aux = { + .enable_reg = 0x45000, + .enable_mask = BIT(7), + .hw.init = &(struct clk_init_data){ + .name = "gpll6_out_aux", + .parent_names = (const char *[]){ "gpll6" }, + .num_parents = 1, + .ops = &clk_pll_vote_ops, + }, +}; + +static const struct freq_tbl ftbl_apss_ahb_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0), + F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0), + { } +}; + +static struct clk_rcg2 apss_ahb_clk_src = { + 
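/*
 * Reading the frequency tables in this file: F(rate, src, pre_div, m, n)
 * (from drivers/clk/qcom/common.h) stores the pre-divider as 2*pre_div-1,
 * which is what lets half-step dividers such as 4.5 or 3.5 appear in the
 * tables further down.  The integer entries are easy to check against the
 * 800 MHz gpll0_out_main implied by this table: F(133333333,
 * P_GPLL0_OUT_MAIN, 6, 0, 0) is simply 800 MHz / 6, and m = n = 0 means
 * the MND counter is bypassed.
 */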
.cmd_rcgr = 0x46000, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_apss_ahb_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "apss_ahb_clk_src", + .parent_names = gcc_parent_names_ao_0, + .num_parents = 3, + .flags = CLK_IS_CRITICAL, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_blsp1_qup0_i2c_apps_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0), + { } +}; + +static struct clk_rcg2 blsp1_qup0_i2c_apps_clk_src = { + .cmd_rcgr = 0x602c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_blsp1_qup0_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup0_i2c_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_blsp1_qup0_spi_apps_clk_src[] = { + F(960000, P_XO, 10, 1, 2), + F(4800000, P_XO, 4, 0, 0), + F(9600000, P_XO, 2, 0, 0), + F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5), + F(19200000, P_XO, 1, 0, 0), + F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2), + F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0), + { } +}; + +static struct clk_rcg2 blsp1_qup0_spi_apps_clk_src = { + .cmd_rcgr = 0x6034, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_blsp1_qup0_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup0_spi_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = { + .cmd_rcgr = 0x200c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_blsp1_qup0_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup1_i2c_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = { + F(960000, P_XO, 10, 1, 2), + F(4800000, P_XO, 4, 0, 0), + F(9600000, P_XO, 2, 0, 0), + F(10480000, P_GPLL0_OUT_MAIN, 1, 3, 229), + F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5), + F(19200000, P_XO, 1, 0, 0), + F(20961000, P_GPLL0_OUT_MAIN, 1, 6, 229), + { } +}; + +static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = { + .cmd_rcgr = 0x2024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup1_spi_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = { + .cmd_rcgr = 0x3000, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_blsp1_qup0_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup2_i2c_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_blsp1_qup2_spi_apps_clk_src[] = { + F(960000, P_XO, 10, 1, 2), + F(4800000, P_XO, 4, 0, 0), + F(9600000, P_XO, 2, 0, 0), + F(15000000, P_GPLL0_OUT_MAIN, 1, 3, 160), + F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5), + F(19200000, P_XO, 1, 0, 0), + F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2), + F(30000000, P_GPLL0_OUT_MAIN, 1, 3, 80), + { } +}; + +static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = { + .cmd_rcgr = 0x3014, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = 
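/*
 * When m/n are non-zero the MND counter scales the divided source, i.e.
 * rate = (src / pre_div) * m / n.  Taking the SPI table above with the
 * 19.2 MHz XO: F(960000, P_XO, 10, 1, 2) gives 19.2 MHz / 10 * 1/2 =
 * 0.96 MHz, and F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5) gives
 * 800 MHz / 10 * 1/5 = 16 MHz.
 */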
ftbl_blsp1_qup2_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup2_spi_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = { + .cmd_rcgr = 0x4000, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_blsp1_qup0_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup3_i2c_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = { + .cmd_rcgr = 0x4024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_blsp1_qup0_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup3_spi_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = { + .cmd_rcgr = 0x5000, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_blsp1_qup0_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup4_i2c_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = { + .cmd_rcgr = 0x5024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_blsp1_qup0_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup4_spi_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_blsp1_uart0_apps_clk_src[] = { + F(3686400, P_GPLL0_OUT_MAIN, 1, 72, 15625), + F(7372800, P_GPLL0_OUT_MAIN, 1, 144, 15625), + F(14745600, P_GPLL0_OUT_MAIN, 1, 288, 15625), + F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5), + F(19200000, P_XO, 1, 0, 0), + F(24000000, P_GPLL0_OUT_MAIN, 1, 3, 100), + F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2), + F(32000000, P_GPLL0_OUT_MAIN, 1, 1, 25), + F(40000000, P_GPLL0_OUT_MAIN, 1, 1, 20), + F(46400000, P_GPLL0_OUT_MAIN, 1, 29, 500), + F(48000000, P_GPLL0_OUT_MAIN, 1, 3, 50), + F(51200000, P_GPLL0_OUT_MAIN, 1, 8, 125), + F(56000000, P_GPLL0_OUT_MAIN, 1, 7, 100), + F(58982400, P_GPLL0_OUT_MAIN, 1, 1152, 15625), + F(60000000, P_GPLL0_OUT_MAIN, 1, 3, 40), + F(64000000, P_GPLL0_OUT_MAIN, 1, 2, 25), + { } +}; + +static struct clk_rcg2 blsp1_uart0_apps_clk_src = { + .cmd_rcgr = 0x600c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_blsp1_uart0_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_uart0_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_uart1_apps_clk_src = { + .cmd_rcgr = 0x2044, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_blsp1_uart0_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_uart1_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_uart2_apps_clk_src = { + .cmd_rcgr = 0x3034, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_blsp1_uart0_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_uart2_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 3, + .ops = 
&clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_uart3_apps_clk_src = { + .cmd_rcgr = 0x4014, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_blsp1_uart0_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_uart3_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup0_i2c_apps_clk_src = { + .cmd_rcgr = 0xc00c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_blsp1_qup0_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp2_qup0_i2c_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup0_spi_apps_clk_src = { + .cmd_rcgr = 0xc024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_blsp1_qup0_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp2_qup0_spi_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_uart0_apps_clk_src = { + .cmd_rcgr = 0xc044, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_blsp1_uart0_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp2_uart0_apps_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 byte0_clk_src = { + .cmd_rcgr = 0x4d044, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_5, + .clkr.hw.init = &(struct clk_init_data){ + .name = "byte0_clk_src", + .parent_names = gcc_parent_names_5, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_byte2_ops, + }, +}; + +static const struct freq_tbl ftbl_emac_clk_src[] = { + F(5000000, P_GPLL1_OUT_MAIN, 2, 1, 50), + F(50000000, P_GPLL1_OUT_MAIN, 10, 0, 0), + F(125000000, P_GPLL1_OUT_MAIN, 4, 0, 0), + F(250000000, P_GPLL1_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 emac_clk_src = { + .cmd_rcgr = 0x4e01c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_4, + .freq_tbl = ftbl_emac_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "emac_clk_src", + .parent_names = gcc_parent_names_4, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_emac_ptp_clk_src[] = { + F(50000000, P_GPLL1_OUT_MAIN, 10, 0, 0), + F(125000000, P_GPLL1_OUT_MAIN, 4, 0, 0), + F(250000000, P_GPLL1_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 emac_ptp_clk_src = { + .cmd_rcgr = 0x4e014, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_4, + .freq_tbl = ftbl_emac_ptp_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "emac_ptp_clk_src", + .parent_names = gcc_parent_names_4, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_esc0_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 esc0_clk_src = { + .cmd_rcgr = 0x4d05c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_6, + .freq_tbl = ftbl_esc0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "esc0_clk_src", + .parent_names = gcc_parent_names_6, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gfx3d_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0), + F(80000000, P_GPLL0_OUT_MAIN, 10, 0, 0), + F(100000000, 
P_GPLL0_OUT_MAIN, 8, 0, 0), + F(160000000, P_GPLL0_OUT_MAIN, 5, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + F(228571429, P_GPLL0_OUT_MAIN, 3.5, 0, 0), + F(240000000, P_GPLL6_OUT_AUX, 4.5, 0, 0), + F(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0), + F(270000000, P_GPLL6_OUT_AUX, 4, 0, 0), + F(320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0), + F(400000000, P_GPLL0_OUT_MAIN, 2, 0, 0), + F(484800000, P_GPLL3_OUT_MAIN, 1, 0, 0), + F(523200000, P_GPLL3_OUT_MAIN, 1, 0, 0), + F(550000000, P_GPLL3_OUT_MAIN, 1, 0, 0), + F(598000000, P_GPLL3_OUT_MAIN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gfx3d_clk_src = { + .cmd_rcgr = 0x59000, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_7, + .freq_tbl = ftbl_gfx3d_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gfx3d_clk_src", + .parent_names = gcc_parent_names_7, + .num_parents = 6, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gp1_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + { } +}; + +static struct clk_rcg2 gp1_clk_src = { + .cmd_rcgr = 0x8004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gp1_clk_src", + .parent_names = gcc_parent_names_2, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gp2_clk_src = { + .cmd_rcgr = 0x9004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gp2_clk_src", + .parent_names = gcc_parent_names_2, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gp3_clk_src = { + .cmd_rcgr = 0xa004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gp3_clk_src", + .parent_names = gcc_parent_names_2, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 hdmi_app_clk_src = { + .cmd_rcgr = 0x4d0e4, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_esc0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "hdmi_app_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 hdmi_pclk_clk_src = { + .cmd_rcgr = 0x4d0dc, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_8, + .freq_tbl = ftbl_esc0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "hdmi_pclk_clk_src", + .parent_names = gcc_parent_names_8, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_mdp_clk_src[] = { + F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0), + F(80000000, P_GPLL0_OUT_MAIN, 10, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0), + F(145454545, P_GPLL0_OUT_MAIN, 5.5, 0, 0), + F(160000000, P_GPLL0_OUT_MAIN, 5, 0, 0), + F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + F(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0), + F(320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0), + { } +}; + +static struct clk_rcg2 mdp_clk_src = { + .cmd_rcgr = 0x4d014, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_9, + .freq_tbl = ftbl_mdp_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "mdp_clk_src", + .parent_names = gcc_parent_names_9, + .num_parents = 5, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl 
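/*
 * The half-step dividers in the gfx3d and mdp tables above work because of
 * the 2*pre_div-1 encoding: F(228571429, P_GPLL0_OUT_MAIN, 3.5, 0, 0)
 * stores a divider field of 6, and the RCG's rate * 2 / (field + 1)
 * arithmetic yields 800 MHz * 2/7 = 228.57 MHz; likewise the 145.45 MHz
 * mdp entry is 800 MHz divided by 5.5.
 */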
ftbl_pcie_0_aux_clk_src[] = { + F(1200000, P_XO, 16, 0, 0), + { } +}; + +static struct clk_rcg2 pcie_0_aux_clk_src = { + .cmd_rcgr = 0x3e024, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_10, + .freq_tbl = ftbl_pcie_0_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "pcie_0_aux_clk_src", + .parent_names = gcc_parent_names_10, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_pcie_0_pipe_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(125000000, P_PCIE_0_PIPE_CLK, 2, 0, 0), + F(250000000, P_PCIE_0_PIPE_CLK, 1, 0, 0), + { } +}; + +static struct clk_rcg2 pcie_0_pipe_clk_src = { + .cmd_rcgr = 0x3e01c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_11, + .freq_tbl = ftbl_pcie_0_pipe_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "pcie_0_pipe_clk_src", + .parent_names = gcc_parent_names_11, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 pclk0_clk_src = { + .cmd_rcgr = 0x4d000, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_12, + .clkr.hw.init = &(struct clk_init_data){ + .name = "pclk0_clk_src", + .parent_names = gcc_parent_names_12, + .num_parents = 4, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_pixel_ops, + }, +}; + +static const struct freq_tbl ftbl_pdm2_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(64000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0), + { } +}; + +static struct clk_rcg2 pdm2_clk_src = { + .cmd_rcgr = 0x44010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_pdm2_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "pdm2_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_sdcc1_apps_clk_src[] = { + F(144000, P_XO, 16, 3, 25), + F(400000, P_XO, 12, 1, 4), + F(20000000, P_GPLL0_OUT_MAIN, 10, 1, 4), + F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2), + F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0), + F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0), + F(192000000, P_GPLL4_OUT_MAIN, 6, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + F(384000000, P_GPLL4_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 sdcc1_apps_clk_src = { + .cmd_rcgr = 0x42004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_13, + .freq_tbl = ftbl_sdcc1_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "sdcc1_apps_clk_src", + .parent_names = gcc_parent_names_13, + .num_parents = 5, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_sdcc1_ice_core_clk_src[] = { + F(160000000, P_GPLL0_OUT_MAIN, 5, 0, 0), + F(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 sdcc1_ice_core_clk_src = { + .cmd_rcgr = 0x5d000, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_sdcc1_ice_core_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "sdcc1_ice_core_clk_src", + .parent_names = gcc_parent_names_3, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_sdcc2_apps_clk_src[] = { + F(144000, P_XO, 16, 3, 25), + F(400000, P_XO, 12, 1, 4), + F(20000000, P_GPLL0_OUT_MAIN, 10, 1, 4), + F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2), + F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0), + F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + { } +}; + +static struct clk_rcg2 sdcc2_apps_clk_src = { + 
.cmd_rcgr = 0x43004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_14, + .freq_tbl = ftbl_sdcc2_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "sdcc2_apps_clk_src", + .parent_names = gcc_parent_names_14, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 usb20_mock_utmi_clk_src = { + .cmd_rcgr = 0x41048, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_esc0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "usb20_mock_utmi_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_usb30_master_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + F(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 usb30_master_clk_src = { + .cmd_rcgr = 0x39028, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_usb30_master_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "usb30_master_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 usb30_mock_utmi_clk_src = { + .cmd_rcgr = 0x3901c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_esc0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "usb30_mock_utmi_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 usb3_phy_aux_clk_src = { + .cmd_rcgr = 0x3903c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_pcie_0_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "usb3_phy_aux_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_usb_hs_system_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(80000000, P_GPLL0_OUT_MAIN, 10, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0), + F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0), + F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0), + { } +}; + +static struct clk_rcg2 usb_hs_system_clk_src = { + .cmd_rcgr = 0x41010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_usb_hs_system_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "usb_hs_system_clk_src", + .parent_names = gcc_parent_names_3, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 vsync_clk_src = { + .cmd_rcgr = 0x4d02c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_15, + .freq_tbl = ftbl_esc0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "vsync_clk_src", + .parent_names = gcc_parent_names_15, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_branch gcc_apss_ahb_clk = { + .halt_reg = 0x4601c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(14), + .hw.init = &(struct clk_init_data){ + .name = "gcc_apss_ahb_clk", + .parent_names = (const char *[]){ + "apss_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_apss_tcu_clk = { + .halt_reg = 0x5b004, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x4500c, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_apss_tcu_clk", + .ops = 
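/*
 * Branch clock conventions used from here on: plain BRANCH_HALT branches
 * poll their own halt_reg after toggling the enable bit at enable_reg,
 * while the *_VOTED variants (e.g. gcc_apss_ahb_clk above) are enabled
 * through a bit in the shared vote registers at 0x45004 / 0x4500c, so the
 * halt status is only trusted when enabling - once this vote is dropped,
 * another master may legitimately keep the clock running.  Branches with
 * no usable status bit use BRANCH_HALT_DELAY and just wait briefly.
 */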
&clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_bimc_gfx_clk = { + .halt_reg = 0x59034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x59034, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_bimc_gfx_clk", + .ops = &clk_branch2_ops, + .parent_names = (const char *[]){ + "gcc_apss_tcu_clk", + }, + + }, + }, +}; + +static struct clk_branch gcc_bimc_gpu_clk = { + .halt_reg = 0x59030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x59030, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_bimc_gpu_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_bimc_mdss_clk = { + .halt_reg = 0x31038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x31038, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_bimc_mdss_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_ahb_clk = { + .halt_reg = 0x1008, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(10), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_dcc_clk = { + .halt_reg = 0x77004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x77004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_dcc_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_dcc_xo_clk = { + .halt_reg = 0x77008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x77008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_dcc_xo_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup0_i2c_apps_clk = { + .halt_reg = 0x6028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6028, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup0_i2c_apps_clk", + .parent_names = (const char *[]){ + "blsp1_qup0_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup0_spi_apps_clk = { + .halt_reg = 0x6024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6024, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup0_spi_apps_clk", + .parent_names = (const char *[]){ + "blsp1_qup0_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = { + .halt_reg = 0x2008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup1_i2c_apps_clk", + .parent_names = (const char *[]){ + "blsp1_qup1_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = { + .halt_reg = 0x2004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup1_spi_apps_clk", + .parent_names = (const char *[]){ + "blsp1_qup1_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = { + .halt_reg = 0x3010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x3010, + 
.enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup2_i2c_apps_clk", + .parent_names = (const char *[]){ + "blsp1_qup2_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = { + .halt_reg = 0x300c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x300c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup2_spi_apps_clk", + .parent_names = (const char *[]){ + "blsp1_qup2_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = { + .halt_reg = 0x4020, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup3_i2c_apps_clk", + .parent_names = (const char *[]){ + "blsp1_qup3_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = { + .halt_reg = 0x401c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x401c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup3_spi_apps_clk", + .parent_names = (const char *[]){ + "blsp1_qup3_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = { + .halt_reg = 0x5020, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup4_i2c_apps_clk", + .parent_names = (const char *[]){ + "blsp1_qup4_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = { + .halt_reg = 0x501c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x501c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup4_spi_apps_clk", + .parent_names = (const char *[]){ + "blsp1_qup4_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_uart0_apps_clk = { + .halt_reg = 0x6004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_uart0_apps_clk", + .parent_names = (const char *[]){ + "blsp1_uart0_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_uart1_apps_clk = { + .halt_reg = 0x203c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x203c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_uart1_apps_clk", + .parent_names = (const char *[]){ + "blsp1_uart1_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_uart2_apps_clk = { + .halt_reg = 0x302c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x302c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_uart2_apps_clk", + .parent_names = (const char *[]){ + "blsp1_uart2_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = 
&clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_uart3_apps_clk = { + .halt_reg = 0x400c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x400c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_uart3_apps_clk", + .parent_names = (const char *[]){ + "blsp1_uart3_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_ahb_clk = { + .halt_reg = 0xb008, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(20), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp2_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup0_i2c_apps_clk = { + .halt_reg = 0xc008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp2_qup0_i2c_apps_clk", + .parent_names = (const char *[]){ + "blsp2_qup0_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup0_spi_apps_clk = { + .halt_reg = 0xc004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp2_qup0_spi_apps_clk", + .parent_names = (const char *[]){ + "blsp2_qup0_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_uart0_apps_clk = { + .halt_reg = 0xc03c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc03c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp2_uart0_apps_clk", + .parent_names = (const char *[]){ + "blsp2_uart0_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_boot_rom_ahb_clk = { + .halt_reg = 0x1300c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(7), + .hw.init = &(struct clk_init_data){ + .name = "gcc_boot_rom_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_crypto_ahb_clk = { + .halt_reg = 0x16024, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_crypto_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_crypto_axi_clk = { + .halt_reg = 0x16020, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_crypto_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_crypto_clk = { + .halt_reg = 0x1601c, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(2), + .hw.init = &(struct clk_init_data){ + .name = "gcc_crypto_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_eth_axi_clk = { + .halt_reg = 0x4e010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4e010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_eth_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_eth_ptp_clk = { + .halt_reg = 0x4e004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4e004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = 
"gcc_eth_ptp_clk", + .parent_names = (const char *[]){ + "emac_ptp_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_eth_rgmii_clk = { + .halt_reg = 0x4e008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4e008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_eth_rgmii_clk", + .parent_names = (const char *[]){ + "emac_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_eth_slave_ahb_clk = { + .halt_reg = 0x4e00c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4e00c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_eth_slave_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_geni_ir_s_clk = { + .halt_reg = 0xf008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_geni_ir_s_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_geni_ir_h_clk = { + .halt_reg = 0xf004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_geni_ir_h_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gfx_tcu_clk = { + .halt_reg = 0x12020, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x4500C, + .enable_mask = BIT(2), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gfx_tcu_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gfx_tbu_clk = { + .halt_reg = 0x12010, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x4500C, + .enable_mask = BIT(3), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gfx_tbu_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp1_clk = { + .halt_reg = 0x8000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp1_clk", + .parent_names = (const char *[]){ + "gp1_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp2_clk = { + .halt_reg = 0x9000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp2_clk", + .parent_names = (const char *[]){ + "gp2_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp3_clk = { + .halt_reg = 0xa000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp3_clk", + .parent_names = (const char *[]){ + "gp3_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gtcu_ahb_clk = { + .halt_reg = 0x12044, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x4500c, + .enable_mask = BIT(13), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gtcu_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdp_tbu_clk = { + .halt_reg = 0x1201c, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x4500c, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdp_tbu_clk", + .ops = 
&clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdss_ahb_clk = { + .halt_reg = 0x4d07c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4d07c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdss_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdss_axi_clk = { + .halt_reg = 0x4d080, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4d080, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdss_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdss_byte0_clk = { + .halt_reg = 0x4d094, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4d094, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdss_byte0_clk", + .parent_names = (const char *[]){ + "byte0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdss_esc0_clk = { + .halt_reg = 0x4d098, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4d098, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdss_esc0_clk", + .parent_names = (const char *[]){ + "esc0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdss_hdmi_app_clk = { + .halt_reg = 0x4d0d8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4d0d8, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdss_hdmi_app_clk", + .parent_names = (const char *[]){ + "hdmi_app_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdss_hdmi_pclk_clk = { + .halt_reg = 0x4d0d4, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4d0d4, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdss_hdmi_pclk_clk", + .parent_names = (const char *[]){ + "hdmi_pclk_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdss_mdp_clk = { + .halt_reg = 0x4d088, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4d088, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdss_mdp_clk", + .parent_names = (const char *[]){ + "mdp_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdss_pclk0_clk = { + .halt_reg = 0x4d084, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4d084, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdss_pclk0_clk", + .parent_names = (const char *[]){ + "pclk0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdss_vsync_clk = { + .halt_reg = 0x4d090, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4d090, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdss_vsync_clk", + .parent_names = (const char *[]){ + "vsync_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_oxili_ahb_clk = { + .halt_reg = 0x59028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x59028, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_oxili_ahb_clk", + .ops = 
&clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_oxili_gfx3d_clk = { + .halt_reg = 0x59020, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x59020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_oxili_gfx3d_clk", + .parent_names = (const char *[]){ + "gfx3d_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_aux_clk = { + .halt_reg = 0x3e014, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(27), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_aux_clk", + .parent_names = (const char *[]){ + "pcie_0_aux_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_cfg_ahb_clk = { + .halt_reg = 0x3e008, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(11), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_mstr_axi_clk = { + .halt_reg = 0x3e018, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(18), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_pipe_clk = { + .halt_reg = 0x3e00c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(28), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_pipe_clk", + .parent_names = (const char *[]){ + "pcie_0_pipe_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_slv_axi_clk = { + .halt_reg = 0x3e010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(22), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcnoc_usb2_clk = { + .halt_reg = 0x27008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x27008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcnoc_usb2_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcnoc_usb3_clk = { + .halt_reg = 0x2700c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2700c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcnoc_usb3_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm2_clk = { + .halt_reg = 0x4400c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4400c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm2_clk", + .parent_names = (const char *[]){ + "pdm2_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_ahb_clk = { + .halt_reg = 0x44004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x44004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_prng_ahb_clk = { + .halt_reg = 0x13004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(8), + .hw.init = &(struct 
clk_init_data){ + .name = "gcc_prng_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* PWM clks do not have XO as parent as src clk is a balance root */ +static struct clk_branch gcc_pwm0_xo512_clk = { + .halt_reg = 0x44018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x44018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pwm0_xo512_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pwm1_xo512_clk = { + .halt_reg = 0x49004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x49004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pwm1_xo512_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pwm2_xo512_clk = { + .halt_reg = 0x4a004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4a004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pwm2_xo512_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qdss_dap_clk = { + .halt_reg = 0x29084, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(21), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qdss_dap_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_ahb_clk = { + .halt_reg = 0x4201c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4201c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_apps_clk = { + .halt_reg = 0x42018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x42018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_apps_clk", + .parent_names = (const char *[]){ + "sdcc1_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_ice_core_clk = { + .halt_reg = 0x5d014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5d014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_ice_core_clk", + .parent_names = (const char *[]){ + "sdcc1_ice_core_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc2_ahb_clk = { + .halt_reg = 0x4301c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4301c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc2_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc2_apps_clk = { + .halt_reg = 0x43018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x43018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc2_apps_clk", + .parent_names = (const char *[]){ + "sdcc2_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_smmu_cfg_clk = { + .halt_reg = 0x12038, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x3600C, + .enable_mask = BIT(12), + .hw.init = &(struct clk_init_data){ + .name = "gcc_smmu_cfg_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sys_noc_usb3_clk = { + .halt_reg = 0x26014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x26014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sys_noc_usb3_clk", + .parent_names = (const char *[]){ + 
"usb30_master_clk_src", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb_hs_inactivity_timers_clk = { + .halt_reg = 0x4100C, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4100C, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb_hs_inactivity_timers_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb20_mock_utmi_clk = { + .halt_reg = 0x41044, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x41044, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb20_mock_utmi_clk", + .parent_names = (const char *[]){ + "usb20_mock_utmi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb2a_phy_sleep_clk = { + .halt_reg = 0x4102c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4102c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb2a_phy_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_master_clk = { + .halt_reg = 0x3900c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x3900c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_master_clk", + .parent_names = (const char *[]){ + "usb30_master_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_mock_utmi_clk = { + .halt_reg = 0x39014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x39014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_mock_utmi_clk", + .parent_names = (const char *[]){ + "usb30_mock_utmi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_sleep_clk = { + .halt_reg = 0x39010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x39010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_phy_aux_clk = { + .halt_reg = 0x39044, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x39044, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_phy_aux_clk", + .parent_names = (const char *[]){ + "usb3_phy_aux_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_phy_pipe_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x39018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_phy_pipe_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb_hs_phy_cfg_ahb_clk = { + .halt_reg = 0x41030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x41030, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb_hs_phy_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb_hs_system_clk = { + .halt_reg = 0x41004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x41004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb_hs_system_clk", + .parent_names = (const char *[]){ + "usb_hs_system_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_hw 
*gcc_qcs404_hws[] = { + &cxo.hw, +}; + +static struct clk_regmap *gcc_qcs404_clocks[] = { + [GCC_APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr, + [GCC_BLSP1_QUP0_I2C_APPS_CLK_SRC] = &blsp1_qup0_i2c_apps_clk_src.clkr, + [GCC_BLSP1_QUP0_SPI_APPS_CLK_SRC] = &blsp1_qup0_spi_apps_clk_src.clkr, + [GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr, + [GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr, + [GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr, + [GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr, + [GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr, + [GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr, + [GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr, + [GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr, + [GCC_BLSP1_UART0_APPS_CLK_SRC] = &blsp1_uart0_apps_clk_src.clkr, + [GCC_BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr, + [GCC_BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr, + [GCC_BLSP1_UART3_APPS_CLK_SRC] = &blsp1_uart3_apps_clk_src.clkr, + [GCC_BLSP2_QUP0_I2C_APPS_CLK_SRC] = &blsp2_qup0_i2c_apps_clk_src.clkr, + [GCC_BLSP2_QUP0_SPI_APPS_CLK_SRC] = &blsp2_qup0_spi_apps_clk_src.clkr, + [GCC_BLSP2_UART0_APPS_CLK_SRC] = &blsp2_uart0_apps_clk_src.clkr, + [GCC_BYTE0_CLK_SRC] = &byte0_clk_src.clkr, + [GCC_EMAC_CLK_SRC] = &emac_clk_src.clkr, + [GCC_EMAC_PTP_CLK_SRC] = &emac_ptp_clk_src.clkr, + [GCC_ESC0_CLK_SRC] = &esc0_clk_src.clkr, + [GCC_APSS_AHB_CLK] = &gcc_apss_ahb_clk.clkr, + [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr, + [GCC_BIMC_MDSS_CLK] = &gcc_bimc_mdss_clk.clkr, + [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr, + [GCC_BLSP1_QUP0_I2C_APPS_CLK] = &gcc_blsp1_qup0_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP0_SPI_APPS_CLK] = &gcc_blsp1_qup0_spi_apps_clk.clkr, + [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr, + [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr, + [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr, + [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr, + [GCC_BLSP1_UART0_APPS_CLK] = &gcc_blsp1_uart0_apps_clk.clkr, + [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr, + [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr, + [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr, + [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr, + [GCC_BLSP2_QUP0_I2C_APPS_CLK] = &gcc_blsp2_qup0_i2c_apps_clk.clkr, + [GCC_BLSP2_QUP0_SPI_APPS_CLK] = &gcc_blsp2_qup0_spi_apps_clk.clkr, + [GCC_BLSP2_UART0_APPS_CLK] = &gcc_blsp2_uart0_apps_clk.clkr, + [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr, + [GCC_ETH_AXI_CLK] = &gcc_eth_axi_clk.clkr, + [GCC_ETH_PTP_CLK] = &gcc_eth_ptp_clk.clkr, + [GCC_ETH_RGMII_CLK] = &gcc_eth_rgmii_clk.clkr, + [GCC_ETH_SLAVE_AHB_CLK] = &gcc_eth_slave_ahb_clk.clkr, + [GCC_GENI_IR_S_CLK] = &gcc_geni_ir_s_clk.clkr, + [GCC_GENI_IR_H_CLK] = &gcc_geni_ir_h_clk.clkr, + [GCC_GP1_CLK] = &gcc_gp1_clk.clkr, + [GCC_GP2_CLK] = &gcc_gp2_clk.clkr, + [GCC_GP3_CLK] = &gcc_gp3_clk.clkr, + [GCC_MDSS_AHB_CLK] = &gcc_mdss_ahb_clk.clkr, + [GCC_MDSS_AXI_CLK] = &gcc_mdss_axi_clk.clkr, + [GCC_MDSS_BYTE0_CLK] = &gcc_mdss_byte0_clk.clkr, + [GCC_MDSS_ESC0_CLK] = 
&gcc_mdss_esc0_clk.clkr, + [GCC_MDSS_HDMI_APP_CLK] = &gcc_mdss_hdmi_app_clk.clkr, + [GCC_MDSS_HDMI_PCLK_CLK] = &gcc_mdss_hdmi_pclk_clk.clkr, + [GCC_MDSS_MDP_CLK] = &gcc_mdss_mdp_clk.clkr, + [GCC_MDSS_PCLK0_CLK] = &gcc_mdss_pclk0_clk.clkr, + [GCC_MDSS_VSYNC_CLK] = &gcc_mdss_vsync_clk.clkr, + [GCC_OXILI_AHB_CLK] = &gcc_oxili_ahb_clk.clkr, + [GCC_OXILI_GFX3D_CLK] = &gcc_oxili_gfx3d_clk.clkr, + [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr, + [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr, + [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr, + [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr, + [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr, + [GCC_PCNOC_USB2_CLK] = &gcc_pcnoc_usb2_clk.clkr, + [GCC_PCNOC_USB3_CLK] = &gcc_pcnoc_usb3_clk.clkr, + [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr, + [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr, + [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr, + [GCC_PWM0_XO512_CLK] = &gcc_pwm0_xo512_clk.clkr, + [GCC_PWM1_XO512_CLK] = &gcc_pwm1_xo512_clk.clkr, + [GCC_PWM2_XO512_CLK] = &gcc_pwm2_xo512_clk.clkr, + [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr, + [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr, + [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr, + [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr, + [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr, + [GCC_SYS_NOC_USB3_CLK] = &gcc_sys_noc_usb3_clk.clkr, + [GCC_USB20_MOCK_UTMI_CLK] = &gcc_usb20_mock_utmi_clk.clkr, + [GCC_USB2A_PHY_SLEEP_CLK] = &gcc_usb2a_phy_sleep_clk.clkr, + [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr, + [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr, + [GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr, + [GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr, + [GCC_USB3_PHY_PIPE_CLK] = &gcc_usb3_phy_pipe_clk.clkr, + [GCC_USB_HS_PHY_CFG_AHB_CLK] = &gcc_usb_hs_phy_cfg_ahb_clk.clkr, + [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr, + [GCC_GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr, + [GCC_GP1_CLK_SRC] = &gp1_clk_src.clkr, + [GCC_GP2_CLK_SRC] = &gp2_clk_src.clkr, + [GCC_GP3_CLK_SRC] = &gp3_clk_src.clkr, + [GCC_GPLL0_OUT_MAIN] = &gpll0_out_main.clkr, + [GCC_GPLL0_AO_OUT_MAIN] = &gpll0_ao_out_main.clkr, + [GCC_GPLL0_SLEEP_CLK_SRC] = &gpll0_sleep_clk_src.clkr, + [GCC_GPLL1_OUT_MAIN] = &gpll1_out_main.clkr, + [GCC_GPLL3_OUT_MAIN] = &gpll3_out_main.clkr, + [GCC_GPLL4_OUT_MAIN] = &gpll4_out_main.clkr, + [GCC_GPLL6] = &gpll6.clkr, + [GCC_GPLL6_OUT_AUX] = &gpll6_out_aux, + [GCC_HDMI_APP_CLK_SRC] = &hdmi_app_clk_src.clkr, + [GCC_HDMI_PCLK_CLK_SRC] = &hdmi_pclk_clk_src.clkr, + [GCC_MDP_CLK_SRC] = &mdp_clk_src.clkr, + [GCC_PCIE_0_AUX_CLK_SRC] = &pcie_0_aux_clk_src.clkr, + [GCC_PCIE_0_PIPE_CLK_SRC] = &pcie_0_pipe_clk_src.clkr, + [GCC_PCLK0_CLK_SRC] = &pclk0_clk_src.clkr, + [GCC_PDM2_CLK_SRC] = &pdm2_clk_src.clkr, + [GCC_SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr, + [GCC_SDCC1_ICE_CORE_CLK_SRC] = &sdcc1_ice_core_clk_src.clkr, + [GCC_SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr, + [GCC_USB20_MOCK_UTMI_CLK_SRC] = &usb20_mock_utmi_clk_src.clkr, + [GCC_USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr, + [GCC_USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr, + [GCC_USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr, + [GCC_USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr, + [GCC_VSYNC_CLK_SRC] = &vsync_clk_src.clkr, + [GCC_USB_HS_INACTIVITY_TIMERS_CLK] = + &gcc_usb_hs_inactivity_timers_clk.clkr, + [GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr, + [GCC_GTCU_AHB_CLK] = &gcc_gtcu_ahb_clk.clkr, + [GCC_GFX_TCU_CLK] = &gcc_gfx_tcu_clk.clkr, + [GCC_GFX_TBU_CLK] = 
&gcc_gfx_tbu_clk.clkr,
+	[GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr,
+	[GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr,
+	[GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+	[GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+	[GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+	[GCC_MDP_TBU_CLK] = &gcc_mdp_tbu_clk.clkr,
+	[GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
+	[GCC_DCC_CLK] = &gcc_dcc_clk.clkr,
+	[GCC_DCC_XO_CLK] = &gcc_dcc_xo_clk.clkr,
+};
+
+static const struct qcom_reset_map gcc_qcs404_resets[] = {
+	[GCC_GENI_IR_BCR] = { 0x0F000 },
+	[GCC_USB_HS_BCR] = { 0x41000 },
+	[GCC_USB2_HS_PHY_ONLY_BCR] = { 0x41034 },
+	[GCC_QUSB2_PHY_BCR] = { 0x4103c },
+	[GCC_USB_HS_PHY_CFG_AHB_BCR] = { 0x0000c, 1 },
+	[GCC_USB2A_PHY_BCR] = { 0x0000c, 0 },
+	[GCC_USB3_PHY_BCR] = { 0x39004 },
+	[GCC_USB_30_BCR] = { 0x39000 },
+	[GCC_USB3PHY_PHY_BCR] = { 0x39008 },
+	[GCC_PCIE_0_BCR] = { 0x3e000 },
+	[GCC_PCIE_0_PHY_BCR] = { 0x3e004 },
+	[GCC_PCIE_0_LINK_DOWN_BCR] = { 0x3e038 },
+	[GCC_PCIEPHY_0_PHY_BCR] = { 0x3e03c },
+	[GCC_EMAC_BCR] = { 0x4e000 },
+};
+
+static const struct regmap_config gcc_qcs404_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x7f000,
+	.fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_qcs404_desc = {
+	.config = &gcc_qcs404_regmap_config,
+	.clks = gcc_qcs404_clocks,
+	.num_clks = ARRAY_SIZE(gcc_qcs404_clocks),
+	.resets = gcc_qcs404_resets,
+	.num_resets = ARRAY_SIZE(gcc_qcs404_resets),
+};
+
+static const struct of_device_id gcc_qcs404_match_table[] = {
+	{ .compatible = "qcom,gcc-qcs404" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, gcc_qcs404_match_table);
+
+static int gcc_qcs404_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	int ret, i;
+
+	regmap = qcom_cc_map(pdev, &gcc_qcs404_desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	clk_alpha_pll_configure(&gpll3_out_main, regmap, &gpll3_config);
+
+	for (i = 0; i < ARRAY_SIZE(gcc_qcs404_hws); i++) {
+		ret = devm_clk_hw_register(&pdev->dev, gcc_qcs404_hws[i]);
+		if (ret)
+			return ret;
+	}
+
+	return qcom_cc_really_probe(pdev, &gcc_qcs404_desc, regmap);
+}
+
+static struct platform_driver gcc_qcs404_driver = {
+	.probe = gcc_qcs404_probe,
+	.driver = {
+		.name = "gcc-qcs404",
+		.of_match_table = gcc_qcs404_match_table,
+	},
+};
+
+static int __init gcc_qcs404_init(void)
+{
+	return platform_driver_register(&gcc_qcs404_driver);
+}
+subsys_initcall(gcc_qcs404_init);
+
+static void __exit gcc_qcs404_exit(void)
+{
+	platform_driver_unregister(&gcc_qcs404_driver);
+}
+module_exit(gcc_qcs404_exit);
+
+MODULE_DESCRIPTION("Qualcomm GCC QCS404 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
new file mode 100644
index 000000000000..ba239ea4c842
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -0,0 +1,2480 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018, Craig Tatlor.
+ */ + +#include <linux/kernel.h> +#include <linux/bitops.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/clk-provider.h> +#include <linux/regmap.h> +#include <linux/reset-controller.h> + +#include <dt-bindings/clock/qcom,gcc-sdm660.h> + +#include "common.h" +#include "clk-regmap.h" +#include "clk-alpha-pll.h" +#include "clk-rcg.h" +#include "clk-branch.h" +#include "reset.h" +#include "gdsc.h" + +#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } + +enum { + P_XO, + P_SLEEP_CLK, + P_GPLL0, + P_GPLL1, + P_GPLL4, + P_GPLL0_EARLY_DIV, + P_GPLL1_EARLY_DIV, +}; + +static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div[] = { + { P_XO, 0 }, + { P_GPLL0, 1 }, + { P_GPLL0_EARLY_DIV, 6 }, +}; + +static const char * const gcc_parent_names_xo_gpll0_gpll0_early_div[] = { + "xo", + "gpll0", + "gpll0_early_div", +}; + +static const struct parent_map gcc_parent_map_xo_gpll0[] = { + { P_XO, 0 }, + { P_GPLL0, 1 }, +}; + +static const char * const gcc_parent_names_xo_gpll0[] = { + "xo", + "gpll0", +}; + +static const struct parent_map gcc_parent_map_xo_gpll0_sleep_clk_gpll0_early_div[] = { + { P_XO, 0 }, + { P_GPLL0, 1 }, + { P_SLEEP_CLK, 5 }, + { P_GPLL0_EARLY_DIV, 6 }, +}; + +static const char * const gcc_parent_names_xo_gpll0_sleep_clk_gpll0_early_div[] = { + "xo", + "gpll0", + "sleep_clk", + "gpll0_early_div", +}; + +static const struct parent_map gcc_parent_map_xo_sleep_clk[] = { + { P_XO, 0 }, + { P_SLEEP_CLK, 5 }, +}; + +static const char * const gcc_parent_names_xo_sleep_clk[] = { + "xo", + "sleep_clk", +}; + +static const struct parent_map gcc_parent_map_xo_gpll4[] = { + { P_XO, 0 }, + { P_GPLL4, 5 }, +}; + +static const char * const gcc_parent_names_xo_gpll4[] = { + "xo", + "gpll4", +}; + +static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div[] = { + { P_XO, 0 }, + { P_GPLL0, 1 }, + { P_GPLL0_EARLY_DIV, 3 }, + { P_GPLL1, 4 }, + { P_GPLL4, 5 }, + { P_GPLL1_EARLY_DIV, 6 }, +}; + +static const char * const gcc_parent_names_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div[] = { + "xo", + "gpll0", + "gpll0_early_div", + "gpll1", + "gpll4", + "gpll1_early_div", +}; + +static const struct parent_map gcc_parent_map_xo_gpll0_gpll4_gpll0_early_div[] = { + { P_XO, 0 }, + { P_GPLL0, 1 }, + { P_GPLL4, 5 }, + { P_GPLL0_EARLY_DIV, 6 }, +}; + +static const char * const gcc_parent_names_xo_gpll0_gpll4_gpll0_early_div[] = { + "xo", + "gpll0", + "gpll4", + "gpll0_early_div", +}; + +static const struct parent_map gcc_parent_map_xo_gpll0_gpll0_early_div_gpll4[] = { + { P_XO, 0 }, + { P_GPLL0, 1 }, + { P_GPLL0_EARLY_DIV, 2 }, + { P_GPLL4, 5 }, +}; + +static const char * const gcc_parent_names_xo_gpll0_gpll0_early_div_gpll4[] = { + "xo", + "gpll0", + "gpll0_early_div", + "gpll4", +}; + +static struct clk_fixed_factor xo = { + .mult = 1, + .div = 1, + .hw.init = &(struct clk_init_data){ + .name = "xo", + .parent_names = (const char *[]){ "xo_board" }, + .num_parents = 1, + .ops = &clk_fixed_factor_ops, + }, +}; + +static struct clk_alpha_pll gpll0_early = { + .offset = 0x0, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpll0_early", + .parent_names = (const char *[]){ "xo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + }, + }, +}; + +static struct clk_fixed_factor gpll0_early_div = { + .mult = 1, + .div = 2, 
+ .hw.init = &(struct clk_init_data){ + .name = "gpll0_early_div", + .parent_names = (const char *[]){ "gpll0_early" }, + .num_parents = 1, + .ops = &clk_fixed_factor_ops, + }, +}; + +static struct clk_alpha_pll_postdiv gpll0 = { + .offset = 0x00000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpll0", + .parent_names = (const char *[]){ "gpll0_early" }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_ops, + }, +}; + +static struct clk_alpha_pll gpll1_early = { + .offset = 0x1000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gpll1_early", + .parent_names = (const char *[]){ "xo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + }, + }, +}; + +static struct clk_fixed_factor gpll1_early_div = { + .mult = 1, + .div = 2, + .hw.init = &(struct clk_init_data){ + .name = "gpll1_early_div", + .parent_names = (const char *[]){ "gpll1_early" }, + .num_parents = 1, + .ops = &clk_fixed_factor_ops, + }, +}; + +static struct clk_alpha_pll_postdiv gpll1 = { + .offset = 0x1000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpll1", + .parent_names = (const char *[]){ "gpll1_early" }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_ops, + }, +}; + +static struct clk_alpha_pll gpll4_early = { + .offset = 0x77000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gpll4_early", + .parent_names = (const char *[]){ "xo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + }, + }, +}; + +static struct clk_alpha_pll_postdiv gpll4 = { + .offset = 0x77000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], + .clkr.hw.init = &(struct clk_init_data) + { + .name = "gpll4", + .parent_names = (const char *[]) { "gpll4_early" }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_ops, + }, +}; + +static const struct freq_tbl ftbl_blsp1_qup1_i2c_apps_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(50000000, P_GPLL0, 12, 0, 0), + { } +}; + +static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = { + .cmd_rcgr = 0x19020, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup1_i2c_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = { + F(960000, P_XO, 10, 1, 2), + F(4800000, P_XO, 4, 0, 0), + F(9600000, P_XO, 2, 0, 0), + F(15000000, P_GPLL0, 10, 1, 4), + F(19200000, P_XO, 1, 0, 0), + F(25000000, P_GPLL0, 12, 1, 2), + F(50000000, P_GPLL0, 12, 0, 0), + { } +}; + +static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = { + .cmd_rcgr = 0x1900c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup1_spi_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = { + .cmd_rcgr = 0x1b020, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + 
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup2_i2c_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = { + .cmd_rcgr = 0x1b00c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup2_spi_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = { + .cmd_rcgr = 0x1d020, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup3_i2c_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = { + .cmd_rcgr = 0x1d00c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup3_spi_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = { + .cmd_rcgr = 0x1f020, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup4_i2c_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = { + .cmd_rcgr = 0x1f00c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_qup4_spi_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_blsp1_uart1_apps_clk_src[] = { + F(3686400, P_GPLL0, 1, 96, 15625), + F(7372800, P_GPLL0, 1, 192, 15625), + F(14745600, P_GPLL0, 1, 384, 15625), + F(16000000, P_GPLL0, 5, 2, 15), + F(19200000, P_XO, 1, 0, 0), + F(24000000, P_GPLL0, 5, 1, 5), + F(32000000, P_GPLL0, 1, 4, 75), + F(40000000, P_GPLL0, 15, 0, 0), + F(46400000, P_GPLL0, 1, 29, 375), + F(48000000, P_GPLL0, 12.5, 0, 0), + F(51200000, P_GPLL0, 1, 32, 375), + F(56000000, P_GPLL0, 1, 7, 75), + F(58982400, P_GPLL0, 1, 1536, 15625), + F(60000000, P_GPLL0, 10, 0, 0), + F(63157895, P_GPLL0, 9.5, 0, 0), + { } +}; + +static struct clk_rcg2 blsp1_uart1_apps_clk_src = { + .cmd_rcgr = 0x1a00c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_blsp1_uart1_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_uart1_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_uart2_apps_clk_src = { + .cmd_rcgr = 0x1c00c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = 
ftbl_blsp1_uart1_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp1_uart2_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = { + .cmd_rcgr = 0x26020, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp2_qup1_i2c_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = { + .cmd_rcgr = 0x2600c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp2_qup1_spi_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = { + .cmd_rcgr = 0x28020, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp2_qup2_i2c_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = { + .cmd_rcgr = 0x2800c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp2_qup2_spi_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = { + .cmd_rcgr = 0x2a020, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp2_qup3_i2c_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = { + .cmd_rcgr = 0x2a00c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp2_qup3_spi_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = { + .cmd_rcgr = 0x2c020, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp2_qup4_i2c_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = { + .cmd_rcgr = 0x2c00c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp2_qup4_spi_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + 
.ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_uart1_apps_clk_src = { + .cmd_rcgr = 0x2700c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_blsp1_uart1_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp2_uart1_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_uart2_apps_clk_src = { + .cmd_rcgr = 0x2900c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_blsp1_uart1_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "blsp2_uart2_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gp1_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(100000000, P_GPLL0, 6, 0, 0), + F(200000000, P_GPLL0, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gp1_clk_src = { + .cmd_rcgr = 0x64004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_sleep_clk_gpll0_early_div, + .freq_tbl = ftbl_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gp1_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_sleep_clk_gpll0_early_div, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gp2_clk_src = { + .cmd_rcgr = 0x65004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_sleep_clk_gpll0_early_div, + .freq_tbl = ftbl_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gp2_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_sleep_clk_gpll0_early_div, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gp3_clk_src = { + .cmd_rcgr = 0x66004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_sleep_clk_gpll0_early_div, + .freq_tbl = ftbl_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gp3_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_sleep_clk_gpll0_early_div, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_hmss_gpll0_clk_src[] = { + F(300000000, P_GPLL0, 2, 0, 0), + F(600000000, P_GPLL0, 1, 0, 0), + { } +}; + +static struct clk_rcg2 hmss_gpll0_clk_src = { + .cmd_rcgr = 0x4805c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_hmss_gpll0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "hmss_gpll0_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_hmss_gpll4_clk_src[] = { + F(384000000, P_GPLL4, 4, 0, 0), + F(768000000, P_GPLL4, 2, 0, 0), + F(1536000000, P_GPLL4, 1, 0, 0), + { } +}; + +static struct clk_rcg2 hmss_gpll4_clk_src = { + .cmd_rcgr = 0x48074, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll4, + .freq_tbl = ftbl_hmss_gpll4_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "hmss_gpll4_clk_src", + .parent_names = gcc_parent_names_xo_gpll4, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_hmss_rbcpr_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 hmss_rbcpr_clk_src = { + .cmd_rcgr = 0x48044, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = 
ftbl_hmss_rbcpr_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "hmss_rbcpr_clk_src", + .parent_names = gcc_parent_names_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_pdm2_clk_src[] = { + F(60000000, P_GPLL0, 10, 0, 0), + { } +}; + +static struct clk_rcg2 pdm2_clk_src = { + .cmd_rcgr = 0x33010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_pdm2_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "pdm2_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_qspi_ser_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(80200000, P_GPLL1_EARLY_DIV, 5, 0, 0), + F(160400000, P_GPLL1, 5, 0, 0), + F(267333333, P_GPLL1, 3, 0, 0), + { } +}; + +static struct clk_rcg2 qspi_ser_clk_src = { + .cmd_rcgr = 0x4d00c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div, + .freq_tbl = ftbl_qspi_ser_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "qspi_ser_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div, + .num_parents = 6, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_sdcc1_apps_clk_src[] = { + F(144000, P_XO, 16, 3, 25), + F(400000, P_XO, 12, 1, 4), + F(20000000, P_GPLL0_EARLY_DIV, 5, 1, 3), + F(25000000, P_GPLL0_EARLY_DIV, 6, 1, 2), + F(50000000, P_GPLL0_EARLY_DIV, 6, 0, 0), + F(100000000, P_GPLL0, 6, 0, 0), + F(192000000, P_GPLL4, 8, 0, 0), + F(384000000, P_GPLL4, 4, 0, 0), + { } +}; + +static struct clk_rcg2 sdcc1_apps_clk_src = { + .cmd_rcgr = 0x1602c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll4_gpll0_early_div, + .freq_tbl = ftbl_sdcc1_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "sdcc1_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll4_gpll0_early_div, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_sdcc1_ice_core_clk_src[] = { + F(75000000, P_GPLL0_EARLY_DIV, 4, 0, 0), + F(150000000, P_GPLL0, 4, 0, 0), + F(200000000, P_GPLL0, 3, 0, 0), + F(300000000, P_GPLL0, 2, 0, 0), + { } +}; + +static struct clk_rcg2 sdcc1_ice_core_clk_src = { + .cmd_rcgr = 0x16010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_sdcc1_ice_core_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "sdcc1_ice_core_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_sdcc2_apps_clk_src[] = { + F(144000, P_XO, 16, 3, 25), + F(400000, P_XO, 12, 1, 4), + F(20000000, P_GPLL0_EARLY_DIV, 5, 1, 3), + F(25000000, P_GPLL0_EARLY_DIV, 6, 1, 2), + F(50000000, P_GPLL0_EARLY_DIV, 6, 0, 0), + F(100000000, P_GPLL0, 6, 0, 0), + F(192000000, P_GPLL4, 8, 0, 0), + F(200000000, P_GPLL0, 3, 0, 0), + { } +}; + +static struct clk_rcg2 sdcc2_apps_clk_src = { + .cmd_rcgr = 0x14010, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div_gpll4, + .freq_tbl = ftbl_sdcc2_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "sdcc2_apps_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div_gpll4, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_ufs_axi_clk_src[] = { + 
F(50000000, P_GPLL0_EARLY_DIV, 6, 0, 0), + F(100000000, P_GPLL0, 6, 0, 0), + F(150000000, P_GPLL0, 4, 0, 0), + F(200000000, P_GPLL0, 3, 0, 0), + F(240000000, P_GPLL0, 2.5, 0, 0), + { } +}; + +static struct clk_rcg2 ufs_axi_clk_src = { + .cmd_rcgr = 0x75018, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_ufs_axi_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "ufs_axi_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_ufs_ice_core_clk_src[] = { + F(75000000, P_GPLL0_EARLY_DIV, 4, 0, 0), + F(150000000, P_GPLL0, 4, 0, 0), + F(300000000, P_GPLL0, 2, 0, 0), + { } +}; + +static struct clk_rcg2 ufs_ice_core_clk_src = { + .cmd_rcgr = 0x76010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_ufs_ice_core_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "ufs_ice_core_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 ufs_phy_aux_clk_src = { + .cmd_rcgr = 0x76044, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_sleep_clk, + .freq_tbl = ftbl_hmss_rbcpr_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "ufs_phy_aux_clk_src", + .parent_names = gcc_parent_names_xo_sleep_clk, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_ufs_unipro_core_clk_src[] = { + F(37500000, P_GPLL0_EARLY_DIV, 8, 0, 0), + F(75000000, P_GPLL0, 8, 0, 0), + F(150000000, P_GPLL0, 4, 0, 0), + { } +}; + +static struct clk_rcg2 ufs_unipro_core_clk_src = { + .cmd_rcgr = 0x76028, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_ufs_unipro_core_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "ufs_unipro_core_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_usb20_master_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(60000000, P_GPLL0, 10, 0, 0), + F(120000000, P_GPLL0, 5, 0, 0), + { } +}; + +static struct clk_rcg2 usb20_master_clk_src = { + .cmd_rcgr = 0x2f010, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_usb20_master_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "usb20_master_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_usb20_mock_utmi_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(60000000, P_GPLL0, 10, 0, 0), + { } +}; + +static struct clk_rcg2 usb20_mock_utmi_clk_src = { + .cmd_rcgr = 0x2f024, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_usb20_mock_utmi_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "usb20_mock_utmi_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_usb30_master_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(66666667, P_GPLL0_EARLY_DIV, 4.5, 0, 0), + F(120000000, P_GPLL0, 5, 0, 0), + F(133333333, P_GPLL0, 4.5, 0, 0), + F(150000000, P_GPLL0, 4, 0, 0), + F(200000000, P_GPLL0, 3, 0, 0), + F(240000000, P_GPLL0, 2.5, 0, 
0), + { } +}; + +static struct clk_rcg2 usb30_master_clk_src = { + .cmd_rcgr = 0xf014, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_usb30_master_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "usb30_master_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_usb30_mock_utmi_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(40000000, P_GPLL0_EARLY_DIV, 7.5, 0, 0), + F(60000000, P_GPLL0, 10, 0, 0), + { } +}; + +static struct clk_rcg2 usb30_mock_utmi_clk_src = { + .cmd_rcgr = 0xf028, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div, + .freq_tbl = ftbl_usb30_mock_utmi_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "usb30_mock_utmi_clk_src", + .parent_names = gcc_parent_names_xo_gpll0_gpll0_early_div, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_usb3_phy_aux_clk_src[] = { + F(1200000, P_XO, 16, 0, 0), + F(19200000, P_XO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 usb3_phy_aux_clk_src = { + .cmd_rcgr = 0x5000c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_xo_sleep_clk, + .freq_tbl = ftbl_usb3_phy_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "usb3_phy_aux_clk_src", + .parent_names = gcc_parent_names_xo_sleep_clk, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_branch gcc_aggre2_ufs_axi_clk = { + .halt_reg = 0x75034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x75034, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre2_ufs_axi_clk", + .parent_names = (const char *[]){ + "ufs_axi_clk_src", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre2_usb3_axi_clk = { + .halt_reg = 0xf03c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf03c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre2_usb3_axi_clk", + .parent_names = (const char *[]){ + "usb30_master_clk_src", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_bimc_gfx_clk = { + .halt_reg = 0x7106c, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x7106c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_bimc_gfx_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_bimc_hmss_axi_clk = { + .halt_reg = 0x48004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(22), + .hw.init = &(struct clk_init_data){ + .name = "gcc_bimc_hmss_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_bimc_mss_q6_axi_clk = { + .halt_reg = 0x4401c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4401c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_bimc_mss_q6_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_ahb_clk = { + .halt_reg = 0x17004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(17), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = { + .halt_reg = 0x19008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x19008, + .enable_mask = BIT(0), 
+ .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup1_i2c_apps_clk", + .parent_names = (const char *[]){ + "blsp1_qup1_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = { + .halt_reg = 0x19004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x19004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup1_spi_apps_clk", + .parent_names = (const char *[]){ + "blsp1_qup1_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = { + .halt_reg = 0x1b008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1b008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup2_i2c_apps_clk", + .parent_names = (const char *[]){ + "blsp1_qup2_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = { + .halt_reg = 0x1b004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1b004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup2_spi_apps_clk", + .parent_names = (const char *[]){ + "blsp1_qup2_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = { + .halt_reg = 0x1d008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1d008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup3_i2c_apps_clk", + .parent_names = (const char *[]){ + "blsp1_qup3_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = { + .halt_reg = 0x1d004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1d004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup3_spi_apps_clk", + .parent_names = (const char *[]){ + "blsp1_qup3_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = { + .halt_reg = 0x1f008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1f008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup4_i2c_apps_clk", + .parent_names = (const char *[]){ + "blsp1_qup4_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = { + .halt_reg = 0x1f004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1f004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_qup4_spi_apps_clk", + .parent_names = (const char *[]){ + "blsp1_qup4_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_uart1_apps_clk = { + .halt_reg = 0x1a004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1a004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_uart1_apps_clk", + .parent_names = (const char *[]){ + "blsp1_uart1_apps_clk_src", + }, + .num_parents = 1, + .flags = 
CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_uart2_apps_clk = { + .halt_reg = 0x1c004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1c004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_uart2_apps_clk", + .parent_names = (const char *[]){ + "blsp1_uart2_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_ahb_clk = { + .halt_reg = 0x25004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(15), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp2_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = { + .halt_reg = 0x26008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x26008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp2_qup1_i2c_apps_clk", + .parent_names = (const char *[]){ + "blsp2_qup1_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = { + .halt_reg = 0x26004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x26004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp2_qup1_spi_apps_clk", + .parent_names = (const char *[]){ + "blsp2_qup1_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = { + .halt_reg = 0x28008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x28008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp2_qup2_i2c_apps_clk", + .parent_names = (const char *[]){ + "blsp2_qup2_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = { + .halt_reg = 0x28004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x28004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp2_qup2_spi_apps_clk", + .parent_names = (const char *[]){ + "blsp2_qup2_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = { + .halt_reg = 0x2a008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2a008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp2_qup3_i2c_apps_clk", + .parent_names = (const char *[]){ + "blsp2_qup3_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = { + .halt_reg = 0x2a004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2a004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp2_qup3_spi_apps_clk", + .parent_names = (const char *[]){ + "blsp2_qup3_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = { + .halt_reg = 0x2c008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2c008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp2_qup4_i2c_apps_clk", + .parent_names = (const 
char *[]){ + "blsp2_qup4_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = { + .halt_reg = 0x2c004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2c004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp2_qup4_spi_apps_clk", + .parent_names = (const char *[]){ + "blsp2_qup4_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_uart1_apps_clk = { + .halt_reg = 0x27004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x27004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp2_uart1_apps_clk", + .parent_names = (const char *[]){ + "blsp2_uart1_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_uart2_apps_clk = { + .halt_reg = 0x29004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x29004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp2_uart2_apps_clk", + .parent_names = (const char *[]){ + "blsp2_uart2_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_boot_rom_ahb_clk = { + .halt_reg = 0x38004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(10), + .hw.init = &(struct clk_init_data){ + .name = "gcc_boot_rom_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cfg_noc_usb2_axi_clk = { + .halt_reg = 0x5058, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5058, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cfg_noc_usb2_axi_clk", + .parent_names = (const char *[]){ + "usb20_master_clk_src", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cfg_noc_usb3_axi_clk = { + .halt_reg = 0x5018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cfg_noc_usb3_axi_clk", + .parent_names = (const char *[]){ + "usb30_master_clk_src", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_dcc_ahb_clk = { + .halt_reg = 0x84004, + .clkr = { + .enable_reg = 0x84004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_dcc_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp1_clk = { + .halt_reg = 0x64000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x64000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp1_clk", + .parent_names = (const char *[]){ + "gp1_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp2_clk = { + .halt_reg = 0x65000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x65000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp2_clk", + .parent_names = (const char *[]){ + "gp2_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp3_clk = { + .halt_reg = 0x66000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x66000, + .enable_mask = 
BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp3_clk", + .parent_names = (const char *[]){ + "gp3_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_bimc_gfx_clk = { + .halt_reg = 0x71010, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x71010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_bimc_gfx_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_cfg_ahb_clk = { + .halt_reg = 0x71004, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x71004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_gpll0_clk = { + .halt_reg = 0x5200c, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_gpll0_clk", + .parent_names = (const char *[]){ + "gpll0", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_gpll0_div_clk = { + .halt_reg = 0x5200c, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(3), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_gpll0_div_clk", + .parent_names = (const char *[]){ + "gpll0_early_div", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_hmss_dvm_bus_clk = { + .halt_reg = 0x4808c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4808c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_hmss_dvm_bus_clk", + .ops = &clk_branch2_ops, + .flags = CLK_IGNORE_UNUSED, + }, + }, +}; + +static struct clk_branch gcc_hmss_rbcpr_clk = { + .halt_reg = 0x48008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x48008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_hmss_rbcpr_clk", + .parent_names = (const char *[]){ + "hmss_rbcpr_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mmss_gpll0_clk = { + .halt_reg = 0x5200c, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mmss_gpll0_clk", + .parent_names = (const char *[]){ + "gpll0", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mmss_gpll0_div_clk = { + .halt_reg = 0x5200c, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mmss_gpll0_div_clk", + .parent_names = (const char *[]){ + "gpll0_early_div", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = { + .halt_reg = 0x9004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mmss_noc_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mmss_sys_noc_axi_clk = { + .halt_reg = 0x9000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mmss_sys_noc_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_cfg_ahb_clk = 
{ + .halt_reg = 0x8a000, + .clkr = { + .enable_reg = 0x8a000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = { + .halt_reg = 0x8a004, + .clkr = { + .enable_reg = 0x8a004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_mnoc_bimc_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_q6_bimc_axi_clk = { + .halt_reg = 0x8a040, + .clkr = { + .enable_reg = 0x8a040, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_q6_bimc_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_snoc_axi_clk = { + .halt_reg = 0x8a03c, + .clkr = { + .enable_reg = 0x8a03c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_snoc_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm2_clk = { + .halt_reg = 0x3300c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x3300c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm2_clk", + .parent_names = (const char *[]){ + "pdm2_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_ahb_clk = { + .halt_reg = 0x33004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x33004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_prng_ahb_clk = { + .halt_reg = 0x34004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(13), + .hw.init = &(struct clk_init_data){ + .name = "gcc_prng_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qspi_ahb_clk = { + .halt_reg = 0x4d004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4d004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qspi_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qspi_ser_clk = { + .halt_reg = 0x4d008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4d008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qspi_ser_clk", + .parent_names = (const char *[]){ + "qspi_ser_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_rx0_usb2_clkref_clk = { + .halt_reg = 0x88018, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x88018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_rx0_usb2_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_rx1_usb2_clkref_clk = { + .halt_reg = 0x88014, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x88014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_rx1_usb2_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_ahb_clk = { + .halt_reg = 0x16008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x16008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_apps_clk = { + .halt_reg = 0x16004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x16004, + 
.enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_apps_clk", + .parent_names = (const char *[]){ + "sdcc1_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_ice_core_clk = { + .halt_reg = 0x1600c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1600c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_ice_core_clk", + .parent_names = (const char *[]){ + "sdcc1_ice_core_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc2_ahb_clk = { + .halt_reg = 0x14008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x14008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc2_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc2_apps_clk = { + .halt_reg = 0x14004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x14004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc2_apps_clk", + .parent_names = (const char *[]){ + "sdcc2_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_ahb_clk = { + .halt_reg = 0x7500c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x7500c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_axi_clk = { + .halt_reg = 0x75008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x75008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_axi_clk", + .parent_names = (const char *[]){ + "ufs_axi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_clkref_clk = { + .halt_reg = 0x88008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x88008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_ice_core_clk = { + .halt_reg = 0x7600c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x7600c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_ice_core_clk", + .parent_names = (const char *[]){ + "ufs_ice_core_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_aux_clk = { + .halt_reg = 0x76040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x76040, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_aux_clk", + .parent_names = (const char *[]){ + "ufs_phy_aux_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_rx_symbol_0_clk = { + .halt_reg = 0x75014, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x75014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_rx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_rx_symbol_1_clk = { + .halt_reg = 0x7605c, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x7605c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + 
.name = "gcc_ufs_rx_symbol_1_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_tx_symbol_0_clk = { + .halt_reg = 0x75010, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x75010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_tx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_unipro_core_clk = { + .halt_reg = 0x76008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x76008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_unipro_core_clk", + .parent_names = (const char *[]){ + "ufs_unipro_core_clk_src", + }, + .flags = CLK_SET_RATE_PARENT, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb20_master_clk = { + .halt_reg = 0x2f004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2f004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb20_master_clk", + .parent_names = (const char *[]){ + "usb20_master_clk_src" + }, + .flags = CLK_SET_RATE_PARENT, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb20_mock_utmi_clk = { + .halt_reg = 0x2f00c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2f00c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb20_mock_utmi_clk", + .parent_names = (const char *[]){ + "usb20_mock_utmi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb20_sleep_clk = { + .halt_reg = 0x2f008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2f008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb20_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_master_clk = { + .halt_reg = 0xf008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_master_clk", + .parent_names = (const char *[]){ + "usb30_master_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_mock_utmi_clk = { + .halt_reg = 0xf010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_mock_utmi_clk", + .parent_names = (const char *[]){ + "usb30_mock_utmi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_sleep_clk = { + .halt_reg = 0xf00c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf00c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_clkref_clk = { + .halt_reg = 0x8800c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8800c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_phy_aux_clk = { + .halt_reg = 0x50000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x50000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_phy_aux_clk", + .parent_names = (const char *[]){ + "usb3_phy_aux_clk_src", + }, + .num_parents = 1, + .flags = 
CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_phy_pipe_clk = { + .halt_reg = 0x50004, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x50004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_phy_pipe_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = { + .halt_reg = 0x6a004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6a004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb_phy_cfg_ahb2phy_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc ufs_gdsc = { + .gdscr = 0x75004, + .gds_hw_ctrl = 0x0, + .pd = { + .name = "ufs_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, +}; + +static struct gdsc usb_30_gdsc = { + .gdscr = 0xf004, + .gds_hw_ctrl = 0x0, + .pd = { + .name = "usb_30_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, +}; + +static struct gdsc pcie_0_gdsc = { + .gdscr = 0x6b004, + .gds_hw_ctrl = 0x0, + .pd = { + .name = "pcie_0_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, +}; + +static struct clk_hw *gcc_sdm660_hws[] = { + &xo.hw, + &gpll0_early_div.hw, + &gpll1_early_div.hw, +}; + +static struct clk_regmap *gcc_sdm660_clocks[] = { + [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr, + [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr, + [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr, + [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr, + [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr, + [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr, + [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr, + [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr, + [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr, + [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr, + [BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr, + [BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr, + [BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr, + [BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr, + [BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr, + [BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr, + [BLSP2_QUP4_I2C_APPS_CLK_SRC] = &blsp2_qup4_i2c_apps_clk_src.clkr, + [BLSP2_QUP4_SPI_APPS_CLK_SRC] = &blsp2_qup4_spi_apps_clk_src.clkr, + [BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr, + [BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr, + [GCC_AGGRE2_UFS_AXI_CLK] = &gcc_aggre2_ufs_axi_clk.clkr, + [GCC_AGGRE2_USB3_AXI_CLK] = &gcc_aggre2_usb3_axi_clk.clkr, + [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr, + [GCC_BIMC_HMSS_AXI_CLK] = &gcc_bimc_hmss_axi_clk.clkr, + [GCC_BIMC_MSS_Q6_AXI_CLK] = &gcc_bimc_mss_q6_axi_clk.clkr, + [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr, + [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr, + [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr, + [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr, + [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr, + 
[GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr, + [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr, + [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr, + [GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr, + [GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr, + [GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr, + [GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr, + [GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr, + [GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr, + [GCC_BLSP2_QUP4_I2C_APPS_CLK] = &gcc_blsp2_qup4_i2c_apps_clk.clkr, + [GCC_BLSP2_QUP4_SPI_APPS_CLK] = &gcc_blsp2_qup4_spi_apps_clk.clkr, + [GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr, + [GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr, + [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr, + [GCC_CFG_NOC_USB2_AXI_CLK] = &gcc_cfg_noc_usb2_axi_clk.clkr, + [GCC_CFG_NOC_USB3_AXI_CLK] = &gcc_cfg_noc_usb3_axi_clk.clkr, + [GCC_DCC_AHB_CLK] = &gcc_dcc_ahb_clk.clkr, + [GCC_GP1_CLK] = &gcc_gp1_clk.clkr, + [GCC_GP2_CLK] = &gcc_gp2_clk.clkr, + [GCC_GP3_CLK] = &gcc_gp3_clk.clkr, + [GCC_GPU_BIMC_GFX_CLK] = &gcc_gpu_bimc_gfx_clk.clkr, + [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr, + [GCC_GPU_GPLL0_CLK] = &gcc_gpu_gpll0_clk.clkr, + [GCC_GPU_GPLL0_DIV_CLK] = &gcc_gpu_gpll0_div_clk.clkr, + [GCC_HMSS_DVM_BUS_CLK] = &gcc_hmss_dvm_bus_clk.clkr, + [GCC_HMSS_RBCPR_CLK] = &gcc_hmss_rbcpr_clk.clkr, + [GCC_MMSS_GPLL0_CLK] = &gcc_mmss_gpll0_clk.clkr, + [GCC_MMSS_GPLL0_DIV_CLK] = &gcc_mmss_gpll0_div_clk.clkr, + [GCC_MMSS_NOC_CFG_AHB_CLK] = &gcc_mmss_noc_cfg_ahb_clk.clkr, + [GCC_MMSS_SYS_NOC_AXI_CLK] = &gcc_mmss_sys_noc_axi_clk.clkr, + [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr, + [GCC_MSS_MNOC_BIMC_AXI_CLK] = &gcc_mss_mnoc_bimc_axi_clk.clkr, + [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr, + [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr, + [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr, + [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr, + [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr, + [GCC_QSPI_AHB_CLK] = &gcc_qspi_ahb_clk.clkr, + [GCC_QSPI_SER_CLK] = &gcc_qspi_ser_clk.clkr, + [GCC_RX0_USB2_CLKREF_CLK] = &gcc_rx0_usb2_clkref_clk.clkr, + [GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr, + [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr, + [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr, + [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr, + [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr, + [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr, + [GCC_UFS_AHB_CLK] = &gcc_ufs_ahb_clk.clkr, + [GCC_UFS_AXI_CLK] = &gcc_ufs_axi_clk.clkr, + [GCC_UFS_CLKREF_CLK] = &gcc_ufs_clkref_clk.clkr, + [GCC_UFS_ICE_CORE_CLK] = &gcc_ufs_ice_core_clk.clkr, + [GCC_UFS_PHY_AUX_CLK] = &gcc_ufs_phy_aux_clk.clkr, + [GCC_UFS_RX_SYMBOL_0_CLK] = &gcc_ufs_rx_symbol_0_clk.clkr, + [GCC_UFS_RX_SYMBOL_1_CLK] = &gcc_ufs_rx_symbol_1_clk.clkr, + [GCC_UFS_TX_SYMBOL_0_CLK] = &gcc_ufs_tx_symbol_0_clk.clkr, + [GCC_UFS_UNIPRO_CORE_CLK] = &gcc_ufs_unipro_core_clk.clkr, + [GCC_USB20_MASTER_CLK] = &gcc_usb20_master_clk.clkr, + [GCC_USB20_MOCK_UTMI_CLK] = &gcc_usb20_mock_utmi_clk.clkr, + [GCC_USB20_SLEEP_CLK] = &gcc_usb20_sleep_clk.clkr, + [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr, + [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr, + [GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr, + [GCC_USB3_CLKREF_CLK] = &gcc_usb3_clkref_clk.clkr, + [GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr, + [GCC_USB3_PHY_PIPE_CLK] = 
&gcc_usb3_phy_pipe_clk.clkr, + [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr, + [GP1_CLK_SRC] = &gp1_clk_src.clkr, + [GP2_CLK_SRC] = &gp2_clk_src.clkr, + [GP3_CLK_SRC] = &gp3_clk_src.clkr, + [GPLL0] = &gpll0.clkr, + [GPLL0_EARLY] = &gpll0_early.clkr, + [GPLL1] = &gpll1.clkr, + [GPLL1_EARLY] = &gpll1_early.clkr, + [GPLL4] = &gpll4.clkr, + [GPLL4_EARLY] = &gpll4_early.clkr, + [HMSS_GPLL0_CLK_SRC] = &hmss_gpll0_clk_src.clkr, + [HMSS_GPLL4_CLK_SRC] = &hmss_gpll4_clk_src.clkr, + [HMSS_RBCPR_CLK_SRC] = &hmss_rbcpr_clk_src.clkr, + [PDM2_CLK_SRC] = &pdm2_clk_src.clkr, + [QSPI_SER_CLK_SRC] = &qspi_ser_clk_src.clkr, + [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr, + [SDCC1_ICE_CORE_CLK_SRC] = &sdcc1_ice_core_clk_src.clkr, + [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr, + [UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr, + [UFS_ICE_CORE_CLK_SRC] = &ufs_ice_core_clk_src.clkr, + [UFS_PHY_AUX_CLK_SRC] = &ufs_phy_aux_clk_src.clkr, + [UFS_UNIPRO_CORE_CLK_SRC] = &ufs_unipro_core_clk_src.clkr, + [USB20_MASTER_CLK_SRC] = &usb20_master_clk_src.clkr, + [USB20_MOCK_UTMI_CLK_SRC] = &usb20_mock_utmi_clk_src.clkr, + [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr, + [USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr, + [USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr, +}; + +static struct gdsc *gcc_sdm660_gdscs[] = { + [UFS_GDSC] = &ufs_gdsc, + [USB_30_GDSC] = &usb_30_gdsc, + [PCIE_0_GDSC] = &pcie_0_gdsc, +}; + +static const struct qcom_reset_map gcc_sdm660_resets[] = { + [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 }, + [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 }, + [GCC_UFS_BCR] = { 0x75000 }, + [GCC_USB3_DP_PHY_BCR] = { 0x50028 }, + [GCC_USB3_PHY_BCR] = { 0x50020 }, + [GCC_USB3PHY_PHY_BCR] = { 0x50024 }, + [GCC_USB_20_BCR] = { 0x2f000 }, + [GCC_USB_30_BCR] = { 0xf000 }, + [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 }, +}; + +static const struct regmap_config gcc_sdm660_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x94000, + .fast_io = true, +}; + +static const struct qcom_cc_desc gcc_sdm660_desc = { + .config = &gcc_sdm660_regmap_config, + .clks = gcc_sdm660_clocks, + .num_clks = ARRAY_SIZE(gcc_sdm660_clocks), + .resets = gcc_sdm660_resets, + .num_resets = ARRAY_SIZE(gcc_sdm660_resets), + .gdscs = gcc_sdm660_gdscs, + .num_gdscs = ARRAY_SIZE(gcc_sdm660_gdscs), +}; + +static const struct of_device_id gcc_sdm660_match_table[] = { + { .compatible = "qcom,gcc-sdm630" }, + { .compatible = "qcom,gcc-sdm660" }, + { } +}; +MODULE_DEVICE_TABLE(of, gcc_sdm660_match_table); + +static int gcc_sdm660_probe(struct platform_device *pdev) +{ + int i, ret; + struct regmap *regmap; + + regmap = qcom_cc_map(pdev, &gcc_sdm660_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + /* + * Set the HMSS_AHB_CLK_SLEEP_ENA bit to allow the hmss_ahb_clk to be + * turned off by hardware during certain apps low power modes. 
+ */ + ret = regmap_update_bits(regmap, 0x52008, BIT(21), BIT(21)); + if (ret) + return ret; + + /* Register the hws */ + for (i = 0; i < ARRAY_SIZE(gcc_sdm660_hws); i++) { + ret = devm_clk_hw_register(&pdev->dev, gcc_sdm660_hws[i]); + if (ret) + return ret; + } + + return qcom_cc_really_probe(pdev, &gcc_sdm660_desc, regmap); +} + +static struct platform_driver gcc_sdm660_driver = { + .probe = gcc_sdm660_probe, + .driver = { + .name = "gcc-sdm660", + .of_match_table = gcc_sdm660_match_table, + }, +}; + +static int __init gcc_sdm660_init(void) +{ + return platform_driver_register(&gcc_sdm660_driver); +} +core_initcall_sync(gcc_sdm660_init); + +static void __exit gcc_sdm660_exit(void) +{ + platform_driver_unregister(&gcc_sdm660_driver); +} +module_exit(gcc_sdm660_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("QCOM GCC sdm660 Driver"); diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c index fa1a196350f1..f133b7f5652f 100644 --- a/drivers/clk/qcom/gcc-sdm845.c +++ b/drivers/clk/qcom/gcc-sdm845.c @@ -99,22 +99,6 @@ static const char * const gcc_parent_names_4[] = { "core_bi_pll_test_se", }; -static const struct parent_map gcc_parent_map_5[] = { - { P_BI_TCXO, 0 }, - { P_GPLL0_OUT_MAIN, 1 }, - { P_GPLL4_OUT_MAIN, 5 }, - { P_GPLL0_OUT_EVEN, 6 }, - { P_CORE_BI_PLL_TEST_SE, 7 }, -}; - -static const char * const gcc_parent_names_5[] = { - "bi_tcxo", - "gpll0", - "gpll4", - "gpll0_out_even", - "core_bi_pll_test_se", -}; - static const struct parent_map gcc_parent_map_6[] = { { P_BI_TCXO, 0 }, { P_GPLL0_OUT_MAIN, 1 }, @@ -356,6 +340,28 @@ static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = { }, }; +static const struct freq_tbl ftbl_gcc_qspi_core_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), + F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_qspi_core_clk_src = { + .cmd_rcgr = 0x4b008, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qspi_core_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qspi_core_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_floor_ops, + }, +}; + static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = { F(9600000, P_BI_TCXO, 2, 0, 0), F(19200000, P_BI_TCXO, 1, 0, 0), @@ -396,18 +402,27 @@ static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = { { } }; +static struct clk_init_data gcc_qupv3_wrap0_s0_clk_init = { + .name = "gcc_qupv3_wrap0_s0_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_shared_ops, +}; + static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = { .cmd_rcgr = 0x17034, .mnd_width = 16, .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap0_s0_clk_src", - .parent_names = gcc_parent_names_0, - .num_parents = 4, - .ops = &clk_rcg2_shared_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s1_clk_init = { + .name = "gcc_qupv3_wrap0_s1_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_shared_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = { @@ -416,12 +431,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = 
"gcc_qupv3_wrap0_s1_clk_src", - .parent_names = gcc_parent_names_0, - .num_parents = 4, - .ops = &clk_rcg2_shared_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s2_clk_init = { + .name = "gcc_qupv3_wrap0_s2_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_shared_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = { @@ -430,12 +447,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap0_s2_clk_src", - .parent_names = gcc_parent_names_0, - .num_parents = 4, - .ops = &clk_rcg2_shared_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s3_clk_init = { + .name = "gcc_qupv3_wrap0_s3_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_shared_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = { @@ -444,12 +463,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap0_s3_clk_src", - .parent_names = gcc_parent_names_0, - .num_parents = 4, - .ops = &clk_rcg2_shared_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s4_clk_init = { + .name = "gcc_qupv3_wrap0_s4_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_shared_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = { @@ -458,12 +479,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap0_s4_clk_src", - .parent_names = gcc_parent_names_0, - .num_parents = 4, - .ops = &clk_rcg2_shared_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s5_clk_init = { + .name = "gcc_qupv3_wrap0_s5_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_shared_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = { @@ -472,12 +495,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap0_s5_clk_src", - .parent_names = gcc_parent_names_0, - .num_parents = 4, - .ops = &clk_rcg2_shared_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s6_clk_init = { + .name = "gcc_qupv3_wrap0_s6_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_shared_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = { @@ -486,12 +511,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap0_s6_clk_src", - .parent_names = gcc_parent_names_0, - .num_parents = 4, - .ops = &clk_rcg2_shared_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s7_clk_init = { + .name = "gcc_qupv3_wrap0_s7_clk_src", + .parent_names = gcc_parent_names_0, + 
.num_parents = 4, + .ops = &clk_rcg2_shared_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = { @@ -500,12 +527,14 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap0_s7_clk_src", - .parent_names = gcc_parent_names_0, - .num_parents = 4, - .ops = &clk_rcg2_shared_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s0_clk_init = { + .name = "gcc_qupv3_wrap1_s0_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_shared_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = { @@ -514,12 +543,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap1_s0_clk_src", - .parent_names = gcc_parent_names_0, - .num_parents = 4, - .ops = &clk_rcg2_shared_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s1_clk_init = { + .name = "gcc_qupv3_wrap1_s1_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_shared_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = { @@ -528,12 +559,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap1_s1_clk_src", - .parent_names = gcc_parent_names_0, - .num_parents = 4, - .ops = &clk_rcg2_shared_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s2_clk_init = { + .name = "gcc_qupv3_wrap1_s2_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_shared_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = { @@ -542,12 +575,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap1_s2_clk_src", - .parent_names = gcc_parent_names_0, - .num_parents = 4, - .ops = &clk_rcg2_shared_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s3_clk_init = { + .name = "gcc_qupv3_wrap1_s3_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_shared_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = { @@ -556,12 +591,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap1_s3_clk_src", - .parent_names = gcc_parent_names_0, - .num_parents = 4, - .ops = &clk_rcg2_shared_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s4_clk_init = { + .name = "gcc_qupv3_wrap1_s4_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_shared_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = { @@ -570,12 +607,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - 
.name = "gcc_qupv3_wrap1_s4_clk_src", - .parent_names = gcc_parent_names_0, - .num_parents = 4, - .ops = &clk_rcg2_shared_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s5_clk_init = { + .name = "gcc_qupv3_wrap1_s5_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_shared_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = { @@ -584,12 +623,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap1_s5_clk_src", - .parent_names = gcc_parent_names_0, - .num_parents = 4, - .ops = &clk_rcg2_shared_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s6_clk_init = { + .name = "gcc_qupv3_wrap1_s6_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_shared_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = { @@ -598,12 +639,14 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap1_s6_clk_src", - .parent_names = gcc_parent_names_0, - .num_parents = 4, - .ops = &clk_rcg2_shared_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s7_clk_init = { + .name = "gcc_qupv3_wrap1_s7_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_shared_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = { @@ -612,12 +655,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_0, .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, - .clkr.hw.init = &(struct clk_init_data){ - .name = "gcc_qupv3_wrap1_s7_clk_src", - .parent_names = gcc_parent_names_0, - .num_parents = 4, - .ops = &clk_rcg2_shared_ops, - }, + .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_init, }; static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = { @@ -1933,6 +1971,37 @@ static struct clk_branch gcc_qmip_video_ahb_clk = { }, }; +static struct clk_branch gcc_qspi_cnoc_periph_ahb_clk = { + .halt_reg = 0x4b000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4b000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qspi_cnoc_periph_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qspi_core_clk = { + .halt_reg = 0x4b004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4b004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qspi_core_clk", + .parent_names = (const char *[]){ + "gcc_qspi_core_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_qupv3_wrap0_s0_clk = { .halt_reg = 0x17030, .halt_check = BRANCH_HALT_VOTED, @@ -3381,6 +3450,9 @@ static struct clk_regmap *gcc_sdm845_clocks[] = { [GPLL4] = &gpll4.clkr, [GCC_CPUSS_DVM_BUS_CLK] = &gcc_cpuss_dvm_bus_clk.clkr, [GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr, + [GCC_QSPI_CORE_CLK_SRC] = &gcc_qspi_core_clk_src.clkr, + [GCC_QSPI_CORE_CLK] = &gcc_qspi_core_clk.clkr, + [GCC_QSPI_CNOC_PERIPH_AHB_CLK] = &gcc_qspi_cnoc_periph_ahb_clk.clkr, }; static const struct qcom_reset_map gcc_sdm845_resets[] = { @@ -3458,9 +3530,29 @@ static const struct 
of_device_id gcc_sdm845_match_table[] = { }; MODULE_DEVICE_TABLE(of, gcc_sdm845_match_table); +static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = { + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk), +}; + static int gcc_sdm845_probe(struct platform_device *pdev) { struct regmap *regmap; + int ret; regmap = qcom_cc_map(pdev, &gcc_sdm845_desc); if (IS_ERR(regmap)) @@ -3470,6 +3562,11 @@ static int gcc_sdm845_probe(struct platform_device *pdev) regmap_update_bits(regmap, 0x09ffc, 0x3, 0x3); regmap_update_bits(regmap, 0x71028, 0x3, 0x3); + ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks, + ARRAY_SIZE(gcc_dfs_clocks)); + if (ret) + return ret; + return qcom_cc_really_probe(pdev, &gcc_sdm845_desc, regmap); } diff --git a/drivers/clk/qcom/hfpll.c b/drivers/clk/qcom/hfpll.c new file mode 100644 index 000000000000..a6de7101430c --- /dev/null +++ b/drivers/clk/qcom/hfpll.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018, The Linux Foundation. All rights reserved. + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/regmap.h> + +#include "clk-regmap.h" +#include "clk-hfpll.h" + +static const struct hfpll_data hdata = { + .mode_reg = 0x00, + .l_reg = 0x04, + .m_reg = 0x08, + .n_reg = 0x0c, + .user_reg = 0x10, + .config_reg = 0x14, + .config_val = 0x430405d, + .status_reg = 0x1c, + .lock_bit = 16, + + .user_val = 0x8, + .user_vco_mask = 0x100000, + .low_vco_max_rate = 1248000000, + .min_rate = 537600000UL, + .max_rate = 2900000000UL, +}; + +static const struct of_device_id qcom_hfpll_match_table[] = { + { .compatible = "qcom,hfpll" }, + { } +}; +MODULE_DEVICE_TABLE(of, qcom_hfpll_match_table); + +static const struct regmap_config hfpll_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x30, + .fast_io = true, +}; + +static int qcom_hfpll_probe(struct platform_device *pdev) +{ + struct resource *res; + struct device *dev = &pdev->dev; + void __iomem *base; + struct regmap *regmap; + struct clk_hfpll *h; + struct clk_init_data init = { + .parent_names = (const char *[]){ "xo" }, + .num_parents = 1, + .ops = &clk_ops_hfpll, + }; + + h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL); + if (!h) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + regmap = devm_regmap_init_mmio(&pdev->dev, base, &hfpll_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + if (of_property_read_string_index(dev->of_node, "clock-output-names", + 0, &init.name)) + return -ENODEV; + + h->d = &hdata; + h->clkr.hw.init = &init; + spin_lock_init(&h->lock); + + return devm_clk_register_regmap(&pdev->dev, &h->clkr); +} + +static struct platform_driver 
qcom_hfpll_driver = { + .probe = qcom_hfpll_probe, + .driver = { + .name = "qcom-hfpll", + .of_match_table = qcom_hfpll_match_table, + }, +}; +module_platform_driver(qcom_hfpll_driver); + +MODULE_DESCRIPTION("QCOM HFPLL Clock Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:qcom-hfpll"); diff --git a/drivers/clk/qcom/kpss-xcc.c b/drivers/clk/qcom/kpss-xcc.c new file mode 100644 index 000000000000..8590b5edd19d --- /dev/null +++ b/drivers/clk/qcom/kpss-xcc.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018, The Linux Foundation. All rights reserved. + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> + +static const char *aux_parents[] = { + "pll8_vote", + "pxo", +}; + +static unsigned int aux_parent_map[] = { + 3, + 0, +}; + +static const struct of_device_id kpss_xcc_match_table[] = { + { .compatible = "qcom,kpss-acc-v1", .data = (void *)1UL }, + { .compatible = "qcom,kpss-gcc" }, + {} +}; +MODULE_DEVICE_TABLE(of, kpss_xcc_match_table); + +static int kpss_xcc_driver_probe(struct platform_device *pdev) +{ + const struct of_device_id *id; + struct clk *clk; + struct resource *res; + void __iomem *base; + const char *name; + + id = of_match_device(kpss_xcc_match_table, &pdev->dev); + if (!id) + return -ENODEV; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + if (id->data) { + if (of_property_read_string_index(pdev->dev.of_node, + "clock-output-names", + 0, &name)) + return -ENODEV; + base += 0x14; + } else { + name = "acpu_l2_aux"; + base += 0x28; + } + + clk = clk_register_mux_table(&pdev->dev, name, aux_parents, + ARRAY_SIZE(aux_parents), 0, base, 0, 0x3, + 0, aux_parent_map, NULL); + + platform_set_drvdata(pdev, clk); + + return PTR_ERR_OR_ZERO(clk); +} + +static int kpss_xcc_driver_remove(struct platform_device *pdev) +{ + clk_unregister_mux(platform_get_drvdata(pdev)); + return 0; +} + +static struct platform_driver kpss_xcc_driver = { + .probe = kpss_xcc_driver_probe, + .remove = kpss_xcc_driver_remove, + .driver = { + .name = "kpss-xcc", + .of_match_table = kpss_xcc_match_table, + }, +}; +module_platform_driver(kpss_xcc_driver); + +MODULE_DESCRIPTION("Krait Processor Sub System (KPSS) Clock Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:kpss-xcc"); diff --git a/drivers/clk/qcom/krait-cc.c b/drivers/clk/qcom/krait-cc.c new file mode 100644 index 000000000000..4d4b657d33c3 --- /dev/null +++ b/drivers/clk/qcom/krait-cc.c @@ -0,0 +1,397 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018, The Linux Foundation. All rights reserved. + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/slab.h> + +#include "clk-krait.h" + +static unsigned int sec_mux_map[] = { + 2, + 0, +}; + +static unsigned int pri_mux_map[] = { + 1, + 2, + 0, +}; + +/* + * Notifier function for switching the muxes to safe parent + * while the hfpll is getting reprogrammed. 
+ */ +static int krait_notifier_cb(struct notifier_block *nb, + unsigned long event, + void *data) +{ + int ret = 0; + struct krait_mux_clk *mux = container_of(nb, struct krait_mux_clk, + clk_nb); + /* Switch to safe parent */ + if (event == PRE_RATE_CHANGE) { + mux->old_index = krait_mux_clk_ops.get_parent(&mux->hw); + ret = krait_mux_clk_ops.set_parent(&mux->hw, mux->safe_sel); + mux->reparent = false; + /* + * By the time POST_RATE_CHANGE notifier is called, + * clk framework itself would have changed the parent for the new rate. + * Only otherwise, put back to the old parent. + */ + } else if (event == POST_RATE_CHANGE) { + if (!mux->reparent) + ret = krait_mux_clk_ops.set_parent(&mux->hw, + mux->old_index); + } + + return notifier_from_errno(ret); +} + +static int krait_notifier_register(struct device *dev, struct clk *clk, + struct krait_mux_clk *mux) +{ + int ret = 0; + + mux->clk_nb.notifier_call = krait_notifier_cb; + ret = clk_notifier_register(clk, &mux->clk_nb); + if (ret) + dev_err(dev, "failed to register clock notifier: %d\n", ret); + + return ret; +} + +static int +krait_add_div(struct device *dev, int id, const char *s, unsigned int offset) +{ + struct krait_div2_clk *div; + struct clk_init_data init = { + .num_parents = 1, + .ops = &krait_div2_clk_ops, + .flags = CLK_SET_RATE_PARENT, + }; + const char *p_names[1]; + struct clk *clk; + + div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL); + if (!div) + return -ENOMEM; + + div->width = 2; + div->shift = 6; + div->lpl = id >= 0; + div->offset = offset; + div->hw.init = &init; + + init.name = kasprintf(GFP_KERNEL, "hfpll%s_div", s); + if (!init.name) + return -ENOMEM; + + init.parent_names = p_names; + p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s); + if (!p_names[0]) { + kfree(init.name); + return -ENOMEM; + } + + clk = devm_clk_register(dev, &div->hw); + kfree(p_names[0]); + kfree(init.name); + + return PTR_ERR_OR_ZERO(clk); +} + +static int +krait_add_sec_mux(struct device *dev, int id, const char *s, + unsigned int offset, bool unique_aux) +{ + int ret; + struct krait_mux_clk *mux; + static const char *sec_mux_list[] = { + "acpu_aux", + "qsb", + }; + struct clk_init_data init = { + .parent_names = sec_mux_list, + .num_parents = ARRAY_SIZE(sec_mux_list), + .ops = &krait_mux_clk_ops, + .flags = CLK_SET_RATE_PARENT, + }; + struct clk *clk; + + mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL); + if (!mux) + return -ENOMEM; + + mux->offset = offset; + mux->lpl = id >= 0; + mux->mask = 0x3; + mux->shift = 2; + mux->parent_map = sec_mux_map; + mux->hw.init = &init; + mux->safe_sel = 0; + + init.name = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s); + if (!init.name) + return -ENOMEM; + + if (unique_aux) { + sec_mux_list[0] = kasprintf(GFP_KERNEL, "acpu%s_aux", s); + if (!sec_mux_list[0]) { + clk = ERR_PTR(-ENOMEM); + goto err_aux; + } + } + + clk = devm_clk_register(dev, &mux->hw); + + ret = krait_notifier_register(dev, clk, mux); + if (ret) + goto unique_aux; + +unique_aux: + if (unique_aux) + kfree(sec_mux_list[0]); +err_aux: + kfree(init.name); + return PTR_ERR_OR_ZERO(clk); +} + +static struct clk * +krait_add_pri_mux(struct device *dev, int id, const char *s, + unsigned int offset) +{ + int ret; + struct krait_mux_clk *mux; + const char *p_names[3]; + struct clk_init_data init = { + .parent_names = p_names, + .num_parents = ARRAY_SIZE(p_names), + .ops = &krait_mux_clk_ops, + .flags = CLK_SET_RATE_PARENT, + }; + struct clk *clk; + + mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL); + if (!mux) + return ERR_PTR(-ENOMEM); + + 
mux->mask = 0x3; + mux->shift = 0; + mux->offset = offset; + mux->lpl = id >= 0; + mux->parent_map = pri_mux_map; + mux->hw.init = &init; + mux->safe_sel = 2; + + init.name = kasprintf(GFP_KERNEL, "krait%s_pri_mux", s); + if (!init.name) + return ERR_PTR(-ENOMEM); + + p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s); + if (!p_names[0]) { + clk = ERR_PTR(-ENOMEM); + goto err_p0; + } + + p_names[1] = kasprintf(GFP_KERNEL, "hfpll%s_div", s); + if (!p_names[1]) { + clk = ERR_PTR(-ENOMEM); + goto err_p1; + } + + p_names[2] = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s); + if (!p_names[2]) { + clk = ERR_PTR(-ENOMEM); + goto err_p2; + } + + clk = devm_clk_register(dev, &mux->hw); + + ret = krait_notifier_register(dev, clk, mux); + if (ret) + goto err_p3; +err_p3: + kfree(p_names[2]); +err_p2: + kfree(p_names[1]); +err_p1: + kfree(p_names[0]); +err_p0: + kfree(init.name); + return clk; +} + +/* id < 0 for L2, otherwise id == physical CPU number */ +static struct clk *krait_add_clks(struct device *dev, int id, bool unique_aux) +{ + int ret; + unsigned int offset; + void *p = NULL; + const char *s; + struct clk *clk; + + if (id >= 0) { + offset = 0x4501 + (0x1000 * id); + s = p = kasprintf(GFP_KERNEL, "%d", id); + if (!s) + return ERR_PTR(-ENOMEM); + } else { + offset = 0x500; + s = "_l2"; + } + + ret = krait_add_div(dev, id, s, offset); + if (ret) { + clk = ERR_PTR(ret); + goto err; + } + + ret = krait_add_sec_mux(dev, id, s, offset, unique_aux); + if (ret) { + clk = ERR_PTR(ret); + goto err; + } + + clk = krait_add_pri_mux(dev, id, s, offset); +err: + kfree(p); + return clk; +} + +static struct clk *krait_of_get(struct of_phandle_args *clkspec, void *data) +{ + unsigned int idx = clkspec->args[0]; + struct clk **clks = data; + + if (idx >= 5) { + pr_err("%s: invalid clock index %d\n", __func__, idx); + return ERR_PTR(-EINVAL); + } + + return clks[idx] ? : ERR_PTR(-ENODEV); +} + +static const struct of_device_id krait_cc_match_table[] = { + { .compatible = "qcom,krait-cc-v1", (void *)1UL }, + { .compatible = "qcom,krait-cc-v2" }, + {} +}; +MODULE_DEVICE_TABLE(of, krait_cc_match_table); + +static int krait_cc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct of_device_id *id; + unsigned long cur_rate, aux_rate; + int cpu; + struct clk *clk; + struct clk **clks; + struct clk *l2_pri_mux_clk; + + id = of_match_device(krait_cc_match_table, dev); + if (!id) + return -ENODEV; + + /* Rate is 1 because 0 causes problems for __clk_mux_determine_rate */ + clk = clk_register_fixed_rate(dev, "qsb", NULL, 0, 1); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + if (!id->data) { + clk = clk_register_fixed_factor(dev, "acpu_aux", + "gpll0_vote", 0, 1, 2); + if (IS_ERR(clk)) + return PTR_ERR(clk); + } + + /* Krait configurations have at most 4 CPUs and one L2 */ + clks = devm_kcalloc(dev, 5, sizeof(*clks), GFP_KERNEL); + if (!clks) + return -ENOMEM; + + for_each_possible_cpu(cpu) { + clk = krait_add_clks(dev, cpu, id->data); + if (IS_ERR(clk)) + return PTR_ERR(clk); + clks[cpu] = clk; + } + + l2_pri_mux_clk = krait_add_clks(dev, -1, id->data); + if (IS_ERR(l2_pri_mux_clk)) + return PTR_ERR(l2_pri_mux_clk); + clks[4] = l2_pri_mux_clk; + + /* + * We don't want the CPU or L2 clocks to be turned off at late init + * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the + * refcount of these clocks. Any cpufreq/hotplug manager can assume + * that the clocks have already been prepared and enabled by the time + * they take over. 
+ */ + for_each_online_cpu(cpu) { + clk_prepare_enable(l2_pri_mux_clk); + WARN(clk_prepare_enable(clks[cpu]), + "Unable to turn on CPU%d clock", cpu); + } + + /* + * Force reinit of HFPLLs and muxes to overwrite any potential + * incorrect configuration of HFPLLs and muxes by the bootloader. + * While at it, also make sure the cores are running at known rates + * and print the current rate. + * + * The clocks are set to aux clock rate first to make sure the + * secondary mux is not sourcing off of QSB. The rate is then set to + * two different rates to force a HFPLL reinit under all + * circumstances. + */ + cur_rate = clk_get_rate(l2_pri_mux_clk); + aux_rate = 384000000; + if (cur_rate == 1) { + pr_info("L2 @ QSB rate. Forcing new rate.\n"); + cur_rate = aux_rate; + } + clk_set_rate(l2_pri_mux_clk, aux_rate); + clk_set_rate(l2_pri_mux_clk, 2); + clk_set_rate(l2_pri_mux_clk, cur_rate); + pr_info("L2 @ %lu KHz\n", clk_get_rate(l2_pri_mux_clk) / 1000); + for_each_possible_cpu(cpu) { + clk = clks[cpu]; + cur_rate = clk_get_rate(clk); + if (cur_rate == 1) { + pr_info("CPU%d @ QSB rate. Forcing new rate.\n", cpu); + cur_rate = aux_rate; + } + + clk_set_rate(clk, aux_rate); + clk_set_rate(clk, 2); + clk_set_rate(clk, cur_rate); + pr_info("CPU%d @ %lu KHz\n", cpu, clk_get_rate(clk) / 1000); + } + + of_clk_add_provider(dev->of_node, krait_of_get, clks); + + return 0; +} + +static struct platform_driver krait_cc_driver = { + .probe = krait_cc_probe, + .driver = { + .name = "krait-cc", + .of_match_table = krait_cc_match_table, + }, +}; +module_platform_driver(krait_cc_driver); + +MODULE_DESCRIPTION("Krait CPU Clock Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:krait-cc"); diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig index 9022bbe1297e..b879e3e3a6b4 100644 --- a/drivers/clk/renesas/Kconfig +++ b/drivers/clk/renesas/Kconfig @@ -1,13 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0 + config CLK_RENESAS bool "Renesas SoC clock support" if COMPILE_TEST && !ARCH_RENESAS default y if ARCH_RENESAS select CLK_EMEV2 if ARCH_EMEV2 select CLK_RZA1 if ARCH_R7S72100 + select CLK_R7S9210 if ARCH_R7S9210 select CLK_R8A73A4 if ARCH_R8A73A4 select CLK_R8A7740 if ARCH_R8A7740 - select CLK_R8A7743 if ARCH_R8A7743 + select CLK_R8A7743 if ARCH_R8A7743 || ARCH_R8A7744 select CLK_R8A7745 if ARCH_R8A7745 select CLK_R8A77470 if ARCH_R8A77470 + select CLK_R8A774A1 if ARCH_R8A774A1 + select CLK_R8A774C0 if ARCH_R8A774C0 select CLK_R8A7778 if ARCH_R8A7778 select CLK_R8A7779 if ARCH_R8A7779 select CLK_R8A7790 if ARCH_R8A7790 @@ -45,6 +50,10 @@ config CLK_RZA1 bool "RZ/A1H clock support" if COMPILE_TEST select CLK_RENESAS_CPG_MSTP +config CLK_R7S9210 + bool "RZ/A2 clock support" if COMPILE_TEST + select CLK_RENESAS_CPG_MSSR + config CLK_R8A73A4 bool "R-Mobile APE6 clock support" if COMPILE_TEST select CLK_RENESAS_CPG_MSTP @@ -67,6 +76,14 @@ config CLK_R8A77470 bool "RZ/G1C clock support" if COMPILE_TEST select CLK_RCAR_GEN2_CPG +config CLK_R8A774A1 + bool "RZ/G2M clock support" if COMPILE_TEST + select CLK_RCAR_GEN3_CPG + +config CLK_R8A774C0 + bool "RZ/G2E clock support" if COMPILE_TEST + select CLK_RCAR_GEN3_CPG + config CLK_R8A7778 bool "R-Car M1A clock support" if COMPILE_TEST select CLK_RENESAS_CPG_MSTP diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile index e4aa3d6143d2..c793e3cc9452 100644 --- a/drivers/clk/renesas/Makefile +++ b/drivers/clk/renesas/Makefile @@ -2,11 +2,14 @@ # SoC obj-$(CONFIG_CLK_EMEV2) += clk-emev2.o obj-$(CONFIG_CLK_RZA1) += clk-rz.o 
+obj-$(CONFIG_CLK_R7S9210) += r7s9210-cpg-mssr.o obj-$(CONFIG_CLK_R8A73A4) += clk-r8a73a4.o obj-$(CONFIG_CLK_R8A7740) += clk-r8a7740.o obj-$(CONFIG_CLK_R8A7743) += r8a7743-cpg-mssr.o obj-$(CONFIG_CLK_R8A7745) += r8a7745-cpg-mssr.o obj-$(CONFIG_CLK_R8A77470) += r8a77470-cpg-mssr.o +obj-$(CONFIG_CLK_R8A774A1) += r8a774a1-cpg-mssr.o +obj-$(CONFIG_CLK_R8A774C0) += r8a774c0-cpg-mssr.o obj-$(CONFIG_CLK_R8A7778) += clk-r8a7778.o obj-$(CONFIG_CLK_R8A7779) += clk-r8a7779.o obj-$(CONFIG_CLK_R8A7790) += r8a7790-cpg-mssr.o diff --git a/drivers/clk/renesas/clk-div6.c b/drivers/clk/renesas/clk-div6.c index 9febbf42c3df..57c934164306 100644 --- a/drivers/clk/renesas/clk-div6.c +++ b/drivers/clk/renesas/clk-div6.c @@ -1,13 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * r8a7790 Common Clock Framework support * * Copyright (C) 2013 Renesas Solutions Corp. * * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. */ #include <linux/clk-provider.h> @@ -312,8 +309,8 @@ static void __init cpg_div6_clock_init(struct device_node *np) num_parents = of_clk_get_parent_count(np); if (num_parents < 1) { - pr_err("%s: no parent found for %s DIV6 clock\n", - __func__, np->name); + pr_err("%s: no parent found for %pOFn DIV6 clock\n", + __func__, np); return; } @@ -324,8 +321,8 @@ static void __init cpg_div6_clock_init(struct device_node *np) reg = of_iomap(np, 0); if (reg == NULL) { - pr_err("%s: failed to map %s DIV6 clock register\n", - __func__, np->name); + pr_err("%s: failed to map %pOFn DIV6 clock register\n", + __func__, np); goto error; } @@ -337,8 +334,8 @@ static void __init cpg_div6_clock_init(struct device_node *np) clk = cpg_div6_register(clk_name, num_parents, parent_names, reg, NULL); if (IS_ERR(clk)) { - pr_err("%s: failed to register %s DIV6 clock (%ld)\n", - __func__, np->name, PTR_ERR(clk)); + pr_err("%s: failed to register %pOFn DIV6 clock (%ld)\n", + __func__, np, PTR_ERR(clk)); goto error; } diff --git a/drivers/clk/renesas/clk-emev2.c b/drivers/clk/renesas/clk-emev2.c index a91825471c79..7807b30a5bbb 100644 --- a/drivers/clk/renesas/clk-emev2.c +++ b/drivers/clk/renesas/clk-emev2.c @@ -1,21 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * EMMA Mobile EV2 common clock framework support * * Copyright (C) 2013 Takashi Yoshii <takashi.yoshii.ze@renesas.com> * Copyright (C) 2012 Magnus Damm - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/clk-provider.h> #include <linux/clkdev.h> @@ -86,8 +74,8 @@ static void __init emev2_smu_clkdiv_init(struct device_node *np) clk = clk_register_divider(NULL, np->name, parent_name, 0, smu_base + reg[0], reg[1], 8, 0, &lock); of_clk_add_provider(np, of_clk_src_simple_get, clk); - clk_register_clkdev(clk, np->name, NULL); - pr_debug("## %s %s %p\n", __func__, np->name, clk); + clk_register_clkdev(clk, np->full_name, NULL); + pr_debug("## %s %pOFn %p\n", __func__, np, clk); } CLK_OF_DECLARE(emev2_smu_clkdiv, "renesas,emev2-smu-clkdiv", emev2_smu_clkdiv_init); @@ -104,7 +92,7 @@ static void __init emev2_smu_gclk_init(struct device_node *np) clk = clk_register_gate(NULL, np->name, parent_name, 0, smu_base + reg[0], reg[1], 0, &lock); of_clk_add_provider(np, of_clk_src_simple_get, clk); - clk_register_clkdev(clk, np->name, NULL); - pr_debug("## %s %s %p\n", __func__, np->name, clk); + clk_register_clkdev(clk, np->full_name, NULL); + pr_debug("## %s %pOFn %p\n", __func__, np, clk); } CLK_OF_DECLARE(emev2_smu_gclk, "renesas,emev2-smu-gclk", emev2_smu_gclk_init); diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c index e82adcb16a52..1c1768c2cc82 100644 --- a/drivers/clk/renesas/clk-mstp.c +++ b/drivers/clk/renesas/clk-mstp.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * R-Car MSTP clocks * @@ -5,10 +6,6 @@ * Copyright (C) 2015 Glider bvba * * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. */ #include <linux/clk.h> @@ -239,8 +236,8 @@ static void __init cpg_mstp_clocks_init(struct device_node *np) break; if (clkidx >= MSTP_MAX_CLOCKS) { - pr_err("%s: invalid clock %s %s index %u\n", - __func__, np->name, name, clkidx); + pr_err("%s: invalid clock %pOFn %s index %u\n", + __func__, np, name, clkidx); continue; } @@ -259,8 +256,8 @@ static void __init cpg_mstp_clocks_init(struct device_node *np) */ clk_register_clkdev(clks[clkidx], name, NULL); } else { - pr_err("%s: failed to register %s %s clock (%ld)\n", - __func__, np->name, name, PTR_ERR(clks[clkidx])); + pr_err("%s: failed to register %pOFn %s clock (%ld)\n", + __func__, np, name, PTR_ERR(clks[clkidx])); } } diff --git a/drivers/clk/renesas/clk-r8a73a4.c b/drivers/clk/renesas/clk-r8a73a4.c index 7b903ce4c901..2719c248c67b 100644 --- a/drivers/clk/renesas/clk-r8a73a4.c +++ b/drivers/clk/renesas/clk-r8a73a4.c @@ -1,11 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * r8a73a4 Core CPG Clocks * * Copyright (C) 2014 Ulrich Hecht - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. 
*/ #include <linux/clk-provider.h> @@ -228,8 +225,8 @@ static void __init r8a73a4_cpg_clocks_init(struct device_node *np) clk = r8a73a4_cpg_register_clock(np, cpg, name); if (IS_ERR(clk)) - pr_err("%s: failed to register %s %s clock (%ld)\n", - __func__, np->name, name, PTR_ERR(clk)); + pr_err("%s: failed to register %pOFn %s clock (%ld)\n", + __func__, np, name, PTR_ERR(clk)); else cpg->data.clks[i] = clk; } diff --git a/drivers/clk/renesas/clk-r8a7740.c b/drivers/clk/renesas/clk-r8a7740.c index a7a30d2eca41..5967656c13cc 100644 --- a/drivers/clk/renesas/clk-r8a7740.c +++ b/drivers/clk/renesas/clk-r8a7740.c @@ -1,11 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * r8a7740 Core CPG Clocks * * Copyright (C) 2014 Ulrich Hecht - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. */ #include <linux/clk-provider.h> @@ -187,8 +184,8 @@ static void __init r8a7740_cpg_clocks_init(struct device_node *np) clk = r8a7740_cpg_register_clock(np, cpg, name); if (IS_ERR(clk)) - pr_err("%s: failed to register %s %s clock (%ld)\n", - __func__, np->name, name, PTR_ERR(clk)); + pr_err("%s: failed to register %pOFn %s clock (%ld)\n", + __func__, np, name, PTR_ERR(clk)); else cpg->data.clks[i] = clk; } diff --git a/drivers/clk/renesas/clk-r8a7778.c b/drivers/clk/renesas/clk-r8a7778.c index 886a8380e912..3ccc53685bdd 100644 --- a/drivers/clk/renesas/clk-r8a7778.c +++ b/drivers/clk/renesas/clk-r8a7778.c @@ -1,11 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * r8a7778 Core CPG Clocks * * Copyright (C) 2014 Ulrich Hecht - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. */ #include <linux/clk-provider.h> @@ -130,8 +127,8 @@ static void __init r8a7778_cpg_clocks_init(struct device_node *np) clk = r8a7778_cpg_register_clock(np, cpg, name); if (IS_ERR(clk)) - pr_err("%s: failed to register %s %s clock (%ld)\n", - __func__, np->name, name, PTR_ERR(clk)); + pr_err("%s: failed to register %pOFn %s clock (%ld)\n", + __func__, np, name, PTR_ERR(clk)); else cpg->data.clks[i] = clk; } diff --git a/drivers/clk/renesas/clk-r8a7779.c b/drivers/clk/renesas/clk-r8a7779.c index 5adcca4656c3..9f3b5522eef5 100644 --- a/drivers/clk/renesas/clk-r8a7779.c +++ b/drivers/clk/renesas/clk-r8a7779.c @@ -1,13 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * r8a7779 Core CPG Clocks * * Copyright (C) 2013, 2014 Horms Solutions Ltd. * * Contact: Simon Horman <horms@verge.net.au> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. 
*/ #include <linux/clk-provider.h> @@ -164,8 +161,8 @@ static void __init r8a7779_cpg_clocks_init(struct device_node *np) clk = r8a7779_cpg_register_clock(np, cpg, config, plla_mult, name); if (IS_ERR(clk)) - pr_err("%s: failed to register %s %s clock (%ld)\n", - __func__, np->name, name, PTR_ERR(clk)); + pr_err("%s: failed to register %pOFn %s clock (%ld)\n", + __func__, np, name, PTR_ERR(clk)); else cpg->data.clks[i] = clk; } diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c index bccd62f2cb09..2913b4148157 100644 --- a/drivers/clk/renesas/clk-rcar-gen2.c +++ b/drivers/clk/renesas/clk-rcar-gen2.c @@ -1,13 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * rcar_gen2 Core CPG Clocks * * Copyright (C) 2013 Ideas On Board SPRL * * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. */ #include <linux/clk-provider.h> @@ -445,8 +442,8 @@ static void __init rcar_gen2_cpg_clocks_init(struct device_node *np) clk = rcar_gen2_cpg_register_clock(np, cpg, config, name); if (IS_ERR(clk)) - pr_err("%s: failed to register %s %s clock (%ld)\n", - __func__, np->name, name, PTR_ERR(clk)); + pr_err("%s: failed to register %pOFn %s clock (%ld)\n", + __func__, np, name, PTR_ERR(clk)); else cpg->data.clks[i] = clk; } diff --git a/drivers/clk/renesas/clk-rz.c b/drivers/clk/renesas/clk-rz.c index ac2f86d626b6..3cda53a97f4e 100644 --- a/drivers/clk/renesas/clk-rz.c +++ b/drivers/clk/renesas/clk-rz.c @@ -1,12 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * RZ/A1 Core CPG Clocks * * Copyright (C) 2013 Ideas On Board SPRL * Copyright (C) 2014 Wolfram Sang, Sang Engineering <wsa@sang-engineering.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. */ #include <linux/clk-provider.h> @@ -113,8 +110,8 @@ static void __init rz_cpg_clocks_init(struct device_node *np) clk = rz_cpg_register_clock(np, cpg, name); if (IS_ERR(clk)) - pr_err("%s: failed to register %s %s clock (%ld)\n", - __func__, np->name, name, PTR_ERR(clk)); + pr_err("%s: failed to register %pOFn %s clock (%ld)\n", + __func__, np, name, PTR_ERR(clk)); else cpg->data.clks[i] = clk; } diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c index bab33610eb6c..dc8ffc7c727a 100644 --- a/drivers/clk/renesas/clk-sh73a0.c +++ b/drivers/clk/renesas/clk-sh73a0.c @@ -1,11 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * sh73a0 Core CPG Clocks * * Copyright (C) 2014 Ulrich Hecht - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. 
*/ #include <linux/clk-provider.h> @@ -206,8 +203,8 @@ static void __init sh73a0_cpg_clocks_init(struct device_node *np) clk = sh73a0_cpg_register_clock(np, cpg, name); if (IS_ERR(clk)) - pr_err("%s: failed to register %s %s clock (%ld)\n", - __func__, np->name, name, PTR_ERR(clk)); + pr_err("%s: failed to register %pOFn %s clock (%ld)\n", + __func__, np, name, PTR_ERR(clk)); else cpg->data.clks[i] = clk; } diff --git a/drivers/clk/renesas/r7s9210-cpg-mssr.c b/drivers/clk/renesas/r7s9210-cpg-mssr.c new file mode 100644 index 000000000000..5135f13ec628 --- /dev/null +++ b/drivers/clk/renesas/r7s9210-cpg-mssr.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * R7S9210 Clock Pulse Generator / Module Standby + * + * Based on r8a7795-cpg-mssr.c + * + * Copyright (C) 2018 Chris Brandt + * Copyright (C) 2018 Renesas Electronics Corp. + * + */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <dt-bindings/clock/r7s9210-cpg-mssr.h> +#include "renesas-cpg-mssr.h" + +#define CPG_FRQCR 0x00 + +static u8 cpg_mode; + +/* Internal Clock ratio table */ +static const struct { + unsigned int i; + unsigned int g; + unsigned int b; + unsigned int p1; + /* p0 is always 32 */; +} ratio_tab[5] = { /* I, G, B, P1 */ + { 2, 4, 8, 16}, /* FRQCR = 0x012 */ + { 4, 4, 8, 16}, /* FRQCR = 0x112 */ + { 8, 4, 8, 16}, /* FRQCR = 0x212 */ + { 16, 8, 16, 16}, /* FRQCR = 0x322 */ + { 16, 16, 32, 32}, /* FRQCR = 0x333 */ + }; + +enum rz_clk_types { + CLK_TYPE_RZA_MAIN = CLK_TYPE_CUSTOM, + CLK_TYPE_RZA_PLL, +}; + +enum clk_ids { + /* Core Clock Outputs exported to DT */ + LAST_DT_CORE_CLK = R7S9210_CLK_P0, + + /* External Input Clocks */ + CLK_EXTAL, + + /* Internal Core Clocks */ + CLK_MAIN, + CLK_PLL, + + /* Module Clocks */ + MOD_CLK_BASE +}; + +static struct cpg_core_clk r7s9210_early_core_clks[] = { + /* External Clock Inputs */ + DEF_INPUT("extal", CLK_EXTAL), + + /* Internal Core Clocks */ + DEF_BASE(".main", CLK_MAIN, CLK_TYPE_RZA_MAIN, CLK_EXTAL), + DEF_BASE(".pll", CLK_PLL, CLK_TYPE_RZA_PLL, CLK_MAIN), + + /* Core Clock Outputs */ + DEF_FIXED("p1c", R7S9210_CLK_P1C, CLK_PLL, 16, 1), +}; + +static const struct mssr_mod_clk r7s9210_early_mod_clks[] __initconst = { + DEF_MOD_STB("ostm2", 34, R7S9210_CLK_P1C), + DEF_MOD_STB("ostm1", 35, R7S9210_CLK_P1C), + DEF_MOD_STB("ostm0", 36, R7S9210_CLK_P1C), +}; + +static struct cpg_core_clk r7s9210_core_clks[] = { + /* Core Clock Outputs */ + DEF_FIXED("i", R7S9210_CLK_I, CLK_PLL, 2, 1), + DEF_FIXED("g", R7S9210_CLK_G, CLK_PLL, 4, 1), + DEF_FIXED("b", R7S9210_CLK_B, CLK_PLL, 8, 1), + DEF_FIXED("p1", R7S9210_CLK_P1, CLK_PLL, 16, 1), + DEF_FIXED("p0", R7S9210_CLK_P0, CLK_PLL, 32, 1), +}; + +static const struct mssr_mod_clk r7s9210_mod_clks[] __initconst = { + DEF_MOD_STB("scif4", 43, R7S9210_CLK_P1C), + DEF_MOD_STB("scif3", 44, R7S9210_CLK_P1C), + DEF_MOD_STB("scif2", 45, R7S9210_CLK_P1C), + DEF_MOD_STB("scif1", 46, R7S9210_CLK_P1C), + DEF_MOD_STB("scif0", 47, R7S9210_CLK_P1C), + + DEF_MOD_STB("ether1", 64, R7S9210_CLK_B), + DEF_MOD_STB("ether0", 65, R7S9210_CLK_B), + + DEF_MOD_STB("i2c3", 84, R7S9210_CLK_P1), + DEF_MOD_STB("i2c2", 85, R7S9210_CLK_P1), + DEF_MOD_STB("i2c1", 86, R7S9210_CLK_P1), + DEF_MOD_STB("i2c0", 87, R7S9210_CLK_P1), + + DEF_MOD_STB("spi2", 95, R7S9210_CLK_P1), + DEF_MOD_STB("spi1", 96, R7S9210_CLK_P1), + DEF_MOD_STB("spi0", 97, R7S9210_CLK_P1), +}; + +/* The clock dividers in the table vary based on DT and register settings */ +static void __init r7s9210_update_clk_table(struct clk *extal_clk, + void __iomem *base) +{ + int i; + 
u16 frqcr; + u8 index; + + /* If EXTAL is above 12MHz, then we know it is Mode 1 */ + if (clk_get_rate(extal_clk) > 12000000) + cpg_mode = 1; + + frqcr = clk_readl(base + CPG_FRQCR) & 0xFFF; + if (frqcr == 0x012) + index = 0; + else if (frqcr == 0x112) + index = 1; + else if (frqcr == 0x212) + index = 2; + else if (frqcr == 0x322) + index = 3; + else if (frqcr == 0x333) + index = 4; + else + BUG_ON(1); /* Illegal FRQCR value */ + + for (i = 0; i < ARRAY_SIZE(r7s9210_core_clks); i++) { + switch (r7s9210_core_clks[i].id) { + case R7S9210_CLK_I: + r7s9210_core_clks[i].div = ratio_tab[index].i; + break; + case R7S9210_CLK_G: + r7s9210_core_clks[i].div = ratio_tab[index].g; + break; + case R7S9210_CLK_B: + r7s9210_core_clks[i].div = ratio_tab[index].b; + break; + case R7S9210_CLK_P1: + case R7S9210_CLK_P1C: + r7s9210_core_clks[i].div = ratio_tab[index].p1; + break; + case R7S9210_CLK_P0: + r7s9210_core_clks[i].div = 32; + break; + } + } +} + +struct clk * __init rza2_cpg_clk_register(struct device *dev, + const struct cpg_core_clk *core, const struct cpg_mssr_info *info, + struct clk **clks, void __iomem *base, + struct raw_notifier_head *notifiers) +{ + struct clk *parent; + unsigned int mult = 1; + unsigned int div = 1; + + parent = clks[core->parent]; + if (IS_ERR(parent)) + return ERR_CAST(parent); + + switch (core->id) { + case CLK_MAIN: + break; + + case CLK_PLL: + if (cpg_mode) + mult = 44; /* Divider 1 is 1/2 */ + else + mult = 88; /* Divider 1 is 1 */ + break; + + default: + return ERR_PTR(-EINVAL); + } + + if (core->id == CLK_MAIN) + r7s9210_update_clk_table(parent, base); + + return clk_register_fixed_factor(NULL, core->name, + __clk_get_name(parent), 0, mult, div); +} + +const struct cpg_mssr_info r7s9210_cpg_mssr_info __initconst = { + /* Early Clocks */ + .early_core_clks = r7s9210_early_core_clks, + .num_early_core_clks = ARRAY_SIZE(r7s9210_early_core_clks), + .early_mod_clks = r7s9210_early_mod_clks, + .num_early_mod_clks = ARRAY_SIZE(r7s9210_early_mod_clks), + + /* Core Clocks */ + .core_clks = r7s9210_core_clks, + .num_core_clks = ARRAY_SIZE(r7s9210_core_clks), + .last_dt_core_clk = LAST_DT_CORE_CLK, + .num_total_core_clks = MOD_CLK_BASE, + + /* Module Clocks */ + .mod_clks = r7s9210_mod_clks, + .num_mod_clks = ARRAY_SIZE(r7s9210_mod_clks), + .num_hw_mod_clks = 11 * 32, /* includes STBCR0 which doesn't exist */ + + /* Callbacks */ + .cpg_clk_register = rza2_cpg_clk_register, + + /* RZ/A2 has Standby Control Registers */ + .stbyctrl = true, +}; + +static void __init r7s9210_cpg_mssr_early_init(struct device_node *np) +{ + cpg_mssr_early_init(np, &r7s9210_cpg_mssr_info); +} + +CLK_OF_DECLARE_DRIVER(cpg_mstp_clks, "renesas,r7s9210-cpg-mssr", + r7s9210_cpg_mssr_early_init); diff --git a/drivers/clk/renesas/r8a7743-cpg-mssr.c b/drivers/clk/renesas/r8a7743-cpg-mssr.c index 011c170ec3f9..c01d9af2525a 100644 --- a/drivers/clk/renesas/r8a7743-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7743-cpg-mssr.c @@ -1,16 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 /* * r8a7743 Clock Pulse Generator / Module Standby and Software Reset * * Copyright (C) 2016 Cogent Embedded Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation; of the License. 
*/ #include <linux/device.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/of.h> #include <linux/soc/renesas/rcar-rst.h> #include <dt-bindings/clock/r8a7743-cpg-mssr.h> @@ -37,7 +35,7 @@ enum clk_ids { MOD_CLK_BASE }; -static const struct cpg_core_clk r8a7743_core_clks[] __initconst = { +static struct cpg_core_clk r8a7743_core_clks[] __initdata = { /* External Clock Inputs */ DEF_INPUT("extal", CLK_EXTAL), DEF_INPUT("usb_extal", CLK_USB_EXTAL), @@ -238,6 +236,8 @@ static const struct rcar_gen2_cpg_pll_config cpg_pll_configs[8] __initconst = { static int __init r8a7743_cpg_mssr_init(struct device *dev) { const struct rcar_gen2_cpg_pll_config *cpg_pll_config; + struct device_node *np = dev->of_node; + unsigned int i; u32 cpg_mode; int error; @@ -247,6 +247,14 @@ static int __init r8a7743_cpg_mssr_init(struct device *dev) cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)]; + if (of_device_is_compatible(np, "renesas,r8a7744-cpg-mssr")) { + /* RZ/G1N uses a 1/5 divider for ZG */ + for (i = 0; i < ARRAY_SIZE(r8a7743_core_clks); i++) + if (r8a7743_core_clks[i].id == R8A7743_CLK_ZG) { + r8a7743_core_clks[i].div = 5; + break; + } + } return rcar_gen2_cpg_init(cpg_pll_config, 2, cpg_mode); } diff --git a/drivers/clk/renesas/r8a7745-cpg-mssr.c b/drivers/clk/renesas/r8a7745-cpg-mssr.c index 4b0a9243b748..493874e5ebee 100644 --- a/drivers/clk/renesas/r8a7745-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7745-cpg-mssr.c @@ -1,11 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * r8a7745 Clock Pulse Generator / Module Standby and Software Reset * * Copyright (C) 2016 Cogent Embedded Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation; of the License. */ #include <linux/device.h> diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c new file mode 100644 index 000000000000..b0da34217bdf --- /dev/null +++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * r8a774a1 Clock Pulse Generator / Module Standby and Software Reset + * + * Copyright (C) 2018 Renesas Electronics Corp. 
+ * + * Based on r8a7796-cpg-mssr.c + * + * Copyright (C) 2016 Glider bvba + */ + +#include <linux/device.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/soc/renesas/rcar-rst.h> + +#include <dt-bindings/clock/r8a774a1-cpg-mssr.h> + +#include "renesas-cpg-mssr.h" +#include "rcar-gen3-cpg.h" + +enum clk_ids { + /* Core Clock Outputs exported to DT */ + LAST_DT_CORE_CLK = R8A774A1_CLK_OSC, + + /* External Input Clocks */ + CLK_EXTAL, + CLK_EXTALR, + + /* Internal Core Clocks */ + CLK_MAIN, + CLK_PLL0, + CLK_PLL1, + CLK_PLL2, + CLK_PLL3, + CLK_PLL4, + CLK_PLL1_DIV2, + CLK_PLL1_DIV4, + CLK_S0, + CLK_S1, + CLK_S2, + CLK_S3, + CLK_SDSRC, + CLK_RINT, + + /* Module Clocks */ + MOD_CLK_BASE +}; + +static const struct cpg_core_clk r8a774a1_core_clks[] __initconst = { + /* External Clock Inputs */ + DEF_INPUT("extal", CLK_EXTAL), + DEF_INPUT("extalr", CLK_EXTALR), + + /* Internal Core Clocks */ + DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL), + DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN3_PLL0, CLK_MAIN), + DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN), + DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN3_PLL2, CLK_MAIN), + DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN), + DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN3_PLL4, CLK_MAIN), + + DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1), + DEF_FIXED(".pll1_div4", CLK_PLL1_DIV4, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 3, 1), + DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), + DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), + DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), + + DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), + + /* Core Clock Outputs */ + DEF_BASE("z", R8A774A1_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0), + DEF_BASE("z2", R8A774A1_CLK_Z2, CLK_TYPE_GEN3_Z2, CLK_PLL2), + DEF_FIXED("ztr", R8A774A1_CLK_ZTR, CLK_PLL1_DIV2, 6, 1), + DEF_FIXED("ztrd2", R8A774A1_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1), + DEF_FIXED("zt", R8A774A1_CLK_ZT, CLK_PLL1_DIV2, 4, 1), + DEF_FIXED("zx", R8A774A1_CLK_ZX, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED("s0d1", R8A774A1_CLK_S0D1, CLK_S0, 1, 1), + DEF_FIXED("s0d2", R8A774A1_CLK_S0D2, CLK_S0, 2, 1), + DEF_FIXED("s0d3", R8A774A1_CLK_S0D3, CLK_S0, 3, 1), + DEF_FIXED("s0d4", R8A774A1_CLK_S0D4, CLK_S0, 4, 1), + DEF_FIXED("s0d6", R8A774A1_CLK_S0D6, CLK_S0, 6, 1), + DEF_FIXED("s0d8", R8A774A1_CLK_S0D8, CLK_S0, 8, 1), + DEF_FIXED("s0d12", R8A774A1_CLK_S0D12, CLK_S0, 12, 1), + DEF_FIXED("s1d2", R8A774A1_CLK_S1D2, CLK_S1, 2, 1), + DEF_FIXED("s1d4", R8A774A1_CLK_S1D4, CLK_S1, 4, 1), + DEF_FIXED("s2d1", R8A774A1_CLK_S2D1, CLK_S2, 1, 1), + DEF_FIXED("s2d2", R8A774A1_CLK_S2D2, CLK_S2, 2, 1), + DEF_FIXED("s2d4", R8A774A1_CLK_S2D4, CLK_S2, 4, 1), + DEF_FIXED("s3d1", R8A774A1_CLK_S3D1, CLK_S3, 1, 1), + DEF_FIXED("s3d2", R8A774A1_CLK_S3D2, CLK_S3, 2, 1), + DEF_FIXED("s3d4", R8A774A1_CLK_S3D4, CLK_S3, 4, 1), + + DEF_GEN3_SD("sd0", R8A774A1_CLK_SD0, CLK_SDSRC, 0x074), + DEF_GEN3_SD("sd1", R8A774A1_CLK_SD1, CLK_SDSRC, 0x078), + DEF_GEN3_SD("sd2", R8A774A1_CLK_SD2, CLK_SDSRC, 0x268), + DEF_GEN3_SD("sd3", R8A774A1_CLK_SD3, CLK_SDSRC, 0x26c), + + DEF_FIXED("cl", R8A774A1_CLK_CL, CLK_PLL1_DIV2, 48, 1), + DEF_FIXED("cp", R8A774A1_CLK_CP, CLK_EXTAL, 2, 1), + + DEF_DIV6P1("csi0", R8A774A1_CLK_CSI0, CLK_PLL1_DIV4, 0x00c), + DEF_DIV6P1("mso", R8A774A1_CLK_MSO, CLK_PLL1_DIV4, 0x014), + DEF_DIV6P1("hdmi", R8A774A1_CLK_HDMI, CLK_PLL1_DIV4, 0x250), + + DEF_GEN3_OSC("osc", R8A774A1_CLK_OSC, CLK_EXTAL, 8), + + DEF_BASE("r", R8A774A1_CLK_R, 
CLK_TYPE_GEN3_R, CLK_RINT), +}; + +static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = { + DEF_MOD("fdp1-0", 119, R8A774A1_CLK_S0D1), + DEF_MOD("scif5", 202, R8A774A1_CLK_S3D4), + DEF_MOD("scif4", 203, R8A774A1_CLK_S3D4), + DEF_MOD("scif3", 204, R8A774A1_CLK_S3D4), + DEF_MOD("scif1", 206, R8A774A1_CLK_S3D4), + DEF_MOD("scif0", 207, R8A774A1_CLK_S3D4), + DEF_MOD("msiof3", 208, R8A774A1_CLK_MSO), + DEF_MOD("msiof2", 209, R8A774A1_CLK_MSO), + DEF_MOD("msiof1", 210, R8A774A1_CLK_MSO), + DEF_MOD("msiof0", 211, R8A774A1_CLK_MSO), + DEF_MOD("sys-dmac2", 217, R8A774A1_CLK_S0D3), + DEF_MOD("sys-dmac1", 218, R8A774A1_CLK_S0D3), + DEF_MOD("sys-dmac0", 219, R8A774A1_CLK_S0D3), + DEF_MOD("cmt3", 300, R8A774A1_CLK_R), + DEF_MOD("cmt2", 301, R8A774A1_CLK_R), + DEF_MOD("cmt1", 302, R8A774A1_CLK_R), + DEF_MOD("cmt0", 303, R8A774A1_CLK_R), + DEF_MOD("scif2", 310, R8A774A1_CLK_S3D4), + DEF_MOD("sdif3", 311, R8A774A1_CLK_SD3), + DEF_MOD("sdif2", 312, R8A774A1_CLK_SD2), + DEF_MOD("sdif1", 313, R8A774A1_CLK_SD1), + DEF_MOD("sdif0", 314, R8A774A1_CLK_SD0), + DEF_MOD("pcie1", 318, R8A774A1_CLK_S3D1), + DEF_MOD("pcie0", 319, R8A774A1_CLK_S3D1), + DEF_MOD("usb3-if0", 328, R8A774A1_CLK_S3D1), + DEF_MOD("usb-dmac0", 330, R8A774A1_CLK_S3D1), + DEF_MOD("usb-dmac1", 331, R8A774A1_CLK_S3D1), + DEF_MOD("rwdt", 402, R8A774A1_CLK_R), + DEF_MOD("intc-ex", 407, R8A774A1_CLK_CP), + DEF_MOD("intc-ap", 408, R8A774A1_CLK_S0D3), + DEF_MOD("audmac1", 501, R8A774A1_CLK_S0D3), + DEF_MOD("audmac0", 502, R8A774A1_CLK_S0D3), + DEF_MOD("hscif4", 516, R8A774A1_CLK_S3D1), + DEF_MOD("hscif3", 517, R8A774A1_CLK_S3D1), + DEF_MOD("hscif2", 518, R8A774A1_CLK_S3D1), + DEF_MOD("hscif1", 519, R8A774A1_CLK_S3D1), + DEF_MOD("hscif0", 520, R8A774A1_CLK_S3D1), + DEF_MOD("thermal", 522, R8A774A1_CLK_CP), + DEF_MOD("pwm", 523, R8A774A1_CLK_S0D12), + DEF_MOD("fcpvd2", 601, R8A774A1_CLK_S0D2), + DEF_MOD("fcpvd1", 602, R8A774A1_CLK_S0D2), + DEF_MOD("fcpvd0", 603, R8A774A1_CLK_S0D2), + DEF_MOD("fcpvb0", 607, R8A774A1_CLK_S0D1), + DEF_MOD("fcpvi0", 611, R8A774A1_CLK_S0D1), + DEF_MOD("fcpf0", 615, R8A774A1_CLK_S0D1), + DEF_MOD("fcpci0", 617, R8A774A1_CLK_S0D2), + DEF_MOD("fcpcs", 619, R8A774A1_CLK_S0D2), + DEF_MOD("vspd2", 621, R8A774A1_CLK_S0D2), + DEF_MOD("vspd1", 622, R8A774A1_CLK_S0D2), + DEF_MOD("vspd0", 623, R8A774A1_CLK_S0D2), + DEF_MOD("vspb", 626, R8A774A1_CLK_S0D1), + DEF_MOD("vspi0", 631, R8A774A1_CLK_S0D1), + DEF_MOD("ehci1", 702, R8A774A1_CLK_S3D4), + DEF_MOD("ehci0", 703, R8A774A1_CLK_S3D4), + DEF_MOD("hsusb", 704, R8A774A1_CLK_S3D4), + DEF_MOD("csi20", 714, R8A774A1_CLK_CSI0), + DEF_MOD("csi40", 716, R8A774A1_CLK_CSI0), + DEF_MOD("du2", 722, R8A774A1_CLK_S2D1), + DEF_MOD("du1", 723, R8A774A1_CLK_S2D1), + DEF_MOD("du0", 724, R8A774A1_CLK_S2D1), + DEF_MOD("lvds", 727, R8A774A1_CLK_S2D1), + DEF_MOD("hdmi0", 729, R8A774A1_CLK_HDMI), + DEF_MOD("vin7", 804, R8A774A1_CLK_S0D2), + DEF_MOD("vin6", 805, R8A774A1_CLK_S0D2), + DEF_MOD("vin5", 806, R8A774A1_CLK_S0D2), + DEF_MOD("vin4", 807, R8A774A1_CLK_S0D2), + DEF_MOD("vin3", 808, R8A774A1_CLK_S0D2), + DEF_MOD("vin2", 809, R8A774A1_CLK_S0D2), + DEF_MOD("vin1", 810, R8A774A1_CLK_S0D2), + DEF_MOD("vin0", 811, R8A774A1_CLK_S0D2), + DEF_MOD("etheravb", 812, R8A774A1_CLK_S0D6), + DEF_MOD("gpio7", 905, R8A774A1_CLK_S3D4), + DEF_MOD("gpio6", 906, R8A774A1_CLK_S3D4), + DEF_MOD("gpio5", 907, R8A774A1_CLK_S3D4), + DEF_MOD("gpio4", 908, R8A774A1_CLK_S3D4), + DEF_MOD("gpio3", 909, R8A774A1_CLK_S3D4), + DEF_MOD("gpio2", 910, R8A774A1_CLK_S3D4), + DEF_MOD("gpio1", 911, R8A774A1_CLK_S3D4), + 
DEF_MOD("gpio0", 912, R8A774A1_CLK_S3D4), + DEF_MOD("can-if1", 915, R8A774A1_CLK_S3D4), + DEF_MOD("can-if0", 916, R8A774A1_CLK_S3D4), + DEF_MOD("i2c6", 918, R8A774A1_CLK_S0D6), + DEF_MOD("i2c5", 919, R8A774A1_CLK_S0D6), + DEF_MOD("i2c-dvfs", 926, R8A774A1_CLK_CP), + DEF_MOD("i2c4", 927, R8A774A1_CLK_S0D6), + DEF_MOD("i2c3", 928, R8A774A1_CLK_S0D6), + DEF_MOD("i2c2", 929, R8A774A1_CLK_S3D2), + DEF_MOD("i2c1", 930, R8A774A1_CLK_S3D2), + DEF_MOD("i2c0", 931, R8A774A1_CLK_S3D2), + DEF_MOD("ssi-all", 1005, R8A774A1_CLK_S3D4), + DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)), + DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)), + DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)), + DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)), + DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)), + DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)), + DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)), + DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)), + DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)), + DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)), + DEF_MOD("scu-all", 1017, R8A774A1_CLK_S3D4), + DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)), + DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)), + DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)), + DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)), + DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)), + DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)), + DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)), + DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)), + DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)), + DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)), + DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)), + DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)), + DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)), + DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)), +}; + +static const unsigned int r8a774a1_crit_mod_clks[] __initconst = { + MOD_CLK_ID(408), /* INTC-AP (GIC) */ +}; + +/* + * CPG Clock Data + */ + +/* + * MD EXTAL PLL0 PLL1 PLL2 PLL3 PLL4 OSC + * 14 13 19 17 (MHz) + *------------------------------------------------------------------------- + * 0 0 0 0 16.66 x 1 x180 x192 x144 x192 x144 /16 + * 0 0 0 1 16.66 x 1 x180 x192 x144 x128 x144 /16 + * 0 0 1 0 Prohibited setting + * 0 0 1 1 16.66 x 1 x180 x192 x144 x192 x144 /16 + * 0 1 0 0 20 x 1 x150 x160 x120 x160 x120 /19 + * 0 1 0 1 20 x 1 x150 x160 x120 x106 x120 /19 + * 0 1 1 0 Prohibited setting + * 0 1 1 1 20 x 1 x150 x160 x120 x160 x120 /19 + * 1 0 0 0 25 x 1 x120 x128 x96 x128 x96 /24 + * 1 0 0 1 25 x 1 x120 x128 x96 x84 x96 /24 + * 1 0 1 0 Prohibited setting + * 1 0 1 1 25 x 1 x120 x128 x96 x128 x96 /24 + * 1 1 0 0 33.33 / 2 x180 x192 x144 x192 x144 /32 + * 1 1 0 1 33.33 / 2 x180 x192 x144 x128 x144 /32 + * 1 1 1 0 Prohibited setting + * 1 1 1 1 33.33 / 2 x180 x192 x144 x192 x144 /32 + */ +#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 11) | \ + (((md) & BIT(13)) >> 11) | \ + (((md) & BIT(19)) >> 18) | \ + (((md) & BIT(17)) >> 17)) + +static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = { + /* EXTAL div PLL1 mult/div PLL3 mult/div OSC prediv */ + { 1, 192, 1, 192, 1, 16, }, + { 1, 192, 1, 128, 1, 16, }, + { 0, /* Prohibited setting */ }, + { 1, 192, 1, 192, 1, 16, }, + { 1, 160, 1, 160, 1, 19, }, + { 1, 160, 1, 106, 1, 19, }, + { 0, /* Prohibited setting */ }, + { 1, 160, 1, 160, 1, 19, }, + { 1, 128, 1, 128, 1, 24, }, + { 1, 128, 1, 84, 1, 24, }, + { 0, /* Prohibited setting */ }, + { 1, 128, 1, 128, 1, 24, }, + { 2, 192, 1, 192, 1, 32, }, + { 2, 192, 1, 128, 1, 32, }, + { 0, /* Prohibited setting */ }, + { 2, 192, 1, 192, 1, 32, }, +}; + +static int __init r8a774a1_cpg_mssr_init(struct device *dev) +{ + const 
struct rcar_gen3_cpg_pll_config *cpg_pll_config; + u32 cpg_mode; + int error; + + error = rcar_rst_read_mode_pins(&cpg_mode); + if (error) + return error; + + cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)]; + if (!cpg_pll_config->extal_div) { + dev_err(dev, "Prohibited setting (cpg_mode=0x%x)\n", cpg_mode); + return -EINVAL; + } + + return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode); +} + +const struct cpg_mssr_info r8a774a1_cpg_mssr_info __initconst = { + /* Core Clocks */ + .core_clks = r8a774a1_core_clks, + .num_core_clks = ARRAY_SIZE(r8a774a1_core_clks), + .last_dt_core_clk = LAST_DT_CORE_CLK, + .num_total_core_clks = MOD_CLK_BASE, + + /* Module Clocks */ + .mod_clks = r8a774a1_mod_clks, + .num_mod_clks = ARRAY_SIZE(r8a774a1_mod_clks), + .num_hw_mod_clks = 12 * 32, + + /* Critical Module Clocks */ + .crit_mod_clks = r8a774a1_crit_mod_clks, + .num_crit_mod_clks = ARRAY_SIZE(r8a774a1_crit_mod_clks), + + /* Callbacks */ + .init = r8a774a1_cpg_mssr_init, + .cpg_clk_register = rcar_gen3_cpg_clk_register, +}; diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c new file mode 100644 index 000000000000..10b96895d452 --- /dev/null +++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c @@ -0,0 +1,286 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * r8a774c0 Clock Pulse Generator / Module Standby and Software Reset + * + * Copyright (C) 2018 Renesas Electronics Corp. + * + * Based on r8a77990-cpg-mssr.c + * + * Copyright (C) 2015 Glider bvba + * Copyright (C) 2015 Renesas Electronics Corp. + */ + +#include <linux/device.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/soc/renesas/rcar-rst.h> + +#include <dt-bindings/clock/r8a774c0-cpg-mssr.h> + +#include "renesas-cpg-mssr.h" +#include "rcar-gen3-cpg.h" + +enum clk_ids { + /* Core Clock Outputs exported to DT */ + LAST_DT_CORE_CLK = R8A774C0_CLK_CPEX, + + /* External Input Clocks */ + CLK_EXTAL, + + /* Internal Core Clocks */ + CLK_MAIN, + CLK_PLL0, + CLK_PLL1, + CLK_PLL3, + CLK_PLL0D4, + CLK_PLL0D8, + CLK_PLL0D20, + CLK_PLL0D24, + CLK_PLL1D2, + CLK_PE, + CLK_S0, + CLK_S1, + CLK_S2, + CLK_S3, + CLK_SDSRC, + CLK_RINT, + CLK_OCO, + + /* Module Clocks */ + MOD_CLK_BASE +}; + +static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = { + /* External Clock Inputs */ + DEF_INPUT("extal", CLK_EXTAL), + + /* Internal Core Clocks */ + DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL), + DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN), + DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN), + + DEF_FIXED(".pll0", CLK_PLL0, CLK_MAIN, 1, 100), + DEF_FIXED(".pll0d4", CLK_PLL0D4, CLK_PLL0, 4, 1), + DEF_FIXED(".pll0d8", CLK_PLL0D8, CLK_PLL0, 8, 1), + DEF_FIXED(".pll0d20", CLK_PLL0D20, CLK_PLL0, 20, 1), + DEF_FIXED(".pll0d24", CLK_PLL0D24, CLK_PLL0, 24, 1), + DEF_FIXED(".pll1d2", CLK_PLL1D2, CLK_PLL1, 2, 1), + DEF_FIXED(".pe", CLK_PE, CLK_PLL0D20, 1, 1), + DEF_FIXED(".s0", CLK_S0, CLK_PLL1, 2, 1), + DEF_FIXED(".s1", CLK_S1, CLK_PLL1, 3, 1), + DEF_FIXED(".s2", CLK_S2, CLK_PLL1, 4, 1), + DEF_FIXED(".s3", CLK_S3, CLK_PLL1, 6, 1), + DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1, 2, 1), + + DEF_DIV6_RO(".r", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32), + + DEF_RATE(".oco", CLK_OCO, 8 * 1000 * 1000), + + /* Core Clock Outputs */ + DEF_FIXED("za2", R8A774C0_CLK_ZA2, CLK_PLL0D24, 1, 1), + DEF_FIXED("za8", R8A774C0_CLK_ZA8, CLK_PLL0D8, 1, 1), + DEF_FIXED("ztr", R8A774C0_CLK_ZTR, CLK_PLL1, 6, 1), + DEF_FIXED("zt", R8A774C0_CLK_ZT, CLK_PLL1, 4, 1), + DEF_FIXED("zx", 
R8A774C0_CLK_ZX, CLK_PLL1, 3, 1), + DEF_FIXED("s0d1", R8A774C0_CLK_S0D1, CLK_S0, 1, 1), + DEF_FIXED("s0d3", R8A774C0_CLK_S0D3, CLK_S0, 3, 1), + DEF_FIXED("s0d6", R8A774C0_CLK_S0D6, CLK_S0, 6, 1), + DEF_FIXED("s0d12", R8A774C0_CLK_S0D12, CLK_S0, 12, 1), + DEF_FIXED("s0d24", R8A774C0_CLK_S0D24, CLK_S0, 24, 1), + DEF_FIXED("s1d1", R8A774C0_CLK_S1D1, CLK_S1, 1, 1), + DEF_FIXED("s1d2", R8A774C0_CLK_S1D2, CLK_S1, 2, 1), + DEF_FIXED("s1d4", R8A774C0_CLK_S1D4, CLK_S1, 4, 1), + DEF_FIXED("s2d1", R8A774C0_CLK_S2D1, CLK_S2, 1, 1), + DEF_FIXED("s2d2", R8A774C0_CLK_S2D2, CLK_S2, 2, 1), + DEF_FIXED("s2d4", R8A774C0_CLK_S2D4, CLK_S2, 4, 1), + DEF_FIXED("s3d1", R8A774C0_CLK_S3D1, CLK_S3, 1, 1), + DEF_FIXED("s3d2", R8A774C0_CLK_S3D2, CLK_S3, 2, 1), + DEF_FIXED("s3d4", R8A774C0_CLK_S3D4, CLK_S3, 4, 1), + + DEF_GEN3_SD("sd0", R8A774C0_CLK_SD0, CLK_SDSRC, 0x0074), + DEF_GEN3_SD("sd1", R8A774C0_CLK_SD1, CLK_SDSRC, 0x0078), + DEF_GEN3_SD("sd3", R8A774C0_CLK_SD3, CLK_SDSRC, 0x026c), + + DEF_FIXED("cl", R8A774C0_CLK_CL, CLK_PLL1, 48, 1), + DEF_FIXED("cp", R8A774C0_CLK_CP, CLK_EXTAL, 2, 1), + DEF_FIXED("cpex", R8A774C0_CLK_CPEX, CLK_EXTAL, 4, 1), + + DEF_DIV6_RO("osc", R8A774C0_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8), + + DEF_GEN3_PE("s0d6c", R8A774C0_CLK_S0D6C, CLK_S0, 6, CLK_PE, 2), + DEF_GEN3_PE("s3d1c", R8A774C0_CLK_S3D1C, CLK_S3, 1, CLK_PE, 1), + DEF_GEN3_PE("s3d2c", R8A774C0_CLK_S3D2C, CLK_S3, 2, CLK_PE, 2), + DEF_GEN3_PE("s3d4c", R8A774C0_CLK_S3D4C, CLK_S3, 4, CLK_PE, 4), + + DEF_DIV6P1("csi0", R8A774C0_CLK_CSI0, CLK_PLL1D2, 0x00c), + DEF_DIV6P1("mso", R8A774C0_CLK_MSO, CLK_PLL1D2, 0x014), + + DEF_GEN3_RCKSEL("r", R8A774C0_CLK_R, CLK_RINT, 1, CLK_OCO, 61 * 4), +}; + +static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = { + DEF_MOD("scif5", 202, R8A774C0_CLK_S3D4C), + DEF_MOD("scif4", 203, R8A774C0_CLK_S3D4C), + DEF_MOD("scif3", 204, R8A774C0_CLK_S3D4C), + DEF_MOD("scif1", 206, R8A774C0_CLK_S3D4C), + DEF_MOD("scif0", 207, R8A774C0_CLK_S3D4C), + DEF_MOD("msiof3", 208, R8A774C0_CLK_MSO), + DEF_MOD("msiof2", 209, R8A774C0_CLK_MSO), + DEF_MOD("msiof1", 210, R8A774C0_CLK_MSO), + DEF_MOD("msiof0", 211, R8A774C0_CLK_MSO), + DEF_MOD("sys-dmac2", 217, R8A774C0_CLK_S3D1), + DEF_MOD("sys-dmac1", 218, R8A774C0_CLK_S3D1), + DEF_MOD("sys-dmac0", 219, R8A774C0_CLK_S3D1), + + DEF_MOD("cmt3", 300, R8A774C0_CLK_R), + DEF_MOD("cmt2", 301, R8A774C0_CLK_R), + DEF_MOD("cmt1", 302, R8A774C0_CLK_R), + DEF_MOD("cmt0", 303, R8A774C0_CLK_R), + DEF_MOD("scif2", 310, R8A774C0_CLK_S3D4C), + DEF_MOD("sdif3", 311, R8A774C0_CLK_SD3), + DEF_MOD("sdif1", 313, R8A774C0_CLK_SD1), + DEF_MOD("sdif0", 314, R8A774C0_CLK_SD0), + DEF_MOD("pcie0", 319, R8A774C0_CLK_S3D1), + DEF_MOD("usb3-if0", 328, R8A774C0_CLK_S3D1), + DEF_MOD("usb-dmac0", 330, R8A774C0_CLK_S3D1), + DEF_MOD("usb-dmac1", 331, R8A774C0_CLK_S3D1), + + DEF_MOD("rwdt", 402, R8A774C0_CLK_R), + DEF_MOD("intc-ex", 407, R8A774C0_CLK_CP), + DEF_MOD("intc-ap", 408, R8A774C0_CLK_S0D3), + + DEF_MOD("audmac0", 502, R8A774C0_CLK_S3D4), + DEF_MOD("hscif4", 516, R8A774C0_CLK_S3D1C), + DEF_MOD("hscif3", 517, R8A774C0_CLK_S3D1C), + DEF_MOD("hscif2", 518, R8A774C0_CLK_S3D1C), + DEF_MOD("hscif1", 519, R8A774C0_CLK_S3D1C), + DEF_MOD("hscif0", 520, R8A774C0_CLK_S3D1C), + DEF_MOD("thermal", 522, R8A774C0_CLK_CP), + DEF_MOD("pwm", 523, R8A774C0_CLK_S3D4C), + + DEF_MOD("fcpvd1", 602, R8A774C0_CLK_S1D2), + DEF_MOD("fcpvd0", 603, R8A774C0_CLK_S1D2), + DEF_MOD("fcpvb0", 607, R8A774C0_CLK_S0D1), + DEF_MOD("fcpvi0", 611, R8A774C0_CLK_S0D1), + DEF_MOD("fcpf0", 615, R8A774C0_CLK_S0D1), + DEF_MOD("fcpcs", 
619, R8A774C0_CLK_S0D1), + DEF_MOD("vspd1", 622, R8A774C0_CLK_S1D2), + DEF_MOD("vspd0", 623, R8A774C0_CLK_S1D2), + DEF_MOD("vspb", 626, R8A774C0_CLK_S0D1), + DEF_MOD("vspi0", 631, R8A774C0_CLK_S0D1), + + DEF_MOD("ehci0", 703, R8A774C0_CLK_S3D4), + DEF_MOD("hsusb", 704, R8A774C0_CLK_S3D4), + DEF_MOD("csi40", 716, R8A774C0_CLK_CSI0), + DEF_MOD("du1", 723, R8A774C0_CLK_S2D1), + DEF_MOD("du0", 724, R8A774C0_CLK_S2D1), + DEF_MOD("lvds", 727, R8A774C0_CLK_S2D1), + + DEF_MOD("vin5", 806, R8A774C0_CLK_S1D2), + DEF_MOD("vin4", 807, R8A774C0_CLK_S1D2), + DEF_MOD("etheravb", 812, R8A774C0_CLK_S3D2), + + DEF_MOD("gpio6", 906, R8A774C0_CLK_S3D4), + DEF_MOD("gpio5", 907, R8A774C0_CLK_S3D4), + DEF_MOD("gpio4", 908, R8A774C0_CLK_S3D4), + DEF_MOD("gpio3", 909, R8A774C0_CLK_S3D4), + DEF_MOD("gpio2", 910, R8A774C0_CLK_S3D4), + DEF_MOD("gpio1", 911, R8A774C0_CLK_S3D4), + DEF_MOD("gpio0", 912, R8A774C0_CLK_S3D4), + DEF_MOD("can-if1", 915, R8A774C0_CLK_S3D4), + DEF_MOD("can-if0", 916, R8A774C0_CLK_S3D4), + DEF_MOD("i2c6", 918, R8A774C0_CLK_S3D2), + DEF_MOD("i2c5", 919, R8A774C0_CLK_S3D2), + DEF_MOD("i2c-dvfs", 926, R8A774C0_CLK_CP), + DEF_MOD("i2c4", 927, R8A774C0_CLK_S3D2), + DEF_MOD("i2c3", 928, R8A774C0_CLK_S3D2), + DEF_MOD("i2c2", 929, R8A774C0_CLK_S3D2), + DEF_MOD("i2c1", 930, R8A774C0_CLK_S3D2), + DEF_MOD("i2c0", 931, R8A774C0_CLK_S3D2), + + DEF_MOD("i2c7", 1003, R8A774C0_CLK_S3D2), + DEF_MOD("ssi-all", 1005, R8A774C0_CLK_S3D4), + DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)), + DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)), + DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)), + DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)), + DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)), + DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)), + DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)), + DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)), + DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)), + DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)), + DEF_MOD("scu-all", 1017, R8A774C0_CLK_S3D4), + DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)), + DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)), + DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)), + DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)), + DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)), + DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)), + DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)), + DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)), + DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)), + DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)), + DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)), + DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)), + DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)), + DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)), +}; + +static const unsigned int r8a774c0_crit_mod_clks[] __initconst = { + MOD_CLK_ID(408), /* INTC-AP (GIC) */ +}; + +/* + * CPG Clock Data + */ + +/* + * MD19 EXTAL (MHz) PLL0 PLL1 PLL3 + *-------------------------------------------------------------------- + * 0 48 x 1 x100/1 x100/3 x100/3 + * 1 48 x 1 x100/1 x100/3 x58/3 + */ +#define CPG_PLL_CONFIG_INDEX(md) (((md) & BIT(19)) >> 19) + +static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[2] __initconst = { + /* EXTAL div PLL1 mult/div PLL3 mult/div */ + { 1, 100, 3, 100, 3, }, + { 1, 100, 3, 58, 3, }, +}; + +static int __init r8a774c0_cpg_mssr_init(struct device *dev) +{ + const struct rcar_gen3_cpg_pll_config *cpg_pll_config; + u32 cpg_mode; + int error; + + error = rcar_rst_read_mode_pins(&cpg_mode); + if (error) + return error; + + cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)]; + + return rcar_gen3_cpg_init(cpg_pll_config, 0, cpg_mode); +} + +const struct cpg_mssr_info 
r8a774c0_cpg_mssr_info __initconst = { + /* Core Clocks */ + .core_clks = r8a774c0_core_clks, + .num_core_clks = ARRAY_SIZE(r8a774c0_core_clks), + .last_dt_core_clk = LAST_DT_CORE_CLK, + .num_total_core_clks = MOD_CLK_BASE, + + /* Module Clocks */ + .mod_clks = r8a774c0_mod_clks, + .num_mod_clks = ARRAY_SIZE(r8a774c0_mod_clks), + .num_hw_mod_clks = 12 * 32, + + /* Critical Module Clocks */ + .crit_mod_clks = r8a774c0_crit_mod_clks, + .num_crit_mod_clks = ARRAY_SIZE(r8a774c0_crit_mod_clks), + + /* Callbacks */ + .init = r8a774c0_cpg_mssr_init, + .cpg_clk_register = rcar_gen3_cpg_clk_register, +}; diff --git a/drivers/clk/renesas/r8a7790-cpg-mssr.c b/drivers/clk/renesas/r8a7790-cpg-mssr.c index f936cb74b681..c57cb93f8315 100644 --- a/drivers/clk/renesas/r8a7790-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7790-cpg-mssr.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * r8a7790 Clock Pulse Generator / Module Standby and Software Reset * @@ -6,10 +7,6 @@ * Based on clk-rcar-gen2.c * * Copyright (C) 2013 Ideas On Board SPRL - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. */ #include <linux/device.h> diff --git a/drivers/clk/renesas/r8a7791-cpg-mssr.c b/drivers/clk/renesas/r8a7791-cpg-mssr.c index 1b91f03b7598..65702debcabb 100644 --- a/drivers/clk/renesas/r8a7791-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7791-cpg-mssr.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * r8a7791 Clock Pulse Generator / Module Standby and Software Reset * @@ -6,10 +7,6 @@ * Based on clk-rcar-gen2.c * * Copyright (C) 2013 Ideas On Board SPRL - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. */ #include <linux/device.h> diff --git a/drivers/clk/renesas/r8a7792-cpg-mssr.c b/drivers/clk/renesas/r8a7792-cpg-mssr.c index 493e07859f5f..cf8b84a3a060 100644 --- a/drivers/clk/renesas/r8a7792-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7792-cpg-mssr.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * r8a7792 Clock Pulse Generator / Module Standby and Software Reset * @@ -6,10 +7,6 @@ * Based on clk-rcar-gen2.c * * Copyright (C) 2013 Ideas On Board SPRL - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. */ #include <linux/device.h> diff --git a/drivers/clk/renesas/r8a7794-cpg-mssr.c b/drivers/clk/renesas/r8a7794-cpg-mssr.c index 088f4b79fdfc..c1948693c5c1 100644 --- a/drivers/clk/renesas/r8a7794-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7794-cpg-mssr.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * r8a7794 Clock Pulse Generator / Module Standby and Software Reset * @@ -6,10 +7,6 @@ * Based on clk-rcar-gen2.c * * Copyright (C) 2013 Ideas On Board SPRL - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. 
*/ #include <linux/device.h> diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c index a85dd50e8911..119c02440726 100644 --- a/drivers/clk/renesas/r8a7795-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * r8a7795 Clock Pulse Generator / Module Standby and Software Reset * @@ -6,10 +7,6 @@ * Based on clk-rcar-gen3.c * * Copyright (C) 2015 Renesas Electronics Corp. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. */ #include <linux/device.h> @@ -73,6 +70,8 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = { DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), + DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), + /* Core Clock Outputs */ DEF_BASE("z", R8A7795_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0), DEF_BASE("z2", R8A7795_CLK_Z2, CLK_TYPE_GEN3_Z2, CLK_PLL2), @@ -111,8 +110,7 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = { DEF_DIV6P1("mso", R8A7795_CLK_MSO, CLK_PLL1_DIV4, 0x014), DEF_DIV6P1("hdmi", R8A7795_CLK_HDMI, CLK_PLL1_DIV4, 0x250), - DEF_DIV6_RO("osc", R8A7795_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8), - DEF_DIV6_RO("r_int", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32), + DEF_GEN3_OSC("osc", R8A7795_CLK_OSC, CLK_EXTAL, 8), DEF_BASE("r", R8A7795_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT), }; @@ -283,25 +281,25 @@ static const unsigned int r8a7795_crit_mod_clks[] __initconst = { */ /* - * MD EXTAL PLL0 PLL1 PLL2 PLL3 PLL4 + * MD EXTAL PLL0 PLL1 PLL2 PLL3 PLL4 OSC * 14 13 19 17 (MHz) - *------------------------------------------------------------------- - * 0 0 0 0 16.66 x 1 x180 x192 x144 x192 x144 - * 0 0 0 1 16.66 x 1 x180 x192 x144 x128 x144 + *------------------------------------------------------------------------- + * 0 0 0 0 16.66 x 1 x180 x192 x144 x192 x144 /16 + * 0 0 0 1 16.66 x 1 x180 x192 x144 x128 x144 /16 * 0 0 1 0 Prohibited setting - * 0 0 1 1 16.66 x 1 x180 x192 x144 x192 x144 - * 0 1 0 0 20 x 1 x150 x160 x120 x160 x120 - * 0 1 0 1 20 x 1 x150 x160 x120 x106 x120 + * 0 0 1 1 16.66 x 1 x180 x192 x144 x192 x144 /16 + * 0 1 0 0 20 x 1 x150 x160 x120 x160 x120 /19 + * 0 1 0 1 20 x 1 x150 x160 x120 x106 x120 /19 * 0 1 1 0 Prohibited setting - * 0 1 1 1 20 x 1 x150 x160 x120 x160 x120 - * 1 0 0 0 25 x 1 x120 x128 x96 x128 x96 - * 1 0 0 1 25 x 1 x120 x128 x96 x84 x96 + * 0 1 1 1 20 x 1 x150 x160 x120 x160 x120 /19 + * 1 0 0 0 25 x 1 x120 x128 x96 x128 x96 /24 + * 1 0 0 1 25 x 1 x120 x128 x96 x84 x96 /24 * 1 0 1 0 Prohibited setting - * 1 0 1 1 25 x 1 x120 x128 x96 x128 x96 - * 1 1 0 0 33.33 / 2 x180 x192 x144 x192 x144 - * 1 1 0 1 33.33 / 2 x180 x192 x144 x128 x144 + * 1 0 1 1 25 x 1 x120 x128 x96 x128 x96 /24 + * 1 1 0 0 33.33 / 2 x180 x192 x144 x192 x144 /32 + * 1 1 0 1 33.33 / 2 x180 x192 x144 x128 x144 /32 * 1 1 1 0 Prohibited setting - * 1 1 1 1 33.33 / 2 x180 x192 x144 x192 x144 + * 1 1 1 1 33.33 / 2 x180 x192 x144 x192 x144 /32 */ #define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 11) | \ (((md) & BIT(13)) >> 11) | \ @@ -309,23 +307,23 @@ static const unsigned int r8a7795_crit_mod_clks[] __initconst = { (((md) & BIT(17)) >> 17)) static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = { - /* EXTAL div PLL1 mult/div PLL3 mult/div */ - { 1, 192, 1, 192, 1, }, - { 1, 192, 1, 128, 1, }, - { 0, /* Prohibited setting */ }, - { 1, 192, 1, 192, 1, }, - 
{ 1, 160, 1, 160, 1, }, - { 1, 160, 1, 106, 1, }, - { 0, /* Prohibited setting */ }, - { 1, 160, 1, 160, 1, }, - { 1, 128, 1, 128, 1, }, - { 1, 128, 1, 84, 1, }, - { 0, /* Prohibited setting */ }, - { 1, 128, 1, 128, 1, }, - { 2, 192, 1, 192, 1, }, - { 2, 192, 1, 128, 1, }, - { 0, /* Prohibited setting */ }, - { 2, 192, 1, 192, 1, }, + /* EXTAL div PLL1 mult/div PLL3 mult/div OSC prediv */ + { 1, 192, 1, 192, 1, 16, }, + { 1, 192, 1, 128, 1, 16, }, + { 0, /* Prohibited setting */ }, + { 1, 192, 1, 192, 1, 16, }, + { 1, 160, 1, 160, 1, 19, }, + { 1, 160, 1, 106, 1, 19, }, + { 0, /* Prohibited setting */ }, + { 1, 160, 1, 160, 1, 19, }, + { 1, 128, 1, 128, 1, 24, }, + { 1, 128, 1, 84, 1, 24, }, + { 0, /* Prohibited setting */ }, + { 1, 128, 1, 128, 1, 24, }, + { 2, 192, 1, 192, 1, 32, }, + { 2, 192, 1, 128, 1, 32, }, + { 0, /* Prohibited setting */ }, + { 2, 192, 1, 192, 1, 32, }, }; static const struct soc_device_attribute r8a7795es1[] __initconst = { diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c index dfb267a92f2a..10567386e6dd 100644 --- a/drivers/clk/renesas/r8a7796-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * r8a7796 Clock Pulse Generator / Module Standby and Software Reset * @@ -7,10 +8,6 @@ * * Copyright (C) 2015 Glider bvba * Copyright (C) 2015 Renesas Electronics Corp. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. */ #include <linux/device.h> @@ -73,6 +70,8 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = { DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), + DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), + /* Core Clock Outputs */ DEF_BASE("z", R8A7796_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0), DEF_BASE("z2", R8A7796_CLK_Z2, CLK_TYPE_GEN3_Z2, CLK_PLL2), @@ -110,8 +109,7 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = { DEF_DIV6P1("mso", R8A7796_CLK_MSO, CLK_PLL1_DIV4, 0x014), DEF_DIV6P1("hdmi", R8A7796_CLK_HDMI, CLK_PLL1_DIV4, 0x250), - DEF_DIV6_RO("osc", R8A7796_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8), - DEF_DIV6_RO("r_int", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32), + DEF_GEN3_OSC("osc", R8A7796_CLK_OSC, CLK_EXTAL, 8), DEF_BASE("r", R8A7796_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT), }; @@ -255,25 +253,25 @@ static const unsigned int r8a7796_crit_mod_clks[] __initconst = { */ /* - * MD EXTAL PLL0 PLL1 PLL2 PLL3 PLL4 + * MD EXTAL PLL0 PLL1 PLL2 PLL3 PLL4 OSC * 14 13 19 17 (MHz) - *------------------------------------------------------------------- - * 0 0 0 0 16.66 x 1 x180 x192 x144 x192 x144 - * 0 0 0 1 16.66 x 1 x180 x192 x144 x128 x144 + *------------------------------------------------------------------------- + * 0 0 0 0 16.66 x 1 x180 x192 x144 x192 x144 /16 + * 0 0 0 1 16.66 x 1 x180 x192 x144 x128 x144 /16 * 0 0 1 0 Prohibited setting - * 0 0 1 1 16.66 x 1 x180 x192 x144 x192 x144 - * 0 1 0 0 20 x 1 x150 x160 x120 x160 x120 - * 0 1 0 1 20 x 1 x150 x160 x120 x106 x120 + * 0 0 1 1 16.66 x 1 x180 x192 x144 x192 x144 /16 + * 0 1 0 0 20 x 1 x150 x160 x120 x160 x120 /19 + * 0 1 0 1 20 x 1 x150 x160 x120 x106 x120 /19 * 0 1 1 0 Prohibited setting - * 0 1 1 1 20 x 1 x150 x160 x120 x160 x120 - * 1 0 0 0 25 x 1 x120 x128 x96 x128 x96 - * 1 0 0 1 25 x 1 x120 x128 x96 x84 x96 + * 0 1 1 1 20 x 1 x150 x160 x120 x160 x120 /19 + * 1 0 0 0 25 x 1 x120 
x128 x96 x128 x96 /24 + * 1 0 0 1 25 x 1 x120 x128 x96 x84 x96 /24 * 1 0 1 0 Prohibited setting - * 1 0 1 1 25 x 1 x120 x128 x96 x128 x96 - * 1 1 0 0 33.33 / 2 x180 x192 x144 x192 x144 - * 1 1 0 1 33.33 / 2 x180 x192 x144 x128 x144 + * 1 0 1 1 25 x 1 x120 x128 x96 x128 x96 /24 + * 1 1 0 0 33.33 / 2 x180 x192 x144 x192 x144 /32 + * 1 1 0 1 33.33 / 2 x180 x192 x144 x128 x144 /32 * 1 1 1 0 Prohibited setting - * 1 1 1 1 33.33 / 2 x180 x192 x144 x192 x144 + * 1 1 1 1 33.33 / 2 x180 x192 x144 x192 x144 /32 */ #define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 11) | \ (((md) & BIT(13)) >> 11) | \ @@ -281,23 +279,23 @@ static const unsigned int r8a7796_crit_mod_clks[] __initconst = { (((md) & BIT(17)) >> 17)) static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = { - /* EXTAL div PLL1 mult/div PLL3 mult/div */ - { 1, 192, 1, 192, 1, }, - { 1, 192, 1, 128, 1, }, - { 0, /* Prohibited setting */ }, - { 1, 192, 1, 192, 1, }, - { 1, 160, 1, 160, 1, }, - { 1, 160, 1, 106, 1, }, - { 0, /* Prohibited setting */ }, - { 1, 160, 1, 160, 1, }, - { 1, 128, 1, 128, 1, }, - { 1, 128, 1, 84, 1, }, - { 0, /* Prohibited setting */ }, - { 1, 128, 1, 128, 1, }, - { 2, 192, 1, 192, 1, }, - { 2, 192, 1, 128, 1, }, - { 0, /* Prohibited setting */ }, - { 2, 192, 1, 192, 1, }, + /* EXTAL div PLL1 mult/div PLL3 mult/div OSC prediv */ + { 1, 192, 1, 192, 1, 16, }, + { 1, 192, 1, 128, 1, 16, }, + { 0, /* Prohibited setting */ }, + { 1, 192, 1, 192, 1, 16, }, + { 1, 160, 1, 160, 1, 19, }, + { 1, 160, 1, 106, 1, 19, }, + { 0, /* Prohibited setting */ }, + { 1, 160, 1, 160, 1, 19, }, + { 1, 128, 1, 128, 1, 24, }, + { 1, 128, 1, 84, 1, 24, }, + { 0, /* Prohibited setting */ }, + { 1, 128, 1, 128, 1, 24, }, + { 2, 192, 1, 192, 1, 32, }, + { 2, 192, 1, 128, 1, 32, }, + { 0, /* Prohibited setting */ }, + { 2, 192, 1, 192, 1, 32, }, }; static int __init r8a7796_cpg_mssr_init(struct device *dev) diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c index 8fae5e9c4a77..1fcc411502da 100644 --- a/drivers/clk/renesas/r8a77965-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c @@ -68,6 +68,8 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = { DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), + DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), + /* Core Clock Outputs */ DEF_BASE("z", R8A77965_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0), DEF_FIXED("ztr", R8A77965_CLK_ZTR, CLK_PLL1_DIV2, 6, 1), @@ -104,13 +106,13 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = { DEF_DIV6P1("mso", R8A77965_CLK_MSO, CLK_PLL1_DIV4, 0x014), DEF_DIV6P1("hdmi", R8A77965_CLK_HDMI, CLK_PLL1_DIV4, 0x250), - DEF_DIV6_RO("osc", R8A77965_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8), - DEF_DIV6_RO("r_int", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32), + DEF_GEN3_OSC("osc", R8A77965_CLK_OSC, CLK_EXTAL, 8), DEF_BASE("r", R8A77965_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT), }; static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = { + DEF_MOD("fdp1-0", 119, R8A77965_CLK_S0D1), DEF_MOD("scif5", 202, R8A77965_CLK_S3D4), DEF_MOD("scif4", 203, R8A77965_CLK_S3D4), DEF_MOD("scif3", 204, R8A77965_CLK_S3D4), @@ -192,6 +194,7 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = { DEF_MOD("vin1", 810, R8A77965_CLK_S0D2), DEF_MOD("vin0", 811, R8A77965_CLK_S0D2), DEF_MOD("etheravb", 812, R8A77965_CLK_S0D6), + DEF_MOD("sata0", 815, R8A77965_CLK_S3D2), DEF_MOD("imr1", 822, R8A77965_CLK_S0D2), DEF_MOD("imr0", 823, R8A77965_CLK_S0D2), @@ 
-252,25 +255,25 @@ static const unsigned int r8a77965_crit_mod_clks[] __initconst = { */ /* - * MD EXTAL PLL0 PLL1 PLL3 PLL4 + * MD EXTAL PLL0 PLL1 PLL3 PLL4 OSC * 14 13 19 17 (MHz) - *----------------------------------------------------------- - * 0 0 0 0 16.66 x 1 x180 x192 x192 x144 - * 0 0 0 1 16.66 x 1 x180 x192 x128 x144 + *----------------------------------------------------------------- + * 0 0 0 0 16.66 x 1 x180 x192 x192 x144 /16 + * 0 0 0 1 16.66 x 1 x180 x192 x128 x144 /16 * 0 0 1 0 Prohibited setting - * 0 0 1 1 16.66 x 1 x180 x192 x192 x144 - * 0 1 0 0 20 x 1 x150 x160 x160 x120 - * 0 1 0 1 20 x 1 x150 x160 x106 x120 + * 0 0 1 1 16.66 x 1 x180 x192 x192 x144 /16 + * 0 1 0 0 20 x 1 x150 x160 x160 x120 /19 + * 0 1 0 1 20 x 1 x150 x160 x106 x120 /19 * 0 1 1 0 Prohibited setting - * 0 1 1 1 20 x 1 x150 x160 x160 x120 - * 1 0 0 0 25 x 1 x120 x128 x128 x96 - * 1 0 0 1 25 x 1 x120 x128 x84 x96 + * 0 1 1 1 20 x 1 x150 x160 x160 x120 /19 + * 1 0 0 0 25 x 1 x120 x128 x128 x96 /24 + * 1 0 0 1 25 x 1 x120 x128 x84 x96 /24 * 1 0 1 0 Prohibited setting - * 1 0 1 1 25 x 1 x120 x128 x128 x96 - * 1 1 0 0 33.33 / 2 x180 x192 x192 x144 - * 1 1 0 1 33.33 / 2 x180 x192 x128 x144 + * 1 0 1 1 25 x 1 x120 x128 x128 x96 /24 + * 1 1 0 0 33.33 / 2 x180 x192 x192 x144 /32 + * 1 1 0 1 33.33 / 2 x180 x192 x128 x144 /32 * 1 1 1 0 Prohibited setting - * 1 1 1 1 33.33 / 2 x180 x192 x192 x144 + * 1 1 1 1 33.33 / 2 x180 x192 x192 x144 /32 */ #define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 11) | \ (((md) & BIT(13)) >> 11) | \ @@ -278,23 +281,23 @@ static const unsigned int r8a77965_crit_mod_clks[] __initconst = { (((md) & BIT(17)) >> 17)) static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = { - /* EXTAL div PLL1 mult/div PLL3 mult/div */ - { 1, 192, 1, 192, 1, }, - { 1, 192, 1, 128, 1, }, - { 0, /* Prohibited setting */ }, - { 1, 192, 1, 192, 1, }, - { 1, 160, 1, 160, 1, }, - { 1, 160, 1, 106, 1, }, - { 0, /* Prohibited setting */ }, - { 1, 160, 1, 160, 1, }, - { 1, 128, 1, 128, 1, }, - { 1, 128, 1, 84, 1, }, - { 0, /* Prohibited setting */ }, - { 1, 128, 1, 128, 1, }, - { 2, 192, 1, 192, 1, }, - { 2, 192, 1, 128, 1, }, - { 0, /* Prohibited setting */ }, - { 2, 192, 1, 192, 1, }, + /* EXTAL div PLL1 mult/div PLL3 mult/div OSC prediv */ + { 1, 192, 1, 192, 1, 16, }, + { 1, 192, 1, 128, 1, 16, }, + { 0, /* Prohibited setting */ }, + { 1, 192, 1, 192, 1, 16, }, + { 1, 160, 1, 160, 1, 19, }, + { 1, 160, 1, 106, 1, 19, }, + { 0, /* Prohibited setting */ }, + { 1, 160, 1, 160, 1, 19, }, + { 1, 128, 1, 128, 1, 24, }, + { 1, 128, 1, 84, 1, 24, }, + { 0, /* Prohibited setting */ }, + { 1, 128, 1, 128, 1, 24, }, + { 2, 192, 1, 192, 1, 32, }, + { 2, 192, 1, 128, 1, 32, }, + { 0, /* Prohibited setting */ }, + { 2, 192, 1, 192, 1, 32, }, }; static int __init r8a77965_cpg_mssr_init(struct device *dev) diff --git a/drivers/clk/renesas/r8a77970-cpg-mssr.c b/drivers/clk/renesas/r8a77970-cpg-mssr.c index f55842917e8d..2015e45543e9 100644 --- a/drivers/clk/renesas/r8a77970-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77970-cpg-mssr.c @@ -1,17 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 /* * r8a77970 Clock Pulse Generator / Module Standby and Software Reset * - * Copyright (C) 2017 Cogent Embedded Inc. + * Copyright (C) 2017-2018 Cogent Embedded Inc. 
* * Based on r8a7795-cpg-mssr.c * * Copyright (C) 2015 Glider bvba - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. */ +#include <linux/clk-provider.h> #include <linux/device.h> #include <linux/init.h> #include <linux/kernel.h> @@ -22,6 +20,13 @@ #include "renesas-cpg-mssr.h" #include "rcar-gen3-cpg.h" +#define CPG_SD0CKCR 0x0074 + +enum r8a77970_clk_types { + CLK_TYPE_R8A77970_SD0H = CLK_TYPE_GEN3_SOC_BASE, + CLK_TYPE_R8A77970_SD0, +}; + enum clk_ids { /* Core Clock Outputs exported to DT */ LAST_DT_CORE_CLK = R8A77970_CLK_OSC, @@ -42,6 +47,20 @@ enum clk_ids { MOD_CLK_BASE }; +static spinlock_t cpg_lock; + +static const struct clk_div_table cpg_sd0h_div_table[] = { + { 0, 2 }, { 1, 3 }, { 2, 4 }, { 3, 6 }, + { 4, 8 }, { 5, 12 }, { 6, 16 }, { 7, 18 }, + { 8, 24 }, { 10, 36 }, { 11, 48 }, { 0, 0 }, +}; + +static const struct clk_div_table cpg_sd0_div_table[] = { + { 4, 8 }, { 5, 12 }, { 6, 16 }, { 7, 18 }, + { 8, 24 }, { 10, 36 }, { 11, 48 }, { 12, 10 }, + { 0, 0 }, +}; + static const struct cpg_core_clk r8a77970_core_clks[] __initconst = { /* External Clock Inputs */ DEF_INPUT("extal", CLK_EXTAL), @@ -68,6 +87,10 @@ static const struct cpg_core_clk r8a77970_core_clks[] __initconst = { DEF_FIXED("s2d2", R8A77970_CLK_S2D2, CLK_PLL1_DIV2, 12, 1), DEF_FIXED("s2d4", R8A77970_CLK_S2D4, CLK_PLL1_DIV2, 24, 1), + DEF_BASE("sd0h", R8A77970_CLK_SD0H, CLK_TYPE_R8A77970_SD0H, + CLK_PLL1_DIV2), + DEF_BASE("sd0", R8A77970_CLK_SD0, CLK_TYPE_R8A77970_SD0, CLK_PLL1_DIV2), + DEF_FIXED("cl", R8A77970_CLK_CL, CLK_PLL1_DIV2, 48, 1), DEF_FIXED("cp", R8A77970_CLK_CP, CLK_EXTAL, 2, 1), @@ -80,6 +103,11 @@ static const struct cpg_core_clk r8a77970_core_clks[] __initconst = { }; static const struct mssr_mod_clk r8a77970_mod_clks[] __initconst = { + DEF_MOD("tmu4", 121, R8A77970_CLK_S2D2), + DEF_MOD("tmu3", 122, R8A77970_CLK_S2D2), + DEF_MOD("tmu2", 123, R8A77970_CLK_S2D2), + DEF_MOD("tmu1", 124, R8A77970_CLK_S2D2), + DEF_MOD("tmu0", 125, R8A77970_CLK_CP), DEF_MOD("ivcp1e", 127, R8A77970_CLK_S2D1), DEF_MOD("scif4", 203, R8A77970_CLK_S2D4), DEF_MOD("scif3", 204, R8A77970_CLK_S2D4), @@ -92,6 +120,12 @@ static const struct mssr_mod_clk r8a77970_mod_clks[] __initconst = { DEF_MOD("mfis", 213, R8A77970_CLK_S2D2), DEF_MOD("sys-dmac2", 217, R8A77970_CLK_S2D1), DEF_MOD("sys-dmac1", 218, R8A77970_CLK_S2D1), + DEF_MOD("cmt3", 300, R8A77970_CLK_R), + DEF_MOD("cmt2", 301, R8A77970_CLK_R), + DEF_MOD("cmt1", 302, R8A77970_CLK_R), + DEF_MOD("cmt0", 303, R8A77970_CLK_R), + DEF_MOD("tpu0", 304, R8A77970_CLK_S2D4), + DEF_MOD("sd-if", 314, R8A77970_CLK_SD0), DEF_MOD("rwdt", 402, R8A77970_CLK_R), DEF_MOD("intc-ex", 407, R8A77970_CLK_CP), DEF_MOD("intc-ap", 408, R8A77970_CLK_S2D1), @@ -173,11 +207,46 @@ static int __init r8a77970_cpg_mssr_init(struct device *dev) if (error) return error; + spin_lock_init(&cpg_lock); + cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)]; return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode); } +static struct clk * __init r8a77970_cpg_clk_register(struct device *dev, + const struct cpg_core_clk *core, const struct cpg_mssr_info *info, + struct clk **clks, void __iomem *base, + struct raw_notifier_head *notifiers) +{ + const struct clk_div_table *table; + const struct clk *parent; + unsigned int shift; + + switch (core->type) { + case CLK_TYPE_R8A77970_SD0H: + table = cpg_sd0h_div_table; + shift = 8; + break; + case 
CLK_TYPE_R8A77970_SD0: + table = cpg_sd0_div_table; + shift = 4; + break; + default: + return rcar_gen3_cpg_clk_register(dev, core, info, clks, base, + notifiers); + } + + parent = clks[core->parent]; + if (IS_ERR(parent)) + return ERR_CAST(parent); + + return clk_register_divider_table(NULL, core->name, + __clk_get_name(parent), 0, + base + CPG_SD0CKCR, + shift, 4, 0, table, &cpg_lock); +} + const struct cpg_mssr_info r8a77970_cpg_mssr_info __initconst = { /* Core Clocks */ .core_clks = r8a77970_core_clks, @@ -196,5 +265,5 @@ const struct cpg_mssr_info r8a77970_cpg_mssr_info __initconst = { /* Callbacks */ .init = r8a77970_cpg_mssr_init, - .cpg_clk_register = rcar_gen3_cpg_clk_register, + .cpg_clk_register = r8a77970_cpg_clk_register, }; diff --git a/drivers/clk/renesas/r8a77980-cpg-mssr.c b/drivers/clk/renesas/r8a77980-cpg-mssr.c index d7ebd9ec0059..25a3083b6764 100644 --- a/drivers/clk/renesas/r8a77980-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77980-cpg-mssr.c @@ -41,6 +41,7 @@ enum clk_ids { CLK_S2, CLK_S3, CLK_SDSRC, + CLK_OCO, /* Module Clocks */ MOD_CLK_BASE @@ -64,6 +65,7 @@ static const struct cpg_core_clk r8a77980_core_clks[] __initconst = { DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), + DEF_RATE(".oco", CLK_OCO, 32768), /* Core Clock Outputs */ DEF_FIXED("ztr", R8A77980_CLK_ZTR, CLK_PLL1_DIV2, 6, 1), @@ -96,6 +98,9 @@ static const struct cpg_core_clk r8a77980_core_clks[] __initconst = { DEF_DIV6P1("canfd", R8A77980_CLK_CANFD, CLK_PLL1_DIV4, 0x244), DEF_DIV6P1("csi0", R8A77980_CLK_CSI0, CLK_PLL1_DIV4, 0x00c), DEF_DIV6P1("mso", R8A77980_CLK_MSO, CLK_PLL1_DIV4, 0x014), + + DEF_GEN3_OSC("osc", R8A77980_CLK_OSC, CLK_EXTAL, 8), + DEF_GEN3_MDSEL("r", R8A77980_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1), }; static const struct mssr_mod_clk r8a77980_mod_clks[] __initconst = { @@ -114,9 +119,14 @@ static const struct mssr_mod_clk r8a77980_mod_clks[] __initconst = { DEF_MOD("msiof0", 211, R8A77980_CLK_MSO), DEF_MOD("sys-dmac2", 217, R8A77980_CLK_S0D3), DEF_MOD("sys-dmac1", 218, R8A77980_CLK_S0D3), + DEF_MOD("cmt3", 300, R8A77980_CLK_R), + DEF_MOD("cmt2", 301, R8A77980_CLK_R), + DEF_MOD("cmt1", 302, R8A77980_CLK_R), + DEF_MOD("cmt0", 303, R8A77980_CLK_R), DEF_MOD("tpu0", 304, R8A77980_CLK_S3D4), DEF_MOD("sdif", 314, R8A77980_CLK_SD0), DEF_MOD("pciec0", 319, R8A77980_CLK_S2D2), + DEF_MOD("rwdt", 402, R8A77980_CLK_R), DEF_MOD("intc-ex", 407, R8A77980_CLK_CP), DEF_MOD("intc-ap", 408, R8A77980_CLK_S0D3), DEF_MOD("hscif3", 517, R8A77980_CLK_S3D1), @@ -171,23 +181,23 @@ static const unsigned int r8a77980_crit_mod_clks[] __initconst = { */ /* - * MD EXTAL PLL2 PLL1 PLL3 + * MD EXTAL PLL2 PLL1 PLL3 OSC * 14 13 (MHz) - * -------------------------------------------------- - * 0 0 16.66 x 1 x240 x192 x192 - * 0 1 20 x 1 x200 x160 x160 - * 1 0 27 x 1 x148 x118 x118 - * 1 1 33.33 / 2 x240 x192 x192 + * -------------------------------------------------------- + * 0 0 16.66 x 1 x240 x192 x192 /16 + * 0 1 20 x 1 x200 x160 x160 /19 + * 1 0 27 x 1 x148 x118 x118 /26 + * 1 1 33.33 / 2 x240 x192 x192 /32 */ #define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \ (((md) & BIT(13)) >> 13)) static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[4] __initconst = { - /* EXTAL div PLL1 mult/div PLL3 mult/div */ - { 1, 192, 1, 192, 1, }, - { 1, 160, 1, 160, 1, }, - { 1, 118, 1, 118, 1, }, - { 2, 192, 1, 192, 1, }, + /* EXTAL div PLL1 mult/div PLL3 mult/div OSC prediv */ + { 1, 192, 1, 192, 1, 16, }, + { 1, 160, 
1, 160, 1, 19, }, + { 1, 118, 1, 118, 1, 26, }, + { 2, 192, 1, 192, 1, 32, }, }; static int __init r8a77980_cpg_mssr_init(struct device *dev) diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c index 9e14f1486fbb..9eb80180eea0 100644 --- a/drivers/clk/renesas/r8a77990-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c @@ -44,6 +44,8 @@ enum clk_ids { CLK_S2, CLK_S3, CLK_SDSRC, + CLK_RINT, + CLK_OCO, /* Module Clocks */ MOD_CLK_BASE @@ -72,6 +74,10 @@ static const struct cpg_core_clk r8a77990_core_clks[] __initconst = { DEF_FIXED(".s3", CLK_S3, CLK_PLL1, 6, 1), DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1, 2, 1), + DEF_DIV6_RO(".r", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32), + + DEF_RATE(".oco", CLK_OCO, 8 * 1000 * 1000), + /* Core Clock Outputs */ DEF_FIXED("za2", R8A77990_CLK_ZA2, CLK_PLL0D24, 1, 1), DEF_FIXED("za8", R8A77990_CLK_ZA8, CLK_PLL0D8, 1, 1), @@ -100,8 +106,8 @@ static const struct cpg_core_clk r8a77990_core_clks[] __initconst = { DEF_FIXED("cl", R8A77990_CLK_CL, CLK_PLL1, 48, 1), DEF_FIXED("cp", R8A77990_CLK_CP, CLK_EXTAL, 2, 1), DEF_FIXED("cpex", R8A77990_CLK_CPEX, CLK_EXTAL, 4, 1), - DEF_FIXED("osc", R8A77990_CLK_OSC, CLK_EXTAL, 384, 1), - DEF_FIXED("r", R8A77990_CLK_R, CLK_EXTAL, 1536, 1), + + DEF_DIV6_RO("osc", R8A77990_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8), DEF_GEN3_PE("s0d6c", R8A77990_CLK_S0D6C, CLK_S0, 6, CLK_PE, 2), DEF_GEN3_PE("s3d1c", R8A77990_CLK_S3D1C, CLK_S3, 1, CLK_PE, 1), @@ -111,6 +117,8 @@ static const struct cpg_core_clk r8a77990_core_clks[] __initconst = { DEF_DIV6P1("canfd", R8A77990_CLK_CANFD, CLK_PLL0D6, 0x244), DEF_DIV6P1("csi0", R8A77990_CLK_CSI0, CLK_PLL1D2, 0x00c), DEF_DIV6P1("mso", R8A77990_CLK_MSO, CLK_PLL1D2, 0x014), + + DEF_GEN3_RCKSEL("r", R8A77990_CLK_R, CLK_RINT, 1, CLK_OCO, 61 * 4), }; static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = { @@ -202,6 +210,7 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = { DEF_MOD("i2c1", 930, R8A77990_CLK_S3D2), DEF_MOD("i2c0", 931, R8A77990_CLK_S3D2), + DEF_MOD("i2c7", 1003, R8A77990_CLK_S3D2), DEF_MOD("ssi-all", 1005, R8A77990_CLK_S3D4), DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)), DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)), @@ -241,8 +250,8 @@ static const unsigned int r8a77990_crit_mod_clks[] __initconst = { /* * MD19 EXTAL (MHz) PLL0 PLL1 PLL3 *-------------------------------------------------------------------- - * 0 48 x 1 x100/4 x100/3 x100/3 - * 1 48 x 1 x100/4 x100/3 x58/3 + * 0 48 x 1 x100/1 x100/3 x100/3 + * 1 48 x 1 x100/1 x100/3 x58/3 */ #define CPG_PLL_CONFIG_INDEX(md) (((md) & BIT(19)) >> 19) diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c index ea4cafbe6e85..47e60e3dbe05 100644 --- a/drivers/clk/renesas/r8a77995-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * r8a77995 Clock Pulse Generator / Module Standby and Software Reset * @@ -7,10 +8,6 @@ * * Copyright (C) 2015 Glider bvba * Copyright (C) 2015 Renesas Electronics Corp. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. 
*/ #include <linux/device.h> @@ -46,6 +43,8 @@ enum clk_ids { CLK_S3, CLK_SDSRC, CLK_SSPSRC, + CLK_RINT, + CLK_OCO, /* Module Clocks */ MOD_CLK_BASE @@ -72,6 +71,10 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = { DEF_FIXED(".s3", CLK_S3, CLK_PLL1, 6, 1), DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1, 2, 1), + DEF_DIV6_RO(".r", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32), + + DEF_RATE(".oco", CLK_OCO, 8 * 1000 * 1000), + /* Core Clock Outputs */ DEF_FIXED("z2", R8A77995_CLK_Z2, CLK_PLL0D3, 1, 1), DEF_FIXED("ztr", R8A77995_CLK_ZTR, CLK_PLL1, 6, 1), @@ -90,8 +93,8 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = { DEF_FIXED("cl", R8A77995_CLK_CL, CLK_PLL1, 48, 1), DEF_FIXED("cp", R8A77995_CLK_CP, CLK_EXTAL, 2, 1), - DEF_FIXED("osc", R8A77995_CLK_OSC, CLK_EXTAL, 384, 1), - DEF_FIXED("r", R8A77995_CLK_R, CLK_EXTAL, 1536, 1), + + DEF_DIV6_RO("osc", R8A77995_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8), DEF_GEN3_PE("s1d4c", R8A77995_CLK_S1D4C, CLK_S1, 4, CLK_PE, 2), DEF_GEN3_PE("s3d1c", R8A77995_CLK_S3D1C, CLK_S3, 1, CLK_PE, 1), @@ -102,6 +105,8 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = { DEF_DIV6P1("canfd", R8A77995_CLK_CANFD, CLK_PLL0D3, 0x244), DEF_DIV6P1("mso", R8A77995_CLK_MSO, CLK_PLL1D2, 0x014), + + DEF_GEN3_RCKSEL("r", R8A77995_CLK_R, CLK_RINT, 1, CLK_OCO, 61 * 4), }; static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = { diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c index a0b6ecdc63dd..6d2b56891559 100644 --- a/drivers/clk/renesas/r9a06g032-clocks.c +++ b/drivers/clk/renesas/r9a06g032-clocks.c @@ -539,7 +539,8 @@ r9a06g032_div_round_rate(struct clk_hw *hw, * several uarts attached to this divider, and changing this impacts * everyone. */ - if (clk->index == R9A06G032_DIV_UART) { + if (clk->index == R9A06G032_DIV_UART || + clk->index == R9A06G032_DIV_P2_PG) { pr_devel("%s div uart hack!\n", __func__); return clk_get_rate(hw->clk); } diff --git a/drivers/clk/renesas/rcar-gen2-cpg.c b/drivers/clk/renesas/rcar-gen2-cpg.c index daf88bc2cdae..f596a2dafcf4 100644 --- a/drivers/clk/renesas/rcar-gen2-cpg.c +++ b/drivers/clk/renesas/rcar-gen2-cpg.c @@ -1,11 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * R-Car Gen2 Clock Pulse Generator * * Copyright (C) 2016 Cogent Embedded Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. */ #include <linux/bug.h> diff --git a/drivers/clk/renesas/rcar-gen2-cpg.h b/drivers/clk/renesas/rcar-gen2-cpg.h index 020a3baad015..bff9551c7a38 100644 --- a/drivers/clk/renesas/rcar-gen2-cpg.h +++ b/drivers/clk/renesas/rcar-gen2-cpg.h @@ -1,11 +1,8 @@ -/* +/* SPDX-License-Identifier: GPL-2.0 + * * R-Car Gen2 Clock Pulse Generator * * Copyright (C) 2016 Cogent Embedded Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation; version 2 of the License. 
*/ #ifndef __CLK_RENESAS_RCAR_GEN2_CPG_H__ diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c index 628b63b85d3f..4ba38f98cc7b 100644 --- a/drivers/clk/renesas/rcar-gen3-cpg.c +++ b/drivers/clk/renesas/rcar-gen3-cpg.c @@ -1,15 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 /* * R-Car Gen3 Clock Pulse Generator * - * Copyright (C) 2015-2016 Glider bvba + * Copyright (C) 2015-2018 Glider bvba * * Based on clk-rcar-gen3.c * * Copyright (C) 2015 Renesas Electronics Corp. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. */ #include <linux/bug.h> @@ -31,6 +28,8 @@ #define CPG_PLL2CR 0x002c #define CPG_PLL4CR 0x01f4 +#define CPG_RCKCR_CKSEL BIT(15) /* RCLK Clock Source Select */ + struct cpg_simple_notifier { struct notifier_block nb; void __iomem *reg; @@ -444,7 +443,7 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev, unsigned int div = 1; u32 value; - parent = clks[core->parent & 0xffff]; /* CLK_TYPE_PE uses high bits */ + parent = clks[core->parent & 0xffff]; /* some types use high bits */ if (IS_ERR(parent)) return ERR_CAST(parent); @@ -524,7 +523,7 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev, if (clk_get_rate(clks[cpg_clk_extalr])) { parent = clks[cpg_clk_extalr]; - value |= BIT(15); + value |= CPG_RCKCR_CKSEL; } writel(value, csn->reg); @@ -537,16 +536,14 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev, parent = clks[cpg_clk_extalr]; break; - case CLK_TYPE_GEN3_PE: + case CLK_TYPE_GEN3_MDSEL: /* - * Peripheral clock with a fixed divider, selectable between - * clean and spread spectrum parents using MD12 + * Clock selectable between two parents and two fixed dividers + * using a mode pin */ - if (cpg_mode & BIT(12)) { - /* Clean */ + if (cpg_mode & BIT(core->offset)) { div = core->div & 0xffff; } else { - /* SCCG */ parent = clks[core->parent >> 16]; if (IS_ERR(parent)) return ERR_CAST(parent); @@ -563,6 +560,28 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev, return cpg_z_clk_register(core->name, __clk_get_name(parent), base, CPG_FRQCRC_Z2FC_MASK); + case CLK_TYPE_GEN3_OSC: + /* + * Clock combining OSC EXTAL predivider and a fixed divider + */ + div = cpg_pll_config->osc_prediv * core->div; + break; + + case CLK_TYPE_GEN3_RCKSEL: + /* + * Clock selectable between two parents and two fixed dividers + * using RCKCR.CKSEL + */ + if (readl(base + CPG_RCKCR) & CPG_RCKCR_CKSEL) { + div = core->div & 0xffff; + } else { + parent = clks[core->parent >> 16]; + if (IS_ERR(parent)) + return ERR_CAST(parent); + div = core->div >> 16; + } + break; + default: return ERR_PTR(-EINVAL); } diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h index ea4f8fc3c4c9..f4fb6cf16688 100644 --- a/drivers/clk/renesas/rcar-gen3-cpg.h +++ b/drivers/clk/renesas/rcar-gen3-cpg.h @@ -1,11 +1,9 @@ -/* +/* SPDX-License-Identifier: GPL-2.0 + * * R-Car Gen3 Clock Pulse Generator * - * Copyright (C) 2015-2016 Glider bvba + * Copyright (C) 2015-2018 Glider bvba * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. 
*/ #ifndef __CLK_RENESAS_RCAR_GEN3_CPG_H__ @@ -20,19 +18,35 @@ enum rcar_gen3_clk_types { CLK_TYPE_GEN3_PLL4, CLK_TYPE_GEN3_SD, CLK_TYPE_GEN3_R, - CLK_TYPE_GEN3_PE, + CLK_TYPE_GEN3_MDSEL, /* Select parent/divider using mode pin */ CLK_TYPE_GEN3_Z, CLK_TYPE_GEN3_Z2, + CLK_TYPE_GEN3_OSC, /* OSC EXTAL predivider and fixed divider */ + CLK_TYPE_GEN3_RCKSEL, /* Select parent/divider using RCKCR.CKSEL */ + + /* SoC specific definitions start here */ + CLK_TYPE_GEN3_SOC_BASE, }; #define DEF_GEN3_SD(_name, _id, _parent, _offset) \ DEF_BASE(_name, _id, CLK_TYPE_GEN3_SD, _parent, .offset = _offset) +#define DEF_GEN3_MDSEL(_name, _id, _md, _parent0, _div0, _parent1, _div1) \ + DEF_BASE(_name, _id, CLK_TYPE_GEN3_MDSEL, \ + (_parent0) << 16 | (_parent1), \ + .div = (_div0) << 16 | (_div1), .offset = _md) + #define DEF_GEN3_PE(_name, _id, _parent_sscg, _div_sscg, _parent_clean, \ _div_clean) \ - DEF_BASE(_name, _id, CLK_TYPE_GEN3_PE, \ - (_parent_sscg) << 16 | (_parent_clean), \ - .div = (_div_sscg) << 16 | (_div_clean)) + DEF_GEN3_MDSEL(_name, _id, 12, _parent_sscg, _div_sscg, \ + _parent_clean, _div_clean) + +#define DEF_GEN3_OSC(_name, _id, _parent, _div) \ + DEF_BASE(_name, _id, CLK_TYPE_GEN3_OSC, _parent, .div = _div) + +#define DEF_GEN3_RCKSEL(_name, _id, _parent0, _div0, _parent1, _div1) \ + DEF_BASE(_name, _id, CLK_TYPE_GEN3_RCKSEL, \ + (_parent0) << 16 | (_parent1), .div = (_div0) << 16 | (_div1)) struct rcar_gen3_cpg_pll_config { u8 extal_div; @@ -40,6 +54,7 @@ struct rcar_gen3_cpg_pll_config { u8 pll1_div; u8 pll3_mult; u8 pll3_div; + u8 osc_prediv; }; #define CPG_RCKCR 0x240 diff --git a/drivers/clk/renesas/rcar-usb2-clock-sel.c b/drivers/clk/renesas/rcar-usb2-clock-sel.c index 6cd030a58964..b241f9ca3d71 100644 --- a/drivers/clk/renesas/rcar-usb2-clock-sel.c +++ b/drivers/clk/renesas/rcar-usb2-clock-sel.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Renesas R-Car USB2.0 clock selector * @@ -6,10 +7,6 @@ * Based on renesas-cpg-mssr.c * * Copyright (C) 2015 Glider bvba - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. */ #include <linux/clk.h> diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index f4b013e9352d..f7bb817420b4 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Renesas Clock Pulse Generator / Module Standby and Software Reset * @@ -7,10 +8,6 @@ * * Copyright (C) 2013 Ideas On Board SPRL * Copyright (C) 2015 Renesas Electronics Corp. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. 
*/ #include <linux/clk.h> @@ -73,6 +70,17 @@ static const u16 smstpcr[] = { #define SMSTPCR(i) smstpcr[i] +/* + * Standby Control Register offsets (RZ/A) + * Base address is FRQCR register + */ + +static const u16 stbcr[] = { + 0xFFFF/*dummy*/, 0x010, 0x014, 0x410, 0x414, 0x418, 0x41C, 0x420, + 0x424, 0x428, 0x42C, +}; + +#define STBCR(i) stbcr[i] /* * Software Reset Register offsets @@ -110,6 +118,7 @@ static const u16 srcr[] = { * @notifiers: Notifier chain to save/restore clock state for system resume * @smstpcr_saved[].mask: Mask of SMSTPCR[] bits under our control * @smstpcr_saved[].val: Saved values of SMSTPCR[] + * @stbyctrl: This device has Standby Control Registers */ struct cpg_mssr_priv { #ifdef CONFIG_RESET_CONTROLLER @@ -118,11 +127,13 @@ struct cpg_mssr_priv { struct device *dev; void __iomem *base; spinlock_t rmw_lock; + struct device_node *np; struct clk **clks; unsigned int num_core_clks; unsigned int num_mod_clks; unsigned int last_dt_core_clk; + bool stbyctrl; struct raw_notifier_head notifiers; struct { @@ -131,6 +142,7 @@ struct cpg_mssr_priv { } smstpcr_saved[ARRAY_SIZE(smstpcr)]; }; +static struct cpg_mssr_priv *cpg_mssr_priv; /** * struct mstp_clock - MSTP gating clock @@ -162,16 +174,29 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable) enable ? "ON" : "OFF"); spin_lock_irqsave(&priv->rmw_lock, flags); - value = readl(priv->base + SMSTPCR(reg)); - if (enable) - value &= ~bitmask; - else - value |= bitmask; - writel(value, priv->base + SMSTPCR(reg)); + if (priv->stbyctrl) { + value = readb(priv->base + STBCR(reg)); + if (enable) + value &= ~bitmask; + else + value |= bitmask; + writeb(value, priv->base + STBCR(reg)); + + /* dummy read to ensure write has completed */ + readb(priv->base + STBCR(reg)); + barrier_data(priv->base + STBCR(reg)); + } else { + value = readl(priv->base + SMSTPCR(reg)); + if (enable) + value &= ~bitmask; + else + value |= bitmask; + writel(value, priv->base + SMSTPCR(reg)); + } spin_unlock_irqrestore(&priv->rmw_lock, flags); - if (!enable) + if (!enable || priv->stbyctrl) return 0; for (i = 1000; i > 0; --i) { @@ -205,7 +230,10 @@ static int cpg_mstp_clock_is_enabled(struct clk_hw *hw) struct cpg_mssr_priv *priv = clock->priv; u32 value; - value = readl(priv->base + MSTPSR(clock->index / 32)); + if (priv->stbyctrl) + value = readb(priv->base + STBCR(clock->index / 32)); + else + value = readl(priv->base + MSTPSR(clock->index / 32)); return !(value & BIT(clock->index % 32)); } @@ -226,6 +254,7 @@ struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec, unsigned int idx; const char *type; struct clk *clk; + int range_check; switch (clkspec->args[0]) { case CPG_CORE: @@ -240,8 +269,14 @@ struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec, case CPG_MOD: type = "module"; - idx = MOD_CLK_PACK(clkidx); - if (clkidx % 100 > 31 || idx >= priv->num_mod_clks) { + if (priv->stbyctrl) { + idx = MOD_CLK_PACK_10(clkidx); + range_check = 7 - (clkidx % 10); + } else { + idx = MOD_CLK_PACK(clkidx); + range_check = 31 - (clkidx % 100); + } + if (range_check < 0 || idx >= priv->num_mod_clks) { dev_err(dev, "Invalid %s clock index %u\n", type, clkidx); return ERR_PTR(-EINVAL); @@ -283,7 +318,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core, switch (core->type) { case CLK_TYPE_IN: - clk = of_clk_get_by_name(priv->dev->of_node, core->name); + clk = of_clk_get_by_name(priv->np, core->name); break; case CLK_TYPE_FF: @@ -313,6 +348,11 @@ static void __init 
cpg_mssr_register_core_clk(const struct cpg_core_clk *core, } break; + case CLK_TYPE_FR: + clk = clk_register_fixed_rate(NULL, core->name, NULL, 0, + core->mult); + break; + default: if (info->cpg_clk_register) clk = info->cpg_clk_register(dev, core, info, @@ -641,11 +681,22 @@ static inline int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv) static const struct of_device_id cpg_mssr_match[] = { +#ifdef CONFIG_CLK_R7S9210 + { + .compatible = "renesas,r7s9210-cpg-mssr", + .data = &r7s9210_cpg_mssr_info, + }, +#endif #ifdef CONFIG_CLK_R8A7743 { .compatible = "renesas,r8a7743-cpg-mssr", .data = &r8a7743_cpg_mssr_info, }, + /* RZ/G1N is (almost) identical to RZ/G1M w.r.t. clocks. */ + { + .compatible = "renesas,r8a7744-cpg-mssr", + .data = &r8a7743_cpg_mssr_info, + }, #endif #ifdef CONFIG_CLK_R8A7745 { @@ -659,6 +710,18 @@ static const struct of_device_id cpg_mssr_match[] = { .data = &r8a77470_cpg_mssr_info, }, #endif +#ifdef CONFIG_CLK_R8A774A1 + { + .compatible = "renesas,r8a774a1-cpg-mssr", + .data = &r8a774a1_cpg_mssr_info, + }, +#endif +#ifdef CONFIG_CLK_R8A774C0 + { + .compatible = "renesas,r8a774c0-cpg-mssr", + .data = &r8a774c0_cpg_mssr_info, + }, +#endif #ifdef CONFIG_CLK_R8A7790 { .compatible = "renesas,r8a7790-cpg-mssr", @@ -780,13 +843,23 @@ static int cpg_mssr_resume_noirq(struct device *dev) if (!mask) continue; - oldval = readl(priv->base + SMSTPCR(reg)); + if (priv->stbyctrl) + oldval = readb(priv->base + STBCR(reg)); + else + oldval = readl(priv->base + SMSTPCR(reg)); newval = oldval & ~mask; newval |= priv->smstpcr_saved[reg].val & mask; if (newval == oldval) continue; - writel(newval, priv->base + SMSTPCR(reg)); + if (priv->stbyctrl) { + writeb(newval, priv->base + STBCR(reg)); + /* dummy read to ensure write has completed */ + readb(priv->base + STBCR(reg)); + barrier_data(priv->base + STBCR(reg)); + continue; + } else + writel(newval, priv->base + SMSTPCR(reg)); /* Wait until enabled clocks are really enabled */ mask &= ~priv->smstpcr_saved[reg].val; @@ -817,61 +890,115 @@ static const struct dev_pm_ops cpg_mssr_pm = { #define DEV_PM_OPS NULL #endif /* CONFIG_PM_SLEEP && CONFIG_ARM_PSCI_FW */ -static int __init cpg_mssr_probe(struct platform_device *pdev) +static int __init cpg_mssr_common_init(struct device *dev, + struct device_node *np, + const struct cpg_mssr_info *info) { - struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; - const struct cpg_mssr_info *info; struct cpg_mssr_priv *priv; + struct clk **clks = NULL; unsigned int nclks, i; - struct resource *res; - struct clk **clks; int error; - info = of_device_get_match_data(dev); if (info->init) { error = info->init(dev); if (error) return error; } - priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; + priv->np = np; priv->dev = dev; spin_lock_init(&priv->rmw_lock); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(dev, res); - if (IS_ERR(priv->base)) - return PTR_ERR(priv->base); + priv->base = of_iomap(np, 0); + if (!priv->base) { + error = -ENOMEM; + goto out_err; + } nclks = info->num_total_core_clks + info->num_hw_mod_clks; - clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL); - if (!clks) - return -ENOMEM; + clks = kmalloc_array(nclks, sizeof(*clks), GFP_KERNEL); + if (!clks) { + error = -ENOMEM; + goto out_err; + } - dev_set_drvdata(dev, priv); + cpg_mssr_priv = priv; priv->clks = clks; priv->num_core_clks = info->num_total_core_clks; 
priv->num_mod_clks = info->num_hw_mod_clks; priv->last_dt_core_clk = info->last_dt_core_clk; RAW_INIT_NOTIFIER_HEAD(&priv->notifiers); + priv->stbyctrl = info->stbyctrl; for (i = 0; i < nclks; i++) clks[i] = ERR_PTR(-ENOENT); + error = of_clk_add_provider(np, cpg_mssr_clk_src_twocell_get, priv); + if (error) + goto out_err; + + return 0; + +out_err: + kfree(clks); + if (priv->base) + iounmap(priv->base); + kfree(priv); + + return error; +} + +void __init cpg_mssr_early_init(struct device_node *np, + const struct cpg_mssr_info *info) +{ + int error; + int i; + + error = cpg_mssr_common_init(NULL, np, info); + if (error) + return; + + for (i = 0; i < info->num_early_core_clks; i++) + cpg_mssr_register_core_clk(&info->early_core_clks[i], info, + cpg_mssr_priv); + + for (i = 0; i < info->num_early_mod_clks; i++) + cpg_mssr_register_mod_clk(&info->early_mod_clks[i], info, + cpg_mssr_priv); + +} + +static int __init cpg_mssr_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + const struct cpg_mssr_info *info; + struct cpg_mssr_priv *priv; + unsigned int i; + int error; + + info = of_device_get_match_data(dev); + + if (!cpg_mssr_priv) { + error = cpg_mssr_common_init(dev, dev->of_node, info); + if (error) + return error; + } + + priv = cpg_mssr_priv; + priv->dev = dev; + dev_set_drvdata(dev, priv); + for (i = 0; i < info->num_core_clks; i++) cpg_mssr_register_core_clk(&info->core_clks[i], info, priv); for (i = 0; i < info->num_mod_clks; i++) cpg_mssr_register_mod_clk(&info->mod_clks[i], info, priv); - error = of_clk_add_provider(np, cpg_mssr_clk_src_twocell_get, priv); - if (error) - return error; - error = devm_add_action_or_reset(dev, cpg_mssr_del_clk_provider, np); @@ -883,6 +1010,10 @@ static int __init cpg_mssr_probe(struct platform_device *pdev) if (error) return error; + /* Reset Controller not supported for Standby Control SoCs */ + if (info->stbyctrl) + return 0; + error = cpg_mssr_reset_controller_register(priv); if (error) return error; diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h index 642f720b9b05..c4ec9df146fd 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.h +++ b/drivers/clk/renesas/renesas-cpg-mssr.h @@ -1,11 +1,8 @@ -/* +/* SPDX-License-Identifier: GPL-2.0 + * * Renesas Clock Pulse Generator / Module Standby and Software Reset * * Copyright (C) 2015 Glider bvba - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. */ #ifndef __CLK_RENESAS_CPG_MSSR_H__ @@ -38,6 +35,7 @@ enum clk_types { CLK_TYPE_FF, /* Fixed Factor Clock */ CLK_TYPE_DIV6P1, /* DIV6 Clock with 1 parent clock */ CLK_TYPE_DIV6_RO, /* DIV6 Clock read only with extra divisor */ + CLK_TYPE_FR, /* Fixed Rate Clock */ /* Custom definitions start here */ CLK_TYPE_CUSTOM, @@ -56,6 +54,8 @@ enum clk_types { DEF_BASE(_name, _id, CLK_TYPE_DIV6P1, _parent, .offset = _offset) #define DEF_DIV6_RO(_name, _id, _parent, _offset, _div) \ DEF_BASE(_name, _id, CLK_TYPE_DIV6_RO, _parent, .offset = _offset, .div = _div, .mult = 1) +#define DEF_RATE(_name, _id, _rate) \ + DEF_TYPE(_name, _id, CLK_TYPE_FR, .mult = _rate) /* * Definitions of Module Clocks @@ -75,12 +75,24 @@ struct mssr_mod_clk { #define DEF_MOD(_name, _mod, _parent...) 
\ { .name = _name, .id = MOD_CLK_ID(_mod), .parent = _parent } +/* Convert from sparse base-10 to packed index space */ +#define MOD_CLK_PACK_10(x) ((x / 10) * 32 + (x % 10)) + +#define MOD_CLK_ID_10(x) (MOD_CLK_BASE + MOD_CLK_PACK_10(x)) + +#define DEF_MOD_STB(_name, _mod, _parent...) \ + { .name = _name, .id = MOD_CLK_ID_10(_mod), .parent = _parent } struct device_node; /** * SoC-specific CPG/MSSR Description * + * @early_core_clks: Array of Early Core Clock definitions + * @num_early_core_clks: Number of entries in early_core_clks[] + * @early_mod_clks: Array of Early Module Clock definitions + * @num_early_mod_clks: Number of entries in early_mod_clks[] + * * @core_clks: Array of Core Clock definitions * @num_core_clks: Number of entries in core_clks[] * @last_dt_core_clk: ID of the last Core Clock exported to DT @@ -100,14 +112,25 @@ struct device_node; * * @init: Optional callback to perform SoC-specific initialization * @cpg_clk_register: Optional callback to handle special Core Clock types + * + * @stbyctrl: This device has Standby Control Registers which are 8-bits + * wide, no status registers (MSTPSR) and have different address + * offsets. */ struct cpg_mssr_info { + /* Early Clocks */ + const struct cpg_core_clk *early_core_clks; + unsigned int num_early_core_clks; + const struct mssr_mod_clk *early_mod_clks; + unsigned int num_early_mod_clks; + /* Core Clocks */ const struct cpg_core_clk *core_clks; unsigned int num_core_clks; unsigned int last_dt_core_clk; unsigned int num_total_core_clks; + bool stbyctrl; /* Module Clocks */ const struct mssr_mod_clk *mod_clks; @@ -131,9 +154,12 @@ struct cpg_mssr_info { struct raw_notifier_head *notifiers); }; +extern const struct cpg_mssr_info r7s9210_cpg_mssr_info; extern const struct cpg_mssr_info r8a7743_cpg_mssr_info; extern const struct cpg_mssr_info r8a7745_cpg_mssr_info; extern const struct cpg_mssr_info r8a77470_cpg_mssr_info; +extern const struct cpg_mssr_info r8a774a1_cpg_mssr_info; +extern const struct cpg_mssr_info r8a774c0_cpg_mssr_info; extern const struct cpg_mssr_info r8a7790_cpg_mssr_info; extern const struct cpg_mssr_info r8a7791_cpg_mssr_info; extern const struct cpg_mssr_info r8a7792_cpg_mssr_info; @@ -146,6 +172,8 @@ extern const struct cpg_mssr_info r8a77980_cpg_mssr_info; extern const struct cpg_mssr_info r8a77990_cpg_mssr_info; extern const struct cpg_mssr_info r8a77995_cpg_mssr_info; +void __init cpg_mssr_early_init(struct device_node *np, + const struct cpg_mssr_info *info); /* * Helpers for fixing up clock tables depending on SoC revision diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c index e8075359366b..ebce5260068b 100644 --- a/drivers/clk/rockchip/clk-ddr.c +++ b/drivers/clk/rockchip/clk-ddr.c @@ -80,16 +80,12 @@ static long rockchip_ddrclk_sip_round_rate(struct clk_hw *hw, static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw) { struct rockchip_ddrclk *ddrclk = to_rockchip_ddrclk_hw(hw); - int num_parents = clk_hw_get_num_parents(hw); u32 val; val = clk_readl(ddrclk->reg_base + ddrclk->mux_offset) >> ddrclk->mux_shift; val &= GENMASK(ddrclk->mux_width - 1, 0); - if (val >= num_parents) - return -EINVAL; - return val; } diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c index 67e73fd71f09..fa25e35ce7d5 100644 --- a/drivers/clk/rockchip/clk-rk3188.c +++ b/drivers/clk/rockchip/clk-rk3188.c @@ -645,7 +645,7 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = { GATE(HCLK_I2S1, "hclk_i2s1", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 3, 
GFLAGS), GATE(HCLK_I2S2, "hclk_i2s2", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS), GATE(HCLK_CIF1, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS), - GATE(0, "hclk_hdmi", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS), + GATE(HCLK_HDMI, "hclk_hdmi", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS), GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 14, GFLAGS), diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c index 450de24a1b42..5a67b7869960 100644 --- a/drivers/clk/rockchip/clk-rk3288.c +++ b/drivers/clk/rockchip/clk-rk3288.c @@ -83,22 +83,43 @@ static struct rockchip_pll_rate_table rk3288_pll_rates[] = { RK3066_PLL_RATE( 768000000, 1, 64, 2), RK3066_PLL_RATE( 742500000, 8, 495, 2), RK3066_PLL_RATE( 696000000, 1, 58, 2), + RK3066_PLL_RATE_NB(621000000, 1, 207, 8, 1), RK3066_PLL_RATE( 600000000, 1, 50, 2), RK3066_PLL_RATE_NB(594000000, 1, 198, 8, 1), RK3066_PLL_RATE( 552000000, 1, 46, 2), RK3066_PLL_RATE( 504000000, 1, 84, 4), RK3066_PLL_RATE( 500000000, 3, 125, 2), RK3066_PLL_RATE( 456000000, 1, 76, 4), + RK3066_PLL_RATE( 428000000, 1, 107, 6), RK3066_PLL_RATE( 408000000, 1, 68, 4), RK3066_PLL_RATE( 400000000, 3, 100, 2), + RK3066_PLL_RATE_NB( 394000000, 1, 197, 12, 1), RK3066_PLL_RATE( 384000000, 2, 128, 4), RK3066_PLL_RATE( 360000000, 1, 60, 4), + RK3066_PLL_RATE_NB( 356000000, 1, 178, 12, 1), + RK3066_PLL_RATE_NB( 324000000, 1, 189, 14, 1), RK3066_PLL_RATE( 312000000, 1, 52, 4), - RK3066_PLL_RATE( 300000000, 1, 50, 4), - RK3066_PLL_RATE( 297000000, 2, 198, 8), + RK3066_PLL_RATE_NB( 308000000, 1, 154, 12, 1), + RK3066_PLL_RATE_NB( 303000000, 1, 202, 16, 1), + RK3066_PLL_RATE( 300000000, 1, 75, 6), + RK3066_PLL_RATE_NB( 297750000, 2, 397, 16, 1), + RK3066_PLL_RATE_NB( 293250000, 2, 391, 16, 1), + RK3066_PLL_RATE_NB( 292500000, 1, 195, 16, 1), + RK3066_PLL_RATE( 273600000, 1, 114, 10), + RK3066_PLL_RATE_NB( 273000000, 1, 182, 16, 1), + RK3066_PLL_RATE_NB( 270000000, 1, 180, 16, 1), + RK3066_PLL_RATE_NB( 266250000, 2, 355, 16, 1), + RK3066_PLL_RATE_NB( 256500000, 1, 171, 16, 1), RK3066_PLL_RATE( 252000000, 1, 84, 8), - RK3066_PLL_RATE( 216000000, 1, 72, 8), - RK3066_PLL_RATE( 148500000, 2, 99, 8), + RK3066_PLL_RATE_NB( 250500000, 1, 167, 16, 1), + RK3066_PLL_RATE_NB( 243428571, 1, 142, 14, 1), + RK3066_PLL_RATE( 238000000, 1, 119, 12), + RK3066_PLL_RATE_NB( 219750000, 2, 293, 16, 1), + RK3066_PLL_RATE_NB( 216000000, 1, 144, 16, 1), + RK3066_PLL_RATE_NB( 213000000, 1, 142, 16, 1), + RK3066_PLL_RATE( 195428571, 1, 114, 14), + RK3066_PLL_RATE( 160000000, 1, 80, 12), + RK3066_PLL_RATE( 157500000, 1, 105, 16), RK3066_PLL_RATE( 126000000, 1, 84, 16), RK3066_PLL_RATE( 48000000, 1, 64, 32), { /* sentinel */ }, diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c index 252366a5231f..2c5426607790 100644 --- a/drivers/clk/rockchip/clk-rk3328.c +++ b/drivers/clk/rockchip/clk-rk3328.c @@ -813,22 +813,22 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = { MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "clk_sdmmc", RK3328_SDMMC_CON0, 1), MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "clk_sdmmc", - RK3328_SDMMC_CON1, 1), + RK3328_SDMMC_CON1, 0), MMC(SCLK_SDIO_DRV, "sdio_drv", "clk_sdio", RK3328_SDIO_CON0, 1), MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "clk_sdio", - RK3328_SDIO_CON1, 1), + RK3328_SDIO_CON1, 0), MMC(SCLK_EMMC_DRV, "emmc_drv", "clk_emmc", RK3328_EMMC_CON0, 1), MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "clk_emmc", - RK3328_EMMC_CON1, 1), + RK3328_EMMC_CON1, 0), 
MMC(SCLK_SDMMC_EXT_DRV, "sdmmc_ext_drv", "clk_sdmmc_ext", RK3328_SDMMC_EXT_CON0, 1), MMC(SCLK_SDMMC_EXT_SAMPLE, "sdmmc_ext_sample", "clk_sdmmc_ext", - RK3328_SDMMC_EXT_CON1, 1), + RK3328_SDMMC_EXT_CON1, 0), }; static const char *const rk3328_critical_clocks[] __initconst = { diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c index d2c99d8916b8..a5fddebbe530 100644 --- a/drivers/clk/samsung/clk-cpu.c +++ b/drivers/clk/samsung/clk-cpu.c @@ -152,7 +152,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, struct exynos_cpuclk *cpuclk, void __iomem *base) { const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg; - unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent); + unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent); unsigned long alt_div = 0, alt_div_mask = DIV_MASK; unsigned long div0, div1 = 0, mux_reg; unsigned long flags; @@ -280,7 +280,7 @@ static int exynos5433_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, struct exynos_cpuclk *cpuclk, void __iomem *base) { const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg; - unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent); + unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent); unsigned long alt_div = 0, alt_div_mask = DIV_MASK; unsigned long div0, div1 = 0, mux_reg; unsigned long flags; @@ -432,7 +432,7 @@ int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx, else cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb; - cpuclk->alt_parent = __clk_lookup(alt_parent); + cpuclk->alt_parent = __clk_get_hw(__clk_lookup(alt_parent)); if (!cpuclk->alt_parent) { pr_err("%s: could not lookup alternate parent %s\n", __func__, alt_parent); diff --git a/drivers/clk/samsung/clk-cpu.h b/drivers/clk/samsung/clk-cpu.h index d4b6b517fe1b..bd38c6aa3897 100644 --- a/drivers/clk/samsung/clk-cpu.h +++ b/drivers/clk/samsung/clk-cpu.h @@ -49,7 +49,7 @@ struct exynos_cpuclk_cfg_data { */ struct exynos_cpuclk { struct clk_hw hw; - struct clk *alt_parent; + struct clk_hw *alt_parent; void __iomem *ctrl_base; spinlock_t *lock; const struct exynos_cpuclk_cfg_data *cfg; diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c index f659c5cbf1d5..8f8a0f9fc842 100644 --- a/drivers/clk/samsung/clk-exynos-audss.c +++ b/drivers/clk/samsung/clk-exynos-audss.c @@ -15,7 +15,6 @@ #include <linux/clk-provider.h> #include <linux/of_address.h> #include <linux/of_device.h> -#include <linux/syscore_ops.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c index 27c9d23657b3..0e9a41a4cac8 100644 --- a/drivers/clk/samsung/clk-exynos3250.c +++ b/drivers/clk/samsung/clk-exynos3250.c @@ -12,7 +12,6 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/platform_device.h> -#include <linux/syscore_ops.h> #include <dt-bindings/clock/exynos3250.h> diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 0421960eb963..59d4d46667ce 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -16,7 +16,6 @@ #include <linux/clk-provider.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/syscore_ops.h> #include "clk.h" #include "clk-cpu.h" @@ -123,10 +122,6 @@ #define CLKOUT_CMU_CPU 0x14a00 #define PWR_CTRL1 0x15020 #define E4X12_PWR_CTRL2 0x15024 -#define E4X12_DIV_ISP0 0x18300 -#define E4X12_DIV_ISP1 0x18304 
-#define E4X12_GATE_ISP0 0x18800 -#define E4X12_GATE_ISP1 0x18804 /* Below definitions are used for PWR_CTRL settings */ #define PWR_CTRL1_CORE2_DOWN_RATIO(x) (((x) & 0x7) << 28) @@ -158,14 +153,6 @@ static void __iomem *reg_base; static enum exynos4_soc exynos4_soc; /* - * Support for CMU save/restore across system suspends - */ -#ifdef CONFIG_PM_SLEEP -static struct samsung_clk_reg_dump *exynos4_save_common; -static struct samsung_clk_reg_dump *exynos4_save_soc; -static struct samsung_clk_reg_dump *exynos4_save_pll; - -/* * list of controller registers to be saved and restored during a * suspend/resume cycle. */ @@ -192,7 +179,7 @@ static const unsigned long exynos4x12_clk_save[] __initconst = { E4X12_PWR_CTRL2, }; -static const unsigned long exynos4_clk_pll_regs[] __initconst = { +static const unsigned long exynos4_clk_regs[] __initconst = { EPLL_LOCK, VPLL_LOCK, EPLL_CON0, @@ -201,9 +188,6 @@ static const unsigned long exynos4_clk_pll_regs[] __initconst = { VPLL_CON0, VPLL_CON1, VPLL_CON2, -}; - -static const unsigned long exynos4_clk_regs[] __initconst = { SRC_LEFTBUS, DIV_LEFTBUS, GATE_IP_LEFTBUS, @@ -276,6 +260,8 @@ static const unsigned long exynos4_clk_regs[] __initconst = { }; static const struct samsung_clk_reg_dump src_mask_suspend[] = { + { .offset = VPLL_CON0, .value = 0x80600302, }, + { .offset = EPLL_CON0, .value = 0x806F0302, }, { .offset = SRC_MASK_TOP, .value = 0x00000001, }, { .offset = SRC_MASK_CAM, .value = 0x11111111, }, { .offset = SRC_MASK_TV, .value = 0x00000111, }, @@ -291,123 +277,6 @@ static const struct samsung_clk_reg_dump src_mask_suspend_e4210[] = { { .offset = E4210_SRC_MASK_LCD1, .value = 0x00001111, }, }; -#define PLL_ENABLED (1 << 31) -#define PLL_LOCKED (1 << 29) - -static void exynos4_clk_enable_pll(u32 reg) -{ - u32 pll_con = readl(reg_base + reg); - pll_con |= PLL_ENABLED; - writel(pll_con, reg_base + reg); - - while (!(pll_con & PLL_LOCKED)) { - cpu_relax(); - pll_con = readl(reg_base + reg); - } -} - -static void exynos4_clk_wait_for_pll(u32 reg) -{ - u32 pll_con; - - pll_con = readl(reg_base + reg); - if (!(pll_con & PLL_ENABLED)) - return; - - while (!(pll_con & PLL_LOCKED)) { - cpu_relax(); - pll_con = readl(reg_base + reg); - } -} - -static int exynos4_clk_suspend(void) -{ - samsung_clk_save(reg_base, exynos4_save_common, - ARRAY_SIZE(exynos4_clk_regs)); - samsung_clk_save(reg_base, exynos4_save_pll, - ARRAY_SIZE(exynos4_clk_pll_regs)); - - exynos4_clk_enable_pll(EPLL_CON0); - exynos4_clk_enable_pll(VPLL_CON0); - - if (exynos4_soc == EXYNOS4210) { - samsung_clk_save(reg_base, exynos4_save_soc, - ARRAY_SIZE(exynos4210_clk_save)); - samsung_clk_restore(reg_base, src_mask_suspend_e4210, - ARRAY_SIZE(src_mask_suspend_e4210)); - } else { - samsung_clk_save(reg_base, exynos4_save_soc, - ARRAY_SIZE(exynos4x12_clk_save)); - } - - samsung_clk_restore(reg_base, src_mask_suspend, - ARRAY_SIZE(src_mask_suspend)); - - return 0; -} - -static void exynos4_clk_resume(void) -{ - samsung_clk_restore(reg_base, exynos4_save_pll, - ARRAY_SIZE(exynos4_clk_pll_regs)); - - exynos4_clk_wait_for_pll(EPLL_CON0); - exynos4_clk_wait_for_pll(VPLL_CON0); - - samsung_clk_restore(reg_base, exynos4_save_common, - ARRAY_SIZE(exynos4_clk_regs)); - - if (exynos4_soc == EXYNOS4210) - samsung_clk_restore(reg_base, exynos4_save_soc, - ARRAY_SIZE(exynos4210_clk_save)); - else - samsung_clk_restore(reg_base, exynos4_save_soc, - ARRAY_SIZE(exynos4x12_clk_save)); -} - -static struct syscore_ops exynos4_clk_syscore_ops = { - .suspend = exynos4_clk_suspend, - .resume = exynos4_clk_resume, 
-}; - -static void __init exynos4_clk_sleep_init(void) -{ - exynos4_save_common = samsung_clk_alloc_reg_dump(exynos4_clk_regs, - ARRAY_SIZE(exynos4_clk_regs)); - if (!exynos4_save_common) - goto err_warn; - - if (exynos4_soc == EXYNOS4210) - exynos4_save_soc = samsung_clk_alloc_reg_dump( - exynos4210_clk_save, - ARRAY_SIZE(exynos4210_clk_save)); - else - exynos4_save_soc = samsung_clk_alloc_reg_dump( - exynos4x12_clk_save, - ARRAY_SIZE(exynos4x12_clk_save)); - if (!exynos4_save_soc) - goto err_common; - - exynos4_save_pll = samsung_clk_alloc_reg_dump(exynos4_clk_pll_regs, - ARRAY_SIZE(exynos4_clk_pll_regs)); - if (!exynos4_save_pll) - goto err_soc; - - register_syscore_ops(&exynos4_clk_syscore_ops); - return; - -err_soc: - kfree(exynos4_save_soc); -err_common: - kfree(exynos4_save_common); -err_warn: - pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", - __func__); -} -#else -static void __init exynos4_clk_sleep_init(void) {} -#endif - /* list of all parent clock list */ PNAME(mout_apll_p) = { "fin_pll", "fout_apll", }; PNAME(mout_mpll_p) = { "fin_pll", "fout_mpll", }; @@ -841,18 +710,6 @@ static const struct samsung_div_clock exynos4x12_div_clks[] __initconst = { DIV(0, "div_c2c_aclk", "div_c2c", DIV_DMC1, 12, 3), }; -static struct samsung_div_clock exynos4x12_isp_div_clks[] = { - DIV_F(CLK_DIV_ISP0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3, - CLK_GET_RATE_NOCACHE, 0), - DIV_F(CLK_DIV_ISP1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3, - CLK_GET_RATE_NOCACHE, 0), - DIV(0, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3), - DIV_F(CLK_DIV_MCUISP0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, - 4, 3, CLK_GET_RATE_NOCACHE, 0), - DIV_F(CLK_DIV_MCUISP1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, - 8, 3, CLK_GET_RATE_NOCACHE, 0), -}; - /* list of gate clocks supported in all exynos4 soc's */ static const struct samsung_gate_clock exynos4_gate_clks[] __initconst = { GATE(CLK_PPMULEFT, "ppmuleft", "aclk200", GATE_IP_LEFTBUS, 1, 0, 0), @@ -1150,61 +1007,6 @@ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = { 0), }; -static struct samsung_gate_clock exynos4x12_isp_gate_clks[] = { - GATE(CLK_FIMC_ISP, "isp", "aclk200", E4X12_GATE_ISP0, 0, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_FIMC_DRC, "drc", "aclk200", E4X12_GATE_ISP0, 1, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_FIMC_FD, "fd", "aclk200", E4X12_GATE_ISP0, 2, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_FIMC_LITE0, "lite0", "aclk200", E4X12_GATE_ISP0, 3, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_FIMC_LITE1, "lite1", "aclk200", E4X12_GATE_ISP0, 4, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_MCUISP, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_GICISP, "gicisp", "aclk200", E4X12_GATE_ISP0, 7, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_SMMU_ISP, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_SMMU_DRC, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_SMMU_FD, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_SMMU_LITE0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_SMMU_LITE1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_PPMUISPMX, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20, - 
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_PPMUISPX, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_MCUCTL_ISP, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_MPWM_ISP, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_I2C0_ISP, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_I2C1_ISP, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_MTCADC_ISP, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_PWM_ISP, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_WDT_ISP, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_UART_ISP, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_ASYNCAXIM, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_SMMU_ISPCX, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_SPI0_ISP, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), - GATE(CLK_SPI1_ISP, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13, - CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), -}; - /* * The parent of the fin_pll clock is selected by the XOM[0] bit. This bit * resides in chipid register space, outside of the clock controller memory @@ -1504,8 +1306,6 @@ static void __init exynos4_clk_init(struct device_node *np, e4210_armclk_d, ARRAY_SIZE(e4210_armclk_d), CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1); } else { - struct resource res; - samsung_clk_register_mux(ctx, exynos4x12_mux_clks, ARRAY_SIZE(exynos4x12_mux_clks)); samsung_clk_register_div(ctx, exynos4x12_div_clks, @@ -1516,14 +1316,6 @@ static void __init exynos4_clk_init(struct device_node *np, exynos4x12_fixed_factor_clks, ARRAY_SIZE(exynos4x12_fixed_factor_clks)); - of_address_to_resource(np, 0, &res); - if (resource_size(&res) > 0x18000) { - samsung_clk_register_div(ctx, exynos4x12_isp_div_clks, - ARRAY_SIZE(exynos4x12_isp_div_clks)); - samsung_clk_register_gate(ctx, exynos4x12_isp_gate_clks, - ARRAY_SIZE(exynos4x12_isp_gate_clks)); - } - exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk", mout_core_p4x12[0], mout_core_p4x12[1], 0x14200, e4412_armclk_d, ARRAY_SIZE(e4412_armclk_d), @@ -1532,7 +1324,17 @@ static void __init exynos4_clk_init(struct device_node *np, if (soc == EXYNOS4X12) exynos4x12_core_down_clock(); - exynos4_clk_sleep_init(); + + samsung_clk_extended_sleep_init(reg_base, + exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs), + src_mask_suspend, ARRAY_SIZE(src_mask_suspend)); + if (exynos4_soc == EXYNOS4210) + samsung_clk_extended_sleep_init(reg_base, + exynos4210_clk_save, ARRAY_SIZE(exynos4210_clk_save), + src_mask_suspend_e4210, ARRAY_SIZE(src_mask_suspend_e4210)); + else + samsung_clk_sleep_init(reg_base, exynos4x12_clk_save, + ARRAY_SIZE(exynos4x12_clk_save)); samsung_clk_of_add_provider(np, ctx); diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c index 347fd80c351b..f14139bcb0c1 100644 --- a/drivers/clk/samsung/clk-exynos5250.c +++ b/drivers/clk/samsung/clk-exynos5250.c @@ -14,7 +14,6 @@ #include <linux/clk-provider.h> #include <linux/of.h> #include 
<linux/of_address.h> -#include <linux/syscore_ops.h> #include "clk.h" #include "clk-cpu.h" @@ -111,9 +110,6 @@ enum exynos5250_plls { static void __iomem *reg_base; -#ifdef CONFIG_PM_SLEEP -static struct samsung_clk_reg_dump *exynos5250_save; - /* * list of controller registers to be saved and restored during a * suspend/resume cycle. @@ -172,41 +168,6 @@ static const unsigned long exynos5250_clk_regs[] __initconst = { GATE_IP_ISP1, }; -static int exynos5250_clk_suspend(void) -{ - samsung_clk_save(reg_base, exynos5250_save, - ARRAY_SIZE(exynos5250_clk_regs)); - - return 0; -} - -static void exynos5250_clk_resume(void) -{ - samsung_clk_restore(reg_base, exynos5250_save, - ARRAY_SIZE(exynos5250_clk_regs)); -} - -static struct syscore_ops exynos5250_clk_syscore_ops = { - .suspend = exynos5250_clk_suspend, - .resume = exynos5250_clk_resume, -}; - -static void __init exynos5250_clk_sleep_init(void) -{ - exynos5250_save = samsung_clk_alloc_reg_dump(exynos5250_clk_regs, - ARRAY_SIZE(exynos5250_clk_regs)); - if (!exynos5250_save) { - pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", - __func__); - return; - } - - register_syscore_ops(&exynos5250_clk_syscore_ops); -} -#else -static void __init exynos5250_clk_sleep_init(void) {} -#endif - /* list of all parent clock list */ PNAME(mout_apll_p) = { "fin_pll", "fout_apll", }; PNAME(mout_cpu_p) = { "mout_apll", "mout_mpll", }; @@ -882,7 +843,8 @@ static void __init exynos5250_clk_init(struct device_node *np) PWR_CTRL2_CORE2_UP_RATIO | PWR_CTRL2_CORE1_UP_RATIO); __raw_writel(tmp, reg_base + PWR_CTRL2); - exynos5250_clk_sleep_init(); + samsung_clk_sleep_init(reg_base, exynos5250_clk_regs, + ARRAY_SIZE(exynos5250_clk_regs)); exynos5_subcmus_init(ctx, 1, &exynos5250_disp_subcmu); samsung_clk_of_add_provider(np, ctx); diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 95e1bf69449b..34cce3c5898f 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -15,7 +15,6 @@ #include <linux/clk-provider.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/syscore_ops.h> #include "clk.h" #include "clk-cpu.h" @@ -156,10 +155,6 @@ enum exynos5x_plls { static void __iomem *reg_base; static enum exynos5x_soc exynos5x_soc; -#ifdef CONFIG_PM_SLEEP -static struct samsung_clk_reg_dump *exynos5x_save; -static struct samsung_clk_reg_dump *exynos5800_save; - /* * list of controller registers to be saved and restored during a * suspend/resume cycle. 
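(Aside, not part of the patch: the hunks above and below fold each Samsung SoC driver's open-coded syscore suspend/resume handlers into the shared samsung_clk_sleep_init() / samsung_clk_extended_sleep_init() helpers, which take the register-offset list — and, for the extended variant, a list of values to force before suspend — instead of duplicating the save/restore loops per SoC. Below is a rough, self-contained sketch of the bookkeeping being consolidated; clk_save(), clk_restore(), the cmu[] array and the offsets are illustrative stand-ins, not the in-tree API.)

#include <stdio.h>
#include <stddef.h>

/* One saved register: offset into the clock controller and its value. */
struct reg_dump {
    unsigned long offset;
    unsigned int value;
};

/* Stand-in for the ioremapped CMU register block. */
static unsigned int cmu[64];

/* Word offsets the SoC driver wants preserved across suspend (arbitrary). */
static const unsigned long clk_regs[] = { 4, 5, 8 };
static struct reg_dump saved[sizeof(clk_regs) / sizeof(clk_regs[0])];

/* Suspend path: capture the current value of every listed register. */
static void clk_save(void)
{
    for (size_t i = 0; i < sizeof(clk_regs) / sizeof(clk_regs[0]); i++) {
        saved[i].offset = clk_regs[i];
        saved[i].value = cmu[clk_regs[i]];
    }
}

/* Resume path: write the captured values back. */
static void clk_restore(void)
{
    for (size_t i = 0; i < sizeof(saved) / sizeof(saved[0]); i++)
        cmu[saved[i].offset] = saved[i].value;
}

int main(void)
{
    cmu[4] = 0x1234;     /* clock setup done at boot */
    clk_save();          /* suspend */
    cmu[4] = 0;          /* register contents lost while powered down */
    clk_restore();       /* resume */
    printf("reg 4 = 0x%x\n", cmu[4]);   /* prints 0x1234 */
    return 0;
}

(The deleted exynos5420_clk_suspend() above additionally programmed a fixed list of mux/gate values — exynos5420_set_clksrc — after saving, which is the part the "extended" helper is passed as its second register/value list.)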
@@ -281,68 +276,9 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = { { .offset = GATE_BUS_TOP, .value = 0xffffffff, }, { .offset = GATE_BUS_DISP1, .value = 0xffffffff, }, { .offset = GATE_IP_PERIC, .value = 0xffffffff, }, + { .offset = GATE_IP_PERIS, .value = 0xffffffff, }, }; -static int exynos5420_clk_suspend(void) -{ - samsung_clk_save(reg_base, exynos5x_save, - ARRAY_SIZE(exynos5x_clk_regs)); - - if (exynos5x_soc == EXYNOS5800) - samsung_clk_save(reg_base, exynos5800_save, - ARRAY_SIZE(exynos5800_clk_regs)); - - samsung_clk_restore(reg_base, exynos5420_set_clksrc, - ARRAY_SIZE(exynos5420_set_clksrc)); - - return 0; -} - -static void exynos5420_clk_resume(void) -{ - samsung_clk_restore(reg_base, exynos5x_save, - ARRAY_SIZE(exynos5x_clk_regs)); - - if (exynos5x_soc == EXYNOS5800) - samsung_clk_restore(reg_base, exynos5800_save, - ARRAY_SIZE(exynos5800_clk_regs)); -} - -static struct syscore_ops exynos5420_clk_syscore_ops = { - .suspend = exynos5420_clk_suspend, - .resume = exynos5420_clk_resume, -}; - -static void __init exynos5420_clk_sleep_init(void) -{ - exynos5x_save = samsung_clk_alloc_reg_dump(exynos5x_clk_regs, - ARRAY_SIZE(exynos5x_clk_regs)); - if (!exynos5x_save) { - pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", - __func__); - return; - } - - if (exynos5x_soc == EXYNOS5800) { - exynos5800_save = - samsung_clk_alloc_reg_dump(exynos5800_clk_regs, - ARRAY_SIZE(exynos5800_clk_regs)); - if (!exynos5800_save) - goto err_soc; - } - - register_syscore_ops(&exynos5420_clk_syscore_ops); - return; -err_soc: - kfree(exynos5x_save); - pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", - __func__); - return; -} -#else -static void __init exynos5420_clk_sleep_init(void) {} -#endif - /* list of all parent clocks */ PNAME(mout_mspll_cpu_p) = {"mout_sclk_cpll", "mout_sclk_dpll", "mout_sclk_mpll", "mout_sclk_spll"}; @@ -633,6 +569,7 @@ static const struct samsung_div_clock exynos5420_div_clks[] __initconst = { }; static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = { + GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0), GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk", SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0), }; @@ -1162,8 +1099,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { GATE(CLK_TMU, "tmu", "aclk66_psgen", GATE_IP_PERIS, 21, 0, 0), GATE(CLK_TMU_GPU, "tmu_gpu", "aclk66_psgen", GATE_IP_PERIS, 22, 0, 0), - GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0), - /* GEN Block */ GATE(CLK_ROTATOR, "rotator", "mout_user_aclk266", GATE_IP_GEN, 1, 0, 0), GATE(CLK_JPEG, "jpeg", "aclk300_jpeg", GATE_IP_GEN, 2, 0, 0), @@ -1540,7 +1475,12 @@ static void __init exynos5x_clk_init(struct device_node *np, mout_kfc_p[0], mout_kfc_p[1], 0x28200, exynos5420_kfcclk_d, ARRAY_SIZE(exynos5420_kfcclk_d), 0); - exynos5420_clk_sleep_init(); + samsung_clk_extended_sleep_init(reg_base, + exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs), + exynos5420_set_clksrc, ARRAY_SIZE(exynos5420_set_clksrc)); + if (soc == EXYNOS5800) + samsung_clk_sleep_init(reg_base, exynos5800_clk_regs, + ARRAY_SIZE(exynos5800_clk_regs)); exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus), exynos5x_subcmus); diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c index 162de44df099..751e2c4fb65b 100644 --- a/drivers/clk/samsung/clk-exynos5433.c +++ b/drivers/clk/samsung/clk-exynos5433.c @@ -177,6 +177,17 @@ static const unsigned long top_clk_regs[] 
__initconst = { ENABLE_CMU_TOP_DIV_STAT, }; +static const struct samsung_clk_reg_dump top_suspend_regs[] = { + /* force all aclk clocks enabled */ + { ENABLE_ACLK_TOP, 0x67ecffed }, + /* force all sclk_uart clocks enabled */ + { ENABLE_SCLK_TOP_PERIC, 0x38 }, + /* ISP PLL has to be enabled for suspend: reset value + ENABLE bit */ + { ISP_PLL_CON0, 0x85cc0502 }, + /* ISP PLL has to be enabled for suspend: reset value + ENABLE bit */ + { AUD_PLL_CON0, 0x84830202 }, +}; + /* list of all parent clock list */ PNAME(mout_aud_pll_p) = { "oscclk", "fout_aud_pll", }; PNAME(mout_isp_pll_p) = { "oscclk", "fout_isp_pll", }; @@ -792,6 +803,8 @@ static const struct samsung_cmu_info top_cmu_info __initconst = { .nr_clk_ids = TOP_NR_CLK, .clk_regs = top_clk_regs, .nr_clk_regs = ARRAY_SIZE(top_clk_regs), + .suspend_regs = top_suspend_regs, + .nr_suspend_regs = ARRAY_SIZE(top_suspend_regs), }; static void __init exynos5433_cmu_top_init(struct device_node *np) @@ -822,6 +835,13 @@ static const unsigned long cpif_clk_regs[] __initconst = { ENABLE_SCLK_CPIF, }; +static const struct samsung_clk_reg_dump cpif_suspend_regs[] = { + /* force all sclk clocks enabled */ + { ENABLE_SCLK_CPIF, 0x3ff }, + /* MPHY PLL has to be enabled for suspend: reset value + ENABLE bit */ + { MPHY_PLL_CON0, 0x81c70601 }, +}; + /* list of all parent clock list */ PNAME(mout_mphy_pll_p) = { "oscclk", "fout_mphy_pll", }; @@ -862,6 +882,8 @@ static const struct samsung_cmu_info cpif_cmu_info __initconst = { .nr_clk_ids = CPIF_NR_CLK, .clk_regs = cpif_clk_regs, .nr_clk_regs = ARRAY_SIZE(cpif_clk_regs), + .suspend_regs = cpif_suspend_regs, + .nr_suspend_regs = ARRAY_SIZE(cpif_suspend_regs), }; static void __init exynos5433_cmu_cpif_init(struct device_node *np) @@ -1547,6 +1569,13 @@ static const unsigned long peric_clk_regs[] __initconst = { ENABLE_IP_PERIC2, }; +static const struct samsung_clk_reg_dump peric_suspend_regs[] = { + /* pclk: sci, pmu, sysreg, gpio_{finger, ese, touch, nfc}, uart2-0 */ + { ENABLE_PCLK_PERIC0, 0xe00ff000 }, + /* sclk: uart2-0 */ + { ENABLE_SCLK_PERIC, 0x7 }, +}; + static const struct samsung_div_clock peric_div_clks[] __initconst = { /* DIV_PERIC */ DIV(CLK_DIV_SCLK_SCI, "div_sclk_sci", "oscclk", DIV_PERIC, 4, 4), @@ -1705,6 +1734,8 @@ static const struct samsung_cmu_info peric_cmu_info __initconst = { .nr_clk_ids = PERIC_NR_CLK, .clk_regs = peric_clk_regs, .nr_clk_regs = ARRAY_SIZE(peric_clk_regs), + .suspend_regs = peric_suspend_regs, + .nr_suspend_regs = ARRAY_SIZE(peric_suspend_regs), }; static void __init exynos5433_cmu_peric_init(struct device_node *np) @@ -5630,7 +5661,7 @@ static const struct of_device_id exynos5433_cmu_of_match[] = { static const struct dev_pm_ops exynos5433_cmu_pm_ops = { SET_RUNTIME_PM_OPS(exynos5433_cmu_suspend, exynos5433_cmu_resume, NULL) - SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c index a9c887475054..8cb868f06257 100644 --- a/drivers/clk/samsung/clk-s3c2410.c +++ b/drivers/clk/samsung/clk-s3c2410.c @@ -11,7 +11,6 @@ #include <linux/clk-provider.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/syscore_ops.h> #include <dt-bindings/clock/s3c2410.h> @@ -40,9 +39,6 @@ enum s3c2410_plls { static void __iomem *reg_base; -#ifdef CONFIG_PM_SLEEP -static struct samsung_clk_reg_dump *s3c2410_save; - /* * list of controller registers to be saved and restored during a * suspend/resume cycle. 
@@ -57,42 +53,6 @@ static unsigned long s3c2410_clk_regs[] __initdata = { CAMDIVN, }; -static int s3c2410_clk_suspend(void) -{ - samsung_clk_save(reg_base, s3c2410_save, - ARRAY_SIZE(s3c2410_clk_regs)); - - return 0; -} - -static void s3c2410_clk_resume(void) -{ - samsung_clk_restore(reg_base, s3c2410_save, - ARRAY_SIZE(s3c2410_clk_regs)); -} - -static struct syscore_ops s3c2410_clk_syscore_ops = { - .suspend = s3c2410_clk_suspend, - .resume = s3c2410_clk_resume, -}; - -static void __init s3c2410_clk_sleep_init(void) -{ - s3c2410_save = samsung_clk_alloc_reg_dump(s3c2410_clk_regs, - ARRAY_SIZE(s3c2410_clk_regs)); - if (!s3c2410_save) { - pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", - __func__); - return; - } - - register_syscore_ops(&s3c2410_clk_syscore_ops); - return; -} -#else -static void __init s3c2410_clk_sleep_init(void) {} -#endif - PNAME(fclk_p) = { "mpll", "div_slow" }; static struct samsung_mux_clock s3c2410_common_muxes[] __initdata = { @@ -461,7 +421,8 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f, ARRAY_SIZE(s3c244x_common_aliases)); } - s3c2410_clk_sleep_init(); + samsung_clk_sleep_init(reg_base, s3c2410_clk_regs, + ARRAY_SIZE(s3c2410_clk_regs)); samsung_clk_of_add_provider(np, ctx); } diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c index 6bc94d3aff78..dd1159050a5a 100644 --- a/drivers/clk/samsung/clk-s3c2412.c +++ b/drivers/clk/samsung/clk-s3c2412.c @@ -11,7 +11,6 @@ #include <linux/clk-provider.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/syscore_ops.h> #include <linux/reboot.h> #include <dt-bindings/clock/s3c2412.h> @@ -29,9 +28,6 @@ static void __iomem *reg_base; -#ifdef CONFIG_PM_SLEEP -static struct samsung_clk_reg_dump *s3c2412_save; - /* * list of controller registers to be saved and restored during a * suspend/resume cycle. 
@@ -45,42 +41,6 @@ static unsigned long s3c2412_clk_regs[] __initdata = { CLKSRC, }; -static int s3c2412_clk_suspend(void) -{ - samsung_clk_save(reg_base, s3c2412_save, - ARRAY_SIZE(s3c2412_clk_regs)); - - return 0; -} - -static void s3c2412_clk_resume(void) -{ - samsung_clk_restore(reg_base, s3c2412_save, - ARRAY_SIZE(s3c2412_clk_regs)); -} - -static struct syscore_ops s3c2412_clk_syscore_ops = { - .suspend = s3c2412_clk_suspend, - .resume = s3c2412_clk_resume, -}; - -static void __init s3c2412_clk_sleep_init(void) -{ - s3c2412_save = samsung_clk_alloc_reg_dump(s3c2412_clk_regs, - ARRAY_SIZE(s3c2412_clk_regs)); - if (!s3c2412_save) { - pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", - __func__); - return; - } - - register_syscore_ops(&s3c2412_clk_syscore_ops); - return; -} -#else -static void __init s3c2412_clk_sleep_init(void) {} -#endif - static struct clk_div_table divxti_d[] = { { .val = 0, .div = 1 }, { .val = 1, .div = 2 }, @@ -278,7 +238,8 @@ void __init s3c2412_common_clk_init(struct device_node *np, unsigned long xti_f, samsung_clk_register_alias(ctx, s3c2412_aliases, ARRAY_SIZE(s3c2412_aliases)); - s3c2412_clk_sleep_init(); + samsung_clk_sleep_init(reg_base, s3c2412_clk_regs, + ARRAY_SIZE(s3c2412_clk_regs)); samsung_clk_of_add_provider(np, ctx); diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c index c46e6d5bc9bc..884067e4f1a1 100644 --- a/drivers/clk/samsung/clk-s3c2443.c +++ b/drivers/clk/samsung/clk-s3c2443.c @@ -11,7 +11,6 @@ #include <linux/clk-provider.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/syscore_ops.h> #include <linux/reboot.h> #include <dt-bindings/clock/s3c2443.h> @@ -43,9 +42,6 @@ enum supported_socs { static void __iomem *reg_base; -#ifdef CONFIG_PM_SLEEP -static struct samsung_clk_reg_dump *s3c2443_save; - /* * list of controller registers to be saved and restored during a * suspend/resume cycle. 
@@ -65,42 +61,6 @@ static unsigned long s3c2443_clk_regs[] __initdata = { SCLKCON, }; -static int s3c2443_clk_suspend(void) -{ - samsung_clk_save(reg_base, s3c2443_save, - ARRAY_SIZE(s3c2443_clk_regs)); - - return 0; -} - -static void s3c2443_clk_resume(void) -{ - samsung_clk_restore(reg_base, s3c2443_save, - ARRAY_SIZE(s3c2443_clk_regs)); -} - -static struct syscore_ops s3c2443_clk_syscore_ops = { - .suspend = s3c2443_clk_suspend, - .resume = s3c2443_clk_resume, -}; - -static void __init s3c2443_clk_sleep_init(void) -{ - s3c2443_save = samsung_clk_alloc_reg_dump(s3c2443_clk_regs, - ARRAY_SIZE(s3c2443_clk_regs)); - if (!s3c2443_save) { - pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", - __func__); - return; - } - - register_syscore_ops(&s3c2443_clk_syscore_ops); - return; -} -#else -static void __init s3c2443_clk_sleep_init(void) {} -#endif - PNAME(epllref_p) = { "mpllref", "mpllref", "xti", "ext" }; PNAME(esysclk_p) = { "epllref", "epll" }; PNAME(mpllref_p) = { "xti", "mdivclk" }; @@ -450,7 +410,8 @@ void __init s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f, break; } - s3c2443_clk_sleep_init(); + samsung_clk_sleep_init(reg_base, s3c2443_clk_regs, + ARRAY_SIZE(s3c2443_clk_regs)); samsung_clk_of_add_provider(np, ctx); diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c index 6db01cf5ab83..54916c7bdb06 100644 --- a/drivers/clk/samsung/clk-s3c64xx.c +++ b/drivers/clk/samsung/clk-s3c64xx.c @@ -12,7 +12,6 @@ #include <linux/clk-provider.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/syscore_ops.h> #include <dt-bindings/clock/samsung,s3c64xx-clock.h> @@ -59,10 +58,6 @@ static void __iomem *reg_base; static bool is_s3c6400; -#ifdef CONFIG_PM_SLEEP -static struct samsung_clk_reg_dump *s3c64xx_save_common; -static struct samsung_clk_reg_dump *s3c64xx_save_soc; - /* * List of controller registers to be saved and restored during * a suspend/resume cycle. @@ -89,60 +84,6 @@ static unsigned long s3c6410_clk_regs[] __initdata = { MEM0_GATE, }; -static int s3c64xx_clk_suspend(void) -{ - samsung_clk_save(reg_base, s3c64xx_save_common, - ARRAY_SIZE(s3c64xx_clk_regs)); - - if (!is_s3c6400) - samsung_clk_save(reg_base, s3c64xx_save_soc, - ARRAY_SIZE(s3c6410_clk_regs)); - - return 0; -} - -static void s3c64xx_clk_resume(void) -{ - samsung_clk_restore(reg_base, s3c64xx_save_common, - ARRAY_SIZE(s3c64xx_clk_regs)); - - if (!is_s3c6400) - samsung_clk_restore(reg_base, s3c64xx_save_soc, - ARRAY_SIZE(s3c6410_clk_regs)); -} - -static struct syscore_ops s3c64xx_clk_syscore_ops = { - .suspend = s3c64xx_clk_suspend, - .resume = s3c64xx_clk_resume, -}; - -static void __init s3c64xx_clk_sleep_init(void) -{ - s3c64xx_save_common = samsung_clk_alloc_reg_dump(s3c64xx_clk_regs, - ARRAY_SIZE(s3c64xx_clk_regs)); - if (!s3c64xx_save_common) - goto err_warn; - - if (!is_s3c6400) { - s3c64xx_save_soc = samsung_clk_alloc_reg_dump(s3c6410_clk_regs, - ARRAY_SIZE(s3c6410_clk_regs)); - if (!s3c64xx_save_soc) - goto err_soc; - } - - register_syscore_ops(&s3c64xx_clk_syscore_ops); - return; - -err_soc: - kfree(s3c64xx_save_common); -err_warn: - pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", - __func__); -} -#else -static void __init s3c64xx_clk_sleep_init(void) {} -#endif - /* List of parent clocks common for all S3C64xx SoCs. 
*/ PNAME(spi_mmc_p) = { "mout_epll", "dout_mpll", "fin_pll", "clk27m" }; PNAME(uart_p) = { "mout_epll", "dout_mpll" }; @@ -508,7 +449,12 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f, samsung_clk_register_alias(ctx, s3c64xx_clock_aliases, ARRAY_SIZE(s3c64xx_clock_aliases)); - s3c64xx_clk_sleep_init(); + + samsung_clk_sleep_init(reg_base, s3c64xx_clk_regs, + ARRAY_SIZE(s3c64xx_clk_regs)); + if (!is_s3c6400) + samsung_clk_sleep_init(reg_base, s3c6410_clk_regs, + ARRAY_SIZE(s3c6410_clk_regs)); samsung_clk_of_add_provider(np, ctx); diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c index fd2725710a6f..41d2337fe030 100644 --- a/drivers/clk/samsung/clk-s5pv210.c +++ b/drivers/clk/samsung/clk-s5pv210.c @@ -14,7 +14,6 @@ #include <linux/clk-provider.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/syscore_ops.h> #include "clk.h" #include "clk-pll.h" @@ -83,9 +82,6 @@ enum { static void __iomem *reg_base; -#ifdef CONFIG_PM_SLEEP -static struct samsung_clk_reg_dump *s5pv210_clk_dump; - /* List of registers that need to be preserved across suspend/resume. */ static unsigned long s5pv210_clk_regs[] __initdata = { CLK_SRC0, @@ -132,40 +128,6 @@ static unsigned long s5pv210_clk_regs[] __initdata = { CLK_OUT, }; -static int s5pv210_clk_suspend(void) -{ - samsung_clk_save(reg_base, s5pv210_clk_dump, - ARRAY_SIZE(s5pv210_clk_regs)); - return 0; -} - -static void s5pv210_clk_resume(void) -{ - samsung_clk_restore(reg_base, s5pv210_clk_dump, - ARRAY_SIZE(s5pv210_clk_regs)); -} - -static struct syscore_ops s5pv210_clk_syscore_ops = { - .suspend = s5pv210_clk_suspend, - .resume = s5pv210_clk_resume, -}; - -static void s5pv210_clk_sleep_init(void) -{ - s5pv210_clk_dump = - samsung_clk_alloc_reg_dump(s5pv210_clk_regs, - ARRAY_SIZE(s5pv210_clk_regs)); - if (!s5pv210_clk_dump) { - pr_warn("%s: Failed to allocate sleep save data\n", __func__); - return; - } - - register_syscore_ops(&s5pv210_clk_syscore_ops); -} -#else -static inline void s5pv210_clk_sleep_init(void) { } -#endif - /* Mux parent lists. 
*/ static const char *const fin_pll_p[] __initconst = { "xxti", @@ -822,7 +784,8 @@ static void __init __s5pv210_clk_init(struct device_node *np, samsung_clk_register_alias(ctx, s5pv210_aliases, ARRAY_SIZE(s5pv210_aliases)); - s5pv210_clk_sleep_init(); + samsung_clk_sleep_init(reg_base, s5pv210_clk_regs, + ARRAY_SIZE(s5pv210_clk_regs)); samsung_clk_of_add_provider(np, ctx); diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c index 8634884aa11c..1f6e47cd327d 100644 --- a/drivers/clk/samsung/clk.c +++ b/drivers/clk/samsung/clk.c @@ -290,9 +290,12 @@ static int samsung_clk_suspend(void) { struct samsung_clock_reg_cache *reg_cache; - list_for_each_entry(reg_cache, &clock_reg_cache_list, node) + list_for_each_entry(reg_cache, &clock_reg_cache_list, node) { samsung_clk_save(reg_cache->reg_base, reg_cache->rdump, reg_cache->rd_num); + samsung_clk_restore(reg_cache->reg_base, reg_cache->rsuspend, + reg_cache->rsuspend_num); + } return 0; } @@ -310,9 +313,11 @@ static struct syscore_ops samsung_clk_syscore_ops = { .resume = samsung_clk_resume, }; -void samsung_clk_sleep_init(void __iomem *reg_base, +void samsung_clk_extended_sleep_init(void __iomem *reg_base, const unsigned long *rdump, - unsigned long nr_rdump) + unsigned long nr_rdump, + const struct samsung_clk_reg_dump *rsuspend, + unsigned long nr_rsuspend) { struct samsung_clock_reg_cache *reg_cache; @@ -330,13 +335,10 @@ void samsung_clk_sleep_init(void __iomem *reg_base, reg_cache->reg_base = reg_base; reg_cache->rd_num = nr_rdump; + reg_cache->rsuspend = rsuspend; + reg_cache->rsuspend_num = nr_rsuspend; list_add_tail(®_cache->node, &clock_reg_cache_list); } - -#else -void samsung_clk_sleep_init(void __iomem *reg_base, - const unsigned long *rdump, - unsigned long nr_rdump) {} #endif /* @@ -380,8 +382,9 @@ struct samsung_clk_provider * __init samsung_cmu_register_one( samsung_clk_register_fixed_factor(ctx, cmu->fixed_factor_clks, cmu->nr_fixed_factor_clks); if (cmu->clk_regs) - samsung_clk_sleep_init(reg_base, cmu->clk_regs, - cmu->nr_clk_regs); + samsung_clk_extended_sleep_init(reg_base, + cmu->clk_regs, cmu->nr_clk_regs, + cmu->suspend_regs, cmu->nr_suspend_regs); samsung_clk_of_add_provider(np, ctx); diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h index 3880d2f9d582..c3f309d7100d 100644 --- a/drivers/clk/samsung/clk.h +++ b/drivers/clk/samsung/clk.h @@ -279,6 +279,8 @@ struct samsung_clock_reg_cache { void __iomem *reg_base; struct samsung_clk_reg_dump *rdump; unsigned int rd_num; + const struct samsung_clk_reg_dump *rsuspend; + unsigned int rsuspend_num; }; struct samsung_cmu_info { @@ -358,9 +360,21 @@ extern struct samsung_clk_provider __init *samsung_cmu_register_one( extern unsigned long _get_rate(const char *clk_name); -extern void samsung_clk_sleep_init(void __iomem *reg_base, +#ifdef CONFIG_PM_SLEEP +extern void samsung_clk_extended_sleep_init(void __iomem *reg_base, const unsigned long *rdump, - unsigned long nr_rdump); + unsigned long nr_rdump, + const struct samsung_clk_reg_dump *rsuspend, + unsigned long nr_rsuspend); +#else +static inline void samsung_clk_extended_sleep_init(void __iomem *reg_base, + const unsigned long *rdump, + unsigned long nr_rdump, + const struct samsung_clk_reg_dump *rsuspend, + unsigned long nr_rsuspend) {} +#endif +#define samsung_clk_sleep_init(reg_base, rdump, nr_rdump) \ + samsung_clk_extended_sleep_init(reg_base, rdump, nr_rdump, NULL, 0) extern void samsung_clk_save(void __iomem *base, struct samsung_clk_reg_dump *rd, diff --git a/drivers/clk/st/clkgen-fsyn.c 
b/drivers/clk/st/clkgen-fsyn.c index a79d81985c4e..cfa000007622 100644 --- a/drivers/clk/st/clkgen-fsyn.c +++ b/drivers/clk/st/clkgen-fsyn.c @@ -936,7 +936,7 @@ static void __init st_of_quadfs_setup(struct device_node *np, if (!clk_parent_name) return; - pll_name = kasprintf(GFP_KERNEL, "%s.pll", np->name); + pll_name = kasprintf(GFP_KERNEL, "%pOFn.pll", np); if (!pll_name) return; diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c index ee9c12cf3f08..5f80eb018014 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c @@ -64,17 +64,19 @@ static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base", BIT(28), /* lock */ CLK_SET_RATE_UNGATE); -static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video0_clk, "pll-video0", - "osc24M", 0x010, - 8, 7, /* N */ - 0, 4, /* M */ - BIT(24), /* frac enable */ - BIT(25), /* frac select */ - 270000000, /* frac rate 0 */ - 297000000, /* frac rate 1 */ - BIT(31), /* gate */ - BIT(28), /* lock */ - CLK_SET_RATE_UNGATE); +static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN_MAX(pll_video0_clk, "pll-video0", + "osc24M", 0x010, + 192000000, /* Minimum rate */ + 1008000000, /* Maximum rate */ + 8, 7, /* N */ + 0, 4, /* M */ + BIT(24), /* frac enable */ + BIT(25), /* frac select */ + 270000000, /* frac rate 0 */ + 297000000, /* frac rate 1 */ + BIT(31), /* gate */ + BIT(28), /* lock */ + CLK_SET_RATE_UNGATE); static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve", "osc24M", 0x018, @@ -125,17 +127,19 @@ static struct ccu_nk pll_periph1_clk = { }, }; -static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video1_clk, "pll-video1", - "osc24M", 0x030, - 8, 7, /* N */ - 0, 4, /* M */ - BIT(24), /* frac enable */ - BIT(25), /* frac select */ - 270000000, /* frac rate 0 */ - 297000000, /* frac rate 1 */ - BIT(31), /* gate */ - BIT(28), /* lock */ - CLK_SET_RATE_UNGATE); +static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN_MAX(pll_video1_clk, "pll-video1", + "osc24M", 0x030, + 192000000, /* Minimum rate */ + 1008000000, /* Maximum rate */ + 8, 7, /* N */ + 0, 4, /* M */ + BIT(24), /* frac enable */ + BIT(25), /* frac select */ + 270000000, /* frac rate 0 */ + 297000000, /* frac rate 1 */ + BIT(31), /* gate */ + BIT(28), /* lock */ + CLK_SET_RATE_UNGATE); static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu", "osc24M", 0x038, diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h index 061b6fbb4f95..cd415b968e8c 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h @@ -27,7 +27,9 @@ #define CLK_PLL_AUDIO_2X 4 #define CLK_PLL_AUDIO_4X 5 #define CLK_PLL_AUDIO_8X 6 -#define CLK_PLL_VIDEO0 7 + +/* PLL_VIDEO0 exported for HDMI PHY */ + #define CLK_PLL_VIDEO0_2X 8 #define CLK_PLL_VE 9 #define CLK_PLL_DDR0 10 diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c index bdbfe78fe133..2193e1495086 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c @@ -224,7 +224,7 @@ static SUNXI_CCU_MP_WITH_MUX(psi_ahb1_ahb2_clk, "psi-ahb1-ahb2", psi_ahb1_ahb2_parents, 0x510, 0, 5, /* M */ - 16, 2, /* P */ + 8, 2, /* P */ 24, 2, /* mux */ 0); @@ -233,19 +233,19 @@ static const char * const ahb3_apb1_apb2_parents[] = { "osc24M", "osc32k", "pll-periph0" }; static SUNXI_CCU_MP_WITH_MUX(ahb3_clk, "ahb3", ahb3_apb1_apb2_parents, 0x51c, 0, 5, /* M */ - 16, 2, /* P */ + 8, 2, /* P */ 24, 2, /* mux */ 0); static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", ahb3_apb1_apb2_parents, 0x520, 0, 
5, /* M */ - 16, 2, /* P */ + 8, 2, /* P */ 24, 2, /* mux */ 0); static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", ahb3_apb1_apb2_parents, 0x524, 0, 5, /* M */ - 16, 2, /* P */ + 8, 2, /* P */ 24, 2, /* mux */ 0); @@ -352,7 +352,7 @@ static SUNXI_CCU_GATE(bus_dbg_clk, "bus-dbg", "psi-ahb1-ahb2", static SUNXI_CCU_GATE(bus_psi_clk, "bus-psi", "psi-ahb1-ahb2", 0x79c, BIT(0), 0); -static SUNXI_CCU_GATE(bus_pwm_clk, "bus-pwm", "apb1", 0x79c, BIT(0), 0); +static SUNXI_CCU_GATE(bus_pwm_clk, "bus-pwm", "apb1", 0x7ac, BIT(0), 0); static SUNXI_CCU_GATE(bus_iommu_clk, "bus-iommu", "apb1", 0x7bc, BIT(0), 0); @@ -408,26 +408,29 @@ static SUNXI_CCU_GATE(bus_nand_clk, "bus-nand", "ahb3", 0x82c, BIT(0), 0); static const char * const mmc_parents[] = { "osc24M", "pll-periph0-2x", "pll-periph1-2x" }; -static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc_parents, 0x830, - 0, 4, /* M */ - 8, 2, /* N */ - 24, 3, /* mux */ - BIT(31),/* gate */ - 0); - -static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc_parents, 0x834, - 0, 4, /* M */ - 8, 2, /* N */ - 24, 3, /* mux */ - BIT(31),/* gate */ - 0); - -static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc_parents, 0x838, - 0, 4, /* M */ - 8, 2, /* N */ - 24, 3, /* mux */ - BIT(31),/* gate */ - 0); +static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", mmc_parents, 0x830, + 0, 4, /* M */ + 8, 2, /* N */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834, + 0, 4, /* M */ + 8, 2, /* N */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0); + +static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838, + 0, 4, /* M */ + 8, 2, /* N */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0); static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb3", 0x84c, BIT(0), 0); static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb3", 0x84c, BIT(1), 0); diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c index 7d08015b980d..2d6555d73170 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c @@ -108,6 +108,7 @@ static struct ccu_nkmp pll_video0_clk = { .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), .m = _SUNXI_CCU_DIV(16, 1), /* input divider */ .p = _SUNXI_CCU_DIV(0, 2), /* output divider */ + .max_rate = 3000000000UL, .common = { .reg = 0x010, .lock_reg = CCU_SUN8I_A83T_LOCK_REG, @@ -220,6 +221,7 @@ static struct ccu_nkmp pll_video1_clk = { .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), .m = _SUNXI_CCU_DIV(16, 1), /* input divider */ .p = _SUNXI_CCU_DIV(0, 2), /* external divider p */ + .max_rate = 3000000000UL, .common = { .reg = 0x04c, .lock_reg = CCU_SUN8I_A83T_LOCK_REG, diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c index 77ed0b0ba681..eb5c608428fa 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c @@ -69,18 +69,19 @@ static SUNXI_CCU_NM_WITH_SDM_GATE_LOCK(pll_audio_base_clk, "pll-audio-base", BIT(28), /* lock */ CLK_SET_RATE_UNGATE); -static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN(pll_video_clk, "pll-video", - "osc24M", 0x0010, - 192000000, /* Minimum rate */ - 8, 7, /* N */ - 0, 4, /* M */ - BIT(24), /* frac enable */ - BIT(25), /* frac select */ - 270000000, /* frac rate 0 */ - 297000000, /* frac rate 1 */ - BIT(31), /* gate */ - BIT(28), /* lock */ - CLK_SET_RATE_UNGATE); +static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN_MAX(pll_video_clk, "pll-video", + "osc24M", 
0x0010, + 192000000, /* Minimum rate */ + 912000000, /* Maximum rate */ + 8, 7, /* N */ + 0, 4, /* M */ + BIT(24), /* frac enable */ + BIT(25), /* frac select */ + 270000000, /* frac rate 0 */ + 297000000, /* frac rate 1 */ + BIT(31), /* gate */ + BIT(28), /* lock */ + CLK_SET_RATE_UNGATE); static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve", "osc24M", 0x0018, diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c index 0f388f6944d5..582ebd41d20d 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c @@ -65,19 +65,19 @@ static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base", BIT(28), /* lock */ CLK_SET_RATE_UNGATE); -/* TODO: The result of N/M is required to be in [8, 25] range. */ -static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN(pll_video0_clk, "pll-video0", - "osc24M", 0x0010, - 192000000, /* Minimum rate */ - 8, 7, /* N */ - 0, 4, /* M */ - BIT(24), /* frac enable */ - BIT(25), /* frac select */ - 270000000, /* frac rate 0 */ - 297000000, /* frac rate 1 */ - BIT(31), /* gate */ - BIT(28), /* lock */ - CLK_SET_RATE_UNGATE); +static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN_MAX(pll_video0_clk, "pll-video0", + "osc24M", 0x0010, + 192000000, /* Minimum rate */ + 1008000000, /* Maximum rate */ + 8, 7, /* N */ + 0, 4, /* M */ + BIT(24), /* frac enable */ + BIT(25), /* frac select */ + 270000000, /* frac rate 0 */ + 297000000, /* frac rate 1 */ + BIT(31), /* gate */ + BIT(28), /* lock */ + CLK_SET_RATE_UNGATE); /* TODO: The result of N/M is required to be in [8, 25] range. */ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve", @@ -152,19 +152,19 @@ static struct ccu_nk pll_periph1_clk = { }, }; -/* TODO: The result of N/M is required to be in [8, 25] range. 
*/ -static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN(pll_video1_clk, "pll-video1", - "osc24M", 0x030, - 192000000, /* Minimum rate */ - 8, 7, /* N */ - 0, 4, /* M */ - BIT(24), /* frac enable */ - BIT(25), /* frac select */ - 270000000, /* frac rate 0 */ - 297000000, /* frac rate 1 */ - BIT(31), /* gate */ - BIT(28), /* lock */ - CLK_SET_RATE_UNGATE); +static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN_MAX(pll_video1_clk, "pll-video1", + "osc24M", 0x030, + 192000000, /* Minimum rate */ + 1008000000, /* Maximum rate */ + 8, 7, /* N */ + 0, 4, /* M */ + BIT(24), /* frac enable */ + BIT(25), /* frac select */ + 270000000, /* frac rate 0 */ + 297000000, /* frac rate 1 */ + BIT(31), /* gate */ + BIT(28), /* lock */ + CLK_SET_RATE_UNGATE); static struct ccu_nkm pll_sata_clk = { .enable = BIT(31), diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c index ebd9436d2c7c..9b49adb20d07 100644 --- a/drivers/clk/sunxi-ng/ccu_nkmp.c +++ b/drivers/clk/sunxi-ng/ccu_nkmp.c @@ -137,6 +137,13 @@ static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate, if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV) rate *= nkmp->fixed_post_div; + if (nkmp->max_rate && rate > nkmp->max_rate) { + rate = nkmp->max_rate; + if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV) + rate /= nkmp->fixed_post_div; + return rate; + } + _nkmp.min_n = nkmp->n.min ?: 1; _nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width; _nkmp.min_k = nkmp->k.min ?: 1; diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.h b/drivers/clk/sunxi-ng/ccu_nkmp.h index 6940503e7fc4..a9f8c116a745 100644 --- a/drivers/clk/sunxi-ng/ccu_nkmp.h +++ b/drivers/clk/sunxi-ng/ccu_nkmp.h @@ -35,6 +35,7 @@ struct ccu_nkmp { struct ccu_div_internal p; unsigned int fixed_post_div; + unsigned int max_rate; struct ccu_common common; }; diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c index 4e2073307f34..6fe3c14f7b2d 100644 --- a/drivers/clk/sunxi-ng/ccu_nm.c +++ b/drivers/clk/sunxi-ng/ccu_nm.c @@ -124,6 +124,13 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate, return rate; } + if (nm->max_rate && rate > nm->max_rate) { + rate = nm->max_rate; + if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV) + rate /= nm->fixed_post_div; + return rate; + } + if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) { if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV) rate /= nm->fixed_post_div; diff --git a/drivers/clk/sunxi-ng/ccu_nm.h b/drivers/clk/sunxi-ng/ccu_nm.h index 1d8b459c50b7..de232f2199a6 100644 --- a/drivers/clk/sunxi-ng/ccu_nm.h +++ b/drivers/clk/sunxi-ng/ccu_nm.h @@ -38,6 +38,7 @@ struct ccu_nm { unsigned int fixed_post_div; unsigned int min_rate; + unsigned int max_rate; struct ccu_common common; }; @@ -115,6 +116,35 @@ struct ccu_nm { }, \ } +#define SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN_MAX(_struct, _name, \ + _parent, _reg, \ + _min_rate, _max_rate, \ + _nshift, _nwidth, \ + _mshift, _mwidth, \ + _frac_en, _frac_sel, \ + _frac_rate_0, \ + _frac_rate_1, \ + _gate, _lock, _flags) \ + struct ccu_nm _struct = { \ + .enable = _gate, \ + .lock = _lock, \ + .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \ + .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \ + .frac = _SUNXI_CCU_FRAC(_frac_en, _frac_sel, \ + _frac_rate_0, \ + _frac_rate_1), \ + .min_rate = _min_rate, \ + .max_rate = _max_rate, \ + .common = { \ + .reg = _reg, \ + .features = CCU_FEATURE_FRACTIONAL, \ + .hw.init = CLK_HW_INIT(_name, \ + _parent, \ + &ccu_nm_ops, \ + _flags), \ + }, \ + } + #define SUNXI_CCU_NM_WITH_GATE_LOCK(_struct, _name, _parent, _reg, \ 
_nshift, _nwidth, \ _mshift, _mwidth, \ diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c index a27c264cc9b4..fc0278a1acc7 100644 --- a/drivers/clk/sunxi/clk-mod0.c +++ b/drivers/clk/sunxi/clk-mod0.c @@ -140,8 +140,8 @@ static void __init sun9i_a80_mod0_setup(struct device_node *node) reg = of_io_request_and_map(node, 0, of_node_full_name(node)); if (IS_ERR(reg)) { - pr_err("Could not get registers for mod0-clk: %s\n", - node->name); + pr_err("Could not get registers for mod0-clk: %pOFn\n", + node); return; } @@ -306,7 +306,7 @@ static void __init sunxi_mmc_setup(struct device_node *node, reg = of_io_request_and_map(node, 0, of_node_full_name(node)); if (IS_ERR(reg)) { - pr_err("Couldn't map the %s clock registers\n", node->name); + pr_err("Couldn't map the %pOFn clock registers\n", node); return; } diff --git a/drivers/clk/sunxi/clk-sun9i-core.c b/drivers/clk/sunxi/clk-sun9i-core.c index e9295c286d5d..7e21b2b10c94 100644 --- a/drivers/clk/sunxi/clk-sun9i-core.c +++ b/drivers/clk/sunxi/clk-sun9i-core.c @@ -88,8 +88,8 @@ static void __init sun9i_a80_pll4_setup(struct device_node *node) reg = of_io_request_and_map(node, 0, of_node_full_name(node)); if (IS_ERR(reg)) { - pr_err("Could not get registers for a80-pll4-clk: %s\n", - node->name); + pr_err("Could not get registers for a80-pll4-clk: %pOFn\n", + node); return; } @@ -142,8 +142,8 @@ static void __init sun9i_a80_gt_setup(struct device_node *node) reg = of_io_request_and_map(node, 0, of_node_full_name(node)); if (IS_ERR(reg)) { - pr_err("Could not get registers for a80-gt-clk: %s\n", - node->name); + pr_err("Could not get registers for a80-gt-clk: %pOFn\n", + node); return; } @@ -197,8 +197,8 @@ static void __init sun9i_a80_ahb_setup(struct device_node *node) reg = of_io_request_and_map(node, 0, of_node_full_name(node)); if (IS_ERR(reg)) { - pr_err("Could not get registers for a80-ahb-clk: %s\n", - node->name); + pr_err("Could not get registers for a80-ahb-clk: %pOFn\n", + node); return; } @@ -223,8 +223,8 @@ static void __init sun9i_a80_apb0_setup(struct device_node *node) reg = of_io_request_and_map(node, 0, of_node_full_name(node)); if (IS_ERR(reg)) { - pr_err("Could not get registers for a80-apb0-clk: %s\n", - node->name); + pr_err("Could not get registers for a80-apb0-clk: %pOFn\n", + node); return; } @@ -280,8 +280,8 @@ static void __init sun9i_a80_apb1_setup(struct device_node *node) reg = of_io_request_and_map(node, 0, of_node_full_name(node)); if (IS_ERR(reg)) { - pr_err("Could not get registers for a80-apb1-clk: %s\n", - node->name); + pr_err("Could not get registers for a80-apb1-clk: %pOFn\n", + node); return; } diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index 012714d94b42..892c29030b7b 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -568,8 +568,8 @@ static struct clk * __init sunxi_factors_clk_setup(struct device_node *node, reg = of_iomap(node, 0); if (!reg) { - pr_err("Could not get registers for factors-clk: %s\n", - node->name); + pr_err("Could not get registers for factors-clk: %pOFn\n", + node); return NULL; } diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c index 48ee43734e05..ebb0e1b6bf01 100644 --- a/drivers/clk/tegra/clk-dfll.c +++ b/drivers/clk/tegra/clk-dfll.c @@ -1609,8 +1609,12 @@ int tegra_dfll_register(struct platform_device *pdev, td->vdd_reg = devm_regulator_get(td->dev, "vdd-cpu"); if (IS_ERR(td->vdd_reg)) { - dev_err(td->dev, "couldn't get vdd_cpu regulator\n"); - return PTR_ERR(td->vdd_reg); + 
ret = PTR_ERR(td->vdd_reg); + if (ret != -EPROBE_DEFER) + dev_err(td->dev, "couldn't get vdd_cpu regulator: %d\n", + ret); + + return ret; } td->dvco_rst = devm_reset_control_get(td->dev, "dvco"); diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 9eb1cb14fce1..88f1943bd2b5 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -27,6 +27,7 @@ #include <dt-bindings/clock/tegra210-car.h> #include <dt-bindings/reset/tegra210-car.h> #include <linux/iopoll.h> +#include <linux/sizes.h> #include <soc/tegra/pmc.h> #include "clk.h" @@ -2603,7 +2604,7 @@ static struct tegra210_domain_mbist_war tegra210_pg_mbist_war[] = { [TEGRA_POWERGATE_MPE] = { .handle_lvl2_ovr = tegra210_generic_mbist_war, .lvl2_offset = LVL2_CLK_GATE_OVRE, - .lvl2_mask = BIT(2), + .lvl2_mask = BIT(29), }, [TEGRA_POWERGATE_SOR] = { .handle_lvl2_ovr = tegra210_generic_mbist_war, @@ -2654,14 +2655,14 @@ static struct tegra210_domain_mbist_war tegra210_pg_mbist_war[] = { .num_clks = ARRAY_SIZE(nvdec_slcg_clkids), .clk_init_data = nvdec_slcg_clkids, .handle_lvl2_ovr = tegra210_generic_mbist_war, - .lvl2_offset = LVL2_CLK_GATE_OVRC, + .lvl2_offset = LVL2_CLK_GATE_OVRE, .lvl2_mask = BIT(9) | BIT(31), }, [TEGRA_POWERGATE_NVJPG] = { .num_clks = ARRAY_SIZE(nvjpg_slcg_clkids), .clk_init_data = nvjpg_slcg_clkids, .handle_lvl2_ovr = tegra210_generic_mbist_war, - .lvl2_offset = LVL2_CLK_GATE_OVRC, + .lvl2_offset = LVL2_CLK_GATE_OVRE, .lvl2_mask = BIT(9) | BIT(31), }, [TEGRA_POWERGATE_AUD] = { diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile index 5ab295d2a3cb..5ca1e39dd88a 100644 --- a/drivers/clk/ti/Makefile +++ b/drivers/clk/ti/Makefile @@ -6,7 +6,8 @@ clk-common = dpll.o composite.o divider.o gate.o \ fixed-factor.o mux.o apll.o \ clkt_dpll.o clkt_iclk.o clkt_dflt.o \ clkctrl.o -obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o dpll3xxx.o +obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o dpll3xxx.o \ + clk-33xx-compat.o obj-$(CONFIG_SOC_TI81XX) += $(clk-common) fapll.o clk-814x.o clk-816x.o obj-$(CONFIG_ARCH_OMAP2) += $(clk-common) interface.o clk-2xxx.o obj-$(CONFIG_ARCH_OMAP3) += $(clk-common) interface.o \ @@ -16,8 +17,10 @@ obj-$(CONFIG_ARCH_OMAP4) += $(clk-common) clk-44xx.o \ obj-$(CONFIG_SOC_OMAP5) += $(clk-common) clk-54xx.o \ dpll3xxx.o dpll44xx.o obj-$(CONFIG_SOC_DRA7XX) += $(clk-common) clk-7xx.o \ - clk-dra7-atl.o dpll3xxx.o dpll44xx.o -obj-$(CONFIG_SOC_AM43XX) += $(clk-common) dpll3xxx.o clk-43xx.o + clk-dra7-atl.o dpll3xxx.o \ + dpll44xx.o clk-7xx-compat.o +obj-$(CONFIG_SOC_AM43XX) += $(clk-common) dpll3xxx.o clk-43xx.o \ + clk-43xx-compat.o endif # CONFIG_ARCH_OMAP2PLUS diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c index 61c126a5d26a..222f68bc3f2a 100644 --- a/drivers/clk/ti/apll.c +++ b/drivers/clk/ti/apll.c @@ -143,8 +143,8 @@ static void __init omap_clk_register_apll(void *user, clk = of_clk_get(node, 0); if (IS_ERR(clk)) { - pr_debug("clk-ref for %s not ready, retry\n", - node->name); + pr_debug("clk-ref for %pOFn not ready, retry\n", + node); if (!ti_clk_retry_init(node, hw, omap_clk_register_apll)) return; @@ -155,8 +155,8 @@ static void __init omap_clk_register_apll(void *user, clk = of_clk_get(node, 1); if (IS_ERR(clk)) { - pr_debug("clk-bypass for %s not ready, retry\n", - node->name); + pr_debug("clk-bypass for %pOFn not ready, retry\n", + node); if (!ti_clk_retry_init(node, hw, omap_clk_register_apll)) return; @@ -202,7 +202,7 @@ static void __init of_dra7_apll_setup(struct device_node *node) init->num_parents = 
of_clk_get_parent_count(node); if (init->num_parents < 1) { - pr_err("dra7 apll %s must have parent(s)\n", node->name); + pr_err("dra7 apll %pOFn must have parent(s)\n", node); goto cleanup; } @@ -366,7 +366,7 @@ static void __init of_omap2_apll_setup(struct device_node *node) init->num_parents = of_clk_get_parent_count(node); if (init->num_parents != 1) { - pr_err("%s must have one parent\n", node->name); + pr_err("%pOFn must have one parent\n", node); goto cleanup; } @@ -374,13 +374,13 @@ static void __init of_omap2_apll_setup(struct device_node *node) init->parent_names = &parent_name; if (of_property_read_u32(node, "ti,clock-frequency", &val)) { - pr_err("%s missing clock-frequency\n", node->name); + pr_err("%pOFn missing clock-frequency\n", node); goto cleanup; } clk_hw->fixed_rate = val; if (of_property_read_u32(node, "ti,bit-shift", &val)) { - pr_err("%s missing bit-shift\n", node->name); + pr_err("%pOFn missing bit-shift\n", node); goto cleanup; } @@ -389,7 +389,7 @@ static void __init of_omap2_apll_setup(struct device_node *node) ad->autoidle_mask = 0x3 << val; if (of_property_read_u32(node, "ti,idlest-shift", &val)) { - pr_err("%s missing idlest-shift\n", node->name); + pr_err("%pOFn missing idlest-shift\n", node); goto cleanup; } diff --git a/drivers/clk/ti/clk-33xx-compat.c b/drivers/clk/ti/clk-33xx-compat.c new file mode 100644 index 000000000000..3e07f127912a --- /dev/null +++ b/drivers/clk/ti/clk-33xx-compat.c @@ -0,0 +1,218 @@ +/* + * AM33XX Clock init + * + * Copyright (C) 2013 Texas Instruments, Inc + * Tero Kristo (t-kristo@ti.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/clk/ti.h> +#include <dt-bindings/clock/am3.h> + +#include "clock.h" + +static const char * const am3_gpio1_dbclk_parents[] __initconst = { + "l4_per_cm:clk:0138:0", + NULL, +}; + +static const struct omap_clkctrl_bit_data am3_gpio2_bit_data[] __initconst = { + { 18, TI_CLK_GATE, am3_gpio1_dbclk_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data am3_gpio3_bit_data[] __initconst = { + { 18, TI_CLK_GATE, am3_gpio1_dbclk_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data am3_gpio4_bit_data[] __initconst = { + { 18, TI_CLK_GATE, am3_gpio1_dbclk_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am3_l4_per_clkctrl_regs[] __initconst = { + { AM3_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" }, + { AM3_LCDC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "lcd_gclk", "lcdc_clkdm" }, + { AM3_USB_OTG_HS_CLKCTRL, NULL, CLKF_SW_SUP, "usbotg_fck", "l3s_clkdm" }, + { AM3_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, + { AM3_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_div2_ck", "l3_clkdm" }, + { AM3_OCMCRAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, + { AM3_GPMC_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" }, + { AM3_MCASP0_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp0_fck", "l3s_clkdm" }, + { AM3_UART6_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM3_MMC1_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" }, + { AM3_ELM_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM3_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM3_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM3_SPI0_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM3_SPI1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM3_L4_LS_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM3_MCASP1_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp1_fck", "l3s_clkdm" }, + { AM3_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM3_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM3_UART4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM3_UART5_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM3_TIMER7_CLKCTRL, NULL, CLKF_SW_SUP, "timer7_fck" }, + { AM3_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "timer2_fck" }, + { AM3_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "timer3_fck" }, + { AM3_TIMER4_CLKCTRL, NULL, CLKF_SW_SUP, "timer4_fck" }, + { AM3_RNG_CLKCTRL, NULL, CLKF_SW_SUP, "rng_fck" }, + { AM3_AES_CLKCTRL, NULL, CLKF_SW_SUP, "aes0_fck", "l3_clkdm" }, + { AM3_SHAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, + { AM3_GPIO2_CLKCTRL, am3_gpio2_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, + { AM3_GPIO3_CLKCTRL, am3_gpio3_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, + { AM3_GPIO4_CLKCTRL, am3_gpio4_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, + { AM3_TPCC_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, + { AM3_D_CAN0_CLKCTRL, NULL, CLKF_SW_SUP, "dcan0_fck" }, + { AM3_D_CAN1_CLKCTRL, NULL, CLKF_SW_SUP, "dcan1_fck" }, + { AM3_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM3_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM3_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM3_L3_INSTR_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, + { AM3_L3_MAIN_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, + { AM3_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP, "pruss_ocp_gclk", "pruss_ocp_clkdm" }, + { AM3_TIMER5_CLKCTRL, NULL, CLKF_SW_SUP, 
"timer5_fck" }, + { AM3_TIMER6_CLKCTRL, NULL, CLKF_SW_SUP, "timer6_fck" }, + { AM3_MMC2_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" }, + { AM3_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk", "l3s_clkdm" }, + { AM3_TPTC1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, + { AM3_TPTC2_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, + { AM3_SPINLOCK_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM3_MAILBOX_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM3_L4_HS_CLKCTRL, NULL, CLKF_SW_SUP, "l4hs_gclk", "l4hs_clkdm" }, + { AM3_OCPWP_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM3_CLKDIV32K_CLKCTRL, NULL, CLKF_SW_SUP, "clkdiv32k_ck", "clk_24mhz_clkdm" }, + { 0 }, +}; + +static const char * const am3_gpio0_dbclk_parents[] __initconst = { + "gpio0_dbclk_mux_ck", + NULL, +}; + +static const struct omap_clkctrl_bit_data am3_gpio1_bit_data[] __initconst = { + { 18, TI_CLK_GATE, am3_gpio0_dbclk_parents, NULL }, + { 0 }, +}; + +static const char * const am3_dbg_sysclk_ck_parents[] __initconst = { + "sys_clkin_ck", + NULL, +}; + +static const char * const am3_trace_pmd_clk_mux_ck_parents[] __initconst = { + "l4_wkup_cm:clk:0010:19", + "l4_wkup_cm:clk:0010:30", + NULL, +}; + +static const char * const am3_trace_clk_div_ck_parents[] __initconst = { + "l4_wkup_cm:clk:0010:20", + NULL, +}; + +static const struct omap_clkctrl_div_data am3_trace_clk_div_ck_data __initconst = { + .max_div = 64, + .flags = CLK_DIVIDER_POWER_OF_TWO, +}; + +static const char * const am3_stm_clk_div_ck_parents[] __initconst = { + "l4_wkup_cm:clk:0010:22", + NULL, +}; + +static const struct omap_clkctrl_div_data am3_stm_clk_div_ck_data __initconst = { + .max_div = 64, + .flags = CLK_DIVIDER_POWER_OF_TWO, +}; + +static const char * const am3_dbg_clka_ck_parents[] __initconst = { + "dpll_core_m4_ck", + NULL, +}; + +static const struct omap_clkctrl_bit_data am3_debugss_bit_data[] __initconst = { + { 19, TI_CLK_GATE, am3_dbg_sysclk_ck_parents, NULL }, + { 20, TI_CLK_MUX, am3_trace_pmd_clk_mux_ck_parents, NULL }, + { 22, TI_CLK_MUX, am3_trace_pmd_clk_mux_ck_parents, NULL }, + { 24, TI_CLK_DIVIDER, am3_trace_clk_div_ck_parents, &am3_trace_clk_div_ck_data }, + { 27, TI_CLK_DIVIDER, am3_stm_clk_div_ck_parents, &am3_stm_clk_div_ck_data }, + { 30, TI_CLK_GATE, am3_dbg_clka_ck_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am3_l4_wkup_clkctrl_regs[] __initconst = { + { AM3_CONTROL_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_m4_div2_ck" }, + { AM3_GPIO1_CLKCTRL, am3_gpio1_bit_data, CLKF_SW_SUP, "dpll_core_m4_div2_ck" }, + { AM3_L4_WKUP_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_m4_div2_ck" }, + { AM3_DEBUGSS_CLKCTRL, am3_debugss_bit_data, CLKF_SW_SUP, "l4_wkup_cm:clk:0010:24", "l3_aon_clkdm" }, + { AM3_WKUP_M3_CLKCTRL, NULL, CLKF_NO_IDLEST, "dpll_core_m4_div2_ck", "l4_wkup_aon_clkdm" }, + { AM3_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" }, + { AM3_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" }, + { AM3_ADC_TSC_CLKCTRL, NULL, CLKF_SW_SUP, "adc_tsc_fck" }, + { AM3_SMARTREFLEX0_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex0_fck" }, + { AM3_TIMER1_CLKCTRL, NULL, CLKF_SW_SUP, "timer1_fck" }, + { AM3_SMARTREFLEX1_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex1_fck" }, + { AM3_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "wdt1_fck" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am3_mpu_clkctrl_regs[] __initconst = { + { AM3_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_mpu_m2_ck" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am3_l4_rtc_clkctrl_regs[] __initconst = { + { AM3_RTC_CLKCTRL, 
NULL, CLKF_SW_SUP, "clk_32768_ck" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am3_gfx_l3_clkctrl_regs[] __initconst = { + { AM3_GFX_CLKCTRL, NULL, CLKF_SW_SUP, "gfx_fck_div_ck" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am3_l4_cefuse_clkctrl_regs[] __initconst = { + { AM3_CEFUSE_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck" }, + { 0 }, +}; + +const struct omap_clkctrl_data am3_clkctrl_compat_data[] __initconst = { + { 0x44e00014, am3_l4_per_clkctrl_regs }, + { 0x44e00404, am3_l4_wkup_clkctrl_regs }, + { 0x44e00604, am3_mpu_clkctrl_regs }, + { 0x44e00800, am3_l4_rtc_clkctrl_regs }, + { 0x44e00904, am3_gfx_l3_clkctrl_regs }, + { 0x44e00a20, am3_l4_cefuse_clkctrl_regs }, + { 0 }, +}; + +struct ti_dt_clk am33xx_compat_clks[] = { + DT_CLK(NULL, "timer_32k_ck", "l4_per_cm:0138:0"), + DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"), + DT_CLK(NULL, "clkdiv32k_ick", "l4_per_cm:0138:0"), + DT_CLK(NULL, "dbg_clka_ck", "l4_wkup_cm:0010:30"), + DT_CLK(NULL, "dbg_sysclk_ck", "l4_wkup_cm:0010:19"), + DT_CLK(NULL, "gpio0_dbclk", "l4_wkup_cm:0004:18"), + DT_CLK(NULL, "gpio1_dbclk", "l4_per_cm:0098:18"), + DT_CLK(NULL, "gpio2_dbclk", "l4_per_cm:009c:18"), + DT_CLK(NULL, "gpio3_dbclk", "l4_per_cm:00a0:18"), + DT_CLK(NULL, "stm_clk_div_ck", "l4_wkup_cm:0010:27"), + DT_CLK(NULL, "stm_pmd_clock_mux_ck", "l4_wkup_cm:0010:22"), + DT_CLK(NULL, "trace_clk_div_ck", "l4_wkup_cm:0010:24"), + DT_CLK(NULL, "trace_pmd_clk_mux_ck", "l4_wkup_cm:0010:20"), + { .node_name = NULL }, +}; diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c index 12e0a2d19911..a360d3109555 100644 --- a/drivers/clk/ti/clk-33xx.c +++ b/drivers/clk/ti/clk-33xx.c @@ -24,7 +24,7 @@ #include "clock.h" static const char * const am3_gpio1_dbclk_parents[] __initconst = { - "l4_per_cm:clk:0138:0", + "clk-24mhz-clkctrl:0000:0", NULL, }; @@ -43,58 +43,86 @@ static const struct omap_clkctrl_bit_data am3_gpio4_bit_data[] __initconst = { { 0 }, }; -static const struct omap_clkctrl_reg_data am3_l4_per_clkctrl_regs[] __initconst = { - { AM3_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" }, - { AM3_LCDC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "lcd_gclk", "lcdc_clkdm" }, - { AM3_USB_OTG_HS_CLKCTRL, NULL, CLKF_SW_SUP, "usbotg_fck", "l3s_clkdm" }, - { AM3_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, - { AM3_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_div2_ck", "l3_clkdm" }, - { AM3_OCMCRAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, - { AM3_GPMC_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" }, - { AM3_MCASP0_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp0_fck", "l3s_clkdm" }, - { AM3_UART6_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM3_MMC1_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" }, - { AM3_ELM_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, - { AM3_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM3_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM3_SPI0_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM3_SPI1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM3_L4_LS_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, - { AM3_MCASP1_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp1_fck", "l3s_clkdm" }, - { AM3_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM3_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM3_UART4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM3_UART5_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM3_TIMER7_CLKCTRL, NULL, CLKF_SW_SUP, "timer7_fck" }, - { 
AM3_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "timer2_fck" }, - { AM3_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "timer3_fck" }, - { AM3_TIMER4_CLKCTRL, NULL, CLKF_SW_SUP, "timer4_fck" }, - { AM3_RNG_CLKCTRL, NULL, CLKF_SW_SUP, "rng_fck" }, - { AM3_AES_CLKCTRL, NULL, CLKF_SW_SUP, "aes0_fck", "l3_clkdm" }, - { AM3_SHAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, - { AM3_GPIO2_CLKCTRL, am3_gpio2_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, - { AM3_GPIO3_CLKCTRL, am3_gpio3_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, - { AM3_GPIO4_CLKCTRL, am3_gpio4_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, - { AM3_TPCC_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, - { AM3_D_CAN0_CLKCTRL, NULL, CLKF_SW_SUP, "dcan0_fck" }, - { AM3_D_CAN1_CLKCTRL, NULL, CLKF_SW_SUP, "dcan1_fck" }, - { AM3_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, - { AM3_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, - { AM3_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, - { AM3_L3_INSTR_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, - { AM3_L3_MAIN_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, - { AM3_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP, "pruss_ocp_gclk", "pruss_ocp_clkdm" }, - { AM3_TIMER5_CLKCTRL, NULL, CLKF_SW_SUP, "timer5_fck" }, - { AM3_TIMER6_CLKCTRL, NULL, CLKF_SW_SUP, "timer6_fck" }, - { AM3_MMC2_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" }, - { AM3_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk", "l3s_clkdm" }, - { AM3_TPTC1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, - { AM3_TPTC2_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, - { AM3_SPINLOCK_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, - { AM3_MAILBOX_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, - { AM3_L4_HS_CLKCTRL, NULL, CLKF_SW_SUP, "l4hs_gclk", "l4hs_clkdm" }, - { AM3_OCPWP_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, - { AM3_CLKDIV32K_CLKCTRL, NULL, CLKF_SW_SUP, "clkdiv32k_ck", "clk_24mhz_clkdm" }, +static const struct omap_clkctrl_reg_data am3_l4ls_clkctrl_regs[] __initconst = { + { AM3_L4LS_UART6_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM3_L4LS_MMC1_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" }, + { AM3_L4LS_ELM_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM3_L4LS_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM3_L4LS_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM3_L4LS_SPI0_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM3_L4LS_SPI1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM3_L4LS_L4_LS_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM3_L4LS_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM3_L4LS_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM3_L4LS_UART4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM3_L4LS_UART5_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM3_L4LS_TIMER7_CLKCTRL, NULL, CLKF_SW_SUP, "timer7_fck" }, + { AM3_L4LS_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "timer2_fck" }, + { AM3_L4LS_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "timer3_fck" }, + { AM3_L4LS_TIMER4_CLKCTRL, NULL, CLKF_SW_SUP, "timer4_fck" }, + { AM3_L4LS_RNG_CLKCTRL, NULL, CLKF_SW_SUP, "rng_fck" }, + { AM3_L4LS_GPIO2_CLKCTRL, am3_gpio2_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, + { AM3_L4LS_GPIO3_CLKCTRL, am3_gpio3_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, + { AM3_L4LS_GPIO4_CLKCTRL, am3_gpio4_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, + { AM3_L4LS_D_CAN0_CLKCTRL, NULL, CLKF_SW_SUP, "dcan0_fck" }, + { AM3_L4LS_D_CAN1_CLKCTRL, NULL, CLKF_SW_SUP, "dcan1_fck" }, + { AM3_L4LS_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM3_L4LS_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" 
}, + { AM3_L4LS_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM3_L4LS_TIMER5_CLKCTRL, NULL, CLKF_SW_SUP, "timer5_fck" }, + { AM3_L4LS_TIMER6_CLKCTRL, NULL, CLKF_SW_SUP, "timer6_fck" }, + { AM3_L4LS_MMC2_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" }, + { AM3_L4LS_SPINLOCK_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM3_L4LS_MAILBOX_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM3_L4LS_OCPWP_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am3_l3s_clkctrl_regs[] __initconst = { + { AM3_L3S_USB_OTG_HS_CLKCTRL, NULL, CLKF_SW_SUP, "usbotg_fck" }, + { AM3_L3S_GPMC_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk" }, + { AM3_L3S_MCASP0_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp0_fck" }, + { AM3_L3S_MCASP1_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp1_fck" }, + { AM3_L3S_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am3_l3_clkctrl_regs[] __initconst = { + { AM3_L3_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" }, + { AM3_L3_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_div2_ck" }, + { AM3_L3_OCMCRAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" }, + { AM3_L3_AES_CLKCTRL, NULL, CLKF_SW_SUP, "aes0_fck" }, + { AM3_L3_SHAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" }, + { AM3_L3_TPCC_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" }, + { AM3_L3_L3_INSTR_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" }, + { AM3_L3_L3_MAIN_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" }, + { AM3_L3_TPTC1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" }, + { AM3_L3_TPTC2_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am3_l4hs_clkctrl_regs[] __initconst = { + { AM3_L4HS_L4_HS_CLKCTRL, NULL, CLKF_SW_SUP, "l4hs_gclk" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am3_pruss_ocp_clkctrl_regs[] __initconst = { + { AM3_PRUSS_OCP_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP, "pruss_ocp_gclk" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am3_cpsw_125mhz_clkctrl_regs[] __initconst = { + { AM3_CPSW_125MHZ_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am3_lcdc_clkctrl_regs[] __initconst = { + { AM3_LCDC_LCDC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "lcd_gclk" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am3_clk_24mhz_clkctrl_regs[] __initconst = { + { AM3_CLK_24MHZ_CLKDIV32K_CLKCTRL, NULL, CLKF_SW_SUP, "clkdiv32k_ck" }, { 0 }, }; @@ -108,19 +136,33 @@ static const struct omap_clkctrl_bit_data am3_gpio1_bit_data[] __initconst = { { 0 }, }; +static const struct omap_clkctrl_reg_data am3_l4_wkup_clkctrl_regs[] __initconst = { + { AM3_L4_WKUP_CONTROL_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_m4_div2_ck" }, + { AM3_L4_WKUP_GPIO1_CLKCTRL, am3_gpio1_bit_data, CLKF_SW_SUP, "dpll_core_m4_div2_ck" }, + { AM3_L4_WKUP_L4_WKUP_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_m4_div2_ck" }, + { AM3_L4_WKUP_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" }, + { AM3_L4_WKUP_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" }, + { AM3_L4_WKUP_ADC_TSC_CLKCTRL, NULL, CLKF_SW_SUP, "adc_tsc_fck" }, + { AM3_L4_WKUP_SMARTREFLEX0_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex0_fck" }, + { AM3_L4_WKUP_TIMER1_CLKCTRL, NULL, CLKF_SW_SUP, "timer1_fck" }, + { AM3_L4_WKUP_SMARTREFLEX1_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex1_fck" }, + { AM3_L4_WKUP_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "wdt1_fck" }, + { 0 }, +}; + static const char * const am3_dbg_sysclk_ck_parents[] __initconst = { "sys_clkin_ck", NULL, }; static const char * const 
am3_trace_pmd_clk_mux_ck_parents[] __initconst = { - "l4_wkup_cm:clk:0010:19", - "l4_wkup_cm:clk:0010:30", + "l3-aon-clkctrl:0000:19", + "l3-aon-clkctrl:0000:30", NULL, }; static const char * const am3_trace_clk_div_ck_parents[] __initconst = { - "l4_wkup_cm:clk:0010:20", + "l3-aon-clkctrl:0000:20", NULL, }; @@ -130,7 +172,7 @@ static const struct omap_clkctrl_div_data am3_trace_clk_div_ck_data __initconst }; static const char * const am3_stm_clk_div_ck_parents[] __initconst = { - "l4_wkup_cm:clk:0010:22", + "l3-aon-clkctrl:0000:22", NULL, }; @@ -154,66 +196,69 @@ static const struct omap_clkctrl_bit_data am3_debugss_bit_data[] __initconst = { { 0 }, }; -static const struct omap_clkctrl_reg_data am3_l4_wkup_clkctrl_regs[] __initconst = { - { AM3_CONTROL_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_m4_div2_ck" }, - { AM3_GPIO1_CLKCTRL, am3_gpio1_bit_data, CLKF_SW_SUP, "dpll_core_m4_div2_ck" }, - { AM3_L4_WKUP_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_m4_div2_ck" }, - { AM3_DEBUGSS_CLKCTRL, am3_debugss_bit_data, CLKF_SW_SUP, "l4_wkup_cm:clk:0010:24", "l3_aon_clkdm" }, - { AM3_WKUP_M3_CLKCTRL, NULL, CLKF_NO_IDLEST, "dpll_core_m4_div2_ck", "l4_wkup_aon_clkdm" }, - { AM3_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" }, - { AM3_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" }, - { AM3_ADC_TSC_CLKCTRL, NULL, CLKF_SW_SUP, "adc_tsc_fck" }, - { AM3_SMARTREFLEX0_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex0_fck" }, - { AM3_TIMER1_CLKCTRL, NULL, CLKF_SW_SUP, "timer1_fck" }, - { AM3_SMARTREFLEX1_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex1_fck" }, - { AM3_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "wdt1_fck" }, +static const struct omap_clkctrl_reg_data am3_l3_aon_clkctrl_regs[] __initconst = { + { AM3_L3_AON_DEBUGSS_CLKCTRL, am3_debugss_bit_data, CLKF_SW_SUP, "l3-aon-clkctrl:0000:24" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am3_l4_wkup_aon_clkctrl_regs[] __initconst = { + { AM3_L4_WKUP_AON_WKUP_M3_CLKCTRL, NULL, CLKF_NO_IDLEST, "dpll_core_m4_div2_ck" }, { 0 }, }; static const struct omap_clkctrl_reg_data am3_mpu_clkctrl_regs[] __initconst = { - { AM3_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_mpu_m2_ck" }, + { AM3_MPU_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_mpu_m2_ck" }, { 0 }, }; static const struct omap_clkctrl_reg_data am3_l4_rtc_clkctrl_regs[] __initconst = { - { AM3_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" }, + { AM3_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" }, { 0 }, }; static const struct omap_clkctrl_reg_data am3_gfx_l3_clkctrl_regs[] __initconst = { - { AM3_GFX_CLKCTRL, NULL, CLKF_SW_SUP, "gfx_fck_div_ck" }, + { AM3_GFX_L3_GFX_CLKCTRL, NULL, CLKF_SW_SUP, "gfx_fck_div_ck" }, { 0 }, }; static const struct omap_clkctrl_reg_data am3_l4_cefuse_clkctrl_regs[] __initconst = { - { AM3_CEFUSE_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck" }, + { AM3_L4_CEFUSE_CEFUSE_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck" }, { 0 }, }; const struct omap_clkctrl_data am3_clkctrl_data[] __initconst = { - { 0x44e00014, am3_l4_per_clkctrl_regs }, - { 0x44e00404, am3_l4_wkup_clkctrl_regs }, - { 0x44e00604, am3_mpu_clkctrl_regs }, + { 0x44e00038, am3_l4ls_clkctrl_regs }, + { 0x44e0001c, am3_l3s_clkctrl_regs }, + { 0x44e00024, am3_l3_clkctrl_regs }, + { 0x44e00120, am3_l4hs_clkctrl_regs }, + { 0x44e000e8, am3_pruss_ocp_clkctrl_regs }, + { 0x44e00000, am3_cpsw_125mhz_clkctrl_regs }, + { 0x44e00018, am3_lcdc_clkctrl_regs }, + { 0x44e0014c, am3_clk_24mhz_clkctrl_regs }, + { 0x44e00400, am3_l4_wkup_clkctrl_regs }, + { 0x44e00414, am3_l3_aon_clkctrl_regs }, + { 0x44e004b0, 
am3_l4_wkup_aon_clkctrl_regs }, + { 0x44e00600, am3_mpu_clkctrl_regs }, { 0x44e00800, am3_l4_rtc_clkctrl_regs }, - { 0x44e00904, am3_gfx_l3_clkctrl_regs }, - { 0x44e00a20, am3_l4_cefuse_clkctrl_regs }, + { 0x44e00900, am3_gfx_l3_clkctrl_regs }, + { 0x44e00a00, am3_l4_cefuse_clkctrl_regs }, { 0 }, }; static struct ti_dt_clk am33xx_clks[] = { - DT_CLK(NULL, "timer_32k_ck", "l4_per_cm:0138:0"), + DT_CLK(NULL, "timer_32k_ck", "clk-24mhz-clkctrl:0000:0"), DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"), - DT_CLK(NULL, "clkdiv32k_ick", "l4_per_cm:0138:0"), - DT_CLK(NULL, "dbg_clka_ck", "l4_wkup_cm:0010:30"), - DT_CLK(NULL, "dbg_sysclk_ck", "l4_wkup_cm:0010:19"), - DT_CLK(NULL, "gpio0_dbclk", "l4_wkup_cm:0004:18"), - DT_CLK(NULL, "gpio1_dbclk", "l4_per_cm:0098:18"), - DT_CLK(NULL, "gpio2_dbclk", "l4_per_cm:009c:18"), - DT_CLK(NULL, "gpio3_dbclk", "l4_per_cm:00a0:18"), - DT_CLK(NULL, "stm_clk_div_ck", "l4_wkup_cm:0010:27"), - DT_CLK(NULL, "stm_pmd_clock_mux_ck", "l4_wkup_cm:0010:22"), - DT_CLK(NULL, "trace_clk_div_ck", "l4_wkup_cm:0010:24"), - DT_CLK(NULL, "trace_pmd_clk_mux_ck", "l4_wkup_cm:0010:20"), + DT_CLK(NULL, "clkdiv32k_ick", "clk-24mhz-clkctrl:0000:0"), + DT_CLK(NULL, "dbg_clka_ck", "l3-aon-clkctrl:0000:30"), + DT_CLK(NULL, "dbg_sysclk_ck", "l3-aon-clkctrl:0000:19"), + DT_CLK(NULL, "gpio0_dbclk", "l4-wkup-clkctrl:0008:18"), + DT_CLK(NULL, "gpio1_dbclk", "l4ls-clkctrl:0074:18"), + DT_CLK(NULL, "gpio2_dbclk", "l4ls-clkctrl:0078:18"), + DT_CLK(NULL, "gpio3_dbclk", "l4ls-clkctrl:007c:18"), + DT_CLK(NULL, "stm_clk_div_ck", "l3-aon-clkctrl:0000:27"), + DT_CLK(NULL, "stm_pmd_clock_mux_ck", "l3-aon-clkctrl:0000:22"), + DT_CLK(NULL, "trace_clk_div_ck", "l3-aon-clkctrl:0000:24"), + DT_CLK(NULL, "trace_pmd_clk_mux_ck", "l3-aon-clkctrl:0000:20"), { .node_name = NULL }, }; @@ -232,7 +277,10 @@ int __init am33xx_dt_clk_init(void) { struct clk *clk1, *clk2; - ti_dt_clocks_register(am33xx_clks); + if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) + ti_dt_clocks_register(am33xx_compat_clks); + else + ti_dt_clocks_register(am33xx_clks); omap2_clk_disable_autoidle_all(); diff --git a/drivers/clk/ti/clk-43xx-compat.c b/drivers/clk/ti/clk-43xx-compat.c new file mode 100644 index 000000000000..513039843392 --- /dev/null +++ b/drivers/clk/ti/clk-43xx-compat.c @@ -0,0 +1,225 @@ +/* + * AM43XX Clock init + * + * Copyright (C) 2013 Texas Instruments, Inc + * Tero Kristo (t-kristo@ti.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/clk/ti.h> +#include <dt-bindings/clock/am4.h> + +#include "clock.h" + +static const char * const am4_synctimer_32kclk_parents[] __initconst = { + "mux_synctimer32k_ck", + NULL, +}; + +static const struct omap_clkctrl_bit_data am4_counter_32k_bit_data[] __initconst = { + { 8, TI_CLK_GATE, am4_synctimer_32kclk_parents, NULL }, + { 0 }, +}; + +static const char * const am4_gpio0_dbclk_parents[] __initconst = { + "gpio0_dbclk_mux_ck", + NULL, +}; + +static const struct omap_clkctrl_bit_data am4_gpio1_bit_data[] __initconst = { + { 8, TI_CLK_GATE, am4_gpio0_dbclk_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am4_l4_wkup_clkctrl_regs[] __initconst = { + { AM4_ADC_TSC_CLKCTRL, NULL, CLKF_SW_SUP, "adc_tsc_fck", "l3s_tsc_clkdm" }, + { AM4_L4_WKUP_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck", "l4_wkup_clkdm" }, + { AM4_WKUP_M3_CLKCTRL, NULL, CLKF_NO_IDLEST, "sys_clkin_ck" }, + { AM4_COUNTER_32K_CLKCTRL, am4_counter_32k_bit_data, CLKF_SW_SUP, "l4_wkup_cm:clk:0210:8" }, + { AM4_TIMER1_CLKCTRL, NULL, CLKF_SW_SUP, "timer1_fck", "l4_wkup_clkdm" }, + { AM4_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "wdt1_fck", "l4_wkup_clkdm" }, + { AM4_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck", "l4_wkup_clkdm" }, + { AM4_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck", "l4_wkup_clkdm" }, + { AM4_SMARTREFLEX0_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex0_fck", "l4_wkup_clkdm" }, + { AM4_SMARTREFLEX1_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex1_fck", "l4_wkup_clkdm" }, + { AM4_CONTROL_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck", "l4_wkup_clkdm" }, + { AM4_GPIO1_CLKCTRL, am4_gpio1_bit_data, CLKF_SW_SUP, "sys_clkin_ck", "l4_wkup_clkdm" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am4_mpu_clkctrl_regs[] __initconst = { + { AM4_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_mpu_m2_ck" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am4_gfx_l3_clkctrl_regs[] __initconst = { + { AM4_GFX_CLKCTRL, NULL, CLKF_SW_SUP, "gfx_fck_div_ck" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am4_l4_rtc_clkctrl_regs[] __initconst = { + { AM4_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" }, + { 0 }, +}; + +static const char * const am4_usb_otg_ss0_refclk960m_parents[] __initconst = { + "dpll_per_clkdcoldo", + NULL, +}; + +static const struct omap_clkctrl_bit_data am4_usb_otg_ss0_bit_data[] __initconst = { + { 8, TI_CLK_GATE, am4_usb_otg_ss0_refclk960m_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data am4_usb_otg_ss1_bit_data[] __initconst = { + { 8, TI_CLK_GATE, am4_usb_otg_ss0_refclk960m_parents, NULL }, + { 0 }, +}; + +static const char * const am4_gpio1_dbclk_parents[] __initconst = { + "clkdiv32k_ick", + NULL, +}; + +static const struct omap_clkctrl_bit_data am4_gpio2_bit_data[] __initconst = { + { 8, TI_CLK_GATE, am4_gpio1_dbclk_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data am4_gpio3_bit_data[] __initconst = { + { 8, TI_CLK_GATE, am4_gpio1_dbclk_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data am4_gpio4_bit_data[] __initconst = { + { 8, TI_CLK_GATE, am4_gpio1_dbclk_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data am4_gpio5_bit_data[] __initconst = { + { 8, TI_CLK_GATE, am4_gpio1_dbclk_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data am4_gpio6_bit_data[] __initconst = { + { 8, TI_CLK_GATE, 
am4_gpio1_dbclk_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am4_l4_per_clkctrl_regs[] __initconst = { + { AM4_L3_MAIN_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, + { AM4_AES_CLKCTRL, NULL, CLKF_SW_SUP, "aes0_fck", "l3_clkdm" }, + { AM4_DES_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, + { AM4_L3_INSTR_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, + { AM4_OCMCRAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, + { AM4_SHAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, + { AM4_VPFE0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3s_clkdm" }, + { AM4_VPFE1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3s_clkdm" }, + { AM4_TPCC_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, + { AM4_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, + { AM4_TPTC1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, + { AM4_TPTC2_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, + { AM4_L4_HS_CLKCTRL, NULL, CLKF_SW_SUP, "l4hs_gclk", "l3_clkdm" }, + { AM4_GPMC_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" }, + { AM4_MCASP0_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp0_fck", "l3s_clkdm" }, + { AM4_MCASP1_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp1_fck", "l3s_clkdm" }, + { AM4_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk", "l3s_clkdm" }, + { AM4_QSPI_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" }, + { AM4_USB_OTG_SS0_CLKCTRL, am4_usb_otg_ss0_bit_data, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" }, + { AM4_USB_OTG_SS1_CLKCTRL, am4_usb_otg_ss1_bit_data, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" }, + { AM4_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP, "pruss_ocp_gclk", "pruss_ocp_clkdm" }, + { AM4_L4_LS_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_D_CAN0_CLKCTRL, NULL, CLKF_SW_SUP, "dcan0_fck" }, + { AM4_D_CAN1_CLKCTRL, NULL, CLKF_SW_SUP, "dcan1_fck" }, + { AM4_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_EPWMSS3_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_EPWMSS4_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_EPWMSS5_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_ELM_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_GPIO2_CLKCTRL, am4_gpio2_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_GPIO3_CLKCTRL, am4_gpio3_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_GPIO4_CLKCTRL, am4_gpio4_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_GPIO5_CLKCTRL, am4_gpio5_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_GPIO6_CLKCTRL, am4_gpio6_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_HDQ1W_CLKCTRL, NULL, CLKF_SW_SUP, "func_12m_clk" }, + { AM4_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_MAILBOX_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_MMC1_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" }, + { AM4_MMC2_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" }, + { AM4_RNG_CLKCTRL, NULL, CLKF_SW_SUP, "rng_fck" }, + { AM4_SPI0_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_SPI1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_SPI2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_SPI3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_SPI4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_SPINLOCK_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "timer2_fck" }, + { AM4_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "timer3_fck" }, + { AM4_TIMER4_CLKCTRL, NULL, CLKF_SW_SUP, "timer4_fck" }, + { 
AM4_TIMER5_CLKCTRL, NULL, CLKF_SW_SUP, "timer5_fck" }, + { AM4_TIMER6_CLKCTRL, NULL, CLKF_SW_SUP, "timer6_fck" }, + { AM4_TIMER7_CLKCTRL, NULL, CLKF_SW_SUP, "timer7_fck" }, + { AM4_TIMER8_CLKCTRL, NULL, CLKF_SW_SUP, "timer8_fck" }, + { AM4_TIMER9_CLKCTRL, NULL, CLKF_SW_SUP, "timer9_fck" }, + { AM4_TIMER10_CLKCTRL, NULL, CLKF_SW_SUP, "timer10_fck" }, + { AM4_TIMER11_CLKCTRL, NULL, CLKF_SW_SUP, "timer11_fck" }, + { AM4_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_UART4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_UART5_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_UART6_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_OCP2SCP0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_OCP2SCP1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_ck", "emif_clkdm" }, + { AM4_DSS_CORE_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "disp_clk", "dss_clkdm" }, + { AM4_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" }, + { 0 }, +}; + +const struct omap_clkctrl_data am4_clkctrl_compat_data[] __initconst = { + { 0x44df2820, am4_l4_wkup_clkctrl_regs }, + { 0x44df8320, am4_mpu_clkctrl_regs }, + { 0x44df8420, am4_gfx_l3_clkctrl_regs }, + { 0x44df8520, am4_l4_rtc_clkctrl_regs }, + { 0x44df8820, am4_l4_per_clkctrl_regs }, + { 0 }, +}; + +const struct omap_clkctrl_data am438x_clkctrl_compat_data[] __initconst = { + { 0x44df2820, am4_l4_wkup_clkctrl_regs }, + { 0x44df8320, am4_mpu_clkctrl_regs }, + { 0x44df8420, am4_gfx_l3_clkctrl_regs }, + { 0x44df8820, am4_l4_per_clkctrl_regs }, + { 0 }, +}; + +struct ti_dt_clk am43xx_compat_clks[] = { + DT_CLK(NULL, "timer_32k_ck", "clkdiv32k_ick"), + DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"), + DT_CLK(NULL, "gpio0_dbclk", "l4_wkup_cm:0348:8"), + DT_CLK(NULL, "gpio1_dbclk", "l4_per_cm:0458:8"), + DT_CLK(NULL, "gpio2_dbclk", "l4_per_cm:0460:8"), + DT_CLK(NULL, "gpio3_dbclk", "l4_per_cm:0468:8"), + DT_CLK(NULL, "gpio4_dbclk", "l4_per_cm:0470:8"), + DT_CLK(NULL, "gpio5_dbclk", "l4_per_cm:0478:8"), + DT_CLK(NULL, "synctimer_32kclk", "l4_wkup_cm:0210:8"), + DT_CLK(NULL, "usb_otg_ss0_refclk960m", "l4_per_cm:0240:8"), + DT_CLK(NULL, "usb_otg_ss1_refclk960m", "l4_per_cm:0248:8"), + { .node_name = NULL }, +}; diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c index 63c5ddb50187..2782d91838ac 100644 --- a/drivers/clk/ti/clk-43xx.c +++ b/drivers/clk/ti/clk-43xx.c @@ -23,6 +23,11 @@ #include "clock.h" +static const struct omap_clkctrl_reg_data am4_l3s_tsc_clkctrl_regs[] __initconst = { + { AM4_L3S_TSC_ADC_TSC_CLKCTRL, NULL, CLKF_SW_SUP, "adc_tsc_fck" }, + { 0 }, +}; + static const char * const am4_synctimer_32kclk_parents[] __initconst = { "mux_synctimer32k_ck", NULL, @@ -33,6 +38,12 @@ static const struct omap_clkctrl_bit_data am4_counter_32k_bit_data[] __initconst { 0 }, }; +static const struct omap_clkctrl_reg_data am4_l4_wkup_aon_clkctrl_regs[] __initconst = { + { AM4_L4_WKUP_AON_WKUP_M3_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "sys_clkin_ck" }, + { AM4_L4_WKUP_AON_COUNTER_32K_CLKCTRL, am4_counter_32k_bit_data, CLKF_SW_SUP, "l4-wkup-aon-clkctrl:0008:8" }, + { 0 }, +}; + static const char * const am4_gpio0_dbclk_parents[] __initconst = { "gpio0_dbclk_mux_ck", NULL, @@ -44,33 +55,45 @@ static const struct omap_clkctrl_bit_data am4_gpio1_bit_data[] __initconst = { }; static const struct omap_clkctrl_reg_data am4_l4_wkup_clkctrl_regs[] __initconst = { - { 
AM4_ADC_TSC_CLKCTRL, NULL, CLKF_SW_SUP, "adc_tsc_fck", "l3s_tsc_clkdm" }, - { AM4_L4_WKUP_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck", "l4_wkup_clkdm" }, - { AM4_WKUP_M3_CLKCTRL, NULL, CLKF_NO_IDLEST, "sys_clkin_ck" }, - { AM4_COUNTER_32K_CLKCTRL, am4_counter_32k_bit_data, CLKF_SW_SUP, "l4_wkup_cm:clk:0210:8" }, - { AM4_TIMER1_CLKCTRL, NULL, CLKF_SW_SUP, "timer1_fck", "l4_wkup_clkdm" }, - { AM4_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "wdt1_fck", "l4_wkup_clkdm" }, - { AM4_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck", "l4_wkup_clkdm" }, - { AM4_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck", "l4_wkup_clkdm" }, - { AM4_SMARTREFLEX0_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex0_fck", "l4_wkup_clkdm" }, - { AM4_SMARTREFLEX1_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex1_fck", "l4_wkup_clkdm" }, - { AM4_CONTROL_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck", "l4_wkup_clkdm" }, - { AM4_GPIO1_CLKCTRL, am4_gpio1_bit_data, CLKF_SW_SUP, "sys_clkin_ck", "l4_wkup_clkdm" }, + { AM4_L4_WKUP_L4_WKUP_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck" }, + { AM4_L4_WKUP_TIMER1_CLKCTRL, NULL, CLKF_SW_SUP, "timer1_fck" }, + { AM4_L4_WKUP_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "wdt1_fck" }, + { AM4_L4_WKUP_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" }, + { AM4_L4_WKUP_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" }, + { AM4_L4_WKUP_SMARTREFLEX0_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex0_fck" }, + { AM4_L4_WKUP_SMARTREFLEX1_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex1_fck" }, + { AM4_L4_WKUP_CONTROL_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck" }, + { AM4_L4_WKUP_GPIO1_CLKCTRL, am4_gpio1_bit_data, CLKF_SW_SUP, "sys_clkin_ck" }, { 0 }, }; static const struct omap_clkctrl_reg_data am4_mpu_clkctrl_regs[] __initconst = { - { AM4_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_mpu_m2_ck" }, + { AM4_MPU_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_mpu_m2_ck" }, { 0 }, }; static const struct omap_clkctrl_reg_data am4_gfx_l3_clkctrl_regs[] __initconst = { - { AM4_GFX_CLKCTRL, NULL, CLKF_SW_SUP, "gfx_fck_div_ck" }, + { AM4_GFX_L3_GFX_CLKCTRL, NULL, CLKF_SW_SUP, "gfx_fck_div_ck" }, { 0 }, }; static const struct omap_clkctrl_reg_data am4_l4_rtc_clkctrl_regs[] __initconst = { - { AM4_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" }, + { AM4_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am4_l3_clkctrl_regs[] __initconst = { + { AM4_L3_L3_MAIN_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" }, + { AM4_L3_AES_CLKCTRL, NULL, CLKF_SW_SUP, "aes0_fck" }, + { AM4_L3_DES_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" }, + { AM4_L3_L3_INSTR_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" }, + { AM4_L3_OCMCRAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" }, + { AM4_L3_SHAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" }, + { AM4_L3_TPCC_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" }, + { AM4_L3_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" }, + { AM4_L3_TPTC1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" }, + { AM4_L3_TPTC2_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" }, + { AM4_L3_L4_HS_CLKCTRL, NULL, CLKF_SW_SUP, "l4hs_gclk" }, { 0 }, }; @@ -89,6 +112,24 @@ static const struct omap_clkctrl_bit_data am4_usb_otg_ss1_bit_data[] __initconst { 0 }, }; +static const struct omap_clkctrl_reg_data am4_l3s_clkctrl_regs[] __initconst = { + { AM4_L3S_VPFE0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" }, + { AM4_L3S_VPFE1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" }, + { AM4_L3S_GPMC_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk" }, + { AM4_L3S_MCASP0_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp0_fck" }, + { AM4_L3S_MCASP1_CLKCTRL, NULL, CLKF_SW_SUP, 
"mcasp1_fck" }, + { AM4_L3S_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" }, + { AM4_L3S_QSPI_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk" }, + { AM4_L3S_USB_OTG_SS0_CLKCTRL, am4_usb_otg_ss0_bit_data, CLKF_SW_SUP, "l3s_gclk" }, + { AM4_L3S_USB_OTG_SS1_CLKCTRL, am4_usb_otg_ss1_bit_data, CLKF_SW_SUP, "l3s_gclk" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am4_pruss_ocp_clkctrl_regs[] __initconst = { + { AM4_PRUSS_OCP_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP, "pruss_ocp_gclk" }, + { 0 }, +}; + static const char * const am4_gpio1_dbclk_parents[] __initconst = { "clkdiv32k_ick", NULL, @@ -119,108 +160,115 @@ static const struct omap_clkctrl_bit_data am4_gpio6_bit_data[] __initconst = { { 0 }, }; -static const struct omap_clkctrl_reg_data am4_l4_per_clkctrl_regs[] __initconst = { - { AM4_L3_MAIN_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, - { AM4_AES_CLKCTRL, NULL, CLKF_SW_SUP, "aes0_fck", "l3_clkdm" }, - { AM4_DES_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, - { AM4_L3_INSTR_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, - { AM4_OCMCRAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, - { AM4_SHAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, - { AM4_VPFE0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3s_clkdm" }, - { AM4_VPFE1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3s_clkdm" }, - { AM4_TPCC_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, - { AM4_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, - { AM4_TPTC1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, - { AM4_TPTC2_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, - { AM4_L4_HS_CLKCTRL, NULL, CLKF_SW_SUP, "l4hs_gclk", "l3_clkdm" }, - { AM4_GPMC_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" }, - { AM4_MCASP0_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp0_fck", "l3s_clkdm" }, - { AM4_MCASP1_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp1_fck", "l3s_clkdm" }, - { AM4_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk", "l3s_clkdm" }, - { AM4_QSPI_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" }, - { AM4_USB_OTG_SS0_CLKCTRL, am4_usb_otg_ss0_bit_data, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" }, - { AM4_USB_OTG_SS1_CLKCTRL, am4_usb_otg_ss1_bit_data, CLKF_SW_SUP, "l3s_gclk", "l3s_clkdm" }, - { AM4_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP, "pruss_ocp_gclk", "pruss_ocp_clkdm" }, - { AM4_L4_LS_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, - { AM4_D_CAN0_CLKCTRL, NULL, CLKF_SW_SUP, "dcan0_fck" }, - { AM4_D_CAN1_CLKCTRL, NULL, CLKF_SW_SUP, "dcan1_fck" }, - { AM4_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, - { AM4_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, - { AM4_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, - { AM4_EPWMSS3_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, - { AM4_EPWMSS4_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, - { AM4_EPWMSS5_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, - { AM4_ELM_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, - { AM4_GPIO2_CLKCTRL, am4_gpio2_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, - { AM4_GPIO3_CLKCTRL, am4_gpio3_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, - { AM4_GPIO4_CLKCTRL, am4_gpio4_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, - { AM4_GPIO5_CLKCTRL, am4_gpio5_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, - { AM4_GPIO6_CLKCTRL, am4_gpio6_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, - { AM4_HDQ1W_CLKCTRL, NULL, CLKF_SW_SUP, "func_12m_clk" }, - { AM4_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM4_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM4_MAILBOX_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, - { AM4_MMC1_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" }, - { AM4_MMC2_CLKCTRL, NULL, 
CLKF_SW_SUP, "mmc_clk" }, - { AM4_RNG_CLKCTRL, NULL, CLKF_SW_SUP, "rng_fck" }, - { AM4_SPI0_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM4_SPI1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM4_SPI2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM4_SPI3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM4_SPI4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM4_SPINLOCK_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, - { AM4_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "timer2_fck" }, - { AM4_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "timer3_fck" }, - { AM4_TIMER4_CLKCTRL, NULL, CLKF_SW_SUP, "timer4_fck" }, - { AM4_TIMER5_CLKCTRL, NULL, CLKF_SW_SUP, "timer5_fck" }, - { AM4_TIMER6_CLKCTRL, NULL, CLKF_SW_SUP, "timer6_fck" }, - { AM4_TIMER7_CLKCTRL, NULL, CLKF_SW_SUP, "timer7_fck" }, - { AM4_TIMER8_CLKCTRL, NULL, CLKF_SW_SUP, "timer8_fck" }, - { AM4_TIMER9_CLKCTRL, NULL, CLKF_SW_SUP, "timer9_fck" }, - { AM4_TIMER10_CLKCTRL, NULL, CLKF_SW_SUP, "timer10_fck" }, - { AM4_TIMER11_CLKCTRL, NULL, CLKF_SW_SUP, "timer11_fck" }, - { AM4_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM4_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM4_UART4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM4_UART5_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM4_UART6_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, - { AM4_OCP2SCP0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, - { AM4_OCP2SCP1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, - { AM4_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_ck", "emif_clkdm" }, - { AM4_DSS_CORE_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "disp_clk", "dss_clkdm" }, - { AM4_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" }, +static const struct omap_clkctrl_reg_data am4_l4ls_clkctrl_regs[] __initconst = { + { AM4_L4LS_L4_LS_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_L4LS_D_CAN0_CLKCTRL, NULL, CLKF_SW_SUP, "dcan0_fck" }, + { AM4_L4LS_D_CAN1_CLKCTRL, NULL, CLKF_SW_SUP, "dcan1_fck" }, + { AM4_L4LS_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_L4LS_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_L4LS_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_L4LS_EPWMSS3_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_L4LS_EPWMSS4_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_L4LS_EPWMSS5_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_L4LS_ELM_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_L4LS_GPIO2_CLKCTRL, am4_gpio2_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_L4LS_GPIO3_CLKCTRL, am4_gpio3_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_L4LS_GPIO4_CLKCTRL, am4_gpio4_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_L4LS_GPIO5_CLKCTRL, am4_gpio5_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_L4LS_GPIO6_CLKCTRL, am4_gpio6_bit_data, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_L4LS_HDQ1W_CLKCTRL, NULL, CLKF_SW_SUP, "func_12m_clk" }, + { AM4_L4LS_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_L4LS_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_L4LS_MAILBOX_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_L4LS_MMC1_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" }, + { AM4_L4LS_MMC2_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" }, + { AM4_L4LS_RNG_CLKCTRL, NULL, CLKF_SW_SUP, "rng_fck" }, + { AM4_L4LS_SPI0_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_L4LS_SPI1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_L4LS_SPI2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { 
AM4_L4LS_SPI3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_L4LS_SPI4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_L4LS_SPINLOCK_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_L4LS_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "timer2_fck" }, + { AM4_L4LS_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "timer3_fck" }, + { AM4_L4LS_TIMER4_CLKCTRL, NULL, CLKF_SW_SUP, "timer4_fck" }, + { AM4_L4LS_TIMER5_CLKCTRL, NULL, CLKF_SW_SUP, "timer5_fck" }, + { AM4_L4LS_TIMER6_CLKCTRL, NULL, CLKF_SW_SUP, "timer6_fck" }, + { AM4_L4LS_TIMER7_CLKCTRL, NULL, CLKF_SW_SUP, "timer7_fck" }, + { AM4_L4LS_TIMER8_CLKCTRL, NULL, CLKF_SW_SUP, "timer8_fck" }, + { AM4_L4LS_TIMER9_CLKCTRL, NULL, CLKF_SW_SUP, "timer9_fck" }, + { AM4_L4LS_TIMER10_CLKCTRL, NULL, CLKF_SW_SUP, "timer10_fck" }, + { AM4_L4LS_TIMER11_CLKCTRL, NULL, CLKF_SW_SUP, "timer11_fck" }, + { AM4_L4LS_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_L4LS_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_L4LS_UART4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_L4LS_UART5_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_L4LS_UART6_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" }, + { AM4_L4LS_OCP2SCP0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { AM4_L4LS_OCP2SCP1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am4_emif_clkctrl_regs[] __initconst = { + { AM4_EMIF_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_ck" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am4_dss_clkctrl_regs[] __initconst = { + { AM4_DSS_DSS_CORE_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "disp_clk" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data am4_cpsw_125mhz_clkctrl_regs[] __initconst = { + { AM4_CPSW_125MHZ_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk" }, { 0 }, }; const struct omap_clkctrl_data am4_clkctrl_data[] __initconst = { - { 0x44df2820, am4_l4_wkup_clkctrl_regs }, + { 0x44df2920, am4_l3s_tsc_clkctrl_regs }, + { 0x44df2a28, am4_l4_wkup_aon_clkctrl_regs }, + { 0x44df2a20, am4_l4_wkup_clkctrl_regs }, { 0x44df8320, am4_mpu_clkctrl_regs }, { 0x44df8420, am4_gfx_l3_clkctrl_regs }, { 0x44df8520, am4_l4_rtc_clkctrl_regs }, - { 0x44df8820, am4_l4_per_clkctrl_regs }, + { 0x44df8820, am4_l3_clkctrl_regs }, + { 0x44df8868, am4_l3s_clkctrl_regs }, + { 0x44df8b20, am4_pruss_ocp_clkctrl_regs }, + { 0x44df8c20, am4_l4ls_clkctrl_regs }, + { 0x44df8f20, am4_emif_clkctrl_regs }, + { 0x44df9220, am4_dss_clkctrl_regs }, + { 0x44df9320, am4_cpsw_125mhz_clkctrl_regs }, { 0 }, }; const struct omap_clkctrl_data am438x_clkctrl_data[] __initconst = { - { 0x44df2820, am4_l4_wkup_clkctrl_regs }, + { 0x44df2920, am4_l3s_tsc_clkctrl_regs }, + { 0x44df2a28, am4_l4_wkup_aon_clkctrl_regs }, + { 0x44df2a20, am4_l4_wkup_clkctrl_regs }, { 0x44df8320, am4_mpu_clkctrl_regs }, { 0x44df8420, am4_gfx_l3_clkctrl_regs }, - { 0x44df8820, am4_l4_per_clkctrl_regs }, + { 0x44df8820, am4_l3_clkctrl_regs }, + { 0x44df8868, am4_l3s_clkctrl_regs }, + { 0x44df8b20, am4_pruss_ocp_clkctrl_regs }, + { 0x44df8c20, am4_l4ls_clkctrl_regs }, + { 0x44df8f20, am4_emif_clkctrl_regs }, + { 0x44df9220, am4_dss_clkctrl_regs }, + { 0x44df9320, am4_cpsw_125mhz_clkctrl_regs }, { 0 }, }; static struct ti_dt_clk am43xx_clks[] = { DT_CLK(NULL, "timer_32k_ck", "clkdiv32k_ick"), DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"), - DT_CLK(NULL, "gpio0_dbclk", "l4_wkup_cm:0348:8"), - DT_CLK(NULL, "gpio1_dbclk", "l4_per_cm:0458:8"), - DT_CLK(NULL, "gpio2_dbclk", 
"l4_per_cm:0460:8"), - DT_CLK(NULL, "gpio3_dbclk", "l4_per_cm:0468:8"), - DT_CLK(NULL, "gpio4_dbclk", "l4_per_cm:0470:8"), - DT_CLK(NULL, "gpio5_dbclk", "l4_per_cm:0478:8"), - DT_CLK(NULL, "synctimer_32kclk", "l4_wkup_cm:0210:8"), - DT_CLK(NULL, "usb_otg_ss0_refclk960m", "l4_per_cm:0240:8"), - DT_CLK(NULL, "usb_otg_ss1_refclk960m", "l4_per_cm:0248:8"), + DT_CLK(NULL, "gpio0_dbclk", "l4-wkup-clkctrl:0148:8"), + DT_CLK(NULL, "gpio1_dbclk", "l4ls-clkctrl:0058:8"), + DT_CLK(NULL, "gpio2_dbclk", "l4ls-clkctrl:0060:8"), + DT_CLK(NULL, "gpio3_dbclk", "l4ls-clkctrl:0068:8"), + DT_CLK(NULL, "gpio4_dbclk", "l4ls-clkctrl:0070:8"), + DT_CLK(NULL, "gpio5_dbclk", "l4ls-clkctrl:0078:8"), + DT_CLK(NULL, "synctimer_32kclk", "l4-wkup-aon-clkctrl:0008:8"), + DT_CLK(NULL, "usb_otg_ss0_refclk960m", "l3s-clkctrl:01f8:8"), + DT_CLK(NULL, "usb_otg_ss1_refclk960m", "l3s-clkctrl:0200:8"), { .node_name = NULL }, }; @@ -228,7 +276,10 @@ int __init am43xx_dt_clk_init(void) { struct clk *clk1, *clk2; - ti_dt_clocks_register(am43xx_clks); + if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) + ti_dt_clocks_register(am43xx_compat_clks); + else + ti_dt_clocks_register(am43xx_clks); omap2_clk_disable_autoidle_all(); diff --git a/drivers/clk/ti/clk-7xx-compat.c b/drivers/clk/ti/clk-7xx-compat.c new file mode 100644 index 000000000000..e3cb7f0b03ae --- /dev/null +++ b/drivers/clk/ti/clk-7xx-compat.c @@ -0,0 +1,823 @@ +/* + * DRA7 Clock init + * + * Copyright (C) 2013 Texas Instruments, Inc. + * + * Tero Kristo (t-kristo@ti.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/clk.h> +#include <linux/clkdev.h> +#include <linux/clk/ti.h> +#include <dt-bindings/clock/dra7.h> + +#include "clock.h" + +#define DRA7_DPLL_GMAC_DEFFREQ 1000000000 +#define DRA7_DPLL_USB_DEFFREQ 960000000 + +static const struct omap_clkctrl_reg_data dra7_mpu_clkctrl_regs[] __initconst = { + { DRA7_MPU_CLKCTRL, NULL, 0, "dpll_mpu_m2_ck" }, + { 0 }, +}; + +static const char * const dra7_mcasp1_aux_gfclk_mux_parents[] __initconst = { + "per_abe_x1_gfclk2_div", + "video1_clk2_div", + "video2_clk2_div", + "hdmi_clk2_div", + NULL, +}; + +static const char * const dra7_mcasp1_ahclkx_mux_parents[] __initconst = { + "abe_24m_fclk", + "abe_sys_clk_div", + "func_24m_clk", + "atl_clkin3_ck", + "atl_clkin2_ck", + "atl_clkin1_ck", + "atl_clkin0_ck", + "sys_clkin2", + "ref_clkin0_ck", + "ref_clkin1_ck", + "ref_clkin2_ck", + "ref_clkin3_ck", + "mlb_clk", + "mlbp_clk", + NULL, +}; + +static const struct omap_clkctrl_bit_data dra7_mcasp1_bit_data[] __initconst = { + { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL }, + { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL }, + { 28, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL }, + { 0 }, +}; + +static const char * const dra7_timer5_gfclk_mux_parents[] __initconst = { + "timer_sys_clk_div", + "sys_32k_ck", + "sys_clkin2", + "ref_clkin0_ck", + "ref_clkin1_ck", + "ref_clkin2_ck", + "ref_clkin3_ck", + "abe_giclk_div", + "video1_div_clk", + "video2_div_clk", + "hdmi_div_clk", + "clkoutmux0_clk_mux", + NULL, +}; + +static const struct omap_clkctrl_bit_data dra7_timer5_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_timer5_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_timer6_bit_data[] __initconst = { + { 24, TI_CLK_MUX, 
dra7_timer5_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_timer7_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_timer5_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_timer8_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_timer5_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const char * const dra7_uart6_gfclk_mux_parents[] __initconst = { + "func_48m_fclk", + "dpll_per_m2x2_ck", + NULL, +}; + +static const struct omap_clkctrl_bit_data dra7_uart6_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_ipu_clkctrl_regs[] __initconst = { + { DRA7_MCASP1_CLKCTRL, dra7_mcasp1_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0010:22" }, + { DRA7_TIMER5_CLKCTRL, dra7_timer5_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0018:24" }, + { DRA7_TIMER6_CLKCTRL, dra7_timer6_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0020:24" }, + { DRA7_TIMER7_CLKCTRL, dra7_timer7_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0028:24" }, + { DRA7_TIMER8_CLKCTRL, dra7_timer8_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0030:24" }, + { DRA7_I2C5_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" }, + { DRA7_UART6_CLKCTRL, dra7_uart6_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0040:24" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_rtc_clkctrl_regs[] __initconst = { + { DRA7_RTCSS_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_coreaon_clkctrl_regs[] __initconst = { + { DRA7_SMARTREFLEX_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" }, + { DRA7_SMARTREFLEX_CORE_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_l3main1_clkctrl_regs[] __initconst = { + { DRA7_L3_MAIN_1_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_GPMC_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_TPCC_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_TPTC0_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_TPTC1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_VCP1_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_VCP2_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_dma_clkctrl_regs[] __initconst = { + { DRA7_DMA_SYSTEM_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_emif_clkctrl_regs[] __initconst = { + { DRA7_DMM_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { 0 }, +}; + +static const char * const dra7_atl_dpll_clk_mux_parents[] __initconst = { + "sys_32k_ck", + "video1_clkin_ck", + "video2_clkin_ck", + "hdmi_clkin_ck", + NULL, +}; + +static const char * const dra7_atl_gfclk_mux_parents[] __initconst = { + "l3_iclk_div", + "dpll_abe_m2_ck", + "atl_cm:clk:0000:24", + NULL, +}; + +static const struct omap_clkctrl_bit_data dra7_atl_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_atl_dpll_clk_mux_parents, NULL }, + { 26, TI_CLK_MUX, dra7_atl_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_atl_clkctrl_regs[] __initconst = { + { DRA7_ATL_CLKCTRL, dra7_atl_bit_data, CLKF_SW_SUP, "atl_cm:clk:0000:26" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_l4cfg_clkctrl_regs[] __initconst = { + { DRA7_L4_CFG_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_SPINLOCK_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_MAILBOX1_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_MAILBOX2_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_MAILBOX3_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { 
DRA7_MAILBOX4_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_MAILBOX5_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_MAILBOX6_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_MAILBOX7_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_MAILBOX8_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_MAILBOX9_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_MAILBOX10_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_MAILBOX11_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_MAILBOX12_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_MAILBOX13_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_l3instr_clkctrl_regs[] __initconst = { + { DRA7_L3_MAIN_2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_L3_INSTR_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, + { 0 }, +}; + +static const char * const dra7_dss_dss_clk_parents[] __initconst = { + "dpll_per_h12x2_ck", + NULL, +}; + +static const char * const dra7_dss_48mhz_clk_parents[] __initconst = { + "func_48m_fclk", + NULL, +}; + +static const char * const dra7_dss_hdmi_clk_parents[] __initconst = { + "hdmi_dpll_clk_mux", + NULL, +}; + +static const char * const dra7_dss_32khz_clk_parents[] __initconst = { + "sys_32k_ck", + NULL, +}; + +static const char * const dra7_dss_video1_clk_parents[] __initconst = { + "video1_dpll_clk_mux", + NULL, +}; + +static const char * const dra7_dss_video2_clk_parents[] __initconst = { + "video2_dpll_clk_mux", + NULL, +}; + +static const struct omap_clkctrl_bit_data dra7_dss_core_bit_data[] __initconst = { + { 8, TI_CLK_GATE, dra7_dss_dss_clk_parents, NULL }, + { 9, TI_CLK_GATE, dra7_dss_48mhz_clk_parents, NULL }, + { 10, TI_CLK_GATE, dra7_dss_hdmi_clk_parents, NULL }, + { 11, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL }, + { 12, TI_CLK_GATE, dra7_dss_video1_clk_parents, NULL }, + { 13, TI_CLK_GATE, dra7_dss_video2_clk_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_dss_clkctrl_regs[] __initconst = { + { DRA7_DSS_CORE_CLKCTRL, dra7_dss_core_bit_data, CLKF_SW_SUP, "dss_cm:clk:0000:8" }, + { DRA7_BB2D_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_h24x2_ck" }, + { 0 }, +}; + +static const char * const dra7_mmc1_fclk_mux_parents[] __initconst = { + "func_128m_clk", + "dpll_per_m2x2_ck", + NULL, +}; + +static const char * const dra7_mmc1_fclk_div_parents[] __initconst = { + "l3init_cm:clk:0008:24", + NULL, +}; + +static const struct omap_clkctrl_div_data dra7_mmc1_fclk_div_data __initconst = { + .max_div = 4, + .flags = CLK_DIVIDER_POWER_OF_TWO, +}; + +static const struct omap_clkctrl_bit_data dra7_mmc1_bit_data[] __initconst = { + { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL }, + { 24, TI_CLK_MUX, dra7_mmc1_fclk_mux_parents, NULL }, + { 25, TI_CLK_DIVIDER, dra7_mmc1_fclk_div_parents, &dra7_mmc1_fclk_div_data }, + { 0 }, +}; + +static const char * const dra7_mmc2_fclk_div_parents[] __initconst = { + "l3init_cm:clk:0010:24", + NULL, +}; + +static const struct omap_clkctrl_div_data dra7_mmc2_fclk_div_data __initconst = { + .max_div = 4, + .flags = CLK_DIVIDER_POWER_OF_TWO, +}; + +static const struct omap_clkctrl_bit_data dra7_mmc2_bit_data[] __initconst = { + { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL }, + { 24, TI_CLK_MUX, dra7_mmc1_fclk_mux_parents, NULL }, + { 25, TI_CLK_DIVIDER, dra7_mmc2_fclk_div_parents, &dra7_mmc2_fclk_div_data }, + { 0 }, +}; + +static const char * const dra7_usb_otg_ss2_refclk960m_parents[] __initconst = { + "l3init_960m_gfclk", + NULL, +}; + +static const struct omap_clkctrl_bit_data dra7_usb_otg_ss2_bit_data[] __initconst = { + { 8, TI_CLK_GATE, 
dra7_usb_otg_ss2_refclk960m_parents, NULL }, + { 0 }, +}; + +static const char * const dra7_sata_ref_clk_parents[] __initconst = { + "sys_clkin1", + NULL, +}; + +static const struct omap_clkctrl_bit_data dra7_sata_bit_data[] __initconst = { + { 8, TI_CLK_GATE, dra7_sata_ref_clk_parents, NULL }, + { 0 }, +}; + +static const char * const dra7_optfclk_pciephy1_clk_parents[] __initconst = { + "apll_pcie_ck", + NULL, +}; + +static const char * const dra7_optfclk_pciephy1_div_clk_parents[] __initconst = { + "optfclk_pciephy_div", + NULL, +}; + +static const struct omap_clkctrl_bit_data dra7_pcie1_bit_data[] __initconst = { + { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL }, + { 9, TI_CLK_GATE, dra7_optfclk_pciephy1_clk_parents, NULL }, + { 10, TI_CLK_GATE, dra7_optfclk_pciephy1_div_clk_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_pcie2_bit_data[] __initconst = { + { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL }, + { 9, TI_CLK_GATE, dra7_optfclk_pciephy1_clk_parents, NULL }, + { 10, TI_CLK_GATE, dra7_optfclk_pciephy1_div_clk_parents, NULL }, + { 0 }, +}; + +static const char * const dra7_rmii_50mhz_clk_mux_parents[] __initconst = { + "dpll_gmac_h11x2_ck", + "rmii_clk_ck", + NULL, +}; + +static const char * const dra7_gmac_rft_clk_mux_parents[] __initconst = { + "video1_clkin_ck", + "video2_clkin_ck", + "dpll_abe_m2_ck", + "hdmi_clkin_ck", + "l3_iclk_div", + NULL, +}; + +static const struct omap_clkctrl_bit_data dra7_gmac_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_rmii_50mhz_clk_mux_parents, NULL }, + { 25, TI_CLK_MUX, dra7_gmac_rft_clk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_usb_otg_ss1_bit_data[] __initconst = { + { 8, TI_CLK_GATE, dra7_usb_otg_ss2_refclk960m_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_l3init_clkctrl_regs[] __initconst = { + { DRA7_MMC1_CLKCTRL, dra7_mmc1_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0008:25" }, + { DRA7_MMC2_CLKCTRL, dra7_mmc2_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0010:25" }, + { DRA7_USB_OTG_SS2_CLKCTRL, dra7_usb_otg_ss2_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" }, + { DRA7_USB_OTG_SS3_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" }, + { DRA7_USB_OTG_SS4_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" }, + { DRA7_SATA_CLKCTRL, dra7_sata_bit_data, CLKF_SW_SUP, "func_48m_fclk" }, + { DRA7_PCIE1_CLKCTRL, dra7_pcie1_bit_data, CLKF_SW_SUP, "l4_root_clk_div", "pcie_clkdm" }, + { DRA7_PCIE2_CLKCTRL, dra7_pcie2_bit_data, CLKF_SW_SUP, "l4_root_clk_div", "pcie_clkdm" }, + { DRA7_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "dpll_gmac_ck", "gmac_clkdm" }, + { DRA7_OCP2SCP1_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" }, + { DRA7_OCP2SCP3_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" }, + { DRA7_USB_OTG_SS1_CLKCTRL, dra7_usb_otg_ss1_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" }, + { 0 }, +}; + +static const char * const dra7_timer10_gfclk_mux_parents[] __initconst = { + "timer_sys_clk_div", + "sys_32k_ck", + "sys_clkin2", + "ref_clkin0_ck", + "ref_clkin1_ck", + "ref_clkin2_ck", + "ref_clkin3_ck", + "abe_giclk_div", + "video1_div_clk", + "video2_div_clk", + "hdmi_div_clk", + NULL, +}; + +static const struct omap_clkctrl_bit_data dra7_timer10_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_timer11_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data 
dra7_timer2_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_timer3_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_timer4_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_timer9_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_gpio2_bit_data[] __initconst = { + { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_gpio3_bit_data[] __initconst = { + { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_gpio4_bit_data[] __initconst = { + { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_gpio5_bit_data[] __initconst = { + { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_gpio6_bit_data[] __initconst = { + { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_timer13_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_timer14_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_timer15_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_gpio7_bit_data[] __initconst = { + { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_gpio8_bit_data[] __initconst = { + { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL }, + { 0 }, +}; + +static const char * const dra7_mmc3_gfclk_div_parents[] __initconst = { + "l4per_cm:clk:0120:24", + NULL, +}; + +static const struct omap_clkctrl_div_data dra7_mmc3_gfclk_div_data __initconst = { + .max_div = 4, + .flags = CLK_DIVIDER_POWER_OF_TWO, +}; + +static const struct omap_clkctrl_bit_data dra7_mmc3_bit_data[] __initconst = { + { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL }, + { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, + { 25, TI_CLK_DIVIDER, dra7_mmc3_gfclk_div_parents, &dra7_mmc3_gfclk_div_data }, + { 0 }, +}; + +static const char * const dra7_mmc4_gfclk_div_parents[] __initconst = { + "l4per_cm:clk:0128:24", + NULL, +}; + +static const struct omap_clkctrl_div_data dra7_mmc4_gfclk_div_data __initconst = { + .max_div = 4, + .flags = CLK_DIVIDER_POWER_OF_TWO, +}; + +static const struct omap_clkctrl_bit_data dra7_mmc4_bit_data[] __initconst = { + { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL }, + { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, + { 25, TI_CLK_DIVIDER, dra7_mmc4_gfclk_div_parents, &dra7_mmc4_gfclk_div_data }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_timer16_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const char * const dra7_qspi_gfclk_mux_parents[] __initconst = { + "func_128m_clk", + "dpll_per_h13x2_ck", + NULL, +}; + +static const char * const 
dra7_qspi_gfclk_div_parents[] __initconst = { + "l4per_cm:clk:0138:24", + NULL, +}; + +static const struct omap_clkctrl_div_data dra7_qspi_gfclk_div_data __initconst = { + .max_div = 4, + .flags = CLK_DIVIDER_POWER_OF_TWO, +}; + +static const struct omap_clkctrl_bit_data dra7_qspi_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_qspi_gfclk_mux_parents, NULL }, + { 25, TI_CLK_DIVIDER, dra7_qspi_gfclk_div_parents, &dra7_qspi_gfclk_div_data }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_uart1_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_uart2_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_uart3_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_uart4_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_mcasp2_bit_data[] __initconst = { + { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL }, + { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL }, + { 28, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_mcasp3_bit_data[] __initconst = { + { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL }, + { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_uart5_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_mcasp5_bit_data[] __initconst = { + { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL }, + { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_mcasp8_bit_data[] __initconst = { + { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL }, + { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_mcasp4_bit_data[] __initconst = { + { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL }, + { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_uart7_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_uart8_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_uart9_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_mcasp6_bit_data[] __initconst = { + { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL }, + { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_mcasp7_bit_data[] __initconst = { + { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL }, + { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_l4per_clkctrl_regs[] __initconst = { + { DRA7_L4_PER2_CLKCTRL, NULL, 0, "l3_iclk_div", "l4per2_clkdm" }, + { DRA7_L4_PER3_CLKCTRL, NULL, 0, "l3_iclk_div", "l4per3_clkdm" }, + { DRA7_TIMER10_CLKCTRL, 
dra7_timer10_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0028:24" }, + { DRA7_TIMER11_CLKCTRL, dra7_timer11_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0030:24" }, + { DRA7_TIMER2_CLKCTRL, dra7_timer2_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0038:24" }, + { DRA7_TIMER3_CLKCTRL, dra7_timer3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0040:24" }, + { DRA7_TIMER4_CLKCTRL, dra7_timer4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0048:24" }, + { DRA7_TIMER9_CLKCTRL, dra7_timer9_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0050:24" }, + { DRA7_ELM_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_GPIO2_CLKCTRL, dra7_gpio2_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_GPIO3_CLKCTRL, dra7_gpio3_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_GPIO4_CLKCTRL, dra7_gpio4_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_GPIO5_CLKCTRL, dra7_gpio5_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_GPIO6_CLKCTRL, dra7_gpio6_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_HDQ1W_CLKCTRL, NULL, CLKF_SW_SUP, "func_12m_fclk" }, + { DRA7_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div", "l4per2_clkdm" }, + { DRA7_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div", "l4per2_clkdm" }, + { DRA7_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" }, + { DRA7_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" }, + { DRA7_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" }, + { DRA7_I2C4_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" }, + { DRA7_L4_PER1_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div", "l4per2_clkdm" }, + { DRA7_TIMER13_CLKCTRL, dra7_timer13_bit_data, CLKF_SW_SUP, "l4per_cm:clk:00c8:24", "l4per3_clkdm" }, + { DRA7_TIMER14_CLKCTRL, dra7_timer14_bit_data, CLKF_SW_SUP, "l4per_cm:clk:00d0:24", "l4per3_clkdm" }, + { DRA7_TIMER15_CLKCTRL, dra7_timer15_bit_data, CLKF_SW_SUP, "l4per_cm:clk:00d8:24", "l4per3_clkdm" }, + { DRA7_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, + { DRA7_MCSPI2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, + { DRA7_MCSPI3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, + { DRA7_MCSPI4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, + { DRA7_GPIO7_CLKCTRL, dra7_gpio7_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_GPIO8_CLKCTRL, dra7_gpio8_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_MMC3_CLKCTRL, dra7_mmc3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0120:25" }, + { DRA7_MMC4_CLKCTRL, dra7_mmc4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0128:25" }, + { DRA7_TIMER16_CLKCTRL, dra7_timer16_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0130:24", "l4per3_clkdm" }, + { DRA7_QSPI_CLKCTRL, dra7_qspi_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0138:25", "l4per2_clkdm" }, + { DRA7_UART1_CLKCTRL, dra7_uart1_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0140:24" }, + { DRA7_UART2_CLKCTRL, dra7_uart2_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0148:24" }, + { DRA7_UART3_CLKCTRL, dra7_uart3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0150:24" }, + { DRA7_UART4_CLKCTRL, dra7_uart4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0158:24" }, + { DRA7_MCASP2_CLKCTRL, dra7_mcasp2_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0160:22", "l4per2_clkdm" }, + { DRA7_MCASP3_CLKCTRL, dra7_mcasp3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0168:22", "l4per2_clkdm" }, + { DRA7_UART5_CLKCTRL, dra7_uart5_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0170:24" }, + { DRA7_MCASP5_CLKCTRL, dra7_mcasp5_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0178:22", "l4per2_clkdm" }, + { DRA7_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0190:24", "l4per2_clkdm" }, + { DRA7_MCASP4_CLKCTRL, dra7_mcasp4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0198:22", "l4per2_clkdm" }, + { DRA7_AES1_CLKCTRL, 
NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" }, + { DRA7_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" }, + { DRA7_DES_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" }, + { DRA7_RNG_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" }, + { DRA7_SHAM_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" }, + { DRA7_UART7_CLKCTRL, dra7_uart7_bit_data, CLKF_SW_SUP, "l4per_cm:clk:01d0:24", "l4per2_clkdm" }, + { DRA7_UART8_CLKCTRL, dra7_uart8_bit_data, CLKF_SW_SUP, "l4per_cm:clk:01e0:24", "l4per2_clkdm" }, + { DRA7_UART9_CLKCTRL, dra7_uart9_bit_data, CLKF_SW_SUP, "l4per_cm:clk:01e8:24", "l4per2_clkdm" }, + { DRA7_DCAN2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin1", "l4per2_clkdm" }, + { DRA7_MCASP6_CLKCTRL, dra7_mcasp6_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0204:22", "l4per2_clkdm" }, + { DRA7_MCASP7_CLKCTRL, dra7_mcasp7_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0208:22", "l4per2_clkdm" }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_gpio1_bit_data[] __initconst = { + { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_timer1_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_uart10_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const char * const dra7_dcan1_sys_clk_mux_parents[] __initconst = { + "sys_clkin1", + "sys_clkin2", + NULL, +}; + +static const struct omap_clkctrl_bit_data dra7_dcan1_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_dcan1_sys_clk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_wkupaon_clkctrl_regs[] __initconst = { + { DRA7_L4_WKUP_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" }, + { DRA7_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" }, + { DRA7_GPIO1_CLKCTRL, dra7_gpio1_bit_data, CLKF_HW_SUP, "wkupaon_iclk_mux" }, + { DRA7_TIMER1_CLKCTRL, dra7_timer1_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0020:24" }, + { DRA7_TIMER12_CLKCTRL, NULL, 0, "secure_32k_clk_src_ck" }, + { DRA7_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" }, + { DRA7_UART10_CLKCTRL, dra7_uart10_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0060:24" }, + { DRA7_DCAN1_CLKCTRL, dra7_dcan1_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0068:24" }, + { DRA7_ADC_CLKCTRL, NULL, CLKF_SW_SUP, "mcan_clk"}, + { 0 }, +}; + +const struct omap_clkctrl_data dra7_clkctrl_compat_data[] __initconst = { + { 0x4a005320, dra7_mpu_clkctrl_regs }, + { 0x4a005540, dra7_ipu_clkctrl_regs }, + { 0x4a005740, dra7_rtc_clkctrl_regs }, + { 0x4a008620, dra7_coreaon_clkctrl_regs }, + { 0x4a008720, dra7_l3main1_clkctrl_regs }, + { 0x4a008a20, dra7_dma_clkctrl_regs }, + { 0x4a008b20, dra7_emif_clkctrl_regs }, + { 0x4a008c00, dra7_atl_clkctrl_regs }, + { 0x4a008d20, dra7_l4cfg_clkctrl_regs }, + { 0x4a008e20, dra7_l3instr_clkctrl_regs }, + { 0x4a009120, dra7_dss_clkctrl_regs }, + { 0x4a009320, dra7_l3init_clkctrl_regs }, + { 0x4a009700, dra7_l4per_clkctrl_regs }, + { 0x4ae07820, dra7_wkupaon_clkctrl_regs }, + { 0 }, +}; + +struct ti_dt_clk dra7xx_compat_clks[] = { + DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"), + DT_CLK(NULL, "sys_clkin_ck", "timer_sys_clk_div"), + DT_CLK(NULL, "sys_clkin", "sys_clkin1"), + DT_CLK(NULL, "atl_dpll_clk_mux", "atl_cm:0000:24"), + DT_CLK(NULL, "atl_gfclk_mux", "atl_cm:0000:26"), + DT_CLK(NULL, "dcan1_sys_clk_mux", "wkupaon_cm:0068:24"), + DT_CLK(NULL, "dss_32khz_clk", "dss_cm:0000:11"), + DT_CLK(NULL, "dss_48mhz_clk", 
"dss_cm:0000:9"), + DT_CLK(NULL, "dss_dss_clk", "dss_cm:0000:8"), + DT_CLK(NULL, "dss_hdmi_clk", "dss_cm:0000:10"), + DT_CLK(NULL, "dss_video1_clk", "dss_cm:0000:12"), + DT_CLK(NULL, "dss_video2_clk", "dss_cm:0000:13"), + DT_CLK(NULL, "gmac_rft_clk_mux", "l3init_cm:00b0:25"), + DT_CLK(NULL, "gpio1_dbclk", "wkupaon_cm:0018:8"), + DT_CLK(NULL, "gpio2_dbclk", "l4per_cm:0060:8"), + DT_CLK(NULL, "gpio3_dbclk", "l4per_cm:0068:8"), + DT_CLK(NULL, "gpio4_dbclk", "l4per_cm:0070:8"), + DT_CLK(NULL, "gpio5_dbclk", "l4per_cm:0078:8"), + DT_CLK(NULL, "gpio6_dbclk", "l4per_cm:0080:8"), + DT_CLK(NULL, "gpio7_dbclk", "l4per_cm:0110:8"), + DT_CLK(NULL, "gpio8_dbclk", "l4per_cm:0118:8"), + DT_CLK(NULL, "mcasp1_ahclkr_mux", "ipu_cm:0010:28"), + DT_CLK(NULL, "mcasp1_ahclkx_mux", "ipu_cm:0010:24"), + DT_CLK(NULL, "mcasp1_aux_gfclk_mux", "ipu_cm:0010:22"), + DT_CLK(NULL, "mcasp2_ahclkr_mux", "l4per_cm:0160:28"), + DT_CLK(NULL, "mcasp2_ahclkx_mux", "l4per_cm:0160:24"), + DT_CLK(NULL, "mcasp2_aux_gfclk_mux", "l4per_cm:0160:22"), + DT_CLK(NULL, "mcasp3_ahclkx_mux", "l4per_cm:0168:24"), + DT_CLK(NULL, "mcasp3_aux_gfclk_mux", "l4per_cm:0168:22"), + DT_CLK(NULL, "mcasp4_ahclkx_mux", "l4per_cm:0198:24"), + DT_CLK(NULL, "mcasp4_aux_gfclk_mux", "l4per_cm:0198:22"), + DT_CLK(NULL, "mcasp5_ahclkx_mux", "l4per_cm:0178:24"), + DT_CLK(NULL, "mcasp5_aux_gfclk_mux", "l4per_cm:0178:22"), + DT_CLK(NULL, "mcasp6_ahclkx_mux", "l4per_cm:0204:24"), + DT_CLK(NULL, "mcasp6_aux_gfclk_mux", "l4per_cm:0204:22"), + DT_CLK(NULL, "mcasp7_ahclkx_mux", "l4per_cm:0208:24"), + DT_CLK(NULL, "mcasp7_aux_gfclk_mux", "l4per_cm:0208:22"), + DT_CLK(NULL, "mcasp8_ahclkx_mux", "l4per_cm:0190:22"), + DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per_cm:0190:24"), + DT_CLK(NULL, "mmc1_clk32k", "l3init_cm:0008:8"), + DT_CLK(NULL, "mmc1_fclk_div", "l3init_cm:0008:25"), + DT_CLK(NULL, "mmc1_fclk_mux", "l3init_cm:0008:24"), + DT_CLK(NULL, "mmc2_clk32k", "l3init_cm:0010:8"), + DT_CLK(NULL, "mmc2_fclk_div", "l3init_cm:0010:25"), + DT_CLK(NULL, "mmc2_fclk_mux", "l3init_cm:0010:24"), + DT_CLK(NULL, "mmc3_clk32k", "l4per_cm:0120:8"), + DT_CLK(NULL, "mmc3_gfclk_div", "l4per_cm:0120:25"), + DT_CLK(NULL, "mmc3_gfclk_mux", "l4per_cm:0120:24"), + DT_CLK(NULL, "mmc4_clk32k", "l4per_cm:0128:8"), + DT_CLK(NULL, "mmc4_gfclk_div", "l4per_cm:0128:25"), + DT_CLK(NULL, "mmc4_gfclk_mux", "l4per_cm:0128:24"), + DT_CLK(NULL, "optfclk_pciephy1_32khz", "l3init_cm:0090:8"), + DT_CLK(NULL, "optfclk_pciephy1_clk", "l3init_cm:0090:9"), + DT_CLK(NULL, "optfclk_pciephy1_div_clk", "l3init_cm:0090:10"), + DT_CLK(NULL, "optfclk_pciephy2_32khz", "l3init_cm:0098:8"), + DT_CLK(NULL, "optfclk_pciephy2_clk", "l3init_cm:0098:9"), + DT_CLK(NULL, "optfclk_pciephy2_div_clk", "l3init_cm:0098:10"), + DT_CLK(NULL, "qspi_gfclk_div", "l4per_cm:0138:25"), + DT_CLK(NULL, "qspi_gfclk_mux", "l4per_cm:0138:24"), + DT_CLK(NULL, "rmii_50mhz_clk_mux", "l3init_cm:00b0:24"), + DT_CLK(NULL, "sata_ref_clk", "l3init_cm:0068:8"), + DT_CLK(NULL, "timer10_gfclk_mux", "l4per_cm:0028:24"), + DT_CLK(NULL, "timer11_gfclk_mux", "l4per_cm:0030:24"), + DT_CLK(NULL, "timer13_gfclk_mux", "l4per_cm:00c8:24"), + DT_CLK(NULL, "timer14_gfclk_mux", "l4per_cm:00d0:24"), + DT_CLK(NULL, "timer15_gfclk_mux", "l4per_cm:00d8:24"), + DT_CLK(NULL, "timer16_gfclk_mux", "l4per_cm:0130:24"), + DT_CLK(NULL, "timer1_gfclk_mux", "wkupaon_cm:0020:24"), + DT_CLK(NULL, "timer2_gfclk_mux", "l4per_cm:0038:24"), + DT_CLK(NULL, "timer3_gfclk_mux", "l4per_cm:0040:24"), + DT_CLK(NULL, "timer4_gfclk_mux", "l4per_cm:0048:24"), + DT_CLK(NULL, "timer5_gfclk_mux", 
"ipu_cm:0018:24"), + DT_CLK(NULL, "timer6_gfclk_mux", "ipu_cm:0020:24"), + DT_CLK(NULL, "timer7_gfclk_mux", "ipu_cm:0028:24"), + DT_CLK(NULL, "timer8_gfclk_mux", "ipu_cm:0030:24"), + DT_CLK(NULL, "timer9_gfclk_mux", "l4per_cm:0050:24"), + DT_CLK(NULL, "uart10_gfclk_mux", "wkupaon_cm:0060:24"), + DT_CLK(NULL, "uart1_gfclk_mux", "l4per_cm:0140:24"), + DT_CLK(NULL, "uart2_gfclk_mux", "l4per_cm:0148:24"), + DT_CLK(NULL, "uart3_gfclk_mux", "l4per_cm:0150:24"), + DT_CLK(NULL, "uart4_gfclk_mux", "l4per_cm:0158:24"), + DT_CLK(NULL, "uart5_gfclk_mux", "l4per_cm:0170:24"), + DT_CLK(NULL, "uart6_gfclk_mux", "ipu_cm:0040:24"), + DT_CLK(NULL, "uart7_gfclk_mux", "l4per_cm:01d0:24"), + DT_CLK(NULL, "uart8_gfclk_mux", "l4per_cm:01e0:24"), + DT_CLK(NULL, "uart9_gfclk_mux", "l4per_cm:01e8:24"), + DT_CLK(NULL, "usb_otg_ss1_refclk960m", "l3init_cm:00d0:8"), + DT_CLK(NULL, "usb_otg_ss2_refclk960m", "l3init_cm:0020:8"), + { .node_name = NULL }, +}; diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c index 71a122b2dc67..597fb4a59318 100644 --- a/drivers/clk/ti/clk-7xx.c +++ b/drivers/clk/ti/clk-7xx.c @@ -23,7 +23,28 @@ #define DRA7_DPLL_USB_DEFFREQ 960000000 static const struct omap_clkctrl_reg_data dra7_mpu_clkctrl_regs[] __initconst = { - { DRA7_MPU_CLKCTRL, NULL, 0, "dpll_mpu_m2_ck" }, + { DRA7_MPU_MPU_CLKCTRL, NULL, 0, "dpll_mpu_m2_ck" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_dsp1_clkctrl_regs[] __initconst = { + { DRA7_DSP1_MMU0_DSP1_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_dsp_m2_ck" }, + { 0 }, +}; + +static const char * const dra7_ipu1_gfclk_mux_parents[] __initconst = { + "dpll_abe_m2x2_ck", + "dpll_core_h22x2_ck", + NULL, +}; + +static const struct omap_clkctrl_bit_data dra7_mmu_ipu1_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_ipu1_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_ipu1_clkctrl_regs[] __initconst = { + { DRA7_IPU1_MMU_IPU1_CLKCTRL, dra7_mmu_ipu1_bit_data, CLKF_HW_SUP, "ipu1-clkctrl:0000:24" }, { 0 }, }; @@ -108,45 +129,55 @@ static const struct omap_clkctrl_bit_data dra7_uart6_bit_data[] __initconst = { }; static const struct omap_clkctrl_reg_data dra7_ipu_clkctrl_regs[] __initconst = { - { DRA7_MCASP1_CLKCTRL, dra7_mcasp1_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0010:22" }, - { DRA7_TIMER5_CLKCTRL, dra7_timer5_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0018:24" }, - { DRA7_TIMER6_CLKCTRL, dra7_timer6_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0020:24" }, - { DRA7_TIMER7_CLKCTRL, dra7_timer7_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0028:24" }, - { DRA7_TIMER8_CLKCTRL, dra7_timer8_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0030:24" }, - { DRA7_I2C5_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" }, - { DRA7_UART6_CLKCTRL, dra7_uart6_bit_data, CLKF_SW_SUP, "ipu_cm:clk:0040:24" }, + { DRA7_IPU_MCASP1_CLKCTRL, dra7_mcasp1_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0000:22" }, + { DRA7_IPU_TIMER5_CLKCTRL, dra7_timer5_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0008:24" }, + { DRA7_IPU_TIMER6_CLKCTRL, dra7_timer6_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0010:24" }, + { DRA7_IPU_TIMER7_CLKCTRL, dra7_timer7_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0018:24" }, + { DRA7_IPU_TIMER8_CLKCTRL, dra7_timer8_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0020:24" }, + { DRA7_IPU_I2C5_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" }, + { DRA7_IPU_UART6_CLKCTRL, dra7_uart6_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0030:24" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_dsp2_clkctrl_regs[] __initconst = { + { DRA7_DSP2_MMU0_DSP2_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_dsp_m2_ck" }, { 0 }, }; static 
const struct omap_clkctrl_reg_data dra7_rtc_clkctrl_regs[] __initconst = { - { DRA7_RTCSS_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" }, + { DRA7_RTC_RTCSS_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" }, { 0 }, }; static const struct omap_clkctrl_reg_data dra7_coreaon_clkctrl_regs[] __initconst = { - { DRA7_SMARTREFLEX_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" }, - { DRA7_SMARTREFLEX_CORE_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" }, + { DRA7_COREAON_SMARTREFLEX_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" }, + { DRA7_COREAON_SMARTREFLEX_CORE_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" }, { 0 }, }; static const struct omap_clkctrl_reg_data dra7_l3main1_clkctrl_regs[] __initconst = { - { DRA7_L3_MAIN_1_CLKCTRL, NULL, 0, "l3_iclk_div" }, - { DRA7_GPMC_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, - { DRA7_TPCC_CLKCTRL, NULL, 0, "l3_iclk_div" }, - { DRA7_TPTC0_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, - { DRA7_TPTC1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, - { DRA7_VCP1_CLKCTRL, NULL, 0, "l3_iclk_div" }, - { DRA7_VCP2_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L3MAIN1_L3_MAIN_1_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L3MAIN1_GPMC_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_L3MAIN1_TPCC_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L3MAIN1_TPTC0_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_L3MAIN1_TPTC1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_L3MAIN1_VCP1_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L3MAIN1_VCP2_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_ipu2_clkctrl_regs[] __initconst = { + { DRA7_IPU2_MMU_IPU2_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h22x2_ck" }, { 0 }, }; static const struct omap_clkctrl_reg_data dra7_dma_clkctrl_regs[] __initconst = { - { DRA7_DMA_SYSTEM_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_DMA_DMA_SYSTEM_CLKCTRL, NULL, 0, "l3_iclk_div" }, { 0 }, }; static const struct omap_clkctrl_reg_data dra7_emif_clkctrl_regs[] __initconst = { - { DRA7_DMM_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_EMIF_DMM_CLKCTRL, NULL, 0, "l3_iclk_div" }, { 0 }, }; @@ -161,7 +192,7 @@ static const char * const dra7_atl_dpll_clk_mux_parents[] __initconst = { static const char * const dra7_atl_gfclk_mux_parents[] __initconst = { "l3_iclk_div", "dpll_abe_m2_ck", - "atl_cm:clk:0000:24", + "atl-clkctrl:0000:24", NULL, }; @@ -172,32 +203,32 @@ static const struct omap_clkctrl_bit_data dra7_atl_bit_data[] __initconst = { }; static const struct omap_clkctrl_reg_data dra7_atl_clkctrl_regs[] __initconst = { - { DRA7_ATL_CLKCTRL, dra7_atl_bit_data, CLKF_SW_SUP, "atl_cm:clk:0000:26" }, + { DRA7_ATL_ATL_CLKCTRL, dra7_atl_bit_data, CLKF_SW_SUP, "atl-clkctrl:0000:26" }, { 0 }, }; static const struct omap_clkctrl_reg_data dra7_l4cfg_clkctrl_regs[] __initconst = { - { DRA7_L4_CFG_CLKCTRL, NULL, 0, "l3_iclk_div" }, - { DRA7_SPINLOCK_CLKCTRL, NULL, 0, "l3_iclk_div" }, - { DRA7_MAILBOX1_CLKCTRL, NULL, 0, "l3_iclk_div" }, - { DRA7_MAILBOX2_CLKCTRL, NULL, 0, "l3_iclk_div" }, - { DRA7_MAILBOX3_CLKCTRL, NULL, 0, "l3_iclk_div" }, - { DRA7_MAILBOX4_CLKCTRL, NULL, 0, "l3_iclk_div" }, - { DRA7_MAILBOX5_CLKCTRL, NULL, 0, "l3_iclk_div" }, - { DRA7_MAILBOX6_CLKCTRL, NULL, 0, "l3_iclk_div" }, - { DRA7_MAILBOX7_CLKCTRL, NULL, 0, "l3_iclk_div" }, - { DRA7_MAILBOX8_CLKCTRL, NULL, 0, "l3_iclk_div" }, - { DRA7_MAILBOX9_CLKCTRL, NULL, 0, "l3_iclk_div" }, - { DRA7_MAILBOX10_CLKCTRL, NULL, 0, "l3_iclk_div" }, - { DRA7_MAILBOX11_CLKCTRL, NULL, 0, "l3_iclk_div" }, - { DRA7_MAILBOX12_CLKCTRL, NULL, 0, "l3_iclk_div" }, - { 
DRA7_MAILBOX13_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L4CFG_L4_CFG_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L4CFG_SPINLOCK_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L4CFG_MAILBOX1_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L4CFG_MAILBOX2_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L4CFG_MAILBOX3_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L4CFG_MAILBOX4_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L4CFG_MAILBOX5_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L4CFG_MAILBOX6_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L4CFG_MAILBOX7_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L4CFG_MAILBOX8_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L4CFG_MAILBOX9_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L4CFG_MAILBOX10_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L4CFG_MAILBOX11_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L4CFG_MAILBOX12_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L4CFG_MAILBOX13_CLKCTRL, NULL, 0, "l3_iclk_div" }, { 0 }, }; static const struct omap_clkctrl_reg_data dra7_l3instr_clkctrl_regs[] __initconst = { - { DRA7_L3_MAIN_2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, - { DRA7_L3_INSTR_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_L3INSTR_L3_MAIN_2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_L3INSTR_L3_INSTR_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, { 0 }, }; @@ -242,8 +273,8 @@ static const struct omap_clkctrl_bit_data dra7_dss_core_bit_data[] __initconst = }; static const struct omap_clkctrl_reg_data dra7_dss_clkctrl_regs[] __initconst = { - { DRA7_DSS_CORE_CLKCTRL, dra7_dss_core_bit_data, CLKF_SW_SUP, "dss_cm:clk:0000:8" }, - { DRA7_BB2D_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_h24x2_ck" }, + { DRA7_DSS_DSS_CORE_CLKCTRL, dra7_dss_core_bit_data, CLKF_SW_SUP, "dss-clkctrl:0000:8" }, + { DRA7_DSS_BB2D_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_h24x2_ck" }, { 0 }, }; @@ -254,7 +285,7 @@ static const char * const dra7_mmc1_fclk_mux_parents[] __initconst = { }; static const char * const dra7_mmc1_fclk_div_parents[] __initconst = { - "l3init_cm:clk:0008:24", + "l3init-clkctrl:0008:24", NULL, }; @@ -271,7 +302,7 @@ static const struct omap_clkctrl_bit_data dra7_mmc1_bit_data[] __initconst = { }; static const char * const dra7_mmc2_fclk_div_parents[] __initconst = { - "l3init_cm:clk:0010:24", + "l3init-clkctrl:0010:24", NULL, }; @@ -307,6 +338,24 @@ static const struct omap_clkctrl_bit_data dra7_sata_bit_data[] __initconst = { { 0 }, }; +static const struct omap_clkctrl_bit_data dra7_usb_otg_ss1_bit_data[] __initconst = { + { 8, TI_CLK_GATE, dra7_usb_otg_ss2_refclk960m_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_l3init_clkctrl_regs[] __initconst = { + { DRA7_L3INIT_MMC1_CLKCTRL, dra7_mmc1_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0008:25" }, + { DRA7_L3INIT_MMC2_CLKCTRL, dra7_mmc2_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0010:25" }, + { DRA7_L3INIT_USB_OTG_SS2_CLKCTRL, dra7_usb_otg_ss2_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" }, + { DRA7_L3INIT_USB_OTG_SS3_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" }, + { DRA7_L3INIT_USB_OTG_SS4_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" }, + { DRA7_L3INIT_SATA_CLKCTRL, dra7_sata_bit_data, CLKF_SW_SUP, "func_48m_fclk" }, + { DRA7_L3INIT_OCP2SCP1_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" }, + { DRA7_L3INIT_OCP2SCP3_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" }, + { DRA7_L3INIT_USB_OTG_SS1_CLKCTRL, dra7_usb_otg_ss1_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" }, + { 0 }, +}; + static const char * const dra7_optfclk_pciephy1_clk_parents[] __initconst = { "apll_pcie_ck", NULL, @@ -331,6 
+380,12 @@ static const struct omap_clkctrl_bit_data dra7_pcie2_bit_data[] __initconst = { { 0 }, }; +static const struct omap_clkctrl_reg_data dra7_pcie_clkctrl_regs[] __initconst = { + { DRA7_PCIE_PCIE1_CLKCTRL, dra7_pcie1_bit_data, CLKF_SW_SUP, "l4_root_clk_div" }, + { DRA7_PCIE_PCIE2_CLKCTRL, dra7_pcie2_bit_data, CLKF_SW_SUP, "l4_root_clk_div" }, + { 0 }, +}; + static const char * const dra7_rmii_50mhz_clk_mux_parents[] __initconst = { "dpll_gmac_h11x2_ck", "rmii_clk_ck", @@ -352,24 +407,8 @@ static const struct omap_clkctrl_bit_data dra7_gmac_bit_data[] __initconst = { { 0 }, }; -static const struct omap_clkctrl_bit_data dra7_usb_otg_ss1_bit_data[] __initconst = { - { 8, TI_CLK_GATE, dra7_usb_otg_ss2_refclk960m_parents, NULL }, - { 0 }, -}; - -static const struct omap_clkctrl_reg_data dra7_l3init_clkctrl_regs[] __initconst = { - { DRA7_MMC1_CLKCTRL, dra7_mmc1_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0008:25" }, - { DRA7_MMC2_CLKCTRL, dra7_mmc2_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0010:25" }, - { DRA7_USB_OTG_SS2_CLKCTRL, dra7_usb_otg_ss2_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" }, - { DRA7_USB_OTG_SS3_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" }, - { DRA7_USB_OTG_SS4_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" }, - { DRA7_SATA_CLKCTRL, dra7_sata_bit_data, CLKF_SW_SUP, "func_48m_fclk" }, - { DRA7_PCIE1_CLKCTRL, dra7_pcie1_bit_data, CLKF_SW_SUP, "l4_root_clk_div", "pcie_clkdm" }, - { DRA7_PCIE2_CLKCTRL, dra7_pcie2_bit_data, CLKF_SW_SUP, "l4_root_clk_div", "pcie_clkdm" }, - { DRA7_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "dpll_gmac_ck", "gmac_clkdm" }, - { DRA7_OCP2SCP1_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" }, - { DRA7_OCP2SCP3_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" }, - { DRA7_USB_OTG_SS1_CLKCTRL, dra7_usb_otg_ss1_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" }, +static const struct omap_clkctrl_reg_data dra7_gmac_clkctrl_regs[] __initconst = { + { DRA7_GMAC_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "dpll_gmac_ck" }, { 0 }, }; @@ -443,21 +482,6 @@ static const struct omap_clkctrl_bit_data dra7_gpio6_bit_data[] __initconst = { { 0 }, }; -static const struct omap_clkctrl_bit_data dra7_timer13_bit_data[] __initconst = { - { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL }, - { 0 }, -}; - -static const struct omap_clkctrl_bit_data dra7_timer14_bit_data[] __initconst = { - { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL }, - { 0 }, -}; - -static const struct omap_clkctrl_bit_data dra7_timer15_bit_data[] __initconst = { - { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL }, - { 0 }, -}; - static const struct omap_clkctrl_bit_data dra7_gpio7_bit_data[] __initconst = { { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL }, { 0 }, @@ -469,7 +493,7 @@ static const struct omap_clkctrl_bit_data dra7_gpio8_bit_data[] __initconst = { }; static const char * const dra7_mmc3_gfclk_div_parents[] __initconst = { - "l4per_cm:clk:0120:24", + "l4per-clkctrl:00f8:24", NULL, }; @@ -486,7 +510,7 @@ static const struct omap_clkctrl_bit_data dra7_mmc3_bit_data[] __initconst = { }; static const char * const dra7_mmc4_gfclk_div_parents[] __initconst = { - "l4per_cm:clk:0128:24", + "l4per-clkctrl:0100:24", NULL, }; @@ -502,8 +526,72 @@ static const struct omap_clkctrl_bit_data dra7_mmc4_bit_data[] __initconst = { { 0 }, }; -static const struct omap_clkctrl_bit_data dra7_timer16_bit_data[] __initconst = { - { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL }, +static const struct omap_clkctrl_bit_data dra7_uart1_bit_data[] __initconst = { + { 24, TI_CLK_MUX, 
dra7_uart6_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_uart2_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_uart3_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_uart4_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_uart5_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_l4per_clkctrl_regs[] __initconst = { + { DRA7_L4PER_TIMER10_CLKCTRL, dra7_timer10_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0000:24" }, + { DRA7_L4PER_TIMER11_CLKCTRL, dra7_timer11_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0008:24" }, + { DRA7_L4PER_TIMER2_CLKCTRL, dra7_timer2_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0010:24" }, + { DRA7_L4PER_TIMER3_CLKCTRL, dra7_timer3_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0018:24" }, + { DRA7_L4PER_TIMER4_CLKCTRL, dra7_timer4_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0020:24" }, + { DRA7_L4PER_TIMER9_CLKCTRL, dra7_timer9_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0028:24" }, + { DRA7_L4PER_ELM_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L4PER_GPIO2_CLKCTRL, dra7_gpio2_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_L4PER_GPIO3_CLKCTRL, dra7_gpio3_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_L4PER_GPIO4_CLKCTRL, dra7_gpio4_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_L4PER_GPIO5_CLKCTRL, dra7_gpio5_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_L4PER_GPIO6_CLKCTRL, dra7_gpio6_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_L4PER_HDQ1W_CLKCTRL, NULL, CLKF_SW_SUP, "func_12m_fclk" }, + { DRA7_L4PER_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" }, + { DRA7_L4PER_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" }, + { DRA7_L4PER_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" }, + { DRA7_L4PER_I2C4_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" }, + { DRA7_L4PER_L4_PER1_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L4PER_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, + { DRA7_L4PER_MCSPI2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, + { DRA7_L4PER_MCSPI3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, + { DRA7_L4PER_MCSPI4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, + { DRA7_L4PER_GPIO7_CLKCTRL, dra7_gpio7_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_L4PER_GPIO8_CLKCTRL, dra7_gpio8_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_L4PER_MMC3_CLKCTRL, dra7_mmc3_bit_data, CLKF_SW_SUP, "l4per-clkctrl:00f8:25" }, + { DRA7_L4PER_MMC4_CLKCTRL, dra7_mmc4_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0100:25" }, + { DRA7_L4PER_UART1_CLKCTRL, dra7_uart1_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0118:24" }, + { DRA7_L4PER_UART2_CLKCTRL, dra7_uart2_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0120:24" }, + { DRA7_L4PER_UART3_CLKCTRL, dra7_uart3_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0128:24" }, + { DRA7_L4PER_UART4_CLKCTRL, dra7_uart4_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0130:24" }, + { DRA7_L4PER_UART5_CLKCTRL, dra7_uart5_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0148:24" }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_l4sec_clkctrl_regs[] __initconst = { + { DRA7_L4SEC_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_L4SEC_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, + { DRA7_L4SEC_DES_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, + { 
DRA7_L4SEC_RNG_CLKCTRL, NULL, CLKF_HW_SUP, "" }, + { DRA7_L4SEC_SHAM_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" }, { 0 }, }; @@ -514,7 +602,7 @@ static const char * const dra7_qspi_gfclk_mux_parents[] __initconst = { }; static const char * const dra7_qspi_gfclk_div_parents[] __initconst = { - "l4per_cm:clk:0138:24", + "l4per2-clkctrl:012c:24", NULL, }; @@ -529,26 +617,6 @@ static const struct omap_clkctrl_bit_data dra7_qspi_bit_data[] __initconst = { { 0 }, }; -static const struct omap_clkctrl_bit_data dra7_uart1_bit_data[] __initconst = { - { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, - { 0 }, -}; - -static const struct omap_clkctrl_bit_data dra7_uart2_bit_data[] __initconst = { - { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, - { 0 }, -}; - -static const struct omap_clkctrl_bit_data dra7_uart3_bit_data[] __initconst = { - { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, - { 0 }, -}; - -static const struct omap_clkctrl_bit_data dra7_uart4_bit_data[] __initconst = { - { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, - { 0 }, -}; - static const struct omap_clkctrl_bit_data dra7_mcasp2_bit_data[] __initconst = { { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL }, { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL }, @@ -562,11 +630,6 @@ static const struct omap_clkctrl_bit_data dra7_mcasp3_bit_data[] __initconst = { { 0 }, }; -static const struct omap_clkctrl_bit_data dra7_uart5_bit_data[] __initconst = { - { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL }, - { 0 }, -}; - static const struct omap_clkctrl_bit_data dra7_mcasp5_bit_data[] __initconst = { { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL }, { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL }, @@ -612,64 +675,54 @@ static const struct omap_clkctrl_bit_data dra7_mcasp7_bit_data[] __initconst = { { 0 }, }; -static const struct omap_clkctrl_reg_data dra7_l4per_clkctrl_regs[] __initconst = { - { DRA7_L4_PER2_CLKCTRL, NULL, 0, "l3_iclk_div", "l4per2_clkdm" }, - { DRA7_L4_PER3_CLKCTRL, NULL, 0, "l3_iclk_div", "l4per3_clkdm" }, - { DRA7_TIMER10_CLKCTRL, dra7_timer10_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0028:24" }, - { DRA7_TIMER11_CLKCTRL, dra7_timer11_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0030:24" }, - { DRA7_TIMER2_CLKCTRL, dra7_timer2_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0038:24" }, - { DRA7_TIMER3_CLKCTRL, dra7_timer3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0040:24" }, - { DRA7_TIMER4_CLKCTRL, dra7_timer4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0048:24" }, - { DRA7_TIMER9_CLKCTRL, dra7_timer9_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0050:24" }, - { DRA7_ELM_CLKCTRL, NULL, 0, "l3_iclk_div" }, - { DRA7_GPIO2_CLKCTRL, dra7_gpio2_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, - { DRA7_GPIO3_CLKCTRL, dra7_gpio3_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, - { DRA7_GPIO4_CLKCTRL, dra7_gpio4_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, - { DRA7_GPIO5_CLKCTRL, dra7_gpio5_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, - { DRA7_GPIO6_CLKCTRL, dra7_gpio6_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, - { DRA7_HDQ1W_CLKCTRL, NULL, CLKF_SW_SUP, "func_12m_fclk" }, - { DRA7_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div", "l4per2_clkdm" }, - { DRA7_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div", "l4per2_clkdm" }, - { DRA7_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" }, - { DRA7_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" }, - { DRA7_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" }, - { DRA7_I2C4_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" }, - { DRA7_L4_PER1_CLKCTRL, NULL, 0, "l3_iclk_div" }, - { 
DRA7_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div", "l4per2_clkdm" }, - { DRA7_TIMER13_CLKCTRL, dra7_timer13_bit_data, CLKF_SW_SUP, "l4per_cm:clk:00c8:24", "l4per3_clkdm" }, - { DRA7_TIMER14_CLKCTRL, dra7_timer14_bit_data, CLKF_SW_SUP, "l4per_cm:clk:00d0:24", "l4per3_clkdm" }, - { DRA7_TIMER15_CLKCTRL, dra7_timer15_bit_data, CLKF_SW_SUP, "l4per_cm:clk:00d8:24", "l4per3_clkdm" }, - { DRA7_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, - { DRA7_MCSPI2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, - { DRA7_MCSPI3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, - { DRA7_MCSPI4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" }, - { DRA7_GPIO7_CLKCTRL, dra7_gpio7_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, - { DRA7_GPIO8_CLKCTRL, dra7_gpio8_bit_data, CLKF_HW_SUP, "l3_iclk_div" }, - { DRA7_MMC3_CLKCTRL, dra7_mmc3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0120:25" }, - { DRA7_MMC4_CLKCTRL, dra7_mmc4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0128:25" }, - { DRA7_TIMER16_CLKCTRL, dra7_timer16_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0130:24", "l4per3_clkdm" }, - { DRA7_QSPI_CLKCTRL, dra7_qspi_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0138:25", "l4per2_clkdm" }, - { DRA7_UART1_CLKCTRL, dra7_uart1_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0140:24" }, - { DRA7_UART2_CLKCTRL, dra7_uart2_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0148:24" }, - { DRA7_UART3_CLKCTRL, dra7_uart3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0150:24" }, - { DRA7_UART4_CLKCTRL, dra7_uart4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0158:24" }, - { DRA7_MCASP2_CLKCTRL, dra7_mcasp2_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0160:22", "l4per2_clkdm" }, - { DRA7_MCASP3_CLKCTRL, dra7_mcasp3_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0168:22", "l4per2_clkdm" }, - { DRA7_UART5_CLKCTRL, dra7_uart5_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0170:24" }, - { DRA7_MCASP5_CLKCTRL, dra7_mcasp5_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0178:22", "l4per2_clkdm" }, - { DRA7_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0190:24", "l4per2_clkdm" }, - { DRA7_MCASP4_CLKCTRL, dra7_mcasp4_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0198:22", "l4per2_clkdm" }, - { DRA7_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" }, - { DRA7_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" }, - { DRA7_DES_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" }, - { DRA7_RNG_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" }, - { DRA7_SHAM_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" }, - { DRA7_UART7_CLKCTRL, dra7_uart7_bit_data, CLKF_SW_SUP, "l4per_cm:clk:01d0:24", "l4per2_clkdm" }, - { DRA7_UART8_CLKCTRL, dra7_uart8_bit_data, CLKF_SW_SUP, "l4per_cm:clk:01e0:24", "l4per2_clkdm" }, - { DRA7_UART9_CLKCTRL, dra7_uart9_bit_data, CLKF_SW_SUP, "l4per_cm:clk:01e8:24", "l4per2_clkdm" }, - { DRA7_DCAN2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin1", "l4per2_clkdm" }, - { DRA7_MCASP6_CLKCTRL, dra7_mcasp6_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0204:22", "l4per2_clkdm" }, - { DRA7_MCASP7_CLKCTRL, dra7_mcasp7_bit_data, CLKF_SW_SUP, "l4per_cm:clk:0208:22", "l4per2_clkdm" }, +static const struct omap_clkctrl_reg_data dra7_l4per2_clkctrl_regs[] __initconst = { + { DRA7_L4PER2_L4_PER2_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L4PER2_PRUSS1_CLKCTRL, NULL, CLKF_SW_SUP, "" }, + { DRA7_L4PER2_PRUSS2_CLKCTRL, NULL, CLKF_SW_SUP, "" }, + { DRA7_L4PER2_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div" }, + { DRA7_L4PER2_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div" }, + { DRA7_L4PER2_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div" }, + { DRA7_L4PER2_QSPI_CLKCTRL, 
dra7_qspi_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:012c:25" }, + { DRA7_L4PER2_MCASP2_CLKCTRL, dra7_mcasp2_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0154:22" }, + { DRA7_L4PER2_MCASP3_CLKCTRL, dra7_mcasp3_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:015c:22" }, + { DRA7_L4PER2_MCASP5_CLKCTRL, dra7_mcasp5_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:016c:22" }, + { DRA7_L4PER2_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0184:24" }, + { DRA7_L4PER2_MCASP4_CLKCTRL, dra7_mcasp4_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:018c:22" }, + { DRA7_L4PER2_UART7_CLKCTRL, dra7_uart7_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01c4:24" }, + { DRA7_L4PER2_UART8_CLKCTRL, dra7_uart8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01d4:24" }, + { DRA7_L4PER2_UART9_CLKCTRL, dra7_uart9_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01dc:24" }, + { DRA7_L4PER2_DCAN2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin1" }, + { DRA7_L4PER2_MCASP6_CLKCTRL, dra7_mcasp6_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01f8:22" }, + { DRA7_L4PER2_MCASP7_CLKCTRL, dra7_mcasp7_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01fc:22" }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_timer13_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_timer14_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_timer15_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_bit_data dra7_timer16_bit_data[] __initconst = { + { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL }, + { 0 }, +}; + +static const struct omap_clkctrl_reg_data dra7_l4per3_clkctrl_regs[] __initconst = { + { DRA7_L4PER3_L4_PER3_CLKCTRL, NULL, 0, "l3_iclk_div" }, + { DRA7_L4PER3_TIMER13_CLKCTRL, dra7_timer13_bit_data, CLKF_SW_SUP, "l4per3-clkctrl:00b4:24" }, + { DRA7_L4PER3_TIMER14_CLKCTRL, dra7_timer14_bit_data, CLKF_SW_SUP, "l4per3-clkctrl:00bc:24" }, + { DRA7_L4PER3_TIMER15_CLKCTRL, dra7_timer15_bit_data, CLKF_SW_SUP, "l4per3-clkctrl:00c4:24" }, + { DRA7_L4PER3_TIMER16_CLKCTRL, dra7_timer16_bit_data, CLKF_SW_SUP, "l4per3-clkctrl:011c:24" }, { 0 }, }; @@ -700,24 +753,28 @@ static const struct omap_clkctrl_bit_data dra7_dcan1_bit_data[] __initconst = { }; static const struct omap_clkctrl_reg_data dra7_wkupaon_clkctrl_regs[] __initconst = { - { DRA7_L4_WKUP_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" }, - { DRA7_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" }, - { DRA7_GPIO1_CLKCTRL, dra7_gpio1_bit_data, CLKF_HW_SUP, "wkupaon_iclk_mux" }, - { DRA7_TIMER1_CLKCTRL, dra7_timer1_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0020:24" }, - { DRA7_TIMER12_CLKCTRL, NULL, 0, "secure_32k_clk_src_ck" }, - { DRA7_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" }, - { DRA7_UART10_CLKCTRL, dra7_uart10_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0060:24" }, - { DRA7_DCAN1_CLKCTRL, dra7_dcan1_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0068:24" }, - { DRA7_ADC_CLKCTRL, NULL, CLKF_SW_SUP, "mcan_clk"}, + { DRA7_WKUPAON_L4_WKUP_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" }, + { DRA7_WKUPAON_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" }, + { DRA7_WKUPAON_GPIO1_CLKCTRL, dra7_gpio1_bit_data, CLKF_HW_SUP, "wkupaon_iclk_mux" }, + { DRA7_WKUPAON_TIMER1_CLKCTRL, dra7_timer1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0020:24" }, + { DRA7_WKUPAON_TIMER12_CLKCTRL, NULL, 0, "secure_32k_clk_src_ck" }, + { DRA7_WKUPAON_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" }, + { 
DRA7_WKUPAON_UART10_CLKCTRL, dra7_uart10_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0060:24" }, + { DRA7_WKUPAON_DCAN1_CLKCTRL, dra7_dcan1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0068:24" }, + { DRA7_WKUPAON_ADC_CLKCTRL, NULL, CLKF_SW_SUP, "mcan_clk" }, { 0 }, }; const struct omap_clkctrl_data dra7_clkctrl_data[] __initconst = { { 0x4a005320, dra7_mpu_clkctrl_regs }, - { 0x4a005540, dra7_ipu_clkctrl_regs }, - { 0x4a005740, dra7_rtc_clkctrl_regs }, + { 0x4a005420, dra7_dsp1_clkctrl_regs }, + { 0x4a005520, dra7_ipu1_clkctrl_regs }, + { 0x4a005550, dra7_ipu_clkctrl_regs }, + { 0x4a005620, dra7_dsp2_clkctrl_regs }, + { 0x4a005720, dra7_rtc_clkctrl_regs }, { 0x4a008620, dra7_coreaon_clkctrl_regs }, { 0x4a008720, dra7_l3main1_clkctrl_regs }, + { 0x4a008920, dra7_ipu2_clkctrl_regs }, { 0x4a008a20, dra7_dma_clkctrl_regs }, { 0x4a008b20, dra7_emif_clkctrl_regs }, { 0x4a008c00, dra7_atl_clkctrl_regs }, @@ -725,7 +782,12 @@ const struct omap_clkctrl_data dra7_clkctrl_data[] __initconst = { { 0x4a008e20, dra7_l3instr_clkctrl_regs }, { 0x4a009120, dra7_dss_clkctrl_regs }, { 0x4a009320, dra7_l3init_clkctrl_regs }, - { 0x4a009700, dra7_l4per_clkctrl_regs }, + { 0x4a0093b0, dra7_pcie_clkctrl_regs }, + { 0x4a0093d0, dra7_gmac_clkctrl_regs }, + { 0x4a009728, dra7_l4per_clkctrl_regs }, + { 0x4a0098a0, dra7_l4sec_clkctrl_regs }, + { 0x4a00970c, dra7_l4per2_clkctrl_regs }, + { 0x4a009714, dra7_l4per3_clkctrl_regs }, { 0x4ae07820, dra7_wkupaon_clkctrl_regs }, { 0 }, }; @@ -734,91 +796,92 @@ static struct ti_dt_clk dra7xx_clks[] = { DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"), DT_CLK(NULL, "sys_clkin_ck", "timer_sys_clk_div"), DT_CLK(NULL, "sys_clkin", "sys_clkin1"), - DT_CLK(NULL, "atl_dpll_clk_mux", "atl_cm:0000:24"), - DT_CLK(NULL, "atl_gfclk_mux", "atl_cm:0000:26"), - DT_CLK(NULL, "dcan1_sys_clk_mux", "wkupaon_cm:0068:24"), - DT_CLK(NULL, "dss_32khz_clk", "dss_cm:0000:11"), - DT_CLK(NULL, "dss_48mhz_clk", "dss_cm:0000:9"), - DT_CLK(NULL, "dss_dss_clk", "dss_cm:0000:8"), - DT_CLK(NULL, "dss_hdmi_clk", "dss_cm:0000:10"), - DT_CLK(NULL, "dss_video1_clk", "dss_cm:0000:12"), - DT_CLK(NULL, "dss_video2_clk", "dss_cm:0000:13"), - DT_CLK(NULL, "gmac_rft_clk_mux", "l3init_cm:00b0:25"), - DT_CLK(NULL, "gpio1_dbclk", "wkupaon_cm:0018:8"), - DT_CLK(NULL, "gpio2_dbclk", "l4per_cm:0060:8"), - DT_CLK(NULL, "gpio3_dbclk", "l4per_cm:0068:8"), - DT_CLK(NULL, "gpio4_dbclk", "l4per_cm:0070:8"), - DT_CLK(NULL, "gpio5_dbclk", "l4per_cm:0078:8"), - DT_CLK(NULL, "gpio6_dbclk", "l4per_cm:0080:8"), - DT_CLK(NULL, "gpio7_dbclk", "l4per_cm:0110:8"), - DT_CLK(NULL, "gpio8_dbclk", "l4per_cm:0118:8"), - DT_CLK(NULL, "mcasp1_ahclkr_mux", "ipu_cm:0010:28"), - DT_CLK(NULL, "mcasp1_ahclkx_mux", "ipu_cm:0010:24"), - DT_CLK(NULL, "mcasp1_aux_gfclk_mux", "ipu_cm:0010:22"), - DT_CLK(NULL, "mcasp2_ahclkr_mux", "l4per_cm:0160:28"), - DT_CLK(NULL, "mcasp2_ahclkx_mux", "l4per_cm:0160:24"), - DT_CLK(NULL, "mcasp2_aux_gfclk_mux", "l4per_cm:0160:22"), - DT_CLK(NULL, "mcasp3_ahclkx_mux", "l4per_cm:0168:24"), - DT_CLK(NULL, "mcasp3_aux_gfclk_mux", "l4per_cm:0168:22"), - DT_CLK(NULL, "mcasp4_ahclkx_mux", "l4per_cm:0198:24"), - DT_CLK(NULL, "mcasp4_aux_gfclk_mux", "l4per_cm:0198:22"), - DT_CLK(NULL, "mcasp5_ahclkx_mux", "l4per_cm:0178:24"), - DT_CLK(NULL, "mcasp5_aux_gfclk_mux", "l4per_cm:0178:22"), - DT_CLK(NULL, "mcasp6_ahclkx_mux", "l4per_cm:0204:24"), - DT_CLK(NULL, "mcasp6_aux_gfclk_mux", "l4per_cm:0204:22"), - DT_CLK(NULL, "mcasp7_ahclkx_mux", "l4per_cm:0208:24"), - DT_CLK(NULL, "mcasp7_aux_gfclk_mux", "l4per_cm:0208:22"), - DT_CLK(NULL, 
"mcasp8_ahclkx_mux", "l4per_cm:0190:22"), - DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per_cm:0190:24"), - DT_CLK(NULL, "mmc1_clk32k", "l3init_cm:0008:8"), - DT_CLK(NULL, "mmc1_fclk_div", "l3init_cm:0008:25"), - DT_CLK(NULL, "mmc1_fclk_mux", "l3init_cm:0008:24"), - DT_CLK(NULL, "mmc2_clk32k", "l3init_cm:0010:8"), - DT_CLK(NULL, "mmc2_fclk_div", "l3init_cm:0010:25"), - DT_CLK(NULL, "mmc2_fclk_mux", "l3init_cm:0010:24"), - DT_CLK(NULL, "mmc3_clk32k", "l4per_cm:0120:8"), - DT_CLK(NULL, "mmc3_gfclk_div", "l4per_cm:0120:25"), - DT_CLK(NULL, "mmc3_gfclk_mux", "l4per_cm:0120:24"), - DT_CLK(NULL, "mmc4_clk32k", "l4per_cm:0128:8"), - DT_CLK(NULL, "mmc4_gfclk_div", "l4per_cm:0128:25"), - DT_CLK(NULL, "mmc4_gfclk_mux", "l4per_cm:0128:24"), - DT_CLK(NULL, "optfclk_pciephy1_32khz", "l3init_cm:0090:8"), - DT_CLK(NULL, "optfclk_pciephy1_clk", "l3init_cm:0090:9"), - DT_CLK(NULL, "optfclk_pciephy1_div_clk", "l3init_cm:0090:10"), - DT_CLK(NULL, "optfclk_pciephy2_32khz", "l3init_cm:0098:8"), - DT_CLK(NULL, "optfclk_pciephy2_clk", "l3init_cm:0098:9"), - DT_CLK(NULL, "optfclk_pciephy2_div_clk", "l3init_cm:0098:10"), - DT_CLK(NULL, "qspi_gfclk_div", "l4per_cm:0138:25"), - DT_CLK(NULL, "qspi_gfclk_mux", "l4per_cm:0138:24"), - DT_CLK(NULL, "rmii_50mhz_clk_mux", "l3init_cm:00b0:24"), - DT_CLK(NULL, "sata_ref_clk", "l3init_cm:0068:8"), - DT_CLK(NULL, "timer10_gfclk_mux", "l4per_cm:0028:24"), - DT_CLK(NULL, "timer11_gfclk_mux", "l4per_cm:0030:24"), - DT_CLK(NULL, "timer13_gfclk_mux", "l4per_cm:00c8:24"), - DT_CLK(NULL, "timer14_gfclk_mux", "l4per_cm:00d0:24"), - DT_CLK(NULL, "timer15_gfclk_mux", "l4per_cm:00d8:24"), - DT_CLK(NULL, "timer16_gfclk_mux", "l4per_cm:0130:24"), - DT_CLK(NULL, "timer1_gfclk_mux", "wkupaon_cm:0020:24"), - DT_CLK(NULL, "timer2_gfclk_mux", "l4per_cm:0038:24"), - DT_CLK(NULL, "timer3_gfclk_mux", "l4per_cm:0040:24"), - DT_CLK(NULL, "timer4_gfclk_mux", "l4per_cm:0048:24"), - DT_CLK(NULL, "timer5_gfclk_mux", "ipu_cm:0018:24"), - DT_CLK(NULL, "timer6_gfclk_mux", "ipu_cm:0020:24"), - DT_CLK(NULL, "timer7_gfclk_mux", "ipu_cm:0028:24"), - DT_CLK(NULL, "timer8_gfclk_mux", "ipu_cm:0030:24"), - DT_CLK(NULL, "timer9_gfclk_mux", "l4per_cm:0050:24"), - DT_CLK(NULL, "uart10_gfclk_mux", "wkupaon_cm:0060:24"), - DT_CLK(NULL, "uart1_gfclk_mux", "l4per_cm:0140:24"), - DT_CLK(NULL, "uart2_gfclk_mux", "l4per_cm:0148:24"), - DT_CLK(NULL, "uart3_gfclk_mux", "l4per_cm:0150:24"), - DT_CLK(NULL, "uart4_gfclk_mux", "l4per_cm:0158:24"), - DT_CLK(NULL, "uart5_gfclk_mux", "l4per_cm:0170:24"), - DT_CLK(NULL, "uart6_gfclk_mux", "ipu_cm:0040:24"), - DT_CLK(NULL, "uart7_gfclk_mux", "l4per_cm:01d0:24"), - DT_CLK(NULL, "uart8_gfclk_mux", "l4per_cm:01e0:24"), - DT_CLK(NULL, "uart9_gfclk_mux", "l4per_cm:01e8:24"), - DT_CLK(NULL, "usb_otg_ss1_refclk960m", "l3init_cm:00d0:8"), - DT_CLK(NULL, "usb_otg_ss2_refclk960m", "l3init_cm:0020:8"), + DT_CLK(NULL, "atl_dpll_clk_mux", "atl-clkctrl:0000:24"), + DT_CLK(NULL, "atl_gfclk_mux", "atl-clkctrl:0000:26"), + DT_CLK(NULL, "dcan1_sys_clk_mux", "wkupaon-clkctrl:0068:24"), + DT_CLK(NULL, "dss_32khz_clk", "dss-clkctrl:0000:11"), + DT_CLK(NULL, "dss_48mhz_clk", "dss-clkctrl:0000:9"), + DT_CLK(NULL, "dss_dss_clk", "dss-clkctrl:0000:8"), + DT_CLK(NULL, "dss_hdmi_clk", "dss-clkctrl:0000:10"), + DT_CLK(NULL, "dss_video1_clk", "dss-clkctrl:0000:12"), + DT_CLK(NULL, "dss_video2_clk", "dss-clkctrl:0000:13"), + DT_CLK(NULL, "gmac_rft_clk_mux", "gmac-clkctrl:0000:25"), + DT_CLK(NULL, "gpio1_dbclk", "wkupaon-clkctrl:0018:8"), + DT_CLK(NULL, "gpio2_dbclk", "l4per-clkctrl:0038:8"), + DT_CLK(NULL, 
"gpio3_dbclk", "l4per-clkctrl:0040:8"), + DT_CLK(NULL, "gpio4_dbclk", "l4per-clkctrl:0048:8"), + DT_CLK(NULL, "gpio5_dbclk", "l4per-clkctrl:0050:8"), + DT_CLK(NULL, "gpio6_dbclk", "l4per-clkctrl:0058:8"), + DT_CLK(NULL, "gpio7_dbclk", "l4per-clkctrl:00e8:8"), + DT_CLK(NULL, "gpio8_dbclk", "l4per-clkctrl:00f0:8"), + DT_CLK(NULL, "ipu1_gfclk_mux", "ipu1-clkctrl:0000:24"), + DT_CLK(NULL, "mcasp1_ahclkr_mux", "ipu-clkctrl:0000:28"), + DT_CLK(NULL, "mcasp1_ahclkx_mux", "ipu-clkctrl:0000:24"), + DT_CLK(NULL, "mcasp1_aux_gfclk_mux", "ipu-clkctrl:0000:22"), + DT_CLK(NULL, "mcasp2_ahclkr_mux", "l4per2-clkctrl:0154:28"), + DT_CLK(NULL, "mcasp2_ahclkx_mux", "l4per2-clkctrl:0154:24"), + DT_CLK(NULL, "mcasp2_aux_gfclk_mux", "l4per2-clkctrl:0154:22"), + DT_CLK(NULL, "mcasp3_ahclkx_mux", "l4per2-clkctrl:015c:24"), + DT_CLK(NULL, "mcasp3_aux_gfclk_mux", "l4per2-clkctrl:015c:22"), + DT_CLK(NULL, "mcasp4_ahclkx_mux", "l4per2-clkctrl:018c:24"), + DT_CLK(NULL, "mcasp4_aux_gfclk_mux", "l4per2-clkctrl:018c:22"), + DT_CLK(NULL, "mcasp5_ahclkx_mux", "l4per2-clkctrl:016c:24"), + DT_CLK(NULL, "mcasp5_aux_gfclk_mux", "l4per2-clkctrl:016c:22"), + DT_CLK(NULL, "mcasp6_ahclkx_mux", "l4per2-clkctrl:01f8:24"), + DT_CLK(NULL, "mcasp6_aux_gfclk_mux", "l4per2-clkctrl:01f8:22"), + DT_CLK(NULL, "mcasp7_ahclkx_mux", "l4per2-clkctrl:01fc:24"), + DT_CLK(NULL, "mcasp7_aux_gfclk_mux", "l4per2-clkctrl:01fc:22"), + DT_CLK(NULL, "mcasp8_ahclkx_mux", "l4per2-clkctrl:0184:22"), + DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per2-clkctrl:0184:24"), + DT_CLK(NULL, "mmc1_clk32k", "l3init-clkctrl:0008:8"), + DT_CLK(NULL, "mmc1_fclk_div", "l3init-clkctrl:0008:25"), + DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"), + DT_CLK(NULL, "mmc2_clk32k", "l3init-clkctrl:0010:8"), + DT_CLK(NULL, "mmc2_fclk_div", "l3init-clkctrl:0010:25"), + DT_CLK(NULL, "mmc2_fclk_mux", "l3init-clkctrl:0010:24"), + DT_CLK(NULL, "mmc3_clk32k", "l4per-clkctrl:00f8:8"), + DT_CLK(NULL, "mmc3_gfclk_div", "l4per-clkctrl:00f8:25"), + DT_CLK(NULL, "mmc3_gfclk_mux", "l4per-clkctrl:00f8:24"), + DT_CLK(NULL, "mmc4_clk32k", "l4per-clkctrl:0100:8"), + DT_CLK(NULL, "mmc4_gfclk_div", "l4per-clkctrl:0100:25"), + DT_CLK(NULL, "mmc4_gfclk_mux", "l4per-clkctrl:0100:24"), + DT_CLK(NULL, "optfclk_pciephy1_32khz", "pcie-clkctrl:0000:8"), + DT_CLK(NULL, "optfclk_pciephy1_clk", "pcie-clkctrl:0000:9"), + DT_CLK(NULL, "optfclk_pciephy1_div_clk", "pcie-clkctrl:0000:10"), + DT_CLK(NULL, "optfclk_pciephy2_32khz", "pcie-clkctrl:0008:8"), + DT_CLK(NULL, "optfclk_pciephy2_clk", "pcie-clkctrl:0008:9"), + DT_CLK(NULL, "optfclk_pciephy2_div_clk", "pcie-clkctrl:0008:10"), + DT_CLK(NULL, "qspi_gfclk_div", "l4per2-clkctrl:012c:25"), + DT_CLK(NULL, "qspi_gfclk_mux", "l4per2-clkctrl:012c:24"), + DT_CLK(NULL, "rmii_50mhz_clk_mux", "gmac-clkctrl:0000:24"), + DT_CLK(NULL, "sata_ref_clk", "l3init-clkctrl:0068:8"), + DT_CLK(NULL, "timer10_gfclk_mux", "l4per-clkctrl:0000:24"), + DT_CLK(NULL, "timer11_gfclk_mux", "l4per-clkctrl:0008:24"), + DT_CLK(NULL, "timer13_gfclk_mux", "l4per3-clkctrl:00b4:24"), + DT_CLK(NULL, "timer14_gfclk_mux", "l4per3-clkctrl:00bc:24"), + DT_CLK(NULL, "timer15_gfclk_mux", "l4per3-clkctrl:00c4:24"), + DT_CLK(NULL, "timer16_gfclk_mux", "l4per3-clkctrl:011c:24"), + DT_CLK(NULL, "timer1_gfclk_mux", "wkupaon-clkctrl:0020:24"), + DT_CLK(NULL, "timer2_gfclk_mux", "l4per-clkctrl:0010:24"), + DT_CLK(NULL, "timer3_gfclk_mux", "l4per-clkctrl:0018:24"), + DT_CLK(NULL, "timer4_gfclk_mux", "l4per-clkctrl:0020:24"), + DT_CLK(NULL, "timer5_gfclk_mux", "ipu-clkctrl:0008:24"), + DT_CLK(NULL, 
"timer6_gfclk_mux", "ipu-clkctrl:0010:24"), + DT_CLK(NULL, "timer7_gfclk_mux", "ipu-clkctrl:0018:24"), + DT_CLK(NULL, "timer8_gfclk_mux", "ipu-clkctrl:0020:24"), + DT_CLK(NULL, "timer9_gfclk_mux", "l4per-clkctrl:0028:24"), + DT_CLK(NULL, "uart10_gfclk_mux", "wkupaon-clkctrl:0060:24"), + DT_CLK(NULL, "uart1_gfclk_mux", "l4per-clkctrl:0118:24"), + DT_CLK(NULL, "uart2_gfclk_mux", "l4per-clkctrl:0120:24"), + DT_CLK(NULL, "uart3_gfclk_mux", "l4per-clkctrl:0128:24"), + DT_CLK(NULL, "uart4_gfclk_mux", "l4per-clkctrl:0130:24"), + DT_CLK(NULL, "uart5_gfclk_mux", "l4per-clkctrl:0148:24"), + DT_CLK(NULL, "uart6_gfclk_mux", "ipu-clkctrl:0030:24"), + DT_CLK(NULL, "uart7_gfclk_mux", "l4per2-clkctrl:01c4:24"), + DT_CLK(NULL, "uart8_gfclk_mux", "l4per2-clkctrl:01d4:24"), + DT_CLK(NULL, "uart9_gfclk_mux", "l4per2-clkctrl:01dc:24"), + DT_CLK(NULL, "usb_otg_ss1_refclk960m", "l3init-clkctrl:00d0:8"), + DT_CLK(NULL, "usb_otg_ss2_refclk960m", "l3init-clkctrl:0020:8"), { .node_name = NULL }, }; @@ -827,7 +890,10 @@ int __init dra7xx_dt_clk_init(void) int rc; struct clk *dpll_ck, *hdcp_ck; - ti_dt_clocks_register(dra7xx_clks); + if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) + ti_dt_clocks_register(dra7xx_compat_clks); + else + ti_dt_clocks_register(dra7xx_clks); omap2_clk_disable_autoidle_all(); diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index 148815470431..a01ca9395179 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -190,8 +190,8 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node) init.num_parents = of_clk_get_parent_count(node); if (init.num_parents != 1) { - pr_err("%s: atl clock %s must have 1 parent\n", __func__, - node->name); + pr_err("%s: atl clock %pOFn must have 1 parent\n", __func__, + node); goto cleanup; } diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index e205af814582..d0cd58534781 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -34,7 +34,7 @@ struct ti_clk_ll_ops *ti_clk_ll_ops; static struct device_node *clocks_node_ptr[CLK_MAX_MEMMAPS]; -static struct ti_clk_features ti_clk_features; +struct ti_clk_features ti_clk_features; struct clk_iomap { struct regmap *regmap; @@ -129,7 +129,7 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops) void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) { struct ti_dt_clk *c; - struct device_node *node; + struct device_node *node, *parent; struct clk *clk; struct of_phandle_args clkspec; char buf[64]; @@ -140,6 +140,9 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) int ret; static bool clkctrl_nodes_missing; static bool has_clkctrl_data; + static bool compat_mode; + + compat_mode = ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT; for (c = oclks; c->node_name != NULL; c++) { strcpy(buf, c->node_name); @@ -164,8 +167,12 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) continue; node = of_find_node_by_name(NULL, buf); - if (num_args) - node = of_find_node_by_name(node, "clk"); + if (num_args && compat_mode) { + parent = node; + node = of_get_child_by_name(parent, "clk"); + of_node_put(parent); + } + clkspec.np = node; clkspec.args_count = num_args; for (i = 0; i < num_args; i++) { @@ -173,11 +180,12 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) if (ret) { pr_warn("Bad tag in %s at %d: %s\n", c->node_name, i, tags[i]); + of_node_put(node); return; } } clk = of_clk_get_from_provider(&clkspec); - + of_node_put(node); if (!IS_ERR(clk)) { c->lk.clk = clk; clkdev_add(&c->lk); @@ -223,7 
+231,7 @@ int __init ti_clk_retry_init(struct device_node *node, void *user, { struct clk_init_item *retry; - pr_debug("%s: adding to retry list...\n", node->name); + pr_debug("%pOFn: adding to retry list...\n", node); retry = kzalloc(sizeof(*retry), GFP_KERNEL); if (!retry) return -ENOMEM; @@ -258,14 +266,14 @@ int ti_clk_get_reg_addr(struct device_node *node, int index, } if (i == CLK_MAX_MEMMAPS) { - pr_err("clk-provider not found for %s!\n", node->name); + pr_err("clk-provider not found for %pOFn!\n", node); return -ENOENT; } reg->index = i; if (of_property_read_u32_index(node, "reg", index, &val)) { - pr_err("%s must have reg[%d]!\n", node->name, index); + pr_err("%pOFn must have reg[%d]!\n", node, index); return -EINVAL; } @@ -312,7 +320,7 @@ int __init omap2_clk_provider_init(struct device_node *parent, int index, /* get clocks for this parent */ clocks = of_get_child_by_name(parent, "clocks"); if (!clocks) { - pr_err("%s missing 'clocks' child node.\n", parent->name); + pr_err("%pOFn missing 'clocks' child node.\n", parent); return -EINVAL; } @@ -365,7 +373,7 @@ void ti_dt_clk_init_retry_clks(void) while (!list_empty(&retry_list) && retries) { list_for_each_entry_safe(retry, tmp, &retry_list, link) { - pr_debug("retry-init: %s\n", retry->node->name); + pr_debug("retry-init: %pOFn\n", retry->node); retry->func(retry->user, retry->node); list_del(&retry->link); kfree(retry); diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c index 421b05392220..469f560ae1cf 100644 --- a/drivers/clk/ti/clkctrl.c +++ b/drivers/clk/ti/clkctrl.c @@ -259,8 +259,13 @@ _ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider, struct omap_clkctrl_clk *clkctrl_clk; int ret = 0; - init.name = kasprintf(GFP_KERNEL, "%s:%s:%04x:%d", node->parent->name, - node->name, offset, bit); + if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) + init.name = kasprintf(GFP_KERNEL, "%pOFn:%pOFn:%04x:%d", + node->parent, node, offset, + bit); + else + init.name = kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", node, + offset, bit); clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL); if (!init.name || !clkctrl_clk) { ret = -ENOMEM; @@ -440,6 +445,11 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node) const __be32 *addrp; u32 addr; int ret; + char *c; + + if (!(ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) && + !strcmp(node->name, "clk")) + ti_clk_features.flags |= TI_CLK_CLKCTRL_COMPAT; addrp = of_get_address(node, 0, NULL, NULL); addr = (u32)of_translate_address(node, addrp); @@ -453,18 +463,35 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node) data = omap5_clkctrl_data; #endif #ifdef CONFIG_SOC_DRA7XX - if (of_machine_is_compatible("ti,dra7")) - data = dra7_clkctrl_data; + if (of_machine_is_compatible("ti,dra7")) { + if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) + data = dra7_clkctrl_compat_data; + else + data = dra7_clkctrl_data; + } #endif #ifdef CONFIG_SOC_AM33XX - if (of_machine_is_compatible("ti,am33xx")) - data = am3_clkctrl_data; + if (of_machine_is_compatible("ti,am33xx")) { + if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) + data = am3_clkctrl_compat_data; + else + data = am3_clkctrl_data; + } #endif #ifdef CONFIG_SOC_AM43XX - if (of_machine_is_compatible("ti,am4372")) - data = am4_clkctrl_data; - if (of_machine_is_compatible("ti,am438x")) - data = am438x_clkctrl_data; + if (of_machine_is_compatible("ti,am4372")) { + if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) + data = am4_clkctrl_compat_data; + else + data = 
am4_clkctrl_data; + } + + if (of_machine_is_compatible("ti,am438x")) { + if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) + data = am438x_clkctrl_compat_data; + else + data = am438x_clkctrl_data; + } #endif #ifdef CONFIG_SOC_TI81XX if (of_machine_is_compatible("ti,dm814")) @@ -492,21 +519,43 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node) provider->base = of_iomap(node, 0); - provider->clkdm_name = kmalloc(strlen(node->parent->name) + 3, - GFP_KERNEL); - if (!provider->clkdm_name) { - kfree(provider); - return; + if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) { + provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFnxxx", node->parent); + if (!provider->clkdm_name) { + kfree(provider); + return; + } + + /* + * Create default clkdm name, replace _cm from end of parent + * node name with _clkdm + */ + provider->clkdm_name[strlen(provider->clkdm_name) - 5] = 0; + } else { + provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFn", node); + if (!provider->clkdm_name) { + kfree(provider); + return; + } + + /* + * Create default clkdm name, replace _clkctrl from end of + * node name with _clkdm + */ + provider->clkdm_name[strlen(provider->clkdm_name) - 7] = 0; } - /* - * Create default clkdm name, replace _cm from end of parent node - * name with _clkdm - */ - strcpy(provider->clkdm_name, node->parent->name); - provider->clkdm_name[strlen(provider->clkdm_name) - 2] = 0; strcat(provider->clkdm_name, "clkdm"); + /* Replace any dash from the clkdm name with underscore */ + c = provider->clkdm_name; + + while (*c) { + if (*c == '-') + *c = '_'; + c++; + } + INIT_LIST_HEAD(&provider->clocks); /* Generate clocks */ @@ -539,9 +588,13 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node) init.flags = 0; if (reg_data->flags & CLKF_SET_RATE_PARENT) init.flags |= CLK_SET_RATE_PARENT; - init.name = kasprintf(GFP_KERNEL, "%s:%s:%04x:%d", - node->parent->name, node->name, - reg_data->offset, 0); + if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) + init.name = kasprintf(GFP_KERNEL, "%pOFn:%pOFn:%04x:%d", + node->parent, node, + reg_data->offset, 0); + else + init.name = kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", + node, reg_data->offset, 0); clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL); if (!init.name || !clkctrl_clk) goto cleanup; diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h index b58278077226..9f312a219510 100644 --- a/drivers/clk/ti/clock.h +++ b/drivers/clk/ti/clock.h @@ -24,6 +24,7 @@ struct clk_omap_divider { u8 flags; s8 latch; const struct clk_div_table *table; + u32 context; }; #define to_clk_omap_divider(_hw) container_of(_hw, struct clk_omap_divider, hw) @@ -36,6 +37,7 @@ struct clk_omap_mux { u8 shift; s8 latch; u8 flags; + u8 saved_parent; }; #define to_clk_omap_mux(_hw) container_of(_hw, struct clk_omap_mux, hw) @@ -184,9 +186,16 @@ struct omap_clkctrl_data { extern const struct omap_clkctrl_data omap4_clkctrl_data[]; extern const struct omap_clkctrl_data omap5_clkctrl_data[]; extern const struct omap_clkctrl_data dra7_clkctrl_data[]; +extern const struct omap_clkctrl_data dra7_clkctrl_compat_data[]; +extern struct ti_dt_clk dra7xx_compat_clks[]; extern const struct omap_clkctrl_data am3_clkctrl_data[]; +extern const struct omap_clkctrl_data am3_clkctrl_compat_data[]; +extern struct ti_dt_clk am33xx_compat_clks[]; extern const struct omap_clkctrl_data am4_clkctrl_data[]; +extern const struct omap_clkctrl_data am4_clkctrl_compat_data[]; +extern struct ti_dt_clk am43xx_compat_clks[]; extern const struct 
omap_clkctrl_data am438x_clkctrl_data[]; +extern const struct omap_clkctrl_data am438x_clkctrl_compat_data[]; extern const struct omap_clkctrl_data dm814_clkctrl_data[]; extern const struct omap_clkctrl_data dm816_clkctrl_data[]; @@ -233,6 +242,8 @@ extern const struct clk_ops ti_clk_divider_ops; extern const struct clk_ops ti_clk_mux_ops; extern const struct clk_ops omap_gate_clk_ops; +extern struct ti_clk_features ti_clk_features; + void omap2_init_clk_clkdm(struct clk_hw *hw); int omap2_clkops_enable_clkdm(struct clk_hw *hw); void omap2_clkops_disable_clkdm(struct clk_hw *hw); diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c index 030e8b2c1050..6a89936ba03a 100644 --- a/drivers/clk/ti/composite.c +++ b/drivers/clk/ti/composite.c @@ -135,8 +135,8 @@ static void __init _register_composite(void *user, comp = _lookup_component(cclk->comp_nodes[i]); if (!comp) { - pr_debug("component %s not ready for %s, retry\n", - cclk->comp_nodes[i]->name, node->name); + pr_debug("component %s not ready for %pOFn, retry\n", + cclk->comp_nodes[i]->name, node); if (!ti_clk_retry_init(node, hw, _register_composite)) return; @@ -144,8 +144,8 @@ static void __init _register_composite(void *user, goto cleanup; } if (cclk->comp_clks[comp->type] != NULL) { - pr_err("duplicate component types for %s (%s)!\n", - node->name, component_clk_types[comp->type]); + pr_err("duplicate component types for %pOFn (%s)!\n", + node, component_clk_types[comp->type]); goto cleanup; } @@ -168,7 +168,7 @@ static void __init _register_composite(void *user, } if (!num_parents) { - pr_err("%s: no parents found for %s!\n", __func__, node->name); + pr_err("%s: no parents found for %pOFn!\n", __func__, node); goto cleanup; } @@ -212,7 +212,7 @@ static void __init of_ti_composite_clk_setup(struct device_node *node) num_clks = of_clk_get_parent_count(node); if (!num_clks) { - pr_err("composite clk %s must have component(s)\n", node->name); + pr_err("composite clk %pOFn must have component(s)\n", node); return; } @@ -248,7 +248,7 @@ int __init ti_clk_add_component(struct device_node *node, struct clk_hw *hw, num_parents = of_clk_get_parent_count(node); if (!num_parents) { - pr_err("component-clock %s must have parent(s)\n", node->name); + pr_err("component-clock %pOFn must have parent(s)\n", node); return -EINVAL; } diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c index ccfb4d9a152a..8d77090ad94a 100644 --- a/drivers/clk/ti/divider.c +++ b/drivers/clk/ti/divider.c @@ -268,10 +268,46 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, return 0; } +/** + * clk_divider_save_context - Save the divider value + * @hw: pointer struct clk_hw + * + * Save the divider value + */ +static int clk_divider_save_context(struct clk_hw *hw) +{ + struct clk_omap_divider *divider = to_clk_omap_divider(hw); + u32 val; + + val = ti_clk_ll_ops->clk_readl(&divider->reg) >> divider->shift; + divider->context = val & div_mask(divider); + + return 0; +} + +/** + * clk_divider_restore_context - restore the saved the divider value + * @hw: pointer struct clk_hw + * + * Restore the saved the divider value + */ +static void clk_divider_restore_context(struct clk_hw *hw) +{ + struct clk_omap_divider *divider = to_clk_omap_divider(hw); + u32 val; + + val = ti_clk_ll_ops->clk_readl(&divider->reg); + val &= ~(div_mask(divider) << divider->shift); + val |= divider->context << divider->shift; + ti_clk_ll_ops->clk_writel(val, &divider->reg); +} + const struct clk_ops ti_clk_divider_ops = { .recalc_rate = ti_clk_divider_recalc_rate,
.round_rate = ti_clk_divider_round_rate, .set_rate = ti_clk_divider_set_rate, + .save_context = clk_divider_save_context, + .restore_context = clk_divider_restore_context, }; static struct clk *_register_divider(struct device *dev, const char *name, @@ -492,7 +528,7 @@ __init ti_clk_get_div_table(struct device_node *node) } if (!valid_div) { - pr_err("no valid dividers for %s table\n", node->name); + pr_err("no valid dividers for %pOFn table\n", node); return ERR_PTR(-EINVAL); } @@ -530,7 +566,7 @@ static int _get_divider_width(struct device_node *node, min_div = 1; if (of_property_read_u32(node, "ti,max-div", &max_div)) { - pr_err("no max-div for %s!\n", node->name); + pr_err("no max-div for %pOFn!\n", node); return -EINVAL; } diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c index dc86d07d0921..92e28af7afba 100644 --- a/drivers/clk/ti/dpll.c +++ b/drivers/clk/ti/dpll.c @@ -39,6 +39,8 @@ static const struct clk_ops dpll_m4xen_ck_ops = { .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent, .determine_rate = &omap4_dpll_regm4xen_determine_rate, .get_parent = &omap2_init_dpll_parent, + .save_context = &omap3_core_dpll_save_context, + .restore_context = &omap3_core_dpll_restore_context, }; #else static const struct clk_ops dpll_m4xen_ck_ops = {}; @@ -62,6 +64,8 @@ static const struct clk_ops dpll_ck_ops = { .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent, .determine_rate = &omap3_noncore_dpll_determine_rate, .get_parent = &omap2_init_dpll_parent, + .save_context = &omap3_noncore_dpll_save_context, + .restore_context = &omap3_noncore_dpll_restore_context, }; static const struct clk_ops dpll_no_gate_ck_ops = { @@ -72,6 +76,8 @@ static const struct clk_ops dpll_no_gate_ck_ops = { .set_parent = &omap3_noncore_dpll_set_parent, .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent, .determine_rate = &omap3_noncore_dpll_determine_rate, + .save_context = &omap3_noncore_dpll_save_context, + .restore_context = &omap3_noncore_dpll_restore_context }; #else static const struct clk_ops dpll_core_ck_ops = {}; @@ -162,8 +168,8 @@ static void __init _register_dpll(void *user, clk = of_clk_get(node, 0); if (IS_ERR(clk)) { - pr_debug("clk-ref missing for %s, retry later\n", - node->name); + pr_debug("clk-ref missing for %pOFn, retry later\n", + node); if (!ti_clk_retry_init(node, hw, _register_dpll)) return; @@ -175,8 +181,8 @@ static void __init _register_dpll(void *user, clk = of_clk_get(node, 1); if (IS_ERR(clk)) { - pr_debug("clk-bypass missing for %s, retry later\n", - node->name); + pr_debug("clk-bypass missing for %pOFn, retry later\n", + node); if (!ti_clk_retry_init(node, hw, _register_dpll)) return; @@ -226,7 +232,7 @@ static void _register_dpll_x2(struct device_node *node, parent_name = of_clk_get_parent_name(node, 0); if (!parent_name) { - pr_err("%s must have parent\n", node->name); + pr_err("%pOFn must have parent\n", node); return; } @@ -305,7 +311,7 @@ static void __init of_ti_dpll_setup(struct device_node *node, init->num_parents = of_clk_get_parent_count(node); if (!init->num_parents) { - pr_err("%s must have parent(s)\n", node->name); + pr_err("%pOFn must have parent(s)\n", node); goto cleanup; } diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c index 4534de2ef455..44b6b6403753 100644 --- a/drivers/clk/ti/dpll3xxx.c +++ b/drivers/clk/ti/dpll3xxx.c @@ -782,6 +782,130 @@ unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw, return rate; } +/** + * omap3_core_dpll_save_context - Save the m and n values of the divider + * @hw: pointer 
struct clk_hw + * + * Before the dpll registers are lost save the last rounded rate m and n + * and the enable mask. + */ +int omap3_core_dpll_save_context(struct clk_hw *hw) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + struct dpll_data *dd; + u32 v; + + dd = clk->dpll_data; + + v = ti_clk_ll_ops->clk_readl(&dd->control_reg); + clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask); + + if (clk->context == DPLL_LOCKED) { + v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg); + dd->last_rounded_m = (v & dd->mult_mask) >> + __ffs(dd->mult_mask); + dd->last_rounded_n = ((v & dd->div1_mask) >> + __ffs(dd->div1_mask)) + 1; + } + + return 0; +} + +/** + * omap3_core_dpll_restore_context - restore the m and n values of the divider + * @hw: pointer struct clk_hw + * + * Restore the last rounded rate m and n + * and the enable mask. + */ +void omap3_core_dpll_restore_context(struct clk_hw *hw) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + const struct dpll_data *dd; + u32 v; + + dd = clk->dpll_data; + + if (clk->context == DPLL_LOCKED) { + _omap3_dpll_write_clken(clk, 0x4); + _omap3_wait_dpll_status(clk, 0); + + v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg); + v &= ~(dd->mult_mask | dd->div1_mask); + v |= dd->last_rounded_m << __ffs(dd->mult_mask); + v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask); + ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg); + + _omap3_dpll_write_clken(clk, DPLL_LOCKED); + _omap3_wait_dpll_status(clk, 1); + } else { + _omap3_dpll_write_clken(clk, clk->context); + } +} + +/** + * omap3_non_core_dpll_save_context - Save the m and n values of the divider + * @hw: pointer struct clk_hw + * + * Before the dpll registers are lost save the last rounded rate m and n + * and the enable mask. + */ +int omap3_noncore_dpll_save_context(struct clk_hw *hw) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + struct dpll_data *dd; + u32 v; + + dd = clk->dpll_data; + + v = ti_clk_ll_ops->clk_readl(&dd->control_reg); + clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask); + + if (clk->context == DPLL_LOCKED) { + v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg); + dd->last_rounded_m = (v & dd->mult_mask) >> + __ffs(dd->mult_mask); + dd->last_rounded_n = ((v & dd->div1_mask) >> + __ffs(dd->div1_mask)) + 1; + } + + return 0; +} + +/** + * omap3_core_dpll_restore_context - restore the m and n values of the divider + * @hw: pointer struct clk_hw + * + * Restore the last rounded rate m and n + * and the enable mask. 
+ */ +void omap3_noncore_dpll_restore_context(struct clk_hw *hw) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + const struct dpll_data *dd; + u32 ctrl, mult_div1; + + dd = clk->dpll_data; + + ctrl = ti_clk_ll_ops->clk_readl(&dd->control_reg); + mult_div1 = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg); + + if (clk->context == ((ctrl & dd->enable_mask) >> + __ffs(dd->enable_mask)) && + dd->last_rounded_m == ((mult_div1 & dd->mult_mask) >> + __ffs(dd->mult_mask)) && + dd->last_rounded_n == ((mult_div1 & dd->div1_mask) >> + __ffs(dd->div1_mask)) + 1) { + /* nothing to be done */ + return; + } + + if (clk->context == DPLL_LOCKED) + omap3_noncore_dpll_program(clk, 0); + else + _omap3_dpll_write_clken(clk, clk->context); +} + /* OMAP3/4 non-CORE DPLL clkops */ const struct clk_hw_omap_ops clkhwops_omap3_dpll = { .allow_idle = omap3_dpll_allow_idle, diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c index 071af44b1ba8..ed24f20f63c7 100644 --- a/drivers/clk/ti/fapll.c +++ b/drivers/clk/ti/fapll.c @@ -555,7 +555,7 @@ static void __init ti_fapll_setup(struct device_node *node) init->num_parents = of_clk_get_parent_count(node); if (init->num_parents != 2) { - pr_err("%s must have two parents\n", node->name); + pr_err("%pOFn must have two parents\n", node); goto free; } @@ -564,19 +564,19 @@ static void __init ti_fapll_setup(struct device_node *node) fd->clk_ref = of_clk_get(node, 0); if (IS_ERR(fd->clk_ref)) { - pr_err("%s could not get clk_ref\n", node->name); + pr_err("%pOFn could not get clk_ref\n", node); goto free; } fd->clk_bypass = of_clk_get(node, 1); if (IS_ERR(fd->clk_bypass)) { - pr_err("%s could not get clk_bypass\n", node->name); + pr_err("%pOFn could not get clk_bypass\n", node); goto free; } fd->base = of_iomap(node, 0); if (!fd->base) { - pr_err("%s could not get IO base\n", node->name); + pr_err("%pOFn could not get IO base\n", node); goto free; } diff --git a/drivers/clk/ti/fixed-factor.c b/drivers/clk/ti/fixed-factor.c index 0174a51a4ba6..7cbe896db071 100644 --- a/drivers/clk/ti/fixed-factor.c +++ b/drivers/clk/ti/fixed-factor.c @@ -42,12 +42,12 @@ static void __init of_ti_fixed_factor_clk_setup(struct device_node *node) u32 flags = 0; if (of_property_read_u32(node, "ti,clock-div", &div)) { - pr_err("%s must have a clock-div property\n", node->name); + pr_err("%pOFn must have a clock-div property\n", node); return; } if (of_property_read_u32(node, "ti,clock-mult", &mult)) { - pr_err("%s must have a clock-mult property\n", node->name); + pr_err("%pOFn must have a clock-mult property\n", node); return; } diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c index 935b2de5fb88..1c78fff5513c 100644 --- a/drivers/clk/ti/gate.c +++ b/drivers/clk/ti/gate.c @@ -33,6 +33,7 @@ static const struct clk_ops omap_gate_clkdm_clk_ops = { .init = &omap2_init_clk_clkdm, .enable = &omap2_clkops_enable_clkdm, .disable = &omap2_clkops_disable_clkdm, + .restore_context = clk_gate_restore_context, }; const struct clk_ops omap_gate_clk_ops = { @@ -40,6 +41,7 @@ const struct clk_ops omap_gate_clk_ops = { .enable = &omap2_dflt_clk_enable, .disable = &omap2_dflt_clk_disable, .is_enabled = &omap2_dflt_clk_is_enabled, + .restore_context = clk_gate_restore_context, }; static const struct clk_ops omap_gate_clk_hsdiv_restore_ops = { @@ -47,6 +49,7 @@ static const struct clk_ops omap_gate_clk_hsdiv_restore_ops = { .enable = &omap36xx_gate_clk_enable_with_hsdiv_restore, .disable = &omap2_dflt_clk_disable, .is_enabled = &omap2_dflt_clk_is_enabled, + .restore_context = clk_gate_restore_context, }; 
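The TI clock changes above (and the mux changes that follow) all wire up the new save_context/restore_context hooks in struct clk_ops so that clock state lost across an off-mode transition can be reinstated on the way back up. A minimal sketch of the pattern for a hypothetical mux clock; the foo_* names, register layout and mask are illustrative assumptions, not code from this series:

#include <linux/clk-provider.h>
#include <linux/io.h>

struct foo_mux {
	struct clk_hw hw;
	void __iomem *reg;
	u8 saved_parent;
};

static int foo_mux_save_context(struct clk_hw *hw)
{
	struct foo_mux *mux = container_of(hw, struct foo_mux, hw);

	/* Cache the currently selected parent before the domain loses state. */
	mux->saved_parent = readl(mux->reg) & 0x3;
	return 0;
}

static void foo_mux_restore_context(struct clk_hw *hw)
{
	struct foo_mux *mux = container_of(hw, struct foo_mux, hw);

	/* Write the cached selection back once power returns. */
	writel(mux->saved_parent, mux->reg);
}

static const struct clk_ops foo_mux_ops = {
	.save_context = foo_mux_save_context,
	.restore_context = foo_mux_restore_context,
};

The idea is that a platform's suspend path saves every registered clock before the domain powers off and restores it afterwards (via the core clk_save_context()/clk_restore_context() helpers this work builds on), so each clock type only has to describe which of its registers actually matter.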
/** @@ -179,7 +182,7 @@ static void __init _of_ti_gate_clk_setup(struct device_node *node, } if (of_clk_get_parent_count(node) != 1) { - pr_err("%s must have 1 parent\n", node->name); + pr_err("%pOFn must have 1 parent\n", node); return; } diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c index 41ae7021670e..87e00c2ee957 100644 --- a/drivers/clk/ti/interface.c +++ b/drivers/clk/ti/interface.c @@ -84,7 +84,7 @@ static void __init _of_ti_interface_clk_setup(struct device_node *node, parent_name = of_clk_get_parent_name(node, 0); if (!parent_name) { - pr_err("%s must have a parent\n", node->name); + pr_err("%pOFn must have a parent\n", node); return; } diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c index 69a4308a5a98..883bdde94d04 100644 --- a/drivers/clk/ti/mux.c +++ b/drivers/clk/ti/mux.c @@ -91,10 +91,39 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index) return 0; } +/** + * clk_mux_save_context - Save the parent selected in the mux + * @hw: pointer to struct clk_hw + * + * Save the parent mux value. + */ +static int clk_mux_save_context(struct clk_hw *hw) +{ + struct clk_omap_mux *mux = to_clk_omap_mux(hw); + + mux->saved_parent = ti_clk_mux_get_parent(hw); + return 0; +} + +/** + * clk_mux_restore_context - Restore the parent in the mux + * @hw: pointer to struct clk_hw + * + * Restore the saved parent mux value. + */ +static void clk_mux_restore_context(struct clk_hw *hw) +{ + struct clk_omap_mux *mux = to_clk_omap_mux(hw); + + ti_clk_mux_set_parent(hw, mux->saved_parent); +} + const struct clk_ops ti_clk_mux_ops = { .get_parent = ti_clk_mux_get_parent, .set_parent = ti_clk_mux_set_parent, .determine_rate = __clk_mux_determine_rate, + .save_context = clk_mux_save_context, + .restore_context = clk_mux_restore_context, }; static struct clk *_register_mux(struct device *dev, const char *name, @@ -186,7 +215,7 @@ static void of_mux_clk_setup(struct device_node *node) num_parents = of_clk_get_parent_count(node); if (num_parents < 2) { - pr_err("mux-clock %s must have parents\n", node->name); + pr_err("mux-clock %pOFn must have parents\n", node); return; } parent_names = kzalloc((sizeof(char *) * num_parents), GFP_KERNEL); @@ -278,7 +307,7 @@ static void __init of_ti_composite_mux_clk_setup(struct device_node *node) num_parents = of_clk_get_parent_count(node); if (num_parents < 2) { - pr_err("%s must have parents\n", node->name); + pr_err("%pOFn must have parents\n", node); goto cleanup; } diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c index 88a2cab37f62..d7b53ac8ad11 100644 --- a/drivers/clk/zynq/clkc.c +++ b/drivers/clk/zynq/clkc.c @@ -602,7 +602,7 @@ void __init zynq_clock_init(void) } if (of_address_to_resource(np, 0, &res)) { - pr_err("%s: failed to get resource\n", np->name); + pr_err("%pOFn: failed to get resource\n", np); goto np_err; } @@ -611,7 +611,7 @@ void __init zynq_clock_init(void) if (slcr->data) { zynq_clkc_base = (__force void __iomem *)slcr->data + res.start; } else { - pr_err("%s: Unable to get I/O memory\n", np->name); + pr_err("%pOFn: Unable to get I/O memory\n", np); of_node_put(slcr); goto np_err; } diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index a11f4ba98b05..55c77e44bb2d 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -620,4 +620,22 @@ config RISCV_TIMER is accessed via both the SBI and the rdcycle instruction. This is required for all RISC-V systems.
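A recurring change in the clock drivers above is replacing node->name with the %pOFn printk specifier, which prints a device_node's name (plain %pOF prints the full path) without dereferencing the node's fields directly. A one-line usage sketch, assuming a valid struct device_node *np:

/* Print the DT node name rather than poking at np->name. */
pr_err("%pOFn must have a parent\n", np);
/* Or print the full path for more context. */
pr_debug("setting up %pOF\n", np);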
+config CSKY_MP_TIMER + bool "SMP Timer for the C-SKY platform" if COMPILE_TEST + depends on CSKY + select TIMER_OF + help + Say yes here to enable the C-SKY SMP timer driver used for C-SKY + SMP systems. + csky,mptimer is not only used in SMP systems, it can also be used + in single core systems. It is not an MMIO register and it uses the + mtcr/mfcr instructions. + +config GX6605S_TIMER + bool "Gx6605s SOC system timer driver" if COMPILE_TEST + depends on CSKY + select CLKSRC_MMIO + select TIMER_OF + help + This option enables support for the gx6605s SOC's timer. + endmenu diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index e33b21d3f9d8..dd9138104568 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -79,3 +79,5 @@ obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o obj-$(CONFIG_X86_NUMACHIP) += numachip.o obj-$(CONFIG_ATCPIT100_TIMER) += timer-atcpit100.o obj-$(CONFIG_RISCV_TIMER) += riscv_timer.o +obj-$(CONFIG_CSKY_MP_TIMER) += timer-mp-csky.o +obj-$(CONFIG_GX6605S_TIMER) += timer-gx6605s.o diff --git a/drivers/clocksource/timer-gx6605s.c b/drivers/clocksource/timer-gx6605s.c new file mode 100644 index 000000000000..80d0939d040b --- /dev/null +++ b/drivers/clocksource/timer-gx6605s.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/sched_clock.h> + +#include "timer-of.h" + +#define CLKSRC_OFFSET 0x40 + +#define TIMER_STATUS 0x00 +#define TIMER_VALUE 0x04 +#define TIMER_CONTRL 0x10 +#define TIMER_CONFIG 0x20 +#define TIMER_DIV 0x24 +#define TIMER_INI 0x28 + +#define GX6605S_STATUS_CLR BIT(0) +#define GX6605S_CONTRL_RST BIT(0) +#define GX6605S_CONTRL_START BIT(1) +#define GX6605S_CONFIG_EN BIT(0) +#define GX6605S_CONFIG_IRQ_EN BIT(1) + +static irqreturn_t gx6605s_timer_interrupt(int irq, void *dev) +{ + struct clock_event_device *ce = dev; + void __iomem *base = timer_of_base(to_timer_of(ce)); + + writel_relaxed(GX6605S_STATUS_CLR, base + TIMER_STATUS); + + ce->event_handler(ce); + + return IRQ_HANDLED; +} + +static int gx6605s_timer_set_oneshot(struct clock_event_device *ce) +{ + void __iomem *base = timer_of_base(to_timer_of(ce)); + + /* reset and stop counter */ + writel_relaxed(GX6605S_CONTRL_RST, base + TIMER_CONTRL); + + /* enable with irq and start */ + writel_relaxed(GX6605S_CONFIG_EN | GX6605S_CONFIG_IRQ_EN, + base + TIMER_CONFIG); + + return 0; +} + +static int gx6605s_timer_set_next_event(unsigned long delta, + struct clock_event_device *ce) +{ + void __iomem *base = timer_of_base(to_timer_of(ce)); + + /* use reset to pause timer */ + writel_relaxed(GX6605S_CONTRL_RST, base + TIMER_CONTRL); + + /* config next timeout value */ + writel_relaxed(ULONG_MAX - delta, base + TIMER_INI); + writel_relaxed(GX6605S_CONTRL_START, base + TIMER_CONTRL); + + return 0; + } + +static int gx6605s_timer_shutdown(struct clock_event_device *ce) +{ + void __iomem *base = timer_of_base(to_timer_of(ce)); + + writel_relaxed(0, base + TIMER_CONTRL); + writel_relaxed(0, base + TIMER_CONFIG); + + return 0; +} + +static struct timer_of to = { + .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK, + .clkevt = { + .rating = 300, + .features = CLOCK_EVT_FEAT_DYNIRQ | + CLOCK_EVT_FEAT_ONESHOT, + .set_state_shutdown = gx6605s_timer_shutdown, + .set_state_oneshot = gx6605s_timer_set_oneshot, + .set_next_event = gx6605s_timer_set_next_event, + .cpumask = cpu_possible_mask, + }, + .of_irq = { + .handler = gx6605s_timer_interrupt, + .flags =
IRQF_TIMER | IRQF_IRQPOLL, + }, +}; + +static u64 notrace gx6605s_sched_clock_read(void) +{ + void __iomem *base; + + base = timer_of_base(&to) + CLKSRC_OFFSET; + + return (u64)readl_relaxed(base + TIMER_VALUE); +} + +static void gx6605s_clkevt_init(void __iomem *base) +{ + writel_relaxed(0, base + TIMER_DIV); + writel_relaxed(0, base + TIMER_CONFIG); + + clockevents_config_and_register(&to.clkevt, timer_of_rate(&to), 2, + ULONG_MAX); +} + +static int gx6605s_clksrc_init(void __iomem *base) +{ + writel_relaxed(0, base + TIMER_DIV); + writel_relaxed(0, base + TIMER_INI); + + writel_relaxed(GX6605S_CONTRL_RST, base + TIMER_CONTRL); + + writel_relaxed(GX6605S_CONFIG_EN, base + TIMER_CONFIG); + + writel_relaxed(GX6605S_CONTRL_START, base + TIMER_CONTRL); + + sched_clock_register(gx6605s_sched_clock_read, 32, timer_of_rate(&to)); + + return clocksource_mmio_init(base + TIMER_VALUE, "gx6605s", + timer_of_rate(&to), 200, 32, clocksource_mmio_readl_up); +} + +static int __init gx6605s_timer_init(struct device_node *np) +{ + int ret; + + /* + * The timer driver is for the NationalChip gx6605s SOC, which has two + * identical timers. We use one for the clockevent and the other for + * the clocksource. + * + * The timer is accessed through MMIO, so an MMIO address is needed + * in the DT. + * + * It provides a 32-bit count-up timer and the interrupt is raised on + * counter overflow, + * so set_next_event() loads ULONG_MAX - delta into the TIMER_INI reg. + * + * The counter at offset 0x0 is the clock event. + * The counter at offset 0x40 is the clock source. + * They are identical in hardware, just used differently by the driver. + */ + ret = timer_of_init(np, &to); + if (ret) + return ret; + + gx6605s_clkevt_init(timer_of_base(&to)); + + return gx6605s_clksrc_init(timer_of_base(&to) + CLKSRC_OFFSET); +} +TIMER_OF_DECLARE(csky_gx6605s_timer, "csky,gx6605s-timer", gx6605s_timer_init); diff --git a/drivers/clocksource/timer-mp-csky.c b/drivers/clocksource/timer-mp-csky.c new file mode 100644 index 000000000000..a8acc431a774 --- /dev/null +++ b/drivers/clocksource/timer-mp-csky.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
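Before the C-SKY SMP timer source continues, a quick worked example of the ULONG_MAX - delta programming used by the gx6605s driver above: the hardware is a 32-bit up-counter whose interrupt fires on overflow, so to get an event delta ticks from now the driver loads the counter with its maximum value minus delta. With the timer clocked at, say, 1 MHz and delta = 1000, TIMER_INI is written with 0xFFFFFFFF - 1000 = 0xFFFFFC17 and the counter wraps, raising the interrupt, roughly 1000 ticks (about 1 ms) later. The second, free-running instance at CLKSRC_OFFSET (0x40) is the one registered as clocksource and sched_clock, which is why gx6605s_sched_clock_read() adds that offset before reading TIMER_VALUE.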
+ +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/sched_clock.h> +#include <linux/cpu.h> +#include <linux/of_irq.h> +#include <asm/reg_ops.h> + +#include "timer-of.h" + +#define PTIM_CCVR "cr<3, 14>" +#define PTIM_CTLR "cr<0, 14>" +#define PTIM_LVR "cr<6, 14>" +#define PTIM_TSR "cr<1, 14>" + +static int csky_mptimer_irq; + +static int csky_mptimer_set_next_event(unsigned long delta, + struct clock_event_device *ce) +{ + mtcr(PTIM_LVR, delta); + + return 0; +} + +static int csky_mptimer_shutdown(struct clock_event_device *ce) +{ + mtcr(PTIM_CTLR, 0); + + return 0; +} + +static int csky_mptimer_oneshot(struct clock_event_device *ce) +{ + mtcr(PTIM_CTLR, 1); + + return 0; +} + +static int csky_mptimer_oneshot_stopped(struct clock_event_device *ce) +{ + mtcr(PTIM_CTLR, 0); + + return 0; +} + +static DEFINE_PER_CPU(struct timer_of, csky_to) = { + .flags = TIMER_OF_CLOCK, + .clkevt = { + .rating = 300, + .features = CLOCK_EVT_FEAT_PERCPU | + CLOCK_EVT_FEAT_ONESHOT, + .set_state_shutdown = csky_mptimer_shutdown, + .set_state_oneshot = csky_mptimer_oneshot, + .set_state_oneshot_stopped = csky_mptimer_oneshot_stopped, + .set_next_event = csky_mptimer_set_next_event, + }, +}; + +static irqreturn_t csky_timer_interrupt(int irq, void *dev) +{ + struct timer_of *to = this_cpu_ptr(&csky_to); + + mtcr(PTIM_TSR, 0); + + to->clkevt.event_handler(&to->clkevt); + + return IRQ_HANDLED; +} + +/* + * per-cpu clock event + */ +static int csky_mptimer_starting_cpu(unsigned int cpu) +{ + struct timer_of *to = per_cpu_ptr(&csky_to, cpu); + + to->clkevt.cpumask = cpumask_of(cpu); + + clockevents_config_and_register(&to->clkevt, timer_of_rate(to), + 2, ULONG_MAX); + + enable_percpu_irq(csky_mptimer_irq, 0); + + return 0; +} + +static int csky_mptimer_dying_cpu(unsigned int cpu) +{ + disable_percpu_irq(csky_mptimer_irq); + + return 0; +} + +/* + * clock source + */ +static u64 sched_clock_read(void) +{ + return (u64)mfcr(PTIM_CCVR); +} + +static u64 clksrc_read(struct clocksource *c) +{ + return (u64)mfcr(PTIM_CCVR); +} + +struct clocksource csky_clocksource = { + .name = "csky", + .rating = 400, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .read = clksrc_read, +}; + +static int __init csky_mptimer_init(struct device_node *np) +{ + int ret, cpu, cpu_rollback; + struct timer_of *to = NULL; + + /* + * csky,mptimer is designed for C-SKY SMP multi-processors; every + * core has its own private irq and registers for the clockevent + * and clocksource. + * + * The registers are accessed with the mfcr/mtcr CPU instructions + * instead of MMIO, so no MMIO address is needed in the DT, but the + * clock and irq number still have to be given. + * + * The mptimer uses a private irq whose number is the same on every + * core, so it is set up with request_percpu_irq() here rather than + * through timer_of_init().
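+ * + * Per-cpu bring-up is handled through cpuhp_setup_state(): + * csky_mptimer_starting_cpu() registers the clockevent and enables the + * per-cpu irq on each core, and csky_mptimer_dying_cpu() disables it + * again when a core goes offline.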
+ */ + csky_mptimer_irq = irq_of_parse_and_map(np, 0); + if (csky_mptimer_irq <= 0) + return -EINVAL; + + ret = request_percpu_irq(csky_mptimer_irq, csky_timer_interrupt, + "csky_mp_timer", &csky_to); + if (ret) + return -EINVAL; + + for_each_possible_cpu(cpu) { + to = per_cpu_ptr(&csky_to, cpu); + ret = timer_of_init(np, to); + if (ret) + goto rollback; + } + + clocksource_register_hz(&csky_clocksource, timer_of_rate(to)); + sched_clock_register(sched_clock_read, 32, timer_of_rate(to)); + + ret = cpuhp_setup_state(CPUHP_AP_CSKY_TIMER_STARTING, + "clockevents/csky/timer:starting", + csky_mptimer_starting_cpu, + csky_mptimer_dying_cpu); + if (ret) + return -EINVAL; + + return 0; + +rollback: + for_each_possible_cpu(cpu_rollback) { + if (cpu_rollback == cpu) + break; + + to = per_cpu_ptr(&csky_to, cpu_rollback); + timer_of_cleanup(to); + } + return -EINVAL; +} +TIMER_OF_DECLARE(csky_mptimer, "csky,mptimer", csky_mptimer_init); diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index df9467eef32a..41c9ccdd20d6 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -234,6 +234,7 @@ config EDAC_SKX depends on PCI && X86_64 && X86_MCE_INTEL && PCI_MMCONFIG depends on ACPI_NFIT || !ACPI_NFIT # if ACPI_NFIT=m, EDAC_SKX can't be y select DMI + select ACPI_ADXL if ACPI help Support for error detection and correction the Intel Skylake server Integrated Memory Controllers. If your diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c index dd209e0dd9ab..a99ea61dad32 100644 --- a/drivers/edac/skx_edac.c +++ b/drivers/edac/skx_edac.c @@ -26,6 +26,7 @@ #include <linux/bitmap.h> #include <linux/math64.h> #include <linux/mod_devicetable.h> +#include <linux/adxl.h> #include <acpi/nfit.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> @@ -35,6 +36,7 @@ #include "edac_module.h" #define EDAC_MOD_STR "skx_edac" +#define MSG_SIZE 1024 /* * Debug macros @@ -54,6 +56,29 @@ static LIST_HEAD(skx_edac_list); static u64 skx_tolm, skx_tohm; +static char *skx_msg; +static unsigned int nvdimm_count; + +enum { + INDEX_SOCKET, + INDEX_MEMCTRL, + INDEX_CHANNEL, + INDEX_DIMM, + INDEX_MAX +}; + +static const char * const component_names[] = { + [INDEX_SOCKET] = "ProcessorSocketId", + [INDEX_MEMCTRL] = "MemoryControllerId", + [INDEX_CHANNEL] = "ChannelId", + [INDEX_DIMM] = "DimmSlotId", +}; + +static int component_indices[ARRAY_SIZE(component_names)]; +static int adxl_component_count; +static const char * const *adxl_component_names; +static u64 *adxl_values; +static char *adxl_msg; #define NUM_IMC 2 /* memory controllers per socket */ #define NUM_CHANNELS 3 /* channels per memory controller */ @@ -393,6 +418,8 @@ static int get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc, u16 flags; u64 size = 0; + nvdimm_count++; + dev_handle = ACPI_NFIT_BUILD_DEVICE_HANDLE(dimmno, chan, imc->lmc, imc->src_id, 0); @@ -941,12 +968,46 @@ static void teardown_skx_debug(void) } #endif /*CONFIG_EDAC_DEBUG*/ +static bool skx_adxl_decode(struct decoded_addr *res) + +{ + int i, len = 0; + + if (res->addr >= skx_tohm || (res->addr >= skx_tolm && + res->addr < BIT_ULL(32))) { + edac_dbg(0, "Address 0x%llx out of range\n", res->addr); + return false; + } + + if (adxl_decode(res->addr, adxl_values)) { + edac_dbg(0, "Failed to decode 0x%llx\n", res->addr); + return false; + } + + res->socket = (int)adxl_values[component_indices[INDEX_SOCKET]]; + res->imc = (int)adxl_values[component_indices[INDEX_MEMCTRL]]; + res->channel = (int)adxl_values[component_indices[INDEX_CHANNEL]]; + res->dimm = 
(int)adxl_values[component_indices[INDEX_DIMM]]; + + for (i = 0; i < adxl_component_count; i++) { + if (adxl_values[i] == ~0x0ull) + continue; + + len += snprintf(adxl_msg + len, MSG_SIZE - len, " %s:0x%llx", + adxl_component_names[i], adxl_values[i]); + if (MSG_SIZE - len <= 0) + break; + } + + return true; +} + static void skx_mce_output_error(struct mem_ctl_info *mci, const struct mce *m, struct decoded_addr *res) { enum hw_event_mc_err_type tp_event; - char *type, *optype, msg[256]; + char *type, *optype; bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); bool overflow = GET_BITFIELD(m->status, 62, 62); bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); @@ -1007,22 +1068,47 @@ static void skx_mce_output_error(struct mem_ctl_info *mci, break; } } + if (adxl_component_count) { + snprintf(skx_msg, MSG_SIZE, "%s%s err_code:%04x:%04x %s", + overflow ? " OVERFLOW" : "", + (uncorrected_error && recoverable) ? " recoverable" : "", + mscod, errcode, adxl_msg); + } else { + snprintf(skx_msg, MSG_SIZE, + "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x", + overflow ? " OVERFLOW" : "", + (uncorrected_error && recoverable) ? " recoverable" : "", + mscod, errcode, + res->socket, res->imc, res->rank, + res->bank_group, res->bank_address, res->row, res->column); + } - snprintf(msg, sizeof(msg), - "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x", - overflow ? " OVERFLOW" : "", - (uncorrected_error && recoverable) ? " recoverable" : "", - mscod, errcode, - res->socket, res->imc, res->rank, - res->bank_group, res->bank_address, res->row, res->column); - - edac_dbg(0, "%s\n", msg); + edac_dbg(0, "%s\n", skx_msg); /* Call the helper to output message */ edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, res->channel, res->dimm, -1, - optype, msg); + optype, skx_msg); +} + +static struct mem_ctl_info *get_mci(int src_id, int lmc) +{ + struct skx_dev *d; + + if (lmc > NUM_IMC - 1) { + skx_printk(KERN_ERR, "Bad lmc %d\n", lmc); + return NULL; + } + + list_for_each_entry(d, &skx_edac_list, list) { + if (d->imc[0].src_id == src_id) + return d->imc[lmc].mci; + } + + skx_printk(KERN_ERR, "No mci for src_id %d lmc %d\n", src_id, lmc); + + return NULL; } static int skx_mce_check_error(struct notifier_block *nb, unsigned long val, @@ -1040,10 +1126,23 @@ static int skx_mce_check_error(struct notifier_block *nb, unsigned long val, if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV)) return NOTIFY_DONE; + memset(&res, 0, sizeof(res)); res.addr = mce->addr; - if (!skx_decode(&res)) + + if (adxl_component_count) { + if (!skx_adxl_decode(&res)) + return NOTIFY_DONE; + + mci = get_mci(res.socket, res.imc); + } else { + if (!skx_decode(&res)) + return NOTIFY_DONE; + + mci = res.dev->imc[res.imc].mci; + } + + if (!mci) return NOTIFY_DONE; - mci = res.dev->imc[res.imc].mci; if (mce->mcgstatus & MCG_STATUS_MCIP) type = "Exception"; @@ -1094,6 +1193,62 @@ static void skx_remove(void) } } +static void __init skx_adxl_get(void) +{ + const char * const *names; + int i, j; + + names = adxl_get_component_names(); + if (!names) { + skx_printk(KERN_NOTICE, "No firmware support for address translation."); + skx_printk(KERN_CONT, " Only decoding DDR4 address!\n"); + return; + } + + for (i = 0; i < INDEX_MAX; i++) { + for (j = 0; names[j]; j++) { + if (!strcmp(component_names[i], names[j])) { + component_indices[i] = j; + break; + } + } + + if (!names[j]) + goto err; + } + + adxl_component_names = names; + while 
(*names++) + adxl_component_count++; + + adxl_values = kcalloc(adxl_component_count, sizeof(*adxl_values), + GFP_KERNEL); + if (!adxl_values) { + adxl_component_count = 0; + return; + } + + adxl_msg = kzalloc(MSG_SIZE, GFP_KERNEL); + if (!adxl_msg) { + adxl_component_count = 0; + kfree(adxl_values); + } + + return; +err: + skx_printk(KERN_ERR, "'%s' is not matched from DSM parameters: ", + component_names[i]); + for (j = 0; names[j]; j++) + skx_printk(KERN_CONT, "%s ", names[j]); + skx_printk(KERN_CONT, "\n"); +} + +static void __exit skx_adxl_put(void) +{ + kfree(adxl_values); + kfree(adxl_msg); +} + /* * skx_init: * make sure we are running on the correct cpu model @@ -1158,6 +1313,15 @@ static int __init skx_init(void) } } + skx_msg = kzalloc(MSG_SIZE, GFP_KERNEL); + if (!skx_msg) { + rc = -ENOMEM; + goto fail; + } + + if (nvdimm_count) + skx_adxl_get(); + /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); @@ -1176,6 +1340,9 @@ static void __exit skx_exit(void) edac_dbg(2, "\n"); mce_unregister_decode_chain(&skx_mce_dec); skx_remove(); + if (nvdimm_count) + skx_adxl_put(); + kfree(skx_msg); teardown_skx_debug(); } diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 7670e8dda829..7273e5082b41 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -145,34 +145,6 @@ config EFI_PCDP See DIG64_HCDPv20_042804.pdf available from <http://www.dig64.org/specifications/> -config DELL_RBU - tristate "BIOS update support for DELL systems via sysfs" - depends on X86 - select FW_LOADER - select FW_LOADER_USER_HELPER - help - Say m if you want to have the option of updating the BIOS for your - DELL system. Note you need a Dell OpenManage or Dell Update package (DUP) - supporting application to communicate with the BIOS regarding the new - image for the image update to take effect. - See <file:Documentation/dell_rbu.txt> for more details on the driver. - -config DCDBAS - tristate "Dell Systems Management Base Driver" - depends on X86 - help - The Dell Systems Management Base Driver provides a sysfs interface - for systems management software to perform System Management - Interrupts (SMIs) and Host Control Actions (system power cycle or - power off after OS shutdown) on certain Dell systems. - - See <file:Documentation/dcdbas.txt> for more details on the driver - and the Dell systems on which Dell systems management software makes - use of this driver. - - Say Y or M here to enable the driver for use by Dell systems - management software such as Dell OpenManage. 
- config DMIID bool "Export DMI identification via sysfs to userspace" depends on DMI diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 13660a951437..3158dffd9914 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -11,8 +11,6 @@ obj-$(CONFIG_DMI) += dmi_scan.o obj-$(CONFIG_DMI_SYSFS) += dmi-sysfs.o obj-$(CONFIG_EDD) += edd.o obj-$(CONFIG_EFI_PCDP) += pcdp.o -obj-$(CONFIG_DELL_RBU) += dell_rbu.o -obj-$(CONFIG_DCDBAS) += dcdbas.o obj-$(CONFIG_DMIID) += dmi-id.o obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 3e626fd9bd4e..8061667a6765 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -229,14 +229,6 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor, return 0; } -static inline bool is_compat(void) -{ - if (IS_ENABLED(CONFIG_COMPAT) && in_compat_syscall()) - return true; - - return false; -} - static void copy_out_compat(struct efi_variable *dst, struct compat_efi_variable *src) { @@ -263,7 +255,7 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) u8 *data; int err; - if (is_compat()) { + if (in_compat_syscall()) { struct compat_efi_variable *compat; if (count != sizeof(*compat)) @@ -324,7 +316,7 @@ efivar_show_raw(struct efivar_entry *entry, char *buf) &entry->var.DataSize, entry->var.Data)) return -EIO; - if (is_compat()) { + if (in_compat_syscall()) { compat = (struct compat_efi_variable *)buf; size = sizeof(*compat); @@ -418,7 +410,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, struct compat_efi_variable *compat = (struct compat_efi_variable *)buf; struct efi_variable *new_var = (struct efi_variable *)buf; struct efivar_entry *new_entry; - bool need_compat = is_compat(); + bool need_compat = in_compat_syscall(); efi_char16_t *name; unsigned long size; u32 attributes; @@ -495,7 +487,7 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, if (!capable(CAP_SYS_ADMIN)) return -EACCES; - if (is_compat()) { + if (in_compat_syscall()) { if (count != sizeof(*compat)) return -EINVAL; diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c index ae861342626e..d92f5b87c251 100644 --- a/drivers/fsi/fsi-sbefifo.c +++ b/drivers/fsi/fsi-sbefifo.c @@ -638,7 +638,7 @@ static void sbefifo_collect_async_ffdc(struct sbefifo *sbefifo) } ffdc_iov.iov_base = ffdc; ffdc_iov.iov_len = SBEFIFO_MAX_FFDC_SIZE; - iov_iter_kvec(&ffdc_iter, WRITE | ITER_KVEC, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE); + iov_iter_kvec(&ffdc_iter, WRITE, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE); cmd[0] = cpu_to_be32(2); cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_SBE_FFDC); rc = sbefifo_do_command(sbefifo, cmd, 2, &ffdc_iter); @@ -735,7 +735,7 @@ int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len, rbytes = (*resp_len) * sizeof(__be32); resp_iov.iov_base = response; resp_iov.iov_len = rbytes; - iov_iter_kvec(&resp_iter, WRITE | ITER_KVEC, &resp_iov, 1, rbytes); + iov_iter_kvec(&resp_iter, WRITE, &resp_iov, 1, rbytes); /* Perform the command */ mutex_lock(&sbefifo->lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 297a5490ad8c..0a4fba196b84 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -135,7 +135,8 @@ static int acp_poweroff(struct generic_pm_domain *genpd) * 2. power off the acp tiles * 3. 
check and enter ulv state */ - if (adev->powerplay.pp_funcs->set_powergating_by_smu) + if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->set_powergating_by_smu) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true); } return 0; @@ -517,7 +518,8 @@ static int acp_set_powergating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = state == AMD_PG_STATE_GATE ? true : false; - if (adev->powerplay.pp_funcs->set_powergating_by_smu) + if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->set_powergating_by_smu) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 1e4dd09a5072..30bc345d6fdf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1493,8 +1493,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) } adev->powerplay.pp_feature = amdgpu_pp_feature_mask; - if (amdgpu_sriov_vf(adev)) - adev->powerplay.pp_feature &= ~PP_GFXOFF_MASK; for (i = 0; i < adev->num_ip_blocks; i++) { if ((amdgpu_ip_block_mask & (1 << i)) == 0) { @@ -1600,7 +1598,7 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev) } } - if (adev->powerplay.pp_funcs->load_firmware) { + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) { r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle); if (r) { pr_err("firmware loading failed\n"); @@ -3341,7 +3339,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, kthread_park(ring->sched.thread); - if (job && job->base.sched == &ring->sched) + if (job && job->base.sched != &ring->sched) continue; drm_sched_hw_job_reset(&ring->sched, job ? 
&job->base : NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 28781414d71c..943dbf3c5da1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -114,8 +114,8 @@ uint amdgpu_pg_mask = 0xffffffff; uint amdgpu_sdma_phase_quantum = 32; char *amdgpu_disable_cu = NULL; char *amdgpu_virtual_display = NULL; -/* OverDrive(bit 14) disabled by default*/ -uint amdgpu_pp_feature_mask = 0xffffbfff; +/* OverDrive(bit 14),gfxoff(bit 15),stutter mode(bit 17) disabled by default*/ +uint amdgpu_pp_feature_mask = 0xfffd3fff; int amdgpu_ngg = 0; int amdgpu_prim_buf_per_se = 0; int amdgpu_pos_buf_per_se = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 790fd5408ddf..1a656b8657f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -392,7 +392,7 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) if (!(adev->powerplay.pp_feature & PP_GFXOFF_MASK)) return; - if (!adev->powerplay.pp_funcs->set_powergating_by_smu) + if (!adev->powerplay.pp_funcs || !adev->powerplay.pp_funcs->set_powergating_by_smu) return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 94055a485e01..59cc678de8c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -704,7 +704,10 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, return ret; if (adev->powerplay.pp_funcs->force_clock_level) - amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); + ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); + + if (ret) + return -EINVAL; return count; } @@ -737,7 +740,10 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, return ret; if (adev->powerplay.pp_funcs->force_clock_level) - amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); + ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); + + if (ret) + return -EINVAL; return count; } @@ -770,7 +776,10 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, return ret; if (adev->powerplay.pp_funcs->force_clock_level) - amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); + ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); + + if (ret) + return -EINVAL; return count; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 6904d794d60a..352b30409060 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -542,7 +542,8 @@ static void amdgpu_vm_pt_next_leaf(struct amdgpu_device *adev, struct amdgpu_vm_pt_cursor *cursor) { amdgpu_vm_pt_next(adev, cursor); - while (amdgpu_vm_pt_descendant(adev, cursor)); + if (cursor->pfn != ~0ll) + while (amdgpu_vm_pt_descendant(adev, cursor)); } /** @@ -3234,8 +3235,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) } rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va.rb_root, rb) { + /* Don't remove the mapping here, we don't want to trigger a + * rebalance and the tree is about to be destroyed anyway. 
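+ * rbtree_postorder_for_each_entry_safe() fetches the next node before + * the loop body runs, so freeing the current entry without erasing it + * from the tree is safe here.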
+ */ list_del(&mapping->list); - amdgpu_vm_it_remove(mapping, &vm->va); kfree(mapping); } list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 3d0f277a6523..617b0c8908a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4815,8 +4815,10 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev) if (r) goto done; - /* Test KCQs */ - for (i = 0; i < adev->gfx.num_compute_rings; i++) { + /* Test KCQs - reversing the order of rings seems to fix ring test failure + * after GPU reset + */ + for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) { ring = &adev->gfx.compute_ring[i]; ring->ready = true; r = amdgpu_ring_test_ring(ring); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 14649f8475f3..fd23ba1226a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -280,7 +280,7 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, return; if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) { - if (adev->powerplay.pp_funcs->set_powergating_by_smu) + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true); } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 04fa3d972636..7a8c9172d30a 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1366,7 +1366,8 @@ static int sdma_v4_0_hw_init(void *handle) { int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs->set_powergating_by_smu) + if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->set_powergating_by_smu) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false); sdma_v4_0_init_golden_registers(adev); @@ -1386,7 +1387,8 @@ static int sdma_v4_0_hw_fini(void *handle) sdma_v4_0_ctx_switch_enable(adev, false); sdma_v4_0_enable(adev, false); - if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs->set_powergating_by_smu) + if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs + && adev->powerplay.pp_funcs->set_powergating_by_smu) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true); return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e224f23e2215..b0df6dc9a775 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1524,6 +1524,13 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) { struct amdgpu_display_manager *dm = bl_get_data(bd); + /* + * PWM interprets 0 as 100% rather than 0% because of a HW + * limitation for level 0. So limit the minimum brightness + * level to 1.
+ */ + if (bd->props.brightness < 1) + return 1; if (dc_link_set_backlight_level(dm->backlight_link, bd->props.brightness, 0, 0)) return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 0fab64a2a915..12001a006b2d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -101,7 +101,7 @@ bool dm_pp_apply_display_requirements( adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1; } - if (adev->powerplay.pp_funcs->display_configuration_change) + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_configuration_change) adev->powerplay.pp_funcs->display_configuration_change( adev->powerplay.pp_handle, &adev->pm.pm_display_cfg); @@ -304,7 +304,7 @@ bool dm_pp_get_clock_levels_by_type( struct amd_pp_simple_clock_info validation_clks = { 0 }; uint32_t i; - if (adev->powerplay.pp_funcs->get_clock_by_type) { + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) { if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle, dc_to_pp_clock_type(clk_type), &pp_clks)) { /* Error in pplib. Provide default values. */ @@ -315,7 +315,7 @@ bool dm_pp_get_clock_levels_by_type( pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type); - if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) { + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_display_mode_validation_clocks) { if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks( pp_handle, &validation_clks)) { /* Error in pplib. Provide default values. */ @@ -398,6 +398,9 @@ bool dm_pp_get_clock_levels_by_type_with_voltage( struct pp_clock_levels_with_voltage pp_clk_info = {0}; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + if (!pp_funcs || !pp_funcs->get_clock_by_type_with_voltage) + return false; + if (pp_funcs->get_clock_by_type_with_voltage(pp_handle, dc_to_pp_clock_type(clk_type), &pp_clk_info)) @@ -438,7 +441,7 @@ bool dm_pp_apply_clock_for_voltage_request( if (!pp_clock_request.clock_type) return false; - if (adev->powerplay.pp_funcs->display_clock_voltage_request) + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_clock_voltage_request) ret = adev->powerplay.pp_funcs->display_clock_voltage_request( adev->powerplay.pp_handle, &pp_clock_request); @@ -455,7 +458,7 @@ bool dm_pp_get_static_clocks( struct amd_pp_clock_info pp_clk_info = {0}; int ret = 0; - if (adev->powerplay.pp_funcs->get_current_clocks) + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_current_clocks) ret = adev->powerplay.pp_funcs->get_current_clocks( adev->powerplay.pp_handle, &pp_clk_info); @@ -505,6 +508,9 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp, wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets; wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets; + if (!pp_funcs || !pp_funcs->set_watermarks_for_clocks_ranges) + return; + for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) { if (ranges->reader_wm_sets[i].wm_inst > 3) wm_dce_clocks[i].wm_set_id = WM_SET_A; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index de190935f0a4..e3624ca24574 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -568,7 +568,7 @@ static struct input_pixel_processor *dce110_ipp_create( static const struct 
encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 594000, + .max_hdmi_pixel_clock = 300000, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true }; diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h index a407892905af..c0d9f332baed 100644 --- a/drivers/gpu/drm/amd/display/dc/os_types.h +++ b/drivers/gpu/drm/amd/display/dc/os_types.h @@ -40,8 +40,6 @@ #define LITTLEENDIAN_CPU #endif -#undef READ -#undef WRITE #undef FRAME_SIZE #define dm_output_to_console(fmt, ...) DRM_DEBUG_KMS(fmt, ##__VA_ARGS__) diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index e8964cae6b93..d6aa1d414320 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -723,11 +723,14 @@ static int pp_dpm_force_clock_level(void *handle, pr_info("%s was not implemented.\n", __func__); return 0; } + + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { + pr_info("force clock level is for dpm manual mode only.\n"); + return -EINVAL; + } + mutex_lock(&hwmgr->smu_lock); - if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) - ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask); - else - ret = -EINVAL; + ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask); mutex_unlock(&hwmgr->smu_lock); return ret; } @@ -963,6 +966,7 @@ static int pp_dpm_switch_power_profile(void *handle, static int pp_set_power_limit(void *handle, uint32_t limit) { struct pp_hwmgr *hwmgr = handle; + uint32_t max_power_limit; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; @@ -975,7 +979,13 @@ static int pp_set_power_limit(void *handle, uint32_t limit) if (limit == 0) limit = hwmgr->default_power_limit; - if (limit > hwmgr->default_power_limit) + max_power_limit = hwmgr->default_power_limit; + if (hwmgr->od_enabled) { + max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit); + max_power_limit /= 100; + } + + if (limit > max_power_limit) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -994,8 +1004,13 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit) mutex_lock(&hwmgr->smu_lock); - if (default_limit) + if (default_limit) { *limit = hwmgr->default_power_limit; + if (hwmgr->od_enabled) { + *limit *= (100 + hwmgr->platform_descriptor.TDPODLimit); + *limit /= 100; + } + } else *limit = hwmgr->power_limit; @@ -1303,12 +1318,12 @@ static int pp_enable_mgpu_fan_boost(void *handle) { struct pp_hwmgr *hwmgr = handle; - if (!hwmgr || !hwmgr->pm_en) + if (!hwmgr) return -EINVAL; - if (hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL) { + if (!hwmgr->pm_en || + hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL) return 0; - } mutex_lock(&hwmgr->smu_lock); hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 6c99cbf51c08..ed35ec0341e6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3588,9 +3588,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons break; } - if (i >= sclk_table->count) + if (i >= sclk_table->count) { data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; - else { + sclk_table->dpm_levels[i-1].value = sclk; + } else { /* TODO: Check SCLK in DAL's minimum clocks * in case DeepSleep divider update is required. 
*/ @@ -3605,9 +3606,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons break; } - if (i >= mclk_table->count) + if (i >= mclk_table->count) { data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; - + mclk_table->dpm_levels[i-1].value = mclk; + } if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c index 4714b5b59825..99a33c33a32c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c @@ -718,7 +718,7 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table, table->WatermarkRow[1][i].MaxClock = cpu_to_le16((uint16_t) (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) / - 100); + 1000); table->WatermarkRow[1][i].MinUclk = cpu_to_le16((uint16_t) (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) / diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 419a1d77d661..8c4db86bb4b7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -1333,7 +1333,6 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) hwmgr->platform_descriptor.overdriveLimit.memoryClock = dpm_table->dpm_levels[dpm_table->count-1].value; - vega10_init_dpm_state(&(dpm_table->dpm_state)); data->dpm_table.eclk_table.count = 0; @@ -3249,6 +3248,37 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) { struct vega10_hwmgr *data = hwmgr->backend; + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + const struct vega10_power_state *vega10_ps = + cast_const_phw_vega10_power_state(states->pnew_state); + struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table); + uint32_t sclk = vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].gfx_clock; + struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); + uint32_t mclk = vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].mem_clock; + uint32_t i; + + for (i = 0; i < sclk_table->count; i++) { + if (sclk == sclk_table->dpm_levels[i].value) + break; + } + + if (i >= sclk_table->count) { + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + sclk_table->dpm_levels[i-1].value = sclk; + } + + for (i = 0; i < mclk_table->count; i++) { + if (mclk == mclk_table->dpm_levels[i].value) + break; + } + + if (i >= mclk_table->count) { + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + mclk_table->dpm_levels[i-1].value = mclk; + } if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK; @@ -4529,11 +4559,13 @@ static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) if (vega10_ps->performance_levels [vega10_ps->performance_level_count - 1].gfx_clock > - hwmgr->platform_descriptor.overdriveLimit.engineClock) + hwmgr->platform_descriptor.overdriveLimit.engineClock) { vega10_ps->performance_levels [vega10_ps->performance_level_count - 1].gfx_clock = hwmgr->platform_descriptor.overdriveLimit.engineClock; - 
+ pr_warn("max sclk supported by vbios is %d\n", + hwmgr->platform_descriptor.overdriveLimit.engineClock); + } return 0; } @@ -4581,10 +4613,13 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) if (vega10_ps->performance_levels [vega10_ps->performance_level_count - 1].mem_clock > - hwmgr->platform_descriptor.overdriveLimit.memoryClock) + hwmgr->platform_descriptor.overdriveLimit.memoryClock) { vega10_ps->performance_levels [vega10_ps->performance_level_count - 1].mem_clock = hwmgr->platform_descriptor.overdriveLimit.memoryClock; + pr_warn("max mclk supported by vbios is %d\n", + hwmgr->platform_descriptor.overdriveLimit.memoryClock); + } return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 9600e2f226e9..74bc37308dc0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -2356,6 +2356,13 @@ static int vega12_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable) return vega12_disable_gfx_off(hwmgr); } +static int vega12_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, + PHM_PerformanceLevelDesignation designation, uint32_t index, + PHM_PerformanceLevel *level) +{ + return 0; +} + static const struct pp_hwmgr_func vega12_hwmgr_funcs = { .backend_init = vega12_hwmgr_backend_init, .backend_fini = vega12_hwmgr_backend_fini, @@ -2406,6 +2413,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = { .register_irq_handlers = smu9_register_irq_handlers, .start_thermal_controller = vega12_start_thermal_controller, .powergate_gfx = vega12_gfx_off_control, + .get_performance_level = vega12_get_performance_level, }; int vega12_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index b4dbbb7c334c..57143d51e3ee 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -1875,38 +1875,20 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr, return ret; } -static int vega20_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx_freq) +static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr, + PPCLK_e clk_id, uint32_t *clk_freq) { - uint32_t gfx_clk = 0; int ret = 0; - *gfx_freq = 0; + *clk_freq = 0; PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16))) == 0, - "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!", + PPSMC_MSG_GetDpmClockFreq, (clk_id << 16))) == 0, + "[GetCurrentClkFreq] Attempt to get Current Frequency Failed!", return ret); - gfx_clk = smum_get_argument(hwmgr); + *clk_freq = smum_get_argument(hwmgr); - *gfx_freq = gfx_clk * 100; - - return 0; -} - -static int vega20_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_freq) -{ - uint32_t mem_clk = 0; - int ret = 0; - - *mclk_freq = 0; - - PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16))) == 0, - "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!", - return ret); - mem_clk = smum_get_argument(hwmgr); - - *mclk_freq = mem_clk * 100; + *clk_freq = *clk_freq * 100; return 0; } @@ -1937,12 +1919,16 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx, switch (idx) { case AMDGPU_PP_SENSOR_GFX_SCLK: - ret = vega20_get_current_gfx_clk_freq(hwmgr, (uint32_t *)value); 
+ ret = vega20_get_current_clk_freq(hwmgr, + PPCLK_GFXCLK, + (uint32_t *)value); if (!ret) *size = 4; break; case AMDGPU_PP_SENSOR_GFX_MCLK: - ret = vega20_get_current_mclk_freq(hwmgr, (uint32_t *)value); + ret = vega20_get_current_clk_freq(hwmgr, + PPCLK_UCLK, + (uint32_t *)value); if (!ret) *size = 4; break; @@ -2012,7 +1998,6 @@ int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr, if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { switch (clk_type) { case amd_pp_dcef_clock: - clk_freq = clock_req->clock_freq_in_khz / 100; clk_select = PPCLK_DCEFCLK; break; case amd_pp_disp_clock: @@ -2041,11 +2026,20 @@ int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr, return result; } +static int vega20_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, + PHM_PerformanceLevelDesignation designation, uint32_t index, + PHM_PerformanceLevel *level) +{ + return 0; +} + static int vega20_notify_smc_display_config_after_ps_adjustment( struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_single_dpm_table *dpm_table = + &data->dpm_table.mem_table; struct PP_Clocks min_clocks = {0}; struct pp_display_clock_request clock_req; int ret = 0; @@ -2063,7 +2057,7 @@ static int vega20_notify_smc_display_config_after_ps_adjustment( if (data->smu_features[GNLD_DPM_DCEFCLK].supported) { clock_req.clock_type = amd_pp_dcef_clock; - clock_req.clock_freq_in_khz = min_clocks.dcefClock; + clock_req.clock_freq_in_khz = min_clocks.dcefClock * 10; if (!vega20_display_clock_voltage_request(hwmgr, &clock_req)) { if (data->smu_features[GNLD_DS_DCEFCLK].supported) PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter( @@ -2076,6 +2070,15 @@ static int vega20_notify_smc_display_config_after_ps_adjustment( } } + if (data->smu_features[GNLD_DPM_UCLK].enabled) { + dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100; + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetHardMinByFreq, + (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)), + "[SetHardMinFreq] Set hard min uclk failed!", + return ret); + } + return 0; } @@ -2353,7 +2356,7 @@ static int vega20_get_sclks(struct pp_hwmgr *hwmgr, for (i = 0; i < count; i++) { clocks->data[i].clocks_in_khz = - dpm_table->dpm_levels[i].value * 100; + dpm_table->dpm_levels[i].value * 1000; clocks->data[i].latency_in_us = 0; } @@ -2383,7 +2386,7 @@ static int vega20_get_memclocks(struct pp_hwmgr *hwmgr, for (i = 0; i < count; i++) { clocks->data[i].clocks_in_khz = data->mclk_latency_table.entries[i].frequency = - dpm_table->dpm_levels[i].value * 100; + dpm_table->dpm_levels[i].value * 1000; clocks->data[i].latency_in_us = data->mclk_latency_table.entries[i].latency = vega20_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value); @@ -2408,7 +2411,7 @@ static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr, for (i = 0; i < count; i++) { clocks->data[i].clocks_in_khz = - dpm_table->dpm_levels[i].value * 100; + dpm_table->dpm_levels[i].value * 1000; clocks->data[i].latency_in_us = 0; } @@ -2431,7 +2434,7 @@ static int vega20_get_socclocks(struct pp_hwmgr *hwmgr, for (i = 0; i < count; i++) { clocks->data[i].clocks_in_khz = - dpm_table->dpm_levels[i].value * 100; + dpm_table->dpm_levels[i].value * 1000; clocks->data[i].latency_in_us = 0; } @@ -2582,11 +2585,11 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, return -EINVAL; } - if (input_clk < clocks.data[0].clocks_in_khz / 100 || + if (input_clk < 
clocks.data[0].clocks_in_khz / 1000 || input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) { pr_info("clock freq %d is not within allowed range [%d - %d]\n", input_clk, - clocks.data[0].clocks_in_khz / 100, + clocks.data[0].clocks_in_khz / 1000, od8_settings[OD8_SETTING_UCLK_FMAX].max_value); return -EINVAL; } @@ -2726,7 +2729,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, switch (type) { case PP_SCLK: - ret = vega20_get_current_gfx_clk_freq(hwmgr, &now); + ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now); PP_ASSERT_WITH_CODE(!ret, "Attempt to get current gfx clk Failed!", return ret); @@ -2738,12 +2741,12 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", - i, clocks.data[i].clocks_in_khz / 100, + i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now) ? "*" : ""); break; case PP_MCLK: - ret = vega20_get_current_mclk_freq(hwmgr, &now); + ret = vega20_get_current_clk_freq(hwmgr, PPCLK_UCLK, &now); PP_ASSERT_WITH_CODE(!ret, "Attempt to get current mclk freq Failed!", return ret); @@ -2755,7 +2758,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", - i, clocks.data[i].clocks_in_khz / 100, + i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now) ? "*" : ""); break; @@ -2820,7 +2823,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n", - clocks.data[0].clocks_in_khz / 100, + clocks.data[0].clocks_in_khz / 1000, od8_settings[OD8_SETTING_UCLK_FMAX].max_value); } @@ -3476,6 +3479,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = { vega20_set_watermarks_for_clocks_ranges, .display_clock_voltage_request = vega20_display_clock_voltage_request, + .get_performance_level = + vega20_get_performance_level, /* UMD pstate, profile related */ .force_dpm_level = vega20_dpm_force_dpm_level, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c index e5f7f8230065..97f8a1a970c3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c @@ -642,8 +642,14 @@ static int check_powerplay_tables( "Unsupported PPTable format!", return -1); PP_ASSERT_WITH_CODE(powerplay_table->sHeader.structuresize > 0, "Invalid PowerPlay Table!", return -1); - PP_ASSERT_WITH_CODE(powerplay_table->smcPPTable.Version == PPTABLE_V20_SMU_VERSION, - "Unmatch PPTable version, vbios update may be needed!", return -1); + + if (powerplay_table->smcPPTable.Version != PPTABLE_V20_SMU_VERSION) { + pr_info("Unmatch PPTable version: " + "pptable from VBIOS is V%d while driver supported is V%d!", + powerplay_table->smcPPTable.Version, + PPTABLE_V20_SMU_VERSION); + return -EINVAL; + } //dump_pptable(&powerplay_table->smcPPTable); @@ -716,10 +722,6 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable "[appendVbiosPPTable] Failed to retrieve Smc Dpm Table from VBIOS!", return -1); - memset(ppsmc_pptable->Padding32, - 0, - sizeof(struct atom_smc_dpm_info_v4_4) - - sizeof(struct atom_common_table_header)); ppsmc_pptable->MaxVoltageStepGfx = smc_dpm_table->maxvoltagestepgfx; ppsmc_pptable->MaxVoltageStepSoc = smc_dpm_table->maxvoltagestepsoc; @@ -778,22 +780,19 @@ static int 
append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable ppsmc_pptable->FllGfxclkSpreadPercent = smc_dpm_table->fllgfxclkspreadpercent; ppsmc_pptable->FllGfxclkSpreadFreq = smc_dpm_table->fllgfxclkspreadfreq; - if ((smc_dpm_table->table_header.format_revision == 4) && - (smc_dpm_table->table_header.content_revision == 4)) { - for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) { - ppsmc_pptable->I2cControllers[i].Enabled = - smc_dpm_table->i2ccontrollers[i].enabled; - ppsmc_pptable->I2cControllers[i].SlaveAddress = - smc_dpm_table->i2ccontrollers[i].slaveaddress; - ppsmc_pptable->I2cControllers[i].ControllerPort = - smc_dpm_table->i2ccontrollers[i].controllerport; - ppsmc_pptable->I2cControllers[i].ThermalThrottler = - smc_dpm_table->i2ccontrollers[i].thermalthrottler; - ppsmc_pptable->I2cControllers[i].I2cProtocol = - smc_dpm_table->i2ccontrollers[i].i2cprotocol; - ppsmc_pptable->I2cControllers[i].I2cSpeed = - smc_dpm_table->i2ccontrollers[i].i2cspeed; - } + for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) { + ppsmc_pptable->I2cControllers[i].Enabled = + smc_dpm_table->i2ccontrollers[i].enabled; + ppsmc_pptable->I2cControllers[i].SlaveAddress = + smc_dpm_table->i2ccontrollers[i].slaveaddress; + ppsmc_pptable->I2cControllers[i].ControllerPort = + smc_dpm_table->i2ccontrollers[i].controllerport; + ppsmc_pptable->I2cControllers[i].ThermalThrottler = + smc_dpm_table->i2ccontrollers[i].thermalthrottler; + ppsmc_pptable->I2cControllers[i].I2cProtocol = + smc_dpm_table->i2ccontrollers[i].i2cprotocol; + ppsmc_pptable->I2cControllers[i].I2cSpeed = + smc_dpm_table->i2ccontrollers[i].i2cspeed; } return 0; @@ -882,15 +881,10 @@ static int init_powerplay_table_information( if (pptable_information->smc_pptable == NULL) return -ENOMEM; - if (powerplay_table->smcPPTable.Version <= 2) - memcpy(pptable_information->smc_pptable, - &(powerplay_table->smcPPTable), - sizeof(PPTable_t) - - sizeof(I2cControllerConfig_t) * I2C_CONTROLLER_NAME_COUNT); - else - memcpy(pptable_information->smc_pptable, - &(powerplay_table->smcPPTable), - sizeof(PPTable_t)); + memcpy(pptable_information->smc_pptable, + &(powerplay_table->smcPPTable), + sizeof(PPTable_t)); + result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable)); diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h index 2998a49960ed..63d5cf691549 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h @@ -29,7 +29,7 @@ // any structure is changed in this file #define SMU11_DRIVER_IF_VERSION 0x12 -#define PPTABLE_V20_SMU_VERSION 2 +#define PPTABLE_V20_SMU_VERSION 3 #define NUM_GFXCLK_DPM_LEVELS 16 #define NUM_VCLK_DPM_LEVELS 8 diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c index f836d30fdd44..09b844ec3eab 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c @@ -71,7 +71,11 @@ static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg) result = PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); if (result != 0) { + /* Read the last message to SMU, to report actual cause */ + uint32_t val = cgs_read_register(hwmgr->device, + mmSMU_MP1_SRBM2P_MSG_0); pr_err("smu8_send_msg_to_smc_async (0x%04x) failed\n", msg); + pr_err("SMU still servicing msg (0x%04x)\n", val); return result; } diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c 
b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index f8a931cf3665..680566d97adc 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -458,18 +458,6 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge) unsigned int val; int ret; - /* - * FIXME: - * This 70ms was found necessary by experimentation. If it's not - * present, link training fails. It seems like it can go anywhere from - * pre_enable() up to semi-auto link training initiation below. - * - * Neither the datasheet for the bridge nor the panel tested mention a - * delay of this magnitude in the timing requirements. So for now, add - * the mystery delay until someone figures out a better fix. - */ - msleep(70); - /* DSI_A lane config */ val = CHA_DSI_LANES(4 - pdata->dsi->lanes); regmap_update_bits(pdata->regmap, SN_DSI_LANES_REG, @@ -536,7 +524,22 @@ static void ti_sn_bridge_pre_enable(struct drm_bridge *bridge) /* configure bridge ref_clk */ ti_sn_bridge_set_refclk_freq(pdata); - /* in case drm_panel is connected then HPD is not supported */ + /* + * HPD on this bridge chip is a bit useless. This is an eDP bridge + * so the HPD is an internal signal that's only there to signal that + * the panel is done powering up. ...but the bridge chip debounces + * this signal by between 100 ms and 400 ms (depending on process, + * voltage, and temperature--I measured it at about 200 ms). One + * particular panel asserted HPD 84 ms after it was powered on meaning + * that we saw HPD 284 ms after power on. ...but the same panel said + * that instead of looking at HPD you could just hardcode a delay of + * 200 ms. We'll assume that the panel driver will have the hardcoded + * delay in its prepare and always disable HPD. + * + * If HPD somehow makes sense on some future panel we'll have to + * change this to be conditional on someone specifying that HPD should + * be used. + */ regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE, HPD_DISABLE); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 701cb334e1ea..d8b526b7932c 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -308,6 +308,26 @@ update_connector_routing(struct drm_atomic_state *state, return 0; } + crtc_state = drm_atomic_get_new_crtc_state(state, + new_connector_state->crtc); + /* + * For compatibility with legacy users, we want to make sure that + * we allow DPMS On->Off modesets on unregistered connectors. Modesets + * which would result in anything else must be considered invalid, to + * avoid turning on new displays on dead connectors. + * + * Since the connector can be unregistered at any point during an + * atomic check or commit, this is racy. But that's OK: all we care + * about is ensuring that userspace can't do anything but shut off the + * display on a connector that was destroyed after it's been notified, + * not before. 
+ */ + if (drm_connector_is_unregistered(connector) && crtc_state->active) { + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n", + connector->base.id, connector->name); + return -EINVAL; + } + funcs = connector->helper_private; if (funcs->atomic_best_encoder) @@ -352,7 +372,6 @@ update_connector_routing(struct drm_atomic_state *state, set_best_encoder(state, new_connector_state, new_encoder); - crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc); crtc_state->connectors_changed = true; DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n", diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 1e40e5decbe9..4943cef178be 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -379,7 +379,8 @@ void drm_connector_cleanup(struct drm_connector *connector) /* The connector should have been removed from userspace long before * it is finally destroyed. */ - if (WARN_ON(connector->registered)) + if (WARN_ON(connector->registration_state == + DRM_CONNECTOR_REGISTERED)) drm_connector_unregister(connector); if (connector->tile_group) { @@ -436,7 +437,7 @@ int drm_connector_register(struct drm_connector *connector) return 0; mutex_lock(&connector->mutex); - if (connector->registered) + if (connector->registration_state != DRM_CONNECTOR_INITIALIZING) goto unlock; ret = drm_sysfs_connector_add(connector); @@ -456,7 +457,7 @@ int drm_connector_register(struct drm_connector *connector) drm_mode_object_register(connector->dev, &connector->base); - connector->registered = true; + connector->registration_state = DRM_CONNECTOR_REGISTERED; goto unlock; err_debugfs: @@ -478,7 +479,7 @@ EXPORT_SYMBOL(drm_connector_register); void drm_connector_unregister(struct drm_connector *connector) { mutex_lock(&connector->mutex); - if (!connector->registered) { + if (connector->registration_state != DRM_CONNECTOR_REGISTERED) { mutex_unlock(&connector->mutex); return; } @@ -489,7 +490,7 @@ void drm_connector_unregister(struct drm_connector *connector) drm_sysfs_connector_remove(connector); drm_debugfs_connector_remove(connector); - connector->registered = false; + connector->registration_state = DRM_CONNECTOR_UNREGISTERED; mutex_unlock(&connector->mutex); } EXPORT_SYMBOL(drm_connector_unregister); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index ff0bfc65a8c1..b506e3622b08 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -122,6 +122,9 @@ static const struct edid_quirk { /* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */ { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC }, + /* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */ + { "BOE", 0x0771, EDID_QUIRK_FORCE_6BPC }, + /* Belinea 10 15 55 */ { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 3fae4dab295f..13f9b56a9ce7 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -5102,19 +5102,13 @@ intel_dp_long_pulse(struct intel_connector *connector, */ status = connector_status_disconnected; goto out; - } else { - /* - * If display is now connected check links status, - * there has been known issues of link loss triggering - * long pulse. - * - * Some sinks (eg. ASUS PB287Q) seem to perform some - * weird HPD ping pong during modesets. So we can apparently - * end up with HPD going low during a modeset, and then - * going back up soon after. 
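/*
 * [Editor's illustrative sketch -- not part of the kernel patch above.]
 * The connector changes in this section replace the old boolean
 * connector->registered with a tri-state registration_state, so callers can
 * tell "not yet registered" apart from "registered and later removed" (a
 * zombie connector). A minimal sketch of how a caller is assumed to consume
 * the new helper; example_connector_usable() is a hypothetical name.
 */
#include <drm/drm_connector.h>

static bool example_connector_usable(struct drm_connector *connector)
{
        /*
         * Zombie connectors may only be shut off: the atomic-helper check
         * above rejects any state that leaves crtc_state->active set on an
         * unregistered connector.
         */
        return !drm_connector_is_unregistered(connector);
}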
And once that happens we must - * retrain the link to get a picture. That's in case no - * userspace component reacted to intermittent HPD dip. - */ + } + + /* + * Some external monitors do not signal loss of link synchronization + * with an IRQ_HPD, so force a link status check. + */ + if (!intel_dp_is_edp(intel_dp)) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; intel_dp_retrain_link(encoder, ctx); diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 7f155b4f1a7d..1b00f8ea145b 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -77,7 +77,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, pipe_config->pbn = mst_pbn; /* Zombie connectors can't have VCPI slots */ - if (READ_ONCE(connector->registered)) { + if (!drm_connector_is_unregistered(connector)) { slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, port, @@ -313,7 +313,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) struct edid *edid; int ret; - if (!READ_ONCE(connector->registered)) + if (drm_connector_is_unregistered(connector)) return intel_connector_update_modes(connector, NULL); edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port); @@ -329,7 +329,7 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force) struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_dp *intel_dp = intel_connector->mst_port; - if (!READ_ONCE(connector->registered)) + if (drm_connector_is_unregistered(connector)) return connector_status_disconnected; return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port); @@ -372,7 +372,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector, int bpp = 24; /* MST uses fixed bpp */ int max_rate, mode_rate, max_lanes, max_link_clock; - if (!READ_ONCE(connector->registered)) + if (drm_connector_is_unregistered(connector)) return MODE_ERROR; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 6bb78076b5b5..6cbbae3f438b 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -881,22 +881,16 @@ nv50_mstc_atomic_best_encoder(struct drm_connector *connector, { struct nv50_head *head = nv50_head(connector_state->crtc); struct nv50_mstc *mstc = nv50_mstc(connector); - if (mstc->port) { - struct nv50_mstm *mstm = mstc->mstm; - return &mstm->msto[head->base.index]->encoder; - } - return NULL; + + return &mstc->mstm->msto[head->base.index]->encoder; } static struct drm_encoder * nv50_mstc_best_encoder(struct drm_connector *connector) { struct nv50_mstc *mstc = nv50_mstc(connector); - if (mstc->port) { - struct nv50_mstm *mstm = mstc->mstm; - return &mstm->msto[0]->encoder; - } - return NULL; + + return &mstc->mstm->msto[0]->encoder; } static enum drm_mode_status diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 97964f7f2ace..a04ffb3b2174 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -56,6 +56,8 @@ struct panel_desc { /** * @prepare: the time (in milliseconds) that it takes for the panel to * become ready and start receiving video data + * @hpd_absent_delay: Add this to the prepare delay if we know Hot + * Plug Detect isn't used. 
* @enable: the time (in milliseconds) that it takes for the panel to * display the first valid frame after starting to receive * video data @@ -66,6 +68,7 @@ struct panel_desc { */ struct { unsigned int prepare; + unsigned int hpd_absent_delay; unsigned int enable; unsigned int disable; unsigned int unprepare; @@ -79,6 +82,7 @@ struct panel_simple { struct drm_panel base; bool prepared; bool enabled; + bool no_hpd; const struct panel_desc *desc; @@ -202,6 +206,7 @@ static int panel_simple_unprepare(struct drm_panel *panel) static int panel_simple_prepare(struct drm_panel *panel) { struct panel_simple *p = to_panel_simple(panel); + unsigned int delay; int err; if (p->prepared) @@ -215,8 +220,11 @@ static int panel_simple_prepare(struct drm_panel *panel) gpiod_set_value_cansleep(p->enable_gpio, 1); - if (p->desc->delay.prepare) - msleep(p->desc->delay.prepare); + delay = p->desc->delay.prepare; + if (p->no_hpd) + delay += p->desc->delay.hpd_absent_delay; + if (delay) + msleep(delay); p->prepared = true; @@ -305,6 +313,8 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) panel->prepared = false; panel->desc = desc; + panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd"); + panel->supply = devm_regulator_get(dev, "power"); if (IS_ERR(panel->supply)) return PTR_ERR(panel->supply); @@ -1363,7 +1373,7 @@ static const struct panel_desc innolux_n156bge_l21 = { }, }; -static const struct drm_display_mode innolux_tv123wam_mode = { +static const struct drm_display_mode innolux_p120zdg_bf1_mode = { .clock = 206016, .hdisplay = 2160, .hsync_start = 2160 + 48, @@ -1377,15 +1387,16 @@ static const struct drm_display_mode innolux_tv123wam_mode = { .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }; -static const struct panel_desc innolux_tv123wam = { - .modes = &innolux_tv123wam_mode, +static const struct panel_desc innolux_p120zdg_bf1 = { + .modes = &innolux_p120zdg_bf1_mode, .num_modes = 1, .bpc = 8, .size = { - .width = 259, - .height = 173, + .width = 254, + .height = 169, }, .delay = { + .hpd_absent_delay = 200, .unprepare = 500, }, }; @@ -2445,8 +2456,8 @@ static const struct of_device_id platform_of_match[] = { .compatible = "innolux,n156bge-l21", .data = &innolux_n156bge_l21, }, { - .compatible = "innolux,tv123wam", - .data = &innolux_tv123wam, + .compatible = "innolux,p120zdg-bf1", + .data = &innolux_p120zdg_bf1, }, { .compatible = "innolux,zj070na-01p", .data = &innolux_zj070na_01p, diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 5ed319e3b084..41e9935fc584 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -149,6 +149,7 @@ config HID_APPLEIR config HID_ASUS tristate "Asus" depends on LEDS_CLASS + depends on ASUS_WMI || ASUS_WMI=n ---help--- Support for Asus notebook built-in keyboard and touchpad via i2c, and the Asus Republic of Gamers laptop keyboard special keys. diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index 88a5672f42cd..dc6d6477e961 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -29,6 +29,7 @@ #include <linux/dmi.h> #include <linux/hid.h> #include <linux/module.h> +#include <linux/platform_data/x86/asus-wmi.h> #include <linux/input/mt.h> #include <linux/usb.h> /* For to_usb_interface for T100 touchpad intf check */ @@ -349,6 +350,24 @@ static void asus_kbd_backlight_work(struct work_struct *work) hid_err(led->hdev, "Asus failed to set keyboard backlight: %d\n", ret); } +/* WMI-based keyboard backlight LED control (via asus-wmi driver) takes + * precedence. 
We only activate HID-based backlight control when the + * WMI control is not available. + */ +static bool asus_kbd_wmi_led_control_present(struct hid_device *hdev) +{ + u32 value; + int ret; + + ret = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2, + ASUS_WMI_DEVID_KBD_BACKLIGHT, 0, &value); + hid_dbg(hdev, "WMI backlight check: rc %d value %x", ret, value); + if (ret) + return false; + + return !!(value & ASUS_WMI_DSTS_PRESENCE_BIT); +} + static int asus_kbd_register_leds(struct hid_device *hdev) { struct asus_drvdata *drvdata = hid_get_drvdata(hdev); @@ -436,7 +455,9 @@ static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi) drvdata->input = input; - if (drvdata->enable_backlight && asus_kbd_register_leds(hdev)) + if (drvdata->enable_backlight && + !asus_kbd_wmi_led_control_present(hdev) && + asus_kbd_register_leds(hdev)) hid_warn(hdev, "Failed to initialize backlight.\n"); return 0; diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index dc78aa7369de..28460f6a60cc 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -306,10 +306,7 @@ static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client) if (client->flags & I2C_CLIENT_TEN) return -EINVAL; - irq = irq_find_mapping(adap->host_notify_domain, client->addr); - if (!irq) - irq = irq_create_mapping(adap->host_notify_domain, - client->addr); + irq = irq_create_mapping(adap->host_notify_domain, client->addr); return irq > 0 ? irq : -ENXIO; } @@ -433,6 +430,8 @@ static int i2c_device_remove(struct device *dev) dev_pm_clear_wake_irq(&client->dev); device_init_wakeup(&client->dev, false); + client->irq = 0; + return status; } diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c index 566d69a2edbc..add4c9c934c8 100644 --- a/drivers/irqchip/irq-mvebu-sei.c +++ b/drivers/irqchip/irq-mvebu-sei.c @@ -384,9 +384,9 @@ static int mvebu_sei_probe(struct platform_device *pdev) sei->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); sei->base = devm_ioremap_resource(sei->dev, sei->res); - if (!sei->base) { + if (IS_ERR(sei->base)) { dev_err(sei->dev, "Failed to remap SEI resource\n"); - return -ENODEV; + return PTR_ERR(sei->base); } /* Retrieve the SEI capabilities with the interrupt ranges */ diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index b05022f94f18..072bb5e36c18 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -718,8 +718,7 @@ l1oip_socket_thread(void *data) printk(KERN_DEBUG "%s: socket created and open\n", __func__); while (!signal_pending(current)) { - iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, - recvbuf_size); + iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, recvbuf_size); recvlen = sock_recvmsg(socket, &msg, 0); if (recvlen > 0) { l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index f3fb5bb8c82a..ac1cffd2a09b 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -542,7 +542,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) !discard_bio) continue; bio_chain(discard_bio, bio); - bio_clone_blkg_association(discard_bio, bio); + bio_clone_blkcg_association(discard_bio, bio); if (mddev->gendisk) trace_block_bio_remap(bdev_get_queue(rdev->bdev), discard_bio, disk_devt(mddev->gendisk), diff --git a/drivers/mfd/cros_ec_dev.h b/drivers/mfd/cros_ec_dev.h index 45e9453608c5..978d836a0248 100644 --- a/drivers/mfd/cros_ec_dev.h +++ b/drivers/mfd/cros_ec_dev.h @@ -26,12 +26,13 
@@ #define CROS_EC_DEV_VERSION "1.0.0" -/* - * @offset: within EC_LPC_ADDR_MEMMAP region - * @bytes: number of bytes to read. zero means "read a string" (including '\0') - * (at most only EC_MEMMAP_SIZE bytes can be read) - * @buffer: where to store the result - * ioctl returns the number of bytes read, negative on error +/** + * struct cros_ec_readmem - Struct used to read mapped memory. + * @offset: Within EC_LPC_ADDR_MEMMAP region. + * @bytes: Number of bytes to read. Zero means "read a string" (including '\0') + * At most only EC_MEMMAP_SIZE bytes can be read. + * @buffer: Where to store the result. The ioctl returns the number of bytes + * read or negative on error. */ struct cros_ec_readmem { uint32_t offset; diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile index 3370a4138e94..951c984de61a 100644 --- a/drivers/misc/lkdtm/Makefile +++ b/drivers/misc/lkdtm/Makefile @@ -8,7 +8,9 @@ lkdtm-$(CONFIG_LKDTM) += perms.o lkdtm-$(CONFIG_LKDTM) += refcount.o lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o lkdtm-$(CONFIG_LKDTM) += usercopy.o +lkdtm-$(CONFIG_LKDTM) += stackleak.o +KASAN_SANITIZE_stackleak.o := n KCOV_INSTRUMENT_rodata.o := n OBJCOPYFLAGS := diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c index 5a755590d3dc..2837dc77478e 100644 --- a/drivers/misc/lkdtm/core.c +++ b/drivers/misc/lkdtm/core.c @@ -184,6 +184,7 @@ static const struct crashtype crashtypes[] = { CRASHTYPE(USERCOPY_STACK_BEYOND), CRASHTYPE(USERCOPY_KERNEL), CRASHTYPE(USERCOPY_KERNEL_DS), + CRASHTYPE(STACKLEAK_ERASING), }; diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h index 07db641d71d0..3c6fd327e166 100644 --- a/drivers/misc/lkdtm/lkdtm.h +++ b/drivers/misc/lkdtm/lkdtm.h @@ -84,4 +84,7 @@ void lkdtm_USERCOPY_STACK_BEYOND(void); void lkdtm_USERCOPY_KERNEL(void); void lkdtm_USERCOPY_KERNEL_DS(void); +/* lkdtm_stackleak.c */ +void lkdtm_STACKLEAK_ERASING(void); + #endif diff --git a/drivers/misc/lkdtm/stackleak.c b/drivers/misc/lkdtm/stackleak.c new file mode 100644 index 000000000000..d5a084475abc --- /dev/null +++ b/drivers/misc/lkdtm/stackleak.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This code tests that the current task stack is properly erased (filled + * with STACKLEAK_POISON). + * + * Authors: + * Alexander Popov <alex.popov@linux.com> + * Tycho Andersen <tycho@tycho.ws> + */ + +#include "lkdtm.h" +#include <linux/stackleak.h> + +void lkdtm_STACKLEAK_ERASING(void) +{ + unsigned long *sp, left, found, i; + const unsigned long check_depth = + STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long); + + /* + * For the details about the alignment of the poison values, see + * the comment in stackleak_track_stack(). + */ + sp = PTR_ALIGN(&i, sizeof(unsigned long)); + + left = ((unsigned long)sp & (THREAD_SIZE - 1)) / sizeof(unsigned long); + sp--; + + /* + * One 'long int' at the bottom of the thread stack is reserved + * and not poisoned. + */ + if (left > 1) { + left--; + } else { + pr_err("FAIL: not enough stack space for the test\n"); + return; + } + + pr_info("checking unused part of the thread stack (%lu bytes)...\n", + left * sizeof(unsigned long)); + + /* + * Search for 'check_depth' poison values in a row (just like + * stackleak_erase() does). 
+ */ + for (i = 0, found = 0; i < left && found <= check_depth; i++) { + if (*(sp - i) == STACKLEAK_POISON) + found++; + else + found = 0; + } + + if (found <= check_depth) { + pr_err("FAIL: thread stack is not erased (checked %lu bytes)\n", + i * sizeof(unsigned long)); + return; + } + + pr_info("first %lu bytes are unpoisoned\n", + (i - found) * sizeof(unsigned long)); + + /* The rest of thread stack should be erased */ + for (; i < left; i++) { + if (*(sp - i) != STACKLEAK_POISON) { + pr_err("FAIL: thread stack is NOT properly erased\n"); + return; + } + } + + pr_info("OK: the rest of the thread stack is properly erased\n"); + return; +} diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index bd52f29b4a4e..264f4ed8eef2 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -3030,7 +3030,7 @@ ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair, if (!qpair || !buf) return VMCI_ERROR_INVALID_ARGS; - iov_iter_kvec(&from, WRITE | ITER_KVEC, &v, 1, buf_size); + iov_iter_kvec(&from, WRITE, &v, 1, buf_size); qp_lock(qpair); @@ -3074,7 +3074,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair, if (!qpair || !buf) return VMCI_ERROR_INVALID_ARGS; - iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size); + iov_iter_kvec(&to, READ, &v, 1, buf_size); qp_lock(qpair); @@ -3119,7 +3119,7 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair, if (!qpair || !buf) return VMCI_ERROR_INVALID_ARGS; - iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size); + iov_iter_kvec(&to, READ, &v, 1, buf_size); qp_lock(qpair); diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c index 93ceea4f27d5..e294d3986ba9 100644 --- a/drivers/mtd/ubi/attach.c +++ b/drivers/mtd/ubi/attach.c @@ -1072,6 +1072,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, * be a result of power cut during erasure. 
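/*
 * [Editor's illustrative sketch -- not part of the kernel patch above.]
 * The iov_iter_kvec() conversions in this section (l1oip, vmw_vmci) drop the
 * ITER_KVEC flag: the iterator type is implied by the helper, so callers now
 * pass the data direction alone. A minimal sketch of the new calling
 * convention for a kernel-space socket read; example_kernel_recv() and its
 * parameters are hypothetical.
 */
#include <linux/net.h>
#include <linux/uio.h>

static int example_kernel_recv(struct socket *sock, void *buf, size_t len)
{
        struct kvec iov = { .iov_base = buf, .iov_len = len };
        struct msghdr msg = { };

        /* Direction only; no ITER_KVEC OR'ed into the second argument. */
        iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, len);

        return sock_recvmsg(sock, &msg, 0);
}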
*/ ai->maybe_bad_peb_count += 1; + /* fall through */ case UBI_IO_BAD_HDR: /* * If we're facing a bad VID header we have to drop *all* diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index d2a726654ff1..a4e3454133a4 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -1334,8 +1334,10 @@ static int bytes_str_to_int(const char *str) switch (*endp) { case 'G': result *= 1024; + /* fall through */ case 'M': result *= 1024; + /* fall through */ case 'K': result *= 1024; if (endp[1] == 'i' && endp[2] == 'B') diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index 9697977b80f0..6b9ad8673218 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -638,8 +638,7 @@ static int bond_fill_info(struct sk_buff *skb, goto nla_put_failure; if (nla_put(skb, IFLA_BOND_AD_ACTOR_SYSTEM, - sizeof(bond->params.ad_actor_system), - &bond->params.ad_actor_system)) + ETH_ALEN, &bond->params.ad_actor_system)) goto nla_put_failure; } if (!bond_3ad_get_active_agg_info(bond, &info)) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index e82e4ca20620..055b40606dbc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -316,8 +316,8 @@ struct hnae3_ae_ops { int (*set_loopback)(struct hnae3_handle *handle, enum hnae3_loop loop_mode, bool en); - void (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc, - bool en_mc_pmc); + int (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc, + bool en_mc_pmc); int (*set_mtu)(struct hnae3_handle *handle, int new_mtu); void (*get_pauseparam)(struct hnae3_handle *handle, @@ -391,7 +391,7 @@ struct hnae3_ae_ops { int vector_num, struct hnae3_ring_chain_node *vr_chain); - void (*reset_queue)(struct hnae3_handle *handle, u16 queue_id); + int (*reset_queue)(struct hnae3_handle *handle, u16 queue_id); u32 (*get_fw_version)(struct hnae3_handle *handle); void (*get_mdix_mode)(struct hnae3_handle *handle, u8 *tp_mdix_ctrl, u8 *tp_mdix); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 32f3aca814e7..3f96aa30068e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -509,16 +509,18 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev) h->netdev_flags = new_flags; } -void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags) +int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = priv->ae_handle; if (h->ae_algo->ops->set_promisc_mode) { - h->ae_algo->ops->set_promisc_mode(h, - promisc_flags & HNAE3_UPE, - promisc_flags & HNAE3_MPE); + return h->ae_algo->ops->set_promisc_mode(h, + promisc_flags & HNAE3_UPE, + promisc_flags & HNAE3_MPE); } + + return 0; } void hns3_enable_vlan_filter(struct net_device *netdev, bool enable) @@ -1494,18 +1496,22 @@ static int hns3_vlan_rx_kill_vid(struct net_device *netdev, return ret; } -static void hns3_restore_vlan(struct net_device *netdev) +static int hns3_restore_vlan(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); + int ret = 0; u16 vid; - int ret; for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); - if (ret) - netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n", - vid, ret); + if 
(ret) { + netdev_err(netdev, "Restore vlan: %d filter, ret:%d\n", + vid, ret); + return ret; + } } + + return ret; } static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, @@ -2727,7 +2733,7 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); if (!chain) - return -ENOMEM; + goto err_free_chain; cur_chain->next = chain; chain->tqp_index = tx_ring->tqp->tqp_index; @@ -2757,7 +2763,7 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, while (rx_ring) { chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); if (!chain) - return -ENOMEM; + goto err_free_chain; cur_chain->next = chain; chain->tqp_index = rx_ring->tqp->tqp_index; @@ -2772,6 +2778,16 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, } return 0; + +err_free_chain: + cur_chain = head->next; + while (cur_chain) { + chain = cur_chain->next; + devm_kfree(&pdev->dev, chain); + cur_chain = chain; + } + + return -ENOMEM; } static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, @@ -2821,7 +2837,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_tqp_vector *tqp_vector; int ret = 0; - u16 i; + int i; hns3_nic_set_cpumask(priv); @@ -2868,13 +2884,19 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); if (ret) - return ret; + goto map_ring_fail; netif_napi_add(priv->netdev, &tqp_vector->napi, hns3_nic_common_poll, NAPI_POLL_WEIGHT); } return 0; + +map_ring_fail: + while (i--) + netif_napi_del(&priv->tqp_vector[i].napi); + + return ret; } static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) @@ -3031,8 +3053,10 @@ static int hns3_queue_to_ring(struct hnae3_queue *tqp, return ret; ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); - if (ret) + if (ret) { + devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring); return ret; + } return 0; } @@ -3059,6 +3083,12 @@ static int hns3_get_ring_config(struct hns3_nic_priv *priv) return 0; err: + while (i--) { + devm_kfree(priv->dev, priv->ring_data[i].ring); + devm_kfree(priv->dev, + priv->ring_data[i + h->kinfo.num_tqps].ring); + } + devm_kfree(&pdev->dev, priv->ring_data); return ret; } @@ -3226,9 +3256,6 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv) int i; for (i = 0; i < h->kinfo.num_tqps; i++) { - if (h->ae_algo->ops->reset_queue) - h->ae_algo->ops->reset_queue(h, i); - hns3_fini_ring(priv->ring_data[i].ring); hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); } @@ -3236,11 +3263,12 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv) } /* Set mac addr if it is configured. 
or leave it to the AE driver */ -static void hns3_init_mac_addr(struct net_device *netdev, bool init) +static int hns3_init_mac_addr(struct net_device *netdev, bool init) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = priv->ae_handle; u8 mac_addr_temp[ETH_ALEN]; + int ret = 0; if (h->ae_algo->ops->get_mac_addr && init) { h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); @@ -3255,8 +3283,9 @@ static void hns3_init_mac_addr(struct net_device *netdev, bool init) } if (h->ae_algo->ops->set_mac_addr) - h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); + ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); + return ret; } static int hns3_restore_fd_rules(struct net_device *netdev) @@ -3469,20 +3498,29 @@ err_out: return ret; } -static void hns3_recover_hw_addr(struct net_device *ndev) +static int hns3_recover_hw_addr(struct net_device *ndev) { struct netdev_hw_addr_list *list; struct netdev_hw_addr *ha, *tmp; + int ret = 0; /* go through and sync uc_addr entries to the device */ list = &ndev->uc; - list_for_each_entry_safe(ha, tmp, &list->list, list) - hns3_nic_uc_sync(ndev, ha->addr); + list_for_each_entry_safe(ha, tmp, &list->list, list) { + ret = hns3_nic_uc_sync(ndev, ha->addr); + if (ret) + return ret; + } /* go through and sync mc_addr entries to the device */ list = &ndev->mc; - list_for_each_entry_safe(ha, tmp, &list->list, list) - hns3_nic_mc_sync(ndev, ha->addr); + list_for_each_entry_safe(ha, tmp, &list->list, list) { + ret = hns3_nic_mc_sync(ndev, ha->addr); + if (ret) + return ret; + } + + return ret; } static void hns3_remove_hw_addr(struct net_device *netdev) @@ -3609,7 +3647,10 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h) int ret; for (i = 0; i < h->kinfo.num_tqps; i++) { - h->ae_algo->ops->reset_queue(h, i); + ret = h->ae_algo->ops->reset_queue(h, i); + if (ret) + return ret; + hns3_init_ring_hw(priv->ring_data[i].ring); /* We need to clear tx ring here because self test will @@ -3701,18 +3742,30 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) bool vlan_filter_enable; int ret; - hns3_init_mac_addr(netdev, false); - hns3_recover_hw_addr(netdev); - hns3_update_promisc_mode(netdev, handle->netdev_flags); + ret = hns3_init_mac_addr(netdev, false); + if (ret) + return ret; + + ret = hns3_recover_hw_addr(netdev); + if (ret) + return ret; + + ret = hns3_update_promisc_mode(netdev, handle->netdev_flags); + if (ret) + return ret; + vlan_filter_enable = netdev->flags & IFF_PROMISC ? 
false : true; hns3_enable_vlan_filter(netdev, vlan_filter_enable); - /* Hardware table is only clear when pf resets */ - if (!(handle->flags & HNAE3_SUPPORT_VF)) - hns3_restore_vlan(netdev); + if (!(handle->flags & HNAE3_SUPPORT_VF)) { + ret = hns3_restore_vlan(netdev); + return ret; + } - hns3_restore_fd_rules(netdev); + ret = hns3_restore_fd_rules(netdev); + if (ret) + return ret; /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 71cfca132d0b..d3636d088aa3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -640,7 +640,7 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, u32 rl_value); void hns3_enable_vlan_filter(struct net_device *netdev, bool enable); -void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags); +int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags); #ifdef CONFIG_HNS3_DCB void hns3_dcbnl_setup(struct hnae3_handle *handle); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index ac13cb2b168e..690f62ed87dc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -24,15 +24,15 @@ static int hclge_ring_space(struct hclge_cmq_ring *ring) return ring->desc_num - used - 1; } -static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int h) +static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head) { - int u = ring->next_to_use; - int c = ring->next_to_clean; + int ntu = ring->next_to_use; + int ntc = ring->next_to_clean; - if (unlikely(h >= ring->desc_num)) - return 0; + if (ntu > ntc) + return head >= ntc && head <= ntu; - return u > c ? 
(h > c && h <= u) : (h > c || h <= u); + return head >= ntc || head <= ntu; } static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring) @@ -304,6 +304,10 @@ int hclge_cmd_queue_init(struct hclge_dev *hdev) { int ret; + /* Setup the lock for command queue */ + spin_lock_init(&hdev->hw.cmq.csq.lock); + spin_lock_init(&hdev->hw.cmq.crq.lock); + /* Setup the queue entries for use cmd queue */ hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM; hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM; @@ -337,18 +341,20 @@ int hclge_cmd_init(struct hclge_dev *hdev) u32 version; int ret; + spin_lock_bh(&hdev->hw.cmq.csq.lock); + spin_lock_bh(&hdev->hw.cmq.crq.lock); + hdev->hw.cmq.csq.next_to_clean = 0; hdev->hw.cmq.csq.next_to_use = 0; hdev->hw.cmq.crq.next_to_clean = 0; hdev->hw.cmq.crq.next_to_use = 0; - /* Setup the lock for command queue */ - spin_lock_init(&hdev->hw.cmq.csq.lock); - spin_lock_init(&hdev->hw.cmq.crq.lock); - hclge_cmd_init_regs(&hdev->hw); clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + spin_unlock_bh(&hdev->hw.cmq.crq.lock); + spin_unlock_bh(&hdev->hw.cmq.csq.lock); + ret = hclge_cmd_query_firmware_version(&hdev->hw, &version); if (ret) { dev_err(&hdev->pdev->dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index dca6f2326c26..123c37e653f3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -751,7 +751,7 @@ static void hclge_process_ncsi_error(struct hclge_dev *hdev, ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd, HCLGE_NCSI_INT_CLR, 0); if (ret) - dev_err(dev, "failed(=%d) to clear NCSI intrerrupt status\n", + dev_err(dev, "failed(=%d) to clear NCSI interrupt status\n", ret); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 5234b5373ed3..ffdd96020860 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2236,7 +2236,7 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data) } /* clear the source of interrupt if it is not cause by reset */ - if (event_cause != HCLGE_VECTOR0_EVENT_RST) { + if (event_cause == HCLGE_VECTOR0_EVENT_MBX) { hclge_clear_event_cause(hdev, event_cause, clearval); hclge_enable_vector(&hdev->misc_vector, true); } @@ -2470,14 +2470,17 @@ static void hclge_reset(struct hclge_dev *hdev) handle = &hdev->vport[0].nic; rtnl_lock(); hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + rtnl_unlock(); if (!hclge_reset_wait(hdev)) { + rtnl_lock(); hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); hclge_reset_ae_dev(hdev->ae_dev); hclge_notify_client(hdev, HNAE3_INIT_CLIENT); hclge_clear_reset_cause(hdev); } else { + rtnl_lock(); /* schedule again to check pending resets later */ set_bit(hdev->reset_type, &hdev->reset_pending); hclge_reset_task_schedule(hdev); @@ -3314,8 +3317,8 @@ void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, param->vf_id = vport_id; } -static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, - bool en_mc_pmc) +static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, + bool en_mc_pmc) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; @@ -3323,7 +3326,7 @@ static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, true, 
vport->vport_id); - hclge_cmd_set_promisc_mode(hdev, ¶m); + return hclge_cmd_set_promisc_mode(hdev, ¶m); } static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) @@ -6107,31 +6110,28 @@ static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, return tqp->index; } -void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) +int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; int reset_try_times = 0; int reset_status; u16 queue_gid; - int ret; - - if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) - return; + int ret = 0; queue_gid = hclge_covert_handle_qid_global(handle, queue_id); ret = hclge_tqp_enable(hdev, queue_id, 0, false); if (ret) { - dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); - return; + dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); + return ret; } ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); if (ret) { - dev_warn(&hdev->pdev->dev, - "Send reset tqp cmd fail, ret = %d\n", ret); - return; + dev_err(&hdev->pdev->dev, + "Send reset tqp cmd fail, ret = %d\n", ret); + return ret; } reset_try_times = 0; @@ -6144,16 +6144,16 @@ void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) } if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { - dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); - return; + dev_err(&hdev->pdev->dev, "Reset TQP fail\n"); + return ret; } ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); - if (ret) { - dev_warn(&hdev->pdev->dev, - "Deassert the soft reset fail, ret = %d\n", ret); - return; - } + if (ret) + dev_err(&hdev->pdev->dev, + "Deassert the soft reset fail, ret = %d\n", ret); + + return ret; } void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index e3dfd654eca9..0d9215404269 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -778,7 +778,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev); void hclge_rss_indir_init_cfg(struct hclge_dev *hdev); void hclge_mbx_handler(struct hclge_dev *hdev); -void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id); +int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id); void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id); int hclge_cfg_flowctrl(struct hclge_dev *hdev); int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 04462a347a94..f890022938d9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -400,6 +400,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev) /* handle all the mailbox requests in the queue */ while (!hclge_cmd_crq_empty(&hdev->hw)) { + if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) { + dev_warn(&hdev->pdev->dev, + "command queue needs re-initializing\n"); + return; + } + desc = &crq->desc[crq->next_to_use]; req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 24b1f2a0c32a..03018638f701 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -52,7 +52,7 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum, struct hclge_desc desc; int ret; - if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) return 0; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false); @@ -90,7 +90,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) struct hclge_desc desc; int ret; - if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) return 0; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index e0a86a58342c..085edb945389 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -925,12 +925,12 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, return status; } -static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, - bool en_uc_pmc, bool en_mc_pmc) +static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, + bool en_uc_pmc, bool en_mc_pmc) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc); + return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc); } static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, @@ -1080,7 +1080,7 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1, false, NULL, 0); } -static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) +static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); u8 msg_data[2]; @@ -1091,10 +1091,10 @@ static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) /* disable vf queue before send queue reset msg to PF */ ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); if (ret) - return; + return ret; - hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, - 2, true, NULL, 0); + return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, + 2, true, NULL, 0); } static int hclgevf_notify_client(struct hclgevf_dev *hdev, @@ -1170,6 +1170,8 @@ static int hclgevf_reset(struct hclgevf_dev *hdev) /* bring down the nic to stop any ongoing TX/RX */ hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + rtnl_unlock(); + /* check if VF could successfully fetch the hardware reset completion * status from the hardware */ @@ -1181,12 +1183,15 @@ static int hclgevf_reset(struct hclgevf_dev *hdev) ret); dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n"); + rtnl_lock(); hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); rtnl_unlock(); return ret; } + rtnl_lock(); + /* now, re-initialize the nic client and ae device*/ ret = hclgevf_reset_stack(hdev); if (ret) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c index 967c993d5303..bbf9bdd0ee3e 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c @@ -532,7 +532,7 @@ void hinic_task_set_inner_l3(struct hinic_sq_task *task, } void hinic_task_set_tunnel_l4(struct hinic_sq_task *task, - enum hinic_l4_offload_type l4_type, + enum hinic_l4_tunnel_type l4_type, u32 tunnel_len) { task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l4_type, 
TUNNEL_L4TYPE) | diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h index a0dc63a4bfc7..038522e202b6 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h @@ -160,7 +160,7 @@ void hinic_task_set_inner_l3(struct hinic_sq_task *task, u32 network_len); void hinic_task_set_tunnel_l4(struct hinic_sq_task *task, - enum hinic_l4_offload_type l4_type, + enum hinic_l4_tunnel_type l4_type, u32 tunnel_len); void hinic_set_cs_inner_l4(struct hinic_sq_task *task, diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index fd3373d82a9e..59e1bc0f609e 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -200,6 +200,15 @@ config IXGBE_DCB If unsure, say N. +config IXGBE_IPSEC + bool "IPSec XFRM cryptography-offload acceleration" + depends on IXGBE + depends on XFRM_OFFLOAD + default y + select XFRM_ALGO + ---help--- + Enable support for IPSec offload in ixgbe.ko + config IXGBEVF tristate "Intel(R) 10GbE PCI Express Virtual Function Ethernet support" depends on PCI_MSI @@ -217,6 +226,15 @@ config IXGBEVF will be called ixgbevf. MSI-X interrupt support is required for this driver to work correctly. +config IXGBEVF_IPSEC + bool "IPSec XFRM cryptography-offload acceleration" + depends on IXGBEVF + depends on XFRM_OFFLOAD + default y + select XFRM_ALGO + ---help--- + Enable support for IPSec offload in ixgbevf.ko + config I40E tristate "Intel(R) Ethernet Controller XL710 Family support" imply PTP_1588_CLOCK diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index e707d717012f..5d4f1761dc0c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -244,7 +244,8 @@ process_mbx: } /* guarantee we have free space in the SM mailbox */ - if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) { + if (hw->mbx.state == FM10K_STATE_OPEN && + !hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) { /* keep track of how many times this occurs */ interface->hw_sm_mbx_full++; @@ -302,6 +303,28 @@ void fm10k_iov_suspend(struct pci_dev *pdev) } } +static void fm10k_mask_aer_comp_abort(struct pci_dev *pdev) +{ + u32 err_mask; + int pos; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); + if (!pos) + return; + + /* Mask the completion abort bit in the ERR_UNCOR_MASK register, + * preventing the device from reporting these errors to the upstream + * PCIe root device. This avoids bringing down platforms which upgrade + * non-fatal completer aborts into machine check exceptions. Completer + * aborts can occur whenever a VF reads a queue it doesn't own. + */ + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &err_mask); + err_mask |= PCI_ERR_UNC_COMP_ABORT; + pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, err_mask); + + mmiowb(); +} + int fm10k_iov_resume(struct pci_dev *pdev) { struct fm10k_intfc *interface = pci_get_drvdata(pdev); @@ -317,6 +340,12 @@ int fm10k_iov_resume(struct pci_dev *pdev) if (!iov_data) return -ENOMEM; + /* Lower severity of completer abort error reporting as + * the VFs can trigger this any time they read a queue + * that they don't own. 
+ */ + fm10k_mask_aer_comp_abort(pdev); + /* allocate hardware resources for the VFs */ hw->iov.ops.assign_resources(hw, num_vfs, num_vfs); @@ -460,20 +489,6 @@ void fm10k_iov_disable(struct pci_dev *pdev) fm10k_iov_free_data(pdev); } -static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev) -{ - u32 err_sev; - int pos; - - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); - if (!pos) - return; - - pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev); - err_sev &= ~PCI_ERR_UNC_COMP_ABORT; - pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev); -} - int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs) { int current_vfs = pci_num_vf(pdev); @@ -495,12 +510,6 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs) /* allocate VFs if not already allocated */ if (num_vfs && num_vfs != current_vfs) { - /* Disable completer abort error reporting as - * the VFs can trigger this any time they read a queue - * that they don't own. - */ - fm10k_disable_aer_comp_abort(pdev); - err = pci_enable_sriov(pdev, num_vfs); if (err) { dev_err(&pdev->dev, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 503bbc017792..5b2a50e5798f 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -11,7 +11,7 @@ #include "fm10k.h" -#define DRV_VERSION "0.23.4-k" +#define DRV_VERSION "0.26.1-k" #define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver" const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 02345d381303..e49fb51d3613 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -23,6 +23,8 @@ static const struct fm10k_info *fm10k_info_tbl[] = { */ static const struct pci_device_id fm10k_pci_tbl[] = { { PCI_VDEVICE(INTEL, FM10K_DEV_ID_PF), fm10k_device_pf }, + { PCI_VDEVICE(INTEL, FM10K_DEV_ID_SDI_FM10420_QDA2), fm10k_device_pf }, + { PCI_VDEVICE(INTEL, FM10K_DEV_ID_SDI_FM10420_DA2), fm10k_device_pf }, { PCI_VDEVICE(INTEL, FM10K_DEV_ID_VF), fm10k_device_vf }, /* required last entry */ { 0, } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 3e608e493f9d..9fb9fca375e3 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -15,6 +15,8 @@ struct fm10k_hw; #define FM10K_DEV_ID_PF 0x15A4 #define FM10K_DEV_ID_VF 0x15A5 +#define FM10K_DEV_ID_SDI_FM10420_QDA2 0x15D0 +#define FM10K_DEV_ID_SDI_FM10420_DA2 0x15D5 #define FM10K_MAX_QUEUES 256 #define FM10K_MAX_QUEUES_PF 128 diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 81b0e1f8d14b..ac5698ed0b11 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -3674,7 +3674,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", local_vf_id, v_opcode, msglen); switch (ret) { - case VIRTCHNL_ERR_PARAM: + case VIRTCHNL_STATUS_ERR_PARAM: return -EPERM; default: return -EINVAL; diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 9f4d700e09df..29ced6b74d36 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ 
b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -51,9 +51,15 @@ * * The 40 bit 82580 SYSTIM overflows every * 2^40 * 10^-9 / 60 = 18.3 minutes. + * + * SYSTIM is converted to real time using a timecounter. As + * timecounter_cyc2time() allows old timestamps, the timecounter + * needs to be updated at least once per half of the SYSTIM interval. + * Scheduling of delayed work is not very accurate, so we aim for 8 + * minutes to be sure the actual interval is shorter than 9.16 minutes. */ -#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9) +#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 8) #define IGB_PTP_TX_TIMEOUT (HZ * 15) #define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT) #define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0) diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index ca6b0c458e4a..4fb0d9e3f2da 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -17,4 +17,4 @@ ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o -ixgbe-$(CONFIG_XFRM_OFFLOAD) += ixgbe_ipsec.o +ixgbe-$(CONFIG_IXGBE_IPSEC) += ixgbe_ipsec.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index ec1b87cc4410..143bdd5ee2a0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -769,9 +769,9 @@ struct ixgbe_adapter { #define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ u32 *rss_key; -#ifdef CONFIG_XFRM_OFFLOAD +#ifdef CONFIG_IXGBE_IPSEC struct ixgbe_ipsec *ipsec; -#endif /* CONFIG_XFRM_OFFLOAD */ +#endif /* CONFIG_IXGBE_IPSEC */ /* AF_XDP zero-copy */ struct xdp_umem **xsk_umems; @@ -1008,7 +1008,7 @@ void ixgbe_store_key(struct ixgbe_adapter *adapter); void ixgbe_store_reta(struct ixgbe_adapter *adapter); s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); -#ifdef CONFIG_XFRM_OFFLOAD +#ifdef CONFIG_IXGBE_IPSEC void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter); void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter); void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter); @@ -1036,5 +1036,5 @@ static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf) { return -EACCES; } static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf) { return -EACCES; } -#endif /* CONFIG_XFRM_OFFLOAD */ +#endif /* CONFIG_IXGBE_IPSEC */ #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0049a2becd7e..113b38e0defb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8694,7 +8694,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, #endif /* IXGBE_FCOE */ -#ifdef CONFIG_XFRM_OFFLOAD +#ifdef CONFIG_IXGBE_IPSEC if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx)) goto out_drop; #endif @@ -10190,7 +10190,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, * the TSO, so it's the exception. 
*/ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) { -#ifdef CONFIG_XFRM_OFFLOAD +#ifdef CONFIG_IXGBE_IPSEC if (!skb->sp) #endif features &= ~NETIF_F_TSO; @@ -10883,7 +10883,7 @@ skip_sriov: if (hw->mac.type >= ixgbe_mac_82599EB) netdev->features |= NETIF_F_SCTP_CRC; -#ifdef CONFIG_XFRM_OFFLOAD +#ifdef CONFIG_IXGBE_IPSEC #define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \ NETIF_F_HW_ESP_TX_CSUM | \ NETIF_F_GSO_ESP) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index af25a8fffeb8..5dacfc870259 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -722,8 +722,10 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, adapter->default_up, vf); - if (vfinfo->spoofchk_enabled) + if (vfinfo->spoofchk_enabled) { hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + hw->mac.ops.set_mac_anti_spoofing(hw, true, vf); + } } /* reset multicast table array for vf */ diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile index 297d0f0858b5..186a4bb24fde 100644 --- a/drivers/net/ethernet/intel/ixgbevf/Makefile +++ b/drivers/net/ethernet/intel/ixgbevf/Makefile @@ -10,5 +10,5 @@ ixgbevf-objs := vf.o \ mbx.o \ ethtool.o \ ixgbevf_main.o -ixgbevf-$(CONFIG_XFRM_OFFLOAD) += ipsec.o +ixgbevf-$(CONFIG_IXGBEVF_IPSEC) += ipsec.o diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index e399e1c0c54a..ecab686574b6 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -459,7 +459,7 @@ int ethtool_ioctl(struct ifreq *ifr); extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector); -#ifdef CONFIG_XFRM_OFFLOAD +#ifdef CONFIG_IXGBEVF_IPSEC void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter); void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter); void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter); @@ -482,7 +482,7 @@ static inline int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *first, struct ixgbevf_ipsec_tx_data *itd) { return 0; } -#endif /* CONFIG_XFRM_OFFLOAD */ +#endif /* CONFIG_IXGBEVF_IPSEC */ void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 98707ee11d72..5e47ede7e832 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -4150,7 +4150,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, first->tx_flags = tx_flags; first->protocol = vlan_get_protocol(skb); -#ifdef CONFIG_XFRM_OFFLOAD +#ifdef CONFIG_IXGBEVF_IPSEC if (skb->sp && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx)) goto out_drop; #endif diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 176c6b56fdcc..398328f10743 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -796,6 +796,7 @@ struct mvpp2_queue_vector { int nrxqs; u32 pending_cause_rx; struct mvpp2_port *port; + struct cpumask *mask; }; struct mvpp2_port { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 14f9679c957c..7a37a37e3fb3 100644 --- 
a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3298,24 +3298,30 @@ static int mvpp2_irqs_init(struct mvpp2_port *port) for (i = 0; i < port->nqvecs; i++) { struct mvpp2_queue_vector *qv = port->qvecs + i; - if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) + if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { + qv->mask = kzalloc(cpumask_size(), GFP_KERNEL); + if (!qv->mask) { + err = -ENOMEM; + goto err; + } + irq_set_status_flags(qv->irq, IRQ_NO_BALANCING); + } err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); if (err) goto err; if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { - unsigned long mask = 0; unsigned int cpu; for_each_present_cpu(cpu) { if (mvpp2_cpu_to_thread(port->priv, cpu) == qv->sw_thread_id) - mask |= BIT(cpu); + cpumask_set_cpu(cpu, qv->mask); } - irq_set_affinity_hint(qv->irq, to_cpumask(&mask)); + irq_set_affinity_hint(qv->irq, qv->mask); } } @@ -3325,6 +3331,8 @@ err: struct mvpp2_queue_vector *qv = port->qvecs + i; irq_set_affinity_hint(qv->irq, NULL); + kfree(qv->mask); + qv->mask = NULL; free_irq(qv->irq, qv); } @@ -3339,6 +3347,8 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port) struct mvpp2_queue_vector *qv = port->qvecs + i; irq_set_affinity_hint(qv->irq, NULL); + kfree(qv->mask); + qv->mask = NULL; irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING); free_irq(qv->irq, qv); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 5a6d0919533d..db00bf1c23f5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -43,6 +43,7 @@ #include <linux/vmalloc.h> #include <linux/irq.h> +#include <net/ip.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/ip6_checksum.h> #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 94224c22ecc3..79638dcbae78 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -713,43 +713,15 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) rq->stats->ecn_mark += !!rc; } -static __be32 mlx5e_get_fcs(struct sk_buff *skb) +static u32 mlx5e_get_fcs(const struct sk_buff *skb) { - int last_frag_sz, bytes_in_prev, nr_frags; - u8 *fcs_p1, *fcs_p2; - skb_frag_t *last_frag; - __be32 fcs_bytes; + const void *fcs_bytes; + u32 _fcs_bytes; - if (!skb_is_nonlinear(skb)) - return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN); + fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN, + ETH_FCS_LEN, &_fcs_bytes); - nr_frags = skb_shinfo(skb)->nr_frags; - last_frag = &skb_shinfo(skb)->frags[nr_frags - 1]; - last_frag_sz = skb_frag_size(last_frag); - - /* If all FCS data is in last frag */ - if (last_frag_sz >= ETH_FCS_LEN) - return *(__be32 *)(skb_frag_address(last_frag) + - last_frag_sz - ETH_FCS_LEN); - - fcs_p2 = (u8 *)skb_frag_address(last_frag); - bytes_in_prev = ETH_FCS_LEN - last_frag_sz; - - /* Find where the other part of the FCS is - Linear or another frag */ - if (nr_frags == 1) { - fcs_p1 = skb_tail_pointer(skb); - } else { - skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2]; - - fcs_p1 = skb_frag_address(prev_frag) + - skb_frag_size(prev_frag); - } - fcs_p1 -= bytes_in_prev; - - memcpy(&fcs_bytes, fcs_p1, bytes_in_prev); - memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz); - - return fcs_bytes; + return __get_unaligned_cpu32(fcs_bytes); } static u8 get_ip_proto(struct sk_buff *skb, __be16 proto) 
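The rewritten mlx5e_get_fcs() above leans on skb_header_pointer() to obtain a contiguous view of the last ETH_FCS_LEN bytes regardless of how the skb is fragmented, and on __get_unaligned_cpu32() because that tail is not guaranteed to be 4-byte aligned. A minimal userspace analogue of the unaligned-load half is sketched below; load_u32_unaligned() is an invented name, and memcpy() stands in for the kernel's get_unaligned helpers.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Portable unaligned 32-bit load; the compiler lowers memcpy() to a safe read. */
static uint32_t load_u32_unaligned(const void *p)
{
    uint32_t v;

    memcpy(&v, p, sizeof(v));
    return v;
}

int main(void)
{
    /* Pretend this is a received frame; the last 4 bytes play the FCS role. */
    uint8_t frame[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0xde, 0xad, 0xbe, 0xef };
    uint32_t fcs = load_u32_unaligned(frame + sizeof(frame) - 4);

    printf("trailing 4 bytes as a host-order word: 0x%08lx\n",
           (unsigned long)fcs);
    return 0;
}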
@@ -797,8 +769,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, network_depth - ETH_HLEN, skb->csum); if (unlikely(netdev->features & NETIF_F_RXFCS)) - skb->csum = csum_add(skb->csum, - (__force __wsum)mlx5e_get_fcs(skb)); + skb->csum = csum_block_add(skb->csum, + (__force __wsum)mlx5e_get_fcs(skb), + skb->len - ETH_FCS_LEN); stats->csum_complete++; return; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 937d0ace699a..30f751e69698 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -943,8 +943,8 @@ static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink, mlxsw_core->bus, mlxsw_core->bus_priv, true, devlink); - if (err) - mlxsw_core->reload_fail = true; + mlxsw_core->reload_fail = !!err; + return err; } @@ -1083,8 +1083,15 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, { struct devlink *devlink = priv_to_devlink(mlxsw_core); - if (mlxsw_core->reload_fail) - goto reload_fail; + if (mlxsw_core->reload_fail) { + if (!reload) + /* Only the parts that were not de-initialized in the + * failed reload attempt need to be de-initialized. + */ + goto reload_fail_deinit; + else + return; + } if (mlxsw_core->driver->fini) mlxsw_core->driver->fini(mlxsw_core); @@ -1098,9 +1105,12 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, if (!reload) devlink_resources_unregister(devlink, NULL); mlxsw_core->bus->fini(mlxsw_core->bus_priv); - if (reload) - return; -reload_fail: + + return; + +reload_fail_deinit: + devlink_unregister(devlink); + devlink_resources_unregister(devlink, NULL); devlink_free(devlink); } EXPORT_SYMBOL(mlxsw_core_bus_device_unregister); diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 32cb6718bb17..db3d2790aeec 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3284,7 +3284,7 @@ static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port, * Configures the ETS elements. */ #define MLXSW_REG_QEEC_ID 0x400D -#define MLXSW_REG_QEEC_LEN 0x1C +#define MLXSW_REG_QEEC_LEN 0x20 MLXSW_REG_DEFINE(qeec, MLXSW_REG_QEEC_ID, MLXSW_REG_QEEC_LEN); @@ -3326,6 +3326,15 @@ MLXSW_ITEM32(reg, qeec, element_index, 0x04, 0, 8); */ MLXSW_ITEM32(reg, qeec, next_element_index, 0x08, 0, 8); +/* reg_qeec_mise + * Min shaper configuration enable. Enables configuration of the min + * shaper on this ETS element + * 0 - Disable + * 1 - Enable + * Access: RW + */ +MLXSW_ITEM32(reg, qeec, mise, 0x0C, 31, 1); + enum { MLXSW_REG_QEEC_BYTES_MODE, MLXSW_REG_QEEC_PACKETS_MODE, @@ -3342,6 +3351,17 @@ enum { */ MLXSW_ITEM32(reg, qeec, pb, 0x0C, 28, 1); +/* The smallest permitted min shaper rate. */ +#define MLXSW_REG_QEEC_MIS_MIN 200000 /* Kbps */ + +/* reg_qeec_min_shaper_rate + * Min shaper information rate. + * For CPU port, can only be configured for port hierarchy. + * When in bytes mode, value is specified in units of 1000bps. + * Access: RW + */ +MLXSW_ITEM32(reg, qeec, min_shaper_rate, 0x0C, 0, 28); + /* reg_qeec_mase * Max shaper configuration enable. Enables configuration of the max * shaper on this ETS element. 
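The switch from csum_add() to csum_block_add() in mlx5e_handle_csum() matters because a ones'-complement partial sum depends on the byte offset at which the summed block sits: a block starting at an odd offset contributes its sum byte-swapped. The standalone sketch below demonstrates that property with a simplified 16-bit model; block_sum() and add_at_offset() are invented helper names, not kernel APIs, and the kernel's __wsum arithmetic differs in width but not in principle.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* 16-bit ones'-complement sum of a block, computed as if it started at byte 0 */
static uint32_t block_sum(const uint8_t *data, size_t len)
{
    uint32_t sum = 0;
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)((data[i] << 8) | data[i + 1]);
    if (len & 1)
        sum += (uint32_t)data[len - 1] << 8;
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return sum;
}

/* Fold a block's sum into a running sum, honouring the block's byte offset */
static uint32_t add_at_offset(uint32_t sum, uint32_t part, size_t offset)
{
    if (offset & 1)    /* odd offset: the block's bytes swap lanes */
        part = ((part & 0xff) << 8) | (part >> 8);
    sum += part;
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return sum;
}

int main(void)
{
    uint8_t pkt[] = { 0x45, 0x00, 0x00, 0x1d, 0x12, 0x34,
                      0x56, 0x78, 0x9a, 0xbc, 0xde };
    size_t split = 7;    /* second block starts at an odd offset */
    uint32_t whole = block_sum(pkt, sizeof(pkt));
    uint32_t parts = add_at_offset(block_sum(pkt, split),
                                   block_sum(pkt + split, sizeof(pkt) - split),
                                   split);

    /* Both values print as 0x2687; without the odd-offset swap they differ. */
    printf("whole=0x%04x split=0x%04x\n",
           (unsigned int)whole, (unsigned int)parts);
    return 0;
}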
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 8a4983adae94..a2df12b79f8e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2740,6 +2740,21 @@ int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); } +static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_reg_qeec_hr hr, u8 index, + u8 next_index, u32 minrate) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char qeec_pl[MLXSW_REG_QEEC_LEN]; + + mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, + next_index); + mlxsw_reg_qeec_mise_set(qeec_pl, true); + mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); +} + int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 switch_prio, u8 tclass) { @@ -2817,6 +2832,16 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) return err; } + /* Configure the min shaper for multicast TCs. */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_TC, + i + 8, i, + MLXSW_REG_QEEC_MIS_MIN); + if (err) + return err; + } + /* Map all priorities to traffic class 0. */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index bc60d7a8b49d..739a51f0a366 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -2661,8 +2661,6 @@ static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work) break; case SWITCHDEV_FDB_DEL_TO_DEVICE: fdb_info = &switchdev_work->fdb_info; - if (!fdb_info->added_by_user) - break; mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false); break; case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index b72ef171477e..bdd351597b55 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -243,7 +243,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, */ int stmmac_mdio_reset(struct mii_bus *bus) { -#if defined(CONFIG_STMMAC_PLATFORM) +#if IS_ENABLED(CONFIG_STMMAC_PLATFORM) struct net_device *ndev = bus->priv; struct stmmac_priv *priv = netdev_priv(ndev); unsigned int mii_address = priv->hw->mii.addr; diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index b12023bc2cab..a5bab614ff84 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -71,7 +71,6 @@ static unsigned int tx_start = 10; static unsigned int tx_stop = 5; struct ntb_netdev { - struct list_head list; struct pci_dev *pdev; struct net_device *ndev; struct ntb_transport_qp *qp; @@ -81,8 +80,6 @@ struct ntb_netdev { #define NTB_TX_TIMEOUT_MS 1000 #define NTB_RXQ_SIZE 100 -static LIST_HEAD(dev_list); - static void ntb_netdev_event_handler(void *data, int link_is_up) { struct net_device *ndev = data; @@ -236,7 +233,7 @@ static void ntb_netdev_tx_timer(struct timer_list *t) struct net_device *ndev = dev->ndev; if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) { - mod_timer(&dev->tx_timer, jiffies 
+ msecs_to_jiffies(tx_time)); + mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time)); } else { /* Make sure anybody stopping the queue after this sees the new * value of ntb_transport_tx_free_entry() @@ -452,7 +449,7 @@ static int ntb_netdev_probe(struct device *client_dev) if (rc) goto err1; - list_add(&dev->list, &dev_list); + dev_set_drvdata(client_dev, ndev); dev_info(&pdev->dev, "%s created\n", ndev->name); return 0; @@ -465,27 +462,8 @@ err: static void ntb_netdev_remove(struct device *client_dev) { - struct ntb_dev *ntb; - struct net_device *ndev; - struct pci_dev *pdev; - struct ntb_netdev *dev; - bool found = false; - - ntb = dev_ntb(client_dev->parent); - pdev = ntb->pdev; - - list_for_each_entry(dev, &dev_list, list) { - if (dev->pdev == pdev) { - found = true; - break; - } - } - if (!found) - return; - - list_del(&dev->list); - - ndev = dev->ndev; + struct net_device *ndev = dev_get_drvdata(client_dev); + struct ntb_netdev *dev = netdev_priv(ndev); unregister_netdev(ndev); ntb_transport_free_queue(dev->qp); diff --git a/drivers/ntb/hw/idt/Kconfig b/drivers/ntb/hw/idt/Kconfig index b360e5613b9f..f8948cf515ce 100644 --- a/drivers/ntb/hw/idt/Kconfig +++ b/drivers/ntb/hw/idt/Kconfig @@ -1,6 +1,7 @@ config NTB_IDT tristate "IDT PCIe-switch Non-Transparent Bridge support" depends on PCI + select HWMON help This driver supports NTB of cappable IDT PCIe-switches. @@ -23,9 +24,7 @@ config NTB_IDT BAR settings of peer NT-functions, the BAR setups can't be done over kernel PCI fixups. That's why the alternative pre-initialization techniques like BIOS using SMBus interface or EEPROM should be - utilized. Additionally if one needs to have temperature sensor - information printed to system log, the corresponding registers must - be initialized within BIOS/EEPROM as well. + utilized. If unsure, say N. diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c index dbe72f116017..1dede87dd54f 100644 --- a/drivers/ntb/hw/idt/ntb_hw_idt.c +++ b/drivers/ntb/hw/idt/ntb_hw_idt.c @@ -4,7 +4,7 @@ * * GPL LICENSE SUMMARY * - * Copyright (C) 2016 T-Platforms All Rights Reserved. + * Copyright (C) 2016-2018 T-Platforms JSC All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -49,11 +49,14 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/spinlock.h> +#include <linux/mutex.h> #include <linux/pci.h> #include <linux/aer.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/debugfs.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> #include <linux/ntb.h> #include "ntb_hw_idt.h" @@ -1105,9 +1108,9 @@ static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port, } /* Allocate memory for memory window descriptors */ - ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt, - sizeof(*ret_mws), GFP_KERNEL); - if (IS_ERR_OR_NULL(ret_mws)) + ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt, sizeof(*ret_mws), + GFP_KERNEL); + if (!ret_mws) return ERR_PTR(-ENOMEM); /* Copy the info of detected memory windows */ @@ -1320,7 +1323,7 @@ static int idt_ntb_peer_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx, idt_nt_write(ndev, bar->ltbase, (u32)addr); idt_nt_write(ndev, bar->utbase, (u32)(addr >> 32)); /* Set the custom BAR aperture limit */ - limit = pci_resource_start(ntb->pdev, mw_cfg->bar) + size; + limit = pci_bus_address(ntb->pdev, mw_cfg->bar) + size; idt_nt_write(ndev, bar->limit, (u32)limit); if (IS_FLD_SET(BARSETUP_TYPE, data, 64)) idt_nt_write(ndev, (bar + 1)->limit, (limit >> 32)); @@ -1821,61 +1824,284 @@ static int idt_ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx, * 7. Temperature sensor operations * * IDT PCIe-switch has an embedded temperature sensor, which can be used to - * warn a user-space of possible chip overheating. Since workload temperature - * can be different on different platforms, temperature thresholds as well as - * general sensor settings must be setup in the framework of BIOS/EEPROM - * initializations. It includes the actual sensor enabling as well. + * check current chip core temperature. Since a workload environment can be + * different on different platforms, an offset and ADC/filter settings can be + * specified. Although the offset configuration is only exposed to the sysfs + * hwmon interface at the moment. The rest of the settings can be adjusted + * for instance by the BIOS/EEPROM firmware. *============================================================================= */ /* + * idt_get_deg() - convert millidegree Celsius value to just degree + * @mdegC: IN - millidegree Celsius value + * + * Return: Degree corresponding to the passed millidegree value + */ +static inline s8 idt_get_deg(long mdegC) +{ + return mdegC / 1000; +} + +/* + * idt_get_frac() - retrieve 0/0.5 fraction of the millidegree Celsius value + * @mdegC: IN - millidegree Celsius value + * + * Return: 0/0.5 degree fraction of the passed millidegree value + */ +static inline u8 idt_get_deg_frac(long mdegC) +{ + return (mdegC % 1000) >= 500 ? 5 : 0; +} + +/* + * idt_get_temp_fmt() - convert millidegree Celsius value to 0:7:1 format + * @mdegC: IN - millidegree Celsius value + * + * Return: 0:7:1 format acceptable by the IDT temperature sensor + */ +static inline u8 idt_temp_get_fmt(long mdegC) +{ + return (idt_get_deg(mdegC) << 1) | (idt_get_deg_frac(mdegC) ? 1 : 0); +} + +/* + * idt_get_temp_sval() - convert temp sample to signed millidegree Celsius + * @data: IN - shifted to LSB 8-bits temperature sample + * + * Return: signed millidegree Celsius + */ +static inline long idt_get_temp_sval(u32 data) +{ + return ((s8)data / 2) * 1000 + (data & 0x1 ? 
500 : 0); +} + +/* + * idt_get_temp_sval() - convert temp sample to unsigned millidegree Celsius + * @data: IN - shifted to LSB 8-bits temperature sample + * + * Return: unsigned millidegree Celsius + */ +static inline long idt_get_temp_uval(u32 data) +{ + return (data / 2) * 1000 + (data & 0x1 ? 500 : 0); +} + +/* * idt_read_temp() - read temperature from chip sensor * @ntb: NTB device context. - * @val: OUT - integer value of temperature - * @frac: OUT - fraction + * @type: IN - type of the temperature value to read + * @val: OUT - integer value of temperature in millidegree Celsius */ -static void idt_read_temp(struct idt_ntb_dev *ndev, unsigned char *val, - unsigned char *frac) +static void idt_read_temp(struct idt_ntb_dev *ndev, + const enum idt_temp_val type, long *val) { u32 data; - /* Read the data from TEMP field of the TMPSTS register */ - data = idt_sw_read(ndev, IDT_SW_TMPSTS); - data = GET_FIELD(TMPSTS_TEMP, data); - /* TEMP field has one fractional bit and seven integer bits */ - *val = data >> 1; - *frac = ((data & 0x1) ? 5 : 0); + /* Alter the temperature field in accordance with the passed type */ + switch (type) { + case IDT_TEMP_CUR: + data = GET_FIELD(TMPSTS_TEMP, + idt_sw_read(ndev, IDT_SW_TMPSTS)); + break; + case IDT_TEMP_LOW: + data = GET_FIELD(TMPSTS_LTEMP, + idt_sw_read(ndev, IDT_SW_TMPSTS)); + break; + case IDT_TEMP_HIGH: + data = GET_FIELD(TMPSTS_HTEMP, + idt_sw_read(ndev, IDT_SW_TMPSTS)); + break; + case IDT_TEMP_OFFSET: + /* This is the only field with signed 0:7:1 format */ + data = GET_FIELD(TMPADJ_OFFSET, + idt_sw_read(ndev, IDT_SW_TMPADJ)); + *val = idt_get_temp_sval(data); + return; + default: + data = GET_FIELD(TMPSTS_TEMP, + idt_sw_read(ndev, IDT_SW_TMPSTS)); + break; + } + + /* The rest of the fields accept unsigned 0:7:1 format */ + *val = idt_get_temp_uval(data); } /* - * idt_temp_isr() - temperature sensor alarm events ISR - * @ndev: IDT NTB hardware driver descriptor - * @ntint_sts: NT-function interrupt status + * idt_write_temp() - write temperature to the chip sensor register + * @ntb: NTB device context. + * @type: IN - type of the temperature value to change + * @val: IN - integer value of temperature in millidegree Celsius + */ +static void idt_write_temp(struct idt_ntb_dev *ndev, + const enum idt_temp_val type, const long val) +{ + unsigned int reg; + u32 data; + u8 fmt; + + /* Retrieve the properly formatted temperature value */ + fmt = idt_temp_get_fmt(val); + + mutex_lock(&ndev->hwmon_mtx); + switch (type) { + case IDT_TEMP_LOW: + reg = IDT_SW_TMPALARM; + data = SET_FIELD(TMPALARM_LTEMP, idt_sw_read(ndev, reg), fmt) & + ~IDT_TMPALARM_IRQ_MASK; + break; + case IDT_TEMP_HIGH: + reg = IDT_SW_TMPALARM; + data = SET_FIELD(TMPALARM_HTEMP, idt_sw_read(ndev, reg), fmt) & + ~IDT_TMPALARM_IRQ_MASK; + break; + case IDT_TEMP_OFFSET: + reg = IDT_SW_TMPADJ; + data = SET_FIELD(TMPADJ_OFFSET, idt_sw_read(ndev, reg), fmt); + break; + default: + goto inval_spin_unlock; + } + + idt_sw_write(ndev, reg, data); + +inval_spin_unlock: + mutex_unlock(&ndev->hwmon_mtx); +} + +/* + * idt_sysfs_show_temp() - printout corresponding temperature value + * @dev: Pointer to the NTB device structure + * @da: Sensor device attribute structure + * @buf: Buffer to print temperature out * - * It handles events of temperature crossing alarm thresholds. Since reading - * of TMPALARM register clears it up, the function doesn't analyze the - * read value, instead the current temperature value just warningly printed to - * log. 
- * The method is called from PCIe ISR bottom-half routine. + * Return: Number of written symbols or negative error */ -static void idt_temp_isr(struct idt_ntb_dev *ndev, u32 ntint_sts) +static ssize_t idt_sysfs_show_temp(struct device *dev, + struct device_attribute *da, char *buf) { - unsigned char val, frac; + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct idt_ntb_dev *ndev = dev_get_drvdata(dev); + enum idt_temp_val type = attr->index; + long mdeg; - /* Read the current temperature value */ - idt_read_temp(ndev, &val, &frac); + idt_read_temp(ndev, type, &mdeg); + return sprintf(buf, "%ld\n", mdeg); +} - /* Read the temperature alarm to clean the alarm status out */ - /*(void)idt_sw_read(ndev, IDT_SW_TMPALARM);*/ +/* + * idt_sysfs_set_temp() - set corresponding temperature value + * @dev: Pointer to the NTB device structure + * @da: Sensor device attribute structure + * @buf: Buffer to print temperature out + * @count: Size of the passed buffer + * + * Return: Number of written symbols or negative error + */ +static ssize_t idt_sysfs_set_temp(struct device *dev, + struct device_attribute *da, const char *buf, + size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct idt_ntb_dev *ndev = dev_get_drvdata(dev); + enum idt_temp_val type = attr->index; + long mdeg; + int ret; - /* Clean the corresponding interrupt bit */ - idt_nt_write(ndev, IDT_NT_NTINTSTS, IDT_NTINTSTS_TMPSENSOR); + ret = kstrtol(buf, 10, &mdeg); + if (ret) + return ret; + + /* Clamp the passed value in accordance with the type */ + if (type == IDT_TEMP_OFFSET) + mdeg = clamp_val(mdeg, IDT_TEMP_MIN_OFFSET, + IDT_TEMP_MAX_OFFSET); + else + mdeg = clamp_val(mdeg, IDT_TEMP_MIN_MDEG, IDT_TEMP_MAX_MDEG); + + idt_write_temp(ndev, type, mdeg); + + return count; +} + +/* + * idt_sysfs_reset_hist() - reset temperature history + * @dev: Pointer to the NTB device structure + * @da: Sensor device attribute structure + * @buf: Buffer to print temperature out + * @count: Size of the passed buffer + * + * Return: Number of written symbols or negative error + */ +static ssize_t idt_sysfs_reset_hist(struct device *dev, + struct device_attribute *da, + const char *buf, size_t count) +{ + struct idt_ntb_dev *ndev = dev_get_drvdata(dev); + + /* Just set the maximal value to the lowest temperature field and + * minimal value to the highest temperature field + */ + idt_write_temp(ndev, IDT_TEMP_LOW, IDT_TEMP_MAX_MDEG); + idt_write_temp(ndev, IDT_TEMP_HIGH, IDT_TEMP_MIN_MDEG); - dev_dbg(&ndev->ntb.pdev->dev, - "Temp sensor IRQ detected %#08x", ntint_sts); + return count; +} + +/* + * Hwmon IDT sysfs attributes + */ +static SENSOR_DEVICE_ATTR(temp1_input, 0444, idt_sysfs_show_temp, NULL, + IDT_TEMP_CUR); +static SENSOR_DEVICE_ATTR(temp1_lowest, 0444, idt_sysfs_show_temp, NULL, + IDT_TEMP_LOW); +static SENSOR_DEVICE_ATTR(temp1_highest, 0444, idt_sysfs_show_temp, NULL, + IDT_TEMP_HIGH); +static SENSOR_DEVICE_ATTR(temp1_offset, 0644, idt_sysfs_show_temp, + idt_sysfs_set_temp, IDT_TEMP_OFFSET); +static DEVICE_ATTR(temp1_reset_history, 0200, NULL, idt_sysfs_reset_hist); - /* Print temperature value to log */ - dev_warn(&ndev->ntb.pdev->dev, "Temperature %hhu.%hhu", val, frac); +/* + * Hwmon IDT sysfs attributes group + */ +static struct attribute *idt_temp_attrs[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_lowest.dev_attr.attr, + &sensor_dev_attr_temp1_highest.dev_attr.attr, + &sensor_dev_attr_temp1_offset.dev_attr.attr, + &dev_attr_temp1_reset_history.attr, + 
NULL +}; +ATTRIBUTE_GROUPS(idt_temp); + +/* + * idt_init_temp() - initialize temperature sensor interface + * @ndev: IDT NTB hardware driver descriptor + * + * Simple sensor initialization method is responsible for device switching + * on and resource-managed hwmon interface registration. Note that + * since the device is shared we won't disable it on remove, but leave it + * working until the system is powered off. + */ +static void idt_init_temp(struct idt_ntb_dev *ndev) +{ + struct device *hwmon; + + /* Enable sensor if it hasn't been already */ + idt_sw_write(ndev, IDT_SW_TMPCTL, 0x0); + + /* Initialize hwmon interface fields */ + mutex_init(&ndev->hwmon_mtx); + + hwmon = devm_hwmon_device_register_with_groups(&ndev->ntb.pdev->dev, + ndev->swcfg->name, ndev, idt_temp_groups); + if (IS_ERR(hwmon)) { + dev_err(&ndev->ntb.pdev->dev, "Couldn't create hwmon device"); + return; + } + + dev_dbg(&ndev->ntb.pdev->dev, "Temperature HWmon interface registered"); } /*============================================================================= @@ -1931,7 +2157,7 @@ static int idt_init_isr(struct idt_ntb_dev *ndev) goto err_free_vectors; } - /* Unmask Message/Doorbell/SE/Temperature interrupts */ + /* Unmask Message/Doorbell/SE interrupts */ ntint_mask = idt_nt_read(ndev, IDT_NT_NTINTMSK) & ~IDT_NTINTMSK_ALL; idt_nt_write(ndev, IDT_NT_NTINTMSK, ntint_mask); @@ -1946,7 +2172,6 @@ err_free_vectors: return ret; } - /* * idt_deinit_ist() - deinitialize PCIe interrupt handler * @ndev: IDT NTB hardware driver descriptor @@ -2007,12 +2232,6 @@ static irqreturn_t idt_thread_isr(int irq, void *devid) handled = true; } - /* Handle temperature sensor interrupt */ - if (ntint_sts & IDT_NTINTSTS_TMPSENSOR) { - idt_temp_isr(ndev, ntint_sts); - handled = true; - } - dev_dbg(&ndev->ntb.pdev->dev, "IDT IRQs 0x%08x handled", ntint_sts); return handled ?
IRQ_HANDLED : IRQ_NONE; @@ -2123,9 +2342,9 @@ static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf, size_t count, loff_t *offp) { struct idt_ntb_dev *ndev = filp->private_data; - unsigned char temp, frac, idx, pidx, cnt; + unsigned char idx, pidx, cnt; + unsigned long irqflags, mdeg; ssize_t ret = 0, off = 0; - unsigned long irqflags; enum ntb_speed speed; enum ntb_width width; char *strbuf; @@ -2274,9 +2493,10 @@ static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf, off += scnprintf(strbuf + off, size - off, "\n"); /* Current temperature */ - idt_read_temp(ndev, &temp, &frac); + idt_read_temp(ndev, IDT_TEMP_CUR, &mdeg); off += scnprintf(strbuf + off, size - off, - "Switch temperature\t\t- %hhu.%hhuC\n", temp, frac); + "Switch temperature\t\t- %hhd.%hhuC\n", + idt_get_deg(mdeg), idt_get_deg_frac(mdeg)); /* Copy the buffer to the User Space */ ret = simple_read_from_buffer(ubuf, count, offp, strbuf, off); @@ -2390,7 +2610,7 @@ static struct idt_ntb_dev *idt_create_dev(struct pci_dev *pdev, /* Allocate memory for the IDT PCIe-device descriptor */ ndev = devm_kzalloc(&pdev->dev, sizeof(*ndev), GFP_KERNEL); - if (IS_ERR_OR_NULL(ndev)) { + if (!ndev) { dev_err(&pdev->dev, "Memory allocation failed for descriptor"); return ERR_PTR(-ENOMEM); } @@ -2571,6 +2791,9 @@ static int idt_pci_probe(struct pci_dev *pdev, /* Initialize Messaging subsystem */ idt_init_msg(ndev); + /* Initialize hwmon interface */ + idt_init_temp(ndev); + /* Initialize IDT interrupts handler */ ret = idt_init_isr(ndev); if (ret != 0) diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.h b/drivers/ntb/hw/idt/ntb_hw_idt.h index 856fd182f6f4..2f1aa121b0cf 100644 --- a/drivers/ntb/hw/idt/ntb_hw_idt.h +++ b/drivers/ntb/hw/idt/ntb_hw_idt.h @@ -4,7 +4,7 @@ * * GPL LICENSE SUMMARY * - * Copyright (C) 2016 T-Platforms All Rights Reserved. + * Copyright (C) 2016-2018 T-Platforms JSC All Rights Reserved. 
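The 0:7:1 temperature encoding used throughout the hwmon additions above (seven integer-degree bits plus one half-degree bit) is compact enough to verify with a standalone round-trip. The sketch below covers the unsigned range only (0 to 127.5 degrees Celsius, matching IDT_TEMP_MIN_MDEG/IDT_TEMP_MAX_MDEG); temp_to_fmt() and fmt_to_temp() are invented names mirroring the idt_temp_get_fmt()/idt_get_temp_uval() helpers, and values truncate to half-degree resolution just as those helpers do.

#include <stdio.h>
#include <stdint.h>

/* Encode a non-negative millidegree value into the 0:7:1 register format:
 * bits 7:1 hold whole degrees, bit 0 selects an extra half degree. */
static uint8_t temp_to_fmt(long mdeg)
{
    return (uint8_t)(((mdeg / 1000) << 1) | ((mdeg % 1000) >= 500 ? 1 : 0));
}

/* Decode the register format back to millidegrees. */
static long fmt_to_temp(uint8_t fmt)
{
    return (fmt >> 1) * 1000L + ((fmt & 1) ? 500 : 0);
}

int main(void)
{
    long samples[] = { 0, 500, 23750, 63500, 127500 };
    size_t i;

    for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        uint8_t fmt = temp_to_fmt(samples[i]);

        printf("%6ld mC -> 0x%02x -> %6ld mC\n",
               samples[i], (unsigned int)fmt, fmt_to_temp(fmt));
    }
    return 0;
}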
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -47,9 +47,9 @@ #include <linux/pci_ids.h> #include <linux/interrupt.h> #include <linux/spinlock.h> +#include <linux/mutex.h> #include <linux/ntb.h> - /* * Macro is used to create the struct pci_device_id that matches * the supported IDT PCIe-switches @@ -688,15 +688,14 @@ * @IDT_NTINTMSK_DBELL: Doorbell interrupt mask bit * @IDT_NTINTMSK_SEVENT: Switch Event interrupt mask bit * @IDT_NTINTMSK_TMPSENSOR: Temperature sensor interrupt mask bit - * @IDT_NTINTMSK_ALL: All the useful interrupts mask + * @IDT_NTINTMSK_ALL: NTB-related interrupts mask */ #define IDT_NTINTMSK_MSG 0x00000001U #define IDT_NTINTMSK_DBELL 0x00000002U #define IDT_NTINTMSK_SEVENT 0x00000008U #define IDT_NTINTMSK_TMPSENSOR 0x00000080U #define IDT_NTINTMSK_ALL \ - (IDT_NTINTMSK_MSG | IDT_NTINTMSK_DBELL | \ - IDT_NTINTMSK_SEVENT | IDT_NTINTMSK_TMPSENSOR) + (IDT_NTINTMSK_MSG | IDT_NTINTMSK_DBELL | IDT_NTINTMSK_SEVENT) /* * NTGSIGNAL register fields related constants @@ -886,12 +885,60 @@ #define IDT_SWPxMSGCTL_PART_FLD 4 /* + * TMPCTL register fields related constants + * @IDT_TMPCTL_LTH_MASK: Low temperature threshold field mask + * @IDT_TMPCTL_LTH_FLD: Low temperature threshold field offset + * @IDT_TMPCTL_MTH_MASK: Middle temperature threshold field mask + * @IDT_TMPCTL_MTH_FLD: Middle temperature threshold field offset + * @IDT_TMPCTL_HTH_MASK: High temperature threshold field mask + * @IDT_TMPCTL_HTH_FLD: High temperature threshold field offset + * @IDT_TMPCTL_PDOWN: Temperature sensor power down + */ +#define IDT_TMPCTL_LTH_MASK 0x000000FFU +#define IDT_TMPCTL_LTH_FLD 0 +#define IDT_TMPCTL_MTH_MASK 0x0000FF00U +#define IDT_TMPCTL_MTH_FLD 8 +#define IDT_TMPCTL_HTH_MASK 0x00FF0000U +#define IDT_TMPCTL_HTH_FLD 16 +#define IDT_TMPCTL_PDOWN 0x80000000U + +/* * TMPSTS register fields related constants * @IDT_TMPSTS_TEMP_MASK: Current temperature field mask * @IDT_TMPSTS_TEMP_FLD: Current temperature field offset + * @IDT_TMPSTS_LTEMP_MASK: Lowest temperature field mask + * @IDT_TMPSTS_LTEMP_FLD: Lowest temperature field offset + * @IDT_TMPSTS_HTEMP_MASK: Highest temperature field mask + * @IDT_TMPSTS_HTEMP_FLD: Highest temperature field offset */ #define IDT_TMPSTS_TEMP_MASK 0x000000FFU #define IDT_TMPSTS_TEMP_FLD 0 +#define IDT_TMPSTS_LTEMP_MASK 0x0000FF00U +#define IDT_TMPSTS_LTEMP_FLD 8 +#define IDT_TMPSTS_HTEMP_MASK 0x00FF0000U +#define IDT_TMPSTS_HTEMP_FLD 16 + +/* + * TMPALARM register fields related constants + * @IDT_TMPALARM_LTEMP_MASK: Lowest temperature field mask + * @IDT_TMPALARM_LTEMP_FLD: Lowest temperature field offset + * @IDT_TMPALARM_HTEMP_MASK: Highest temperature field mask + * @IDT_TMPALARM_HTEMP_FLD: Highest temperature field offset + * @IDT_TMPALARM_IRQ_MASK: Alarm IRQ status mask + */ +#define IDT_TMPALARM_LTEMP_MASK 0x0000FF00U +#define IDT_TMPALARM_LTEMP_FLD 8 +#define IDT_TMPALARM_HTEMP_MASK 0x00FF0000U +#define IDT_TMPALARM_HTEMP_FLD 16 +#define IDT_TMPALARM_IRQ_MASK 0x3F000000U + +/* + * TMPADJ register fields related constants + * @IDT_TMPADJ_OFFSET_MASK: Temperature value offset field mask + * @IDT_TMPADJ_OFFSET_FLD: Temperature value offset field offset + */ +#define IDT_TMPADJ_OFFSET_MASK 0x000000FFU +#define IDT_TMPADJ_OFFSET_FLD 0 /* * Helper macro to get/set the corresponding field value @@ -951,6 +998,32 @@ #define IDT_DIR_SIZE_ALIGN 1 /* + * IDT PCIe-switch temperature sensor value limits + * @IDT_TEMP_MIN_MDEG: Minimal integer value of 
temperature + * @IDT_TEMP_MAX_MDEG: Maximal integer value of temperature + * @IDT_TEMP_MIN_OFFSET:Minimal integer value of temperature offset + * @IDT_TEMP_MAX_OFFSET:Maximal integer value of temperature offset + */ +#define IDT_TEMP_MIN_MDEG 0 +#define IDT_TEMP_MAX_MDEG 127500 +#define IDT_TEMP_MIN_OFFSET -64000 +#define IDT_TEMP_MAX_OFFSET 63500 + +/* + * Temperature sensor values enumeration + * @IDT_TEMP_CUR: Current temperature + * @IDT_TEMP_LOW: Lowest historical temperature + * @IDT_TEMP_HIGH: Highest historical temperature + * @IDT_TEMP_OFFSET: Current temperature offset + */ +enum idt_temp_val { + IDT_TEMP_CUR, + IDT_TEMP_LOW, + IDT_TEMP_HIGH, + IDT_TEMP_OFFSET +}; + +/* * IDT Memory Windows type. Depending on the device settings, IDT supports * Direct Address Translation MW registers and Lookup Table registers * @IDT_MW_DIR: Direct address translation @@ -1044,6 +1117,8 @@ struct idt_ntb_peer { * @msg_mask_lock: Message mask register lock * @gasa_lock: GASA registers access lock * + * @hwmon_mtx: Temperature sensor interface update mutex + * * @dbgfs_info: DebugFS info node */ struct idt_ntb_dev { @@ -1071,6 +1146,8 @@ struct idt_ntb_dev { spinlock_t msg_mask_lock; spinlock_t gasa_lock; + struct mutex hwmon_mtx; + struct dentry *dbgfs_info; }; #define to_ndev_ntb(__ntb) container_of(__ntb, struct idt_ntb_dev, ntb) diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c index 6aa573227279..2ad263f708da 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen1.c +++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c @@ -265,7 +265,7 @@ static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits, return 0; } -static inline int ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector) +static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector) { u64 shift, mask; diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 939895966476..3bfdb4562408 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -194,6 +194,8 @@ struct ntb_transport_mw { void __iomem *vbase; size_t xlat_size; size_t buff_size; + size_t alloc_size; + void *alloc_addr; void *virt_addr; dma_addr_t dma_addr; }; @@ -672,13 +674,59 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw) return; ntb_mw_clear_trans(nt->ndev, PIDX, num_mw); - dma_free_coherent(&pdev->dev, mw->buff_size, - mw->virt_addr, mw->dma_addr); + dma_free_coherent(&pdev->dev, mw->alloc_size, + mw->alloc_addr, mw->dma_addr); mw->xlat_size = 0; mw->buff_size = 0; + mw->alloc_size = 0; + mw->alloc_addr = NULL; mw->virt_addr = NULL; } +static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw, + struct device *dma_dev, size_t align) +{ + dma_addr_t dma_addr; + void *alloc_addr, *virt_addr; + int rc; + + alloc_addr = dma_alloc_coherent(dma_dev, mw->alloc_size, + &dma_addr, GFP_KERNEL); + if (!alloc_addr) { + dev_err(dma_dev, "Unable to alloc MW buff of size %zu\n", + mw->alloc_size); + return -ENOMEM; + } + virt_addr = alloc_addr; + + /* + * we must ensure that the memory address allocated is BAR size + * aligned in order for the XLAT register to take the value. This + * is a requirement of the hardware. It is recommended to setup CMA + * for BAR sizes equal or greater than 4MB. 
+ */ + if (!IS_ALIGNED(dma_addr, align)) { + if (mw->alloc_size > mw->buff_size) { + virt_addr = PTR_ALIGN(alloc_addr, align); + dma_addr = ALIGN(dma_addr, align); + } else { + rc = -ENOMEM; + goto err; + } + } + + mw->alloc_addr = alloc_addr; + mw->virt_addr = virt_addr; + mw->dma_addr = dma_addr; + + return 0; + +err: + dma_free_coherent(dma_dev, mw->alloc_size, alloc_addr, dma_addr); + + return rc; +} + static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, resource_size_t size) { @@ -710,28 +758,20 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, /* Alloc memory for receiving data. Must be aligned */ mw->xlat_size = xlat_size; mw->buff_size = buff_size; + mw->alloc_size = buff_size; - mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size, - &mw->dma_addr, GFP_KERNEL); - if (!mw->virt_addr) { - mw->xlat_size = 0; - mw->buff_size = 0; - dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n", - buff_size); - return -ENOMEM; - } - - /* - * we must ensure that the memory address allocated is BAR size - * aligned in order for the XLAT register to take the value. This - * is a requirement of the hardware. It is recommended to setup CMA - * for BAR sizes equal or greater than 4MB. - */ - if (!IS_ALIGNED(mw->dma_addr, xlat_align)) { - dev_err(&pdev->dev, "DMA memory %pad is not aligned\n", - &mw->dma_addr); - ntb_free_mw(nt, num_mw); - return -ENOMEM; + rc = ntb_alloc_mw_buffer(mw, &pdev->dev, xlat_align); + if (rc) { + mw->alloc_size *= 2; + rc = ntb_alloc_mw_buffer(mw, &pdev->dev, xlat_align); + if (rc) { + dev_err(&pdev->dev, + "Unable to alloc aligned MW buff\n"); + mw->xlat_size = 0; + mw->buff_size = 0; + mw->alloc_size = 0; + return rc; + } } /* Notify HW the memory location of the receive buffer */ @@ -1278,6 +1318,7 @@ static void ntb_rx_copy_callback(void *data, case DMA_TRANS_READ_FAILED: case DMA_TRANS_WRITE_FAILED: entry->errors++; + /* fall through */ case DMA_TRANS_ABORTED: { struct ntb_transport_qp *qp = entry->qp; @@ -1533,6 +1574,7 @@ static void ntb_tx_copy_callback(void *data, case DMA_TRANS_READ_FAILED: case DMA_TRANS_WRITE_FAILED: entry->errors++; + /* fall through */ case DMA_TRANS_ABORTED: { void __iomem *offset = diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index e52b9d3c0bd6..0b70c8bab045 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1704,7 +1704,6 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, op->fcp_req.rspaddr = &op->rsp_iu; op->fcp_req.rsplen = sizeof(op->rsp_iu); op->fcp_req.done = nvme_fc_fcpio_done; - op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE]; op->ctrl = ctrl; op->queue = queue; op->rq = rq; @@ -1752,6 +1751,7 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq, if (res) return res; op->op.fcp_req.first_sgl = &op->sgl[0]; + op->op.fcp_req.private = &op->priv[0]; return res; } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index f30031945ee4..c33bb201b884 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1663,6 +1663,9 @@ static void nvme_map_cmb(struct nvme_dev *dev) struct pci_dev *pdev = to_pci_dev(dev->dev); int bar; + if (dev->cmb_size) + return; + dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); if (!dev->cmbsz) return; @@ -2147,7 +2150,6 @@ static void nvme_pci_disable(struct nvme_dev *dev) { struct pci_dev *pdev = to_pci_dev(dev->dev); - nvme_release_cmb(dev); pci_free_irq_vectors(pdev); if (pci_is_enabled(pdev)) { @@ -2595,6 +2597,7 @@ static void nvme_remove(struct pci_dev *pdev) 
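The new ntb_alloc_mw_buffer() path above works around unaligned dma_alloc_coherent() results by retrying with a doubled allocation and sliding the start forward to the required boundary. The standalone sketch below shows only the pointer arithmetic, with malloc() standing in for the DMA allocator; align_up() is an invented helper doing the same rounding as ALIGN()/PTR_ALIGN(), and it assumes the alignment does not exceed the requested size, which is what makes the 2x over-allocation sufficient.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Round p up to the next multiple of align (align must be a power of two). */
static uintptr_t align_up(uintptr_t p, uintptr_t align)
{
    return (p + align - 1) & ~(align - 1);
}

int main(void)
{
    size_t size = 4096, align = 4096;
    /* Over-allocate 2 * size so an unaligned start can be slid forward and
     * still leave a full "size" bytes before the end of the buffer. */
    unsigned char *buf = malloc(2 * size);
    uintptr_t start, aligned;

    if (!buf)
        return 1;

    start = (uintptr_t)buf;
    aligned = align_up(start, align);

    printf("raw=%#lx aligned=%#lx wasted=%lu of %lu slack bytes\n",
           (unsigned long)start, (unsigned long)aligned,
           (unsigned long)(aligned - start), (unsigned long)size);

    free(buf);
    return 0;
}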
nvme_stop_ctrl(&dev->ctrl); nvme_remove_namespaces(&dev->ctrl); nvme_dev_disable(dev, true); + nvme_release_cmb(dev); nvme_free_host_mem(dev); nvme_dev_remove_admin(dev); nvme_free_queues(dev, 0); diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c index 39d972e2595f..01feebec29ea 100644 --- a/drivers/nvme/target/io-cmd-file.c +++ b/drivers/nvme/target/io-cmd-file.c @@ -101,7 +101,7 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos, rw = READ; } - iov_iter_bvec(&iter, ITER_BVEC | rw, req->f.bvec, nr_segs, count); + iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count); iocb->ki_pos = pos; iocb->ki_filp = req->ns->file; diff --git a/drivers/of/base.c b/drivers/of/base.c index d023cf303d56..09692c9b32a7 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -777,8 +777,6 @@ struct device_node *of_get_next_cpu_node(struct device_node *prev) if (!(of_node_name_eq(next, "cpu") || (next->type && !of_node_cmp(next->type, "cpu")))) continue; - if (!__of_device_is_available(next)) - continue; if (of_node_get(next)) break; } diff --git a/drivers/platform/chrome/chromeos_tbmc.c b/drivers/platform/chrome/chromeos_tbmc.c index 1e81f8144c0d..ce259ec9f990 100644 --- a/drivers/platform/chrome/chromeos_tbmc.c +++ b/drivers/platform/chrome/chromeos_tbmc.c @@ -99,7 +99,7 @@ static const struct acpi_device_id chromeos_tbmc_acpi_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, chromeos_tbmc_acpi_device_ids); -static const SIMPLE_DEV_PM_OPS(chromeos_tbmc_pm_ops, NULL, +static SIMPLE_DEV_PM_OPS(chromeos_tbmc_pm_ops, NULL, chromeos_tbmc_resume); static struct acpi_driver chromeos_tbmc_driver = { diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c index 31c8b8c49e45..e1b75775cd4a 100644 --- a/drivers/platform/chrome/cros_ec_lpc.c +++ b/drivers/platform/chrome/cros_ec_lpc.c @@ -25,14 +25,16 @@ #include <linux/dmi.h> #include <linux/delay.h> #include <linux/io.h> +#include <linux/interrupt.h> #include <linux/mfd/cros_ec.h> #include <linux/mfd/cros_ec_commands.h> -#include <linux/mfd/cros_ec_lpc_reg.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/printk.h> #include <linux/suspend.h> +#include "cros_ec_lpc_reg.h" + #define DRV_NAME "cros_ec_lpcs" #define ACPI_DRV_NAME "GOOG0004" @@ -248,7 +250,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev) acpi_status status; struct cros_ec_device *ec_dev; u8 buf[2]; - int ret; + int irq, ret; if (!devm_request_region(dev, EC_LPC_ADDR_MEMMAP, EC_MEMMAP_SIZE, dev_name(dev))) { @@ -287,6 +289,18 @@ static int cros_ec_lpc_probe(struct platform_device *pdev) sizeof(struct ec_response_get_protocol_info); ec_dev->dout_size = sizeof(struct ec_host_request); + /* + * Some boards do not have an IRQ allotted for cros_ec_lpc, + * which makes ENXIO an expected (and safe) scenario. 
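Dropping the extra const in the chromeos_tbmc hunk above is presumably about redundant qualifiers: SIMPLE_DEV_PM_OPS() already expands to a const-qualified struct definition, so prefixing the macro with const declares the object const twice and trips compiler warnings. The sketch below reproduces the shape of the problem with an invented DEFINE_CONST_PAIR() macro; it is an illustration, not the kernel macro.

#include <stdio.h>

/* A macro that, like SIMPLE_DEV_PM_OPS, already emits the const qualifier */
#define DEFINE_CONST_PAIR(name, a, b) \
    const int name[2] = { (a), (b) }

/* Writing "static const DEFINE_CONST_PAIR(...)" would qualify the object as
 * const twice, which modern compilers warn about; "static" alone suffices. */
static DEFINE_CONST_PAIR(demo_pair, 1, 2);

int main(void)
{
    printf("%d %d\n", demo_pair[0], demo_pair[1]);
    return 0;
}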
+ */ + irq = platform_get_irq(pdev, 0); + if (irq > 0) + ec_dev->irq = irq; + else if (irq != -ENXIO) { + dev_err(dev, "couldn't retrieve IRQ number (%d)\n", irq); + return irq; + } + ret = cros_ec_register(ec_dev); if (ret) { dev_err(dev, "couldn't register ec_dev (%d)\n", ret); diff --git a/drivers/platform/chrome/cros_ec_lpc_mec.c b/drivers/platform/chrome/cros_ec_lpc_mec.c index 2eda2c2fc210..c4edfa83e493 100644 --- a/drivers/platform/chrome/cros_ec_lpc_mec.c +++ b/drivers/platform/chrome/cros_ec_lpc_mec.c @@ -24,10 +24,11 @@ #include <linux/delay.h> #include <linux/io.h> #include <linux/mfd/cros_ec_commands.h> -#include <linux/mfd/cros_ec_lpc_mec.h> #include <linux/mutex.h> #include <linux/types.h> +#include "cros_ec_lpc_mec.h" + /* * This mutex must be held while accessing the EMI unit. We can't rely on the * EC mutex because memmap data may be accessed without it being held. diff --git a/include/linux/mfd/cros_ec_lpc_mec.h b/drivers/platform/chrome/cros_ec_lpc_mec.h index 176496ddc66c..105068c0e919 100644 --- a/include/linux/mfd/cros_ec_lpc_mec.h +++ b/drivers/platform/chrome/cros_ec_lpc_mec.h @@ -21,8 +21,8 @@ * expensive. */ -#ifndef __LINUX_MFD_CROS_EC_MEC_H -#define __LINUX_MFD_CROS_EC_MEC_H +#ifndef __CROS_EC_LPC_MEC_H +#define __CROS_EC_LPC_MEC_H #include <linux/mfd/cros_ec_commands.h> @@ -87,4 +87,4 @@ void cros_ec_lpc_mec_destroy(void); u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type, unsigned int offset, unsigned int length, u8 *buf); -#endif /* __LINUX_MFD_CROS_EC_MEC_H */ +#endif /* __CROS_EC_LPC_MEC_H */ diff --git a/drivers/platform/chrome/cros_ec_lpc_reg.c b/drivers/platform/chrome/cros_ec_lpc_reg.c index dcc7a3e30604..fc23d535c404 100644 --- a/drivers/platform/chrome/cros_ec_lpc_reg.c +++ b/drivers/platform/chrome/cros_ec_lpc_reg.c @@ -24,7 +24,8 @@ #include <linux/io.h> #include <linux/mfd/cros_ec.h> #include <linux/mfd/cros_ec_commands.h> -#include <linux/mfd/cros_ec_lpc_mec.h> + +#include "cros_ec_lpc_mec.h" static u8 lpc_read_bytes(unsigned int offset, unsigned int length, u8 *dest) { diff --git a/include/linux/mfd/cros_ec_lpc_reg.h b/drivers/platform/chrome/cros_ec_lpc_reg.h index 5560bef63c2b..1c12c38b306a 100644 --- a/include/linux/mfd/cros_ec_lpc_reg.h +++ b/drivers/platform/chrome/cros_ec_lpc_reg.h @@ -21,8 +21,8 @@ * expensive. */ -#ifndef __LINUX_MFD_CROS_EC_REG_H -#define __LINUX_MFD_CROS_EC_REG_H +#ifndef __CROS_EC_LPC_REG_H +#define __CROS_EC_LPC_REG_H /** * cros_ec_lpc_read_bytes - Read bytes from a given LPC-mapped address. @@ -58,4 +58,4 @@ void cros_ec_lpc_reg_init(void); */ void cros_ec_lpc_reg_destroy(void); -#endif /* __LINUX_MFD_CROS_EC_REG_H */ +#endif /* __CROS_EC_LPC_REG_H */ diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index bdac939de223..54f6a40c75c6 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -60,7 +60,10 @@ config ACERHDF After loading this driver the BIOS is still in control of the fan. To let the kernel handle the fan, do: - echo -n enabled > /sys/class/thermal/thermal_zone0/mode + echo -n enabled > /sys/class/thermal/thermal_zoneN/mode + where N=0,1,2... depending on the number of thermal nodes and the + detection order of your particular system. The "type" parameter + in the same node directory will tell you if it is "acerhdf". For more information about this driver see <http://piie.net/files/acerhdf_README.txt> @@ -105,6 +108,22 @@ config ASUS_LAPTOP If you have an ACPI-compatible ASUS laptop, say Y or M here. 
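The updated ACERHDF help text above tells users to write to /sys/class/thermal/thermal_zoneN/mode and to identify N via the zone's "type" attribute. A small userspace helper along those lines is sketched below; it simply walks the zone numbers in order and stops at the first gap, which is an assumption about how the zones are laid out rather than a guarantee.

#include <stdio.h>
#include <string.h>

/* Find which thermal zone belongs to acerhdf by checking its "type" file. */
int main(void)
{
    char path[64], type[32];
    int i;

    for (i = 0; i < 32; i++) {
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/class/thermal/thermal_zone%d/type", i);
        f = fopen(path, "r");
        if (!f)
            break;    /* assume no more zones past the first gap */
        if (fgets(type, sizeof(type), f) && !strncmp(type, "acerhdf", 7))
            printf("acerhdf is thermal_zone%d\n", i);
        fclose(f);
    }
    return 0;
}

Writing "enabled" to the matching zone's mode file then hands fan control to the kernel, as the help text describes.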
+config DCDBAS + tristate "Dell Systems Management Base Driver" + depends on X86 + help + The Dell Systems Management Base Driver provides a sysfs interface + for systems management software to perform System Management + Interrupts (SMIs) and Host Control Actions (system power cycle or + power off after OS shutdown) on certain Dell systems. + + See <file:Documentation/dcdbas.txt> for more details on the driver + and the Dell systems on which Dell systems management software makes + use of this driver. + + Say Y or M here to enable the driver for use by Dell systems + management software such as Dell OpenManage. + # # The DELL_SMBIOS driver depends on ACPI_WMI and/or DCDBAS if those # backends are selected. The "depends" line prevents a configuration @@ -227,6 +246,18 @@ config DELL_RBTN To compile this driver as a module, choose M here: the module will be called dell-rbtn. +config DELL_RBU + tristate "BIOS update support for DELL systems via sysfs" + depends on X86 + select FW_LOADER + select FW_LOADER_USER_HELPER + help + Say m if you want to have the option of updating the BIOS for your + DELL system. Note you need a Dell OpenManage or Dell Update package (DUP) + supporting application to communicate with the BIOS regarding the new + image for the image update to take effect. + See <file:Documentation/dell_rbu.txt> for more details on the driver. + config FUJITSU_LAPTOP tristate "Fujitsu Laptop Extras" @@ -336,6 +367,20 @@ config HP_WMI To compile this driver as a module, choose M here: the module will be called hp-wmi. +config LG_LAPTOP + tristate "LG Laptop Extras" + depends on ACPI + depends on ACPI_WMI + depends on INPUT + select INPUT_SPARSEKMAP + select LEDS_CLASS + help + This driver adds support for hotkeys as well as control of keyboard + backlight, battery maximum charge level and various other ACPI + features. + + If you have an LG Gram laptop, say Y or M here. + config MSI_LAPTOP tristate "MSI Laptop Extras" depends on ACPI @@ -1231,6 +1276,18 @@ config I2C_MULTI_INSTANTIATE To compile this driver as a module, choose M here: the module will be called i2c-multi-instantiate. +config INTEL_ATOMISP2_PM + tristate "Intel AtomISP2 dummy / power-management driver" + depends on PCI && IOSF_MBI && PM + help + Power-management driver for Intel's Image Signal Processor found on + Bay and Cherry Trail devices. This dummy driver's sole purpose is to + turn the ISP off (put it in D3) to save power and to allow entering + of S0ix modes. + + To compile this driver as a module, choose M here: the module + will be called intel_atomisp2_pm. 
+ endif # X86_PLATFORM_DEVICES config PMC_ATOM diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index e6d1becf81ce..39ae94135406 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -9,9 +9,11 @@ obj-$(CONFIG_ASUS_NB_WMI) += asus-nb-wmi.o obj-$(CONFIG_ASUS_WIRELESS) += asus-wireless.o obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o obj-$(CONFIG_EEEPC_WMI) += eeepc-wmi.o +obj-$(CONFIG_LG_LAPTOP) += lg-laptop.o obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o +obj-$(CONFIG_DCDBAS) += dcdbas.o obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o dell-smbios-objs := dell-smbios-base.o dell-smbios-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o @@ -23,6 +25,7 @@ obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o obj-$(CONFIG_DELL_WMI_LED) += dell-wmi-led.o obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o obj-$(CONFIG_DELL_RBTN) += dell-rbtn.o +obj-$(CONFIG_DELL_RBU) += dell_rbu.o obj-$(CONFIG_ACER_WMI) += acer-wmi.o obj-$(CONFIG_ACER_WIRELESS) += acer-wireless.o obj-$(CONFIG_ACERHDF) += acerhdf.o @@ -92,3 +95,4 @@ obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o +obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index ea22591ee66f..505224225378 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c @@ -86,6 +86,7 @@ static unsigned int interval = 10; static unsigned int fanon = 60000; static unsigned int fanoff = 53000; static unsigned int verbose; +static unsigned int list_supported; static unsigned int fanstate = ACERHDF_FAN_AUTO; static char force_bios[16]; static char force_product[16]; @@ -104,10 +105,12 @@ module_param(fanoff, uint, 0600); MODULE_PARM_DESC(fanoff, "Turn the fan off below this temperature"); module_param(verbose, uint, 0600); MODULE_PARM_DESC(verbose, "Enable verbose dmesg output"); +module_param(list_supported, uint, 0600); +MODULE_PARM_DESC(list_supported, "List supported models and BIOS versions"); module_param_string(force_bios, force_bios, 16, 0); -MODULE_PARM_DESC(force_bios, "Force BIOS version and omit BIOS check"); +MODULE_PARM_DESC(force_bios, "Pretend system has this known supported BIOS version"); module_param_string(force_product, force_product, 16, 0); -MODULE_PARM_DESC(force_product, "Force BIOS product and omit BIOS check"); +MODULE_PARM_DESC(force_product, "Pretend system is this known supported model"); /* * cmd_off: to switch the fan completely off and check if the fan is off @@ -130,7 +133,7 @@ static const struct manualcmd mcmd = { .moff = 0xff, }; -/* BIOS settings */ +/* BIOS settings - only used during probe */ struct bios_settings { const char *vendor; const char *product; @@ -141,8 +144,18 @@ struct bios_settings { int mcmd_enable; }; +/* This could be a daughter struct in the above, but not worth the redirect */ +struct ctrl_settings { + u8 fanreg; + u8 tempreg; + struct fancmd cmd; + int mcmd_enable; +}; + +static struct ctrl_settings ctrl_cfg __read_mostly; + /* Register addresses and values for different BIOS versions */ -static const struct bios_settings bios_tbl[] = { +static const struct bios_settings bios_tbl[] __initconst = { /* AOA110 */ {"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x00}, 0}, {"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 
0x00}, 0}, @@ -233,6 +246,7 @@ static const struct bios_settings bios_tbl[] = { {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00}, 0}, {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00}, 0}, {"Gateway", "LT31", "v1.3303t", 0x55, 0x58, {0x9e, 0x00}, 0}, + {"Gateway", "LT31", "v1.3307", 0x55, 0x58, {0x9e, 0x00}, 0}, /* Packard Bell */ {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00}, 0}, {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00}, 0}, @@ -256,8 +270,6 @@ static const struct bios_settings bios_tbl[] = { {"", "", "", 0, 0, {0, 0}, 0} }; -static const struct bios_settings *bios_cfg __read_mostly; - /* * this struct is used to instruct thermal layer to use bang_bang instead of * default governor for acerhdf @@ -270,7 +282,7 @@ static int acerhdf_get_temp(int *temp) { u8 read_temp; - if (ec_read(bios_cfg->tempreg, &read_temp)) + if (ec_read(ctrl_cfg.tempreg, &read_temp)) return -EINVAL; *temp = read_temp * 1000; @@ -282,10 +294,10 @@ static int acerhdf_get_fanstate(int *state) { u8 fan; - if (ec_read(bios_cfg->fanreg, &fan)) + if (ec_read(ctrl_cfg.fanreg, &fan)) return -EINVAL; - if (fan != bios_cfg->cmd.cmd_off) + if (fan != ctrl_cfg.cmd.cmd_off) *state = ACERHDF_FAN_AUTO; else *state = ACERHDF_FAN_OFF; @@ -306,13 +318,13 @@ static void acerhdf_change_fanstate(int state) state = ACERHDF_FAN_AUTO; } - cmd = (state == ACERHDF_FAN_OFF) ? bios_cfg->cmd.cmd_off - : bios_cfg->cmd.cmd_auto; + cmd = (state == ACERHDF_FAN_OFF) ? ctrl_cfg.cmd.cmd_off + : ctrl_cfg.cmd.cmd_auto; fanstate = state; - ec_write(bios_cfg->fanreg, cmd); + ec_write(ctrl_cfg.fanreg, cmd); - if (bios_cfg->mcmd_enable && state == ACERHDF_FAN_OFF) { + if (ctrl_cfg.mcmd_enable && state == ACERHDF_FAN_OFF) { if (verbose) pr_notice("turning off fan manually\n"); ec_write(mcmd.mreg, mcmd.moff); @@ -615,10 +627,11 @@ static int str_starts_with(const char *str, const char *start) } /* check hardware */ -static int acerhdf_check_hardware(void) +static int __init acerhdf_check_hardware(void) { char const *vendor, *version, *product; const struct bios_settings *bt = NULL; + int found = 0; /* get BIOS data */ vendor = dmi_get_system_info(DMI_SYS_VENDOR); @@ -632,6 +645,17 @@ static int acerhdf_check_hardware(void) pr_info("Acer Aspire One Fan driver, v.%s\n", DRV_VER); + if (list_supported) { + pr_info("List of supported Manufacturer/Model/BIOS:\n"); + pr_info("---------------------------------------------------\n"); + for (bt = bios_tbl; bt->vendor[0]; bt++) { + pr_info("%-13s | %-17s | %-10s\n", bt->vendor, + bt->product, bt->version); + } + pr_info("---------------------------------------------------\n"); + return -ECANCELED; + } + if (force_bios[0]) { version = force_bios; pr_info("forcing BIOS version: %s\n", version); @@ -657,30 +681,36 @@ static int acerhdf_check_hardware(void) if (str_starts_with(vendor, bt->vendor) && str_starts_with(product, bt->product) && str_starts_with(version, bt->version)) { - bios_cfg = bt; + found = 1; break; } } - if (!bios_cfg) { + if (!found) { pr_err("unknown (unsupported) BIOS version %s/%s/%s, please report, aborting!\n", vendor, product, version); return -EINVAL; } + /* Copy control settings from BIOS table before we free it. 
*/ + ctrl_cfg.fanreg = bt->fanreg; + ctrl_cfg.tempreg = bt->tempreg; + memcpy(&ctrl_cfg.cmd, &bt->cmd, sizeof(struct fancmd)); + ctrl_cfg.mcmd_enable = bt->mcmd_enable; + /* * if started with kernel mode off, prevent the kernel from switching * off the fan */ if (!kernelmode) { pr_notice("Fan control off, to enable do:\n"); - pr_notice("echo -n \"enabled\" > /sys/class/thermal/thermal_zone0/mode\n"); + pr_notice("echo -n \"enabled\" > /sys/class/thermal/thermal_zoneN/mode # N=0,1,2...\n"); } return 0; } -static int acerhdf_register_platform(void) +static int __init acerhdf_register_platform(void) { int err = 0; @@ -712,7 +742,7 @@ static void acerhdf_unregister_platform(void) platform_driver_unregister(&acerhdf_driver); } -static int acerhdf_register_thermal(void) +static int __init acerhdf_register_thermal(void) { cl_dev = thermal_cooling_device_register("acerhdf-fan", NULL, &acerhdf_cooling_ops); diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 93ee2d5466f8..c285a16675ee 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -43,6 +43,7 @@ #include <linux/hwmon-sysfs.h> #include <linux/debugfs.h> #include <linux/seq_file.h> +#include <linux/platform_data/x86/asus-wmi.h> #include <linux/platform_device.h> #include <linux/thermal.h> #include <linux/acpi.h> @@ -69,89 +70,6 @@ MODULE_LICENSE("GPL"); #define NOTIFY_KBD_BRTDWN 0xc5 #define NOTIFY_KBD_BRTTOGGLE 0xc7 -/* WMI Methods */ -#define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */ -#define ASUS_WMI_METHODID_SFBD 0x44424653 /* Set First Boot Device */ -#define ASUS_WMI_METHODID_GLCD 0x44434C47 /* Get LCD status */ -#define ASUS_WMI_METHODID_GPID 0x44495047 /* Get Panel ID?? (Resol) */ -#define ASUS_WMI_METHODID_QMOD 0x444F4D51 /* Quiet MODe */ -#define ASUS_WMI_METHODID_SPLV 0x4C425053 /* Set Panel Light Value */ -#define ASUS_WMI_METHODID_AGFN 0x4E464741 /* FaN? */ -#define ASUS_WMI_METHODID_SFUN 0x4E554653 /* FUNCtionalities */ -#define ASUS_WMI_METHODID_SDSP 0x50534453 /* Set DiSPlay output */ -#define ASUS_WMI_METHODID_GDSP 0x50534447 /* Get DiSPlay output */ -#define ASUS_WMI_METHODID_DEVP 0x50564544 /* DEVice Policy */ -#define ASUS_WMI_METHODID_OSVR 0x5256534F /* OS VeRsion */ -#define ASUS_WMI_METHODID_DSTS 0x53544344 /* Device STatuS */ -#define ASUS_WMI_METHODID_DSTS2 0x53545344 /* Device STatuS #2*/ -#define ASUS_WMI_METHODID_BSTS 0x53545342 /* Bios STatuS ? */ -#define ASUS_WMI_METHODID_DEVS 0x53564544 /* DEVice Set */ -#define ASUS_WMI_METHODID_CFVS 0x53564643 /* CPU Frequency Volt Set */ -#define ASUS_WMI_METHODID_KBFT 0x5446424B /* KeyBoard FilTer */ -#define ASUS_WMI_METHODID_INIT 0x54494E49 /* INITialize */ -#define ASUS_WMI_METHODID_HKEY 0x59454B48 /* Hot KEY ?? 
*/ - -#define ASUS_WMI_UNSUPPORTED_METHOD 0xFFFFFFFE - -/* Wireless */ -#define ASUS_WMI_DEVID_HW_SWITCH 0x00010001 -#define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002 -#define ASUS_WMI_DEVID_CWAP 0x00010003 -#define ASUS_WMI_DEVID_WLAN 0x00010011 -#define ASUS_WMI_DEVID_WLAN_LED 0x00010012 -#define ASUS_WMI_DEVID_BLUETOOTH 0x00010013 -#define ASUS_WMI_DEVID_GPS 0x00010015 -#define ASUS_WMI_DEVID_WIMAX 0x00010017 -#define ASUS_WMI_DEVID_WWAN3G 0x00010019 -#define ASUS_WMI_DEVID_UWB 0x00010021 - -/* Leds */ -/* 0x000200XX and 0x000400XX */ -#define ASUS_WMI_DEVID_LED1 0x00020011 -#define ASUS_WMI_DEVID_LED2 0x00020012 -#define ASUS_WMI_DEVID_LED3 0x00020013 -#define ASUS_WMI_DEVID_LED4 0x00020014 -#define ASUS_WMI_DEVID_LED5 0x00020015 -#define ASUS_WMI_DEVID_LED6 0x00020016 - -/* Backlight and Brightness */ -#define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */ -#define ASUS_WMI_DEVID_BACKLIGHT 0x00050011 -#define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012 -#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021 -#define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */ -#define ASUS_WMI_DEVID_LIGHTBAR 0x00050025 - -/* Misc */ -#define ASUS_WMI_DEVID_CAMERA 0x00060013 - -/* Storage */ -#define ASUS_WMI_DEVID_CARDREADER 0x00080013 - -/* Input */ -#define ASUS_WMI_DEVID_TOUCHPAD 0x00100011 -#define ASUS_WMI_DEVID_TOUCHPAD_LED 0x00100012 - -/* Fan, Thermal */ -#define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011 -#define ASUS_WMI_DEVID_FAN_CTRL 0x00110012 - -/* Power */ -#define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012 - -/* Deep S3 / Resume on LID open */ -#define ASUS_WMI_DEVID_LID_RESUME 0x00120031 - -/* DSTS masks */ -#define ASUS_WMI_DSTS_STATUS_BIT 0x00000001 -#define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002 -#define ASUS_WMI_DSTS_PRESENCE_BIT 0x00010000 -#define ASUS_WMI_DSTS_USER_BIT 0x00020000 -#define ASUS_WMI_DSTS_BIOS_BIT 0x00040000 -#define ASUS_WMI_DSTS_BRIGHTNESS_MASK 0x000000FF -#define ASUS_WMI_DSTS_MAX_BRIGTH_MASK 0x0000FF00 -#define ASUS_WMI_DSTS_LIGHTBAR_MASK 0x0000000F - #define ASUS_FAN_DESC "cpu_fan" #define ASUS_FAN_MFUN 0x13 #define ASUS_FAN_SFUN_READ 0x06 @@ -239,7 +157,6 @@ struct asus_wmi { int lightbar_led_wk; struct workqueue_struct *led_workqueue; struct work_struct tpd_led_work; - struct work_struct kbd_led_work; struct work_struct wlan_led_work; struct work_struct lightbar_led_work; @@ -302,8 +219,7 @@ static void asus_wmi_input_exit(struct asus_wmi *asus) asus->inputdev = NULL; } -static int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, - u32 *retval) +int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval) { struct bios_args args = { .arg0 = arg0, @@ -339,6 +255,7 @@ exit: return 0; } +EXPORT_SYMBOL_GPL(asus_wmi_evaluate_method); static int asus_wmi_evaluate_method_agfn(const struct acpi_buffer args) { @@ -456,12 +373,9 @@ static enum led_brightness tpd_led_get(struct led_classdev *led_cdev) return read_tpd_led_state(asus); } -static void kbd_led_update(struct work_struct *work) +static void kbd_led_update(struct asus_wmi *asus) { int ctrl_param = 0; - struct asus_wmi *asus; - - asus = container_of(work, struct asus_wmi, kbd_led_work); /* * bits 0-2: level @@ -471,7 +385,6 @@ static void kbd_led_update(struct work_struct *work) ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F); asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL); - led_classdev_notify_brightness_hw_changed(&asus->kbd_led, asus->kbd_led_wk); } static int kbd_led_read(struct asus_wmi *asus, int *level, int *env) @@ -516,7 +429,7 @@ static void 
do_kbd_led_set(struct led_classdev *led_cdev, int value) value = 0; asus->kbd_led_wk = value; - queue_work(asus->led_workqueue, &asus->kbd_led_work); + kbd_led_update(asus); } static void kbd_led_set(struct led_classdev *led_cdev, @@ -525,6 +438,14 @@ static void kbd_led_set(struct led_classdev *led_cdev, do_kbd_led_set(led_cdev, value); } +static void kbd_led_set_by_kbd(struct asus_wmi *asus, enum led_brightness value) +{ + struct led_classdev *led_cdev = &asus->kbd_led; + + do_kbd_led_set(led_cdev, value); + led_classdev_notify_brightness_hw_changed(led_cdev, asus->kbd_led_wk); +} + static enum led_brightness kbd_led_get(struct led_classdev *led_cdev) { struct asus_wmi *asus; @@ -671,8 +592,6 @@ static int asus_wmi_led_init(struct asus_wmi *asus) led_val = kbd_led_read(asus, NULL, NULL); if (led_val >= 0) { - INIT_WORK(&asus->kbd_led_work, kbd_led_update); - asus->kbd_led_wk = led_val; asus->kbd_led.name = "asus::kbd_backlight"; asus->kbd_led.flags = LED_BRIGHT_HW_CHANGED; @@ -1746,18 +1665,18 @@ static void asus_wmi_notify(u32 value, void *context) } if (code == NOTIFY_KBD_BRTUP) { - do_kbd_led_set(&asus->kbd_led, asus->kbd_led_wk + 1); + kbd_led_set_by_kbd(asus, asus->kbd_led_wk + 1); goto exit; } if (code == NOTIFY_KBD_BRTDWN) { - do_kbd_led_set(&asus->kbd_led, asus->kbd_led_wk - 1); + kbd_led_set_by_kbd(asus, asus->kbd_led_wk - 1); goto exit; } if (code == NOTIFY_KBD_BRTTOGGLE) { if (asus->kbd_led_wk == asus->kbd_led.max_brightness) - do_kbd_led_set(&asus->kbd_led, 0); + kbd_led_set_by_kbd(asus, 0); else - do_kbd_led_set(&asus->kbd_led, asus->kbd_led_wk + 1); + kbd_led_set_by_kbd(asus, asus->kbd_led_wk + 1); goto exit; } @@ -2291,7 +2210,7 @@ static int asus_hotk_resume(struct device *device) struct asus_wmi *asus = dev_get_drvdata(device); if (!IS_ERR_OR_NULL(asus->kbd_led.dev)) - queue_work(asus->led_workqueue, &asus->kbd_led_work); + kbd_led_update(asus); return 0; } @@ -2327,7 +2246,7 @@ static int asus_hotk_restore(struct device *device) rfkill_set_sw_state(asus->uwb.rfkill, bl); } if (!IS_ERR_OR_NULL(asus->kbd_led.dev)) - queue_work(asus->led_workqueue, &asus->kbd_led_work); + kbd_led_update(asus); return 0; } diff --git a/drivers/firmware/dcdbas.c b/drivers/platform/x86/dcdbas.c index 0bdea60c65dd..88bd7efafe14 100644 --- a/drivers/firmware/dcdbas.c +++ b/drivers/platform/x86/dcdbas.c @@ -21,11 +21,13 @@ */ #include <linux/platform_device.h> +#include <linux/acpi.h> #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/cpu.h> #include <linux/gfp.h> #include <linux/init.h> +#include <linux/io.h> #include <linux/kernel.h> #include <linux/mc146818rtc.h> #include <linux/module.h> @@ -36,12 +38,11 @@ #include <linux/string.h> #include <linux/types.h> #include <linux/mutex.h> -#include <asm/io.h> #include "dcdbas.h" #define DRIVER_NAME "dcdbas" -#define DRIVER_VERSION "5.6.0-3.2" +#define DRIVER_VERSION "5.6.0-3.3" #define DRIVER_DESCRIPTION "Dell Systems Management Base Driver" static struct platform_device *dcdbas_pdev; @@ -49,19 +50,23 @@ static struct platform_device *dcdbas_pdev; static u8 *smi_data_buf; static dma_addr_t smi_data_buf_handle; static unsigned long smi_data_buf_size; +static unsigned long max_smi_data_buf_size = MAX_SMI_DATA_BUF_SIZE; static u32 smi_data_buf_phys_addr; static DEFINE_MUTEX(smi_data_lock); +static u8 *eps_buffer; static unsigned int host_control_action; static unsigned int host_control_smi_type; static unsigned int host_control_on_shutdown; +static bool wsmt_enabled; + /** * smi_data_buf_free: free SMI data buffer */ static void 
smi_data_buf_free(void) { - if (!smi_data_buf) + if (!smi_data_buf || wsmt_enabled) return; dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n", @@ -86,7 +91,7 @@ static int smi_data_buf_realloc(unsigned long size) if (smi_data_buf_size >= size) return 0; - if (size > MAX_SMI_DATA_BUF_SIZE) + if (size > max_smi_data_buf_size) return -EINVAL; /* new buffer is needed */ @@ -169,7 +174,7 @@ static ssize_t smi_data_write(struct file *filp, struct kobject *kobj, { ssize_t ret; - if ((pos + count) > MAX_SMI_DATA_BUF_SIZE) + if ((pos + count) > max_smi_data_buf_size) return -EINVAL; mutex_lock(&smi_data_lock); @@ -322,8 +327,20 @@ static ssize_t smi_request_store(struct device *dev, ret = count; break; case 1: - /* Calling Interface SMI */ - smi_cmd->ebx = (u32) virt_to_phys(smi_cmd->command_buffer); + /* + * Calling Interface SMI + * + * Provide physical address of command buffer field within + * the struct smi_cmd to BIOS. + * + * Because the address that smi_cmd (smi_data_buf) points to + * will be from memremap() of a non-memory address if WSMT + * is present, we can't use virt_to_phys() on smi_cmd, so + * we have to use the physical address that was saved when + * the virtual address for smi_cmd was received. + */ + smi_cmd->ebx = smi_data_buf_phys_addr + + offsetof(struct smi_cmd, command_buffer); ret = dcdbas_smi_request(smi_cmd); if (!ret) ret = count; @@ -482,6 +499,93 @@ static void dcdbas_host_control(void) } } +/* WSMT */ + +static u8 checksum(u8 *buffer, u8 length) +{ + u8 sum = 0; + u8 *end = buffer + length; + + while (buffer < end) + sum += *buffer++; + return sum; +} + +static inline struct smm_eps_table *check_eps_table(u8 *addr) +{ + struct smm_eps_table *eps = (struct smm_eps_table *)addr; + + if (strncmp(eps->smm_comm_buff_anchor, SMM_EPS_SIG, 4) != 0) + return NULL; + + if (checksum(addr, eps->length) != 0) + return NULL; + + return eps; +} + +static int dcdbas_check_wsmt(void) +{ + struct acpi_table_wsmt *wsmt = NULL; + struct smm_eps_table *eps = NULL; + u64 remap_size; + u8 *addr; + + acpi_get_table(ACPI_SIG_WSMT, 0, (struct acpi_table_header **)&wsmt); + if (!wsmt) + return 0; + + /* Check if WSMT ACPI table shows that protection is enabled */ + if (!(wsmt->protection_flags & ACPI_WSMT_FIXED_COMM_BUFFERS) || + !(wsmt->protection_flags & ACPI_WSMT_COMM_BUFFER_NESTED_PTR_PROTECTION)) + return 0; + + /* Scan for EPS (entry point structure) */ + for (addr = (u8 *)__va(0xf0000); + addr < (u8 *)__va(0x100000 - sizeof(struct smm_eps_table)); + addr += 16) { + eps = check_eps_table(addr); + if (eps) + break; + } + + if (!eps) { + dev_dbg(&dcdbas_pdev->dev, "found WSMT, but no EPS found\n"); + return -ENODEV; + } + + /* + * Get physical address of buffer and map to virtual address. + * Table gives size in 4K pages, regardless of actual system page size. + */ + if (upper_32_bits(eps->smm_comm_buff_addr + 8)) { + dev_warn(&dcdbas_pdev->dev, "found WSMT, but EPS buffer address is above 4GB\n"); + return -EINVAL; + } + /* + * Limit remap size to MAX_SMI_DATA_BUF_SIZE + 8 (since the first 8 + * bytes are used for a semaphore, not the data buffer itself). 
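The smi_request_store() change earlier in this hunk is the subtle part of the WSMT support: once smi_data_buf points into a memremap()'d, firmware-reserved region, virt_to_phys() is no longer valid on it, so the physical address of an embedded member has to be derived from the physical base saved when the buffer was mapped. A small sketch of that arithmetic with a hypothetical struct; only the offsetof() idea is being illustrated:

#include <linux/stddef.h>
#include <linux/types.h>

struct smi_cmd_example {
        u32 magic;
        u32 ebx;
        u8  command_buffer[8];
};

/*
 * Physical address of a member inside a firmware-provided buffer:
 * do not virt_to_phys() a memremap()'d pointer; add the member's
 * offset to the known physical base instead.
 */
static u32 example_member_phys(u32 base_phys)
{
        return base_phys + offsetof(struct smi_cmd_example, command_buffer);
}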
+ */ + remap_size = eps->num_of_4k_pages * PAGE_SIZE; + if (remap_size > MAX_SMI_DATA_BUF_SIZE + 8) + remap_size = MAX_SMI_DATA_BUF_SIZE + 8; + eps_buffer = memremap(eps->smm_comm_buff_addr, remap_size, MEMREMAP_WB); + if (!eps_buffer) { + dev_warn(&dcdbas_pdev->dev, "found WSMT, but failed to map EPS buffer\n"); + return -ENOMEM; + } + + /* First 8 bytes is for a semaphore, not part of the smi_data_buf */ + smi_data_buf_phys_addr = eps->smm_comm_buff_addr + 8; + smi_data_buf = eps_buffer + 8; + smi_data_buf_size = remap_size - 8; + max_smi_data_buf_size = smi_data_buf_size; + wsmt_enabled = true; + dev_info(&dcdbas_pdev->dev, + "WSMT found, using firmware-provided SMI buffer.\n"); + return 1; +} + /** * dcdbas_reboot_notify: handle reboot notification for host control */ @@ -548,6 +652,11 @@ static int dcdbas_probe(struct platform_device *dev) dcdbas_pdev = dev; + /* Check if ACPI WSMT table specifies protected SMI buffer address */ + error = dcdbas_check_wsmt(); + if (error < 0) + return error; + /* * BIOS SMI calls require buffer addresses be in 32-bit address space. * This is done by setting the DMA mask below. @@ -635,6 +744,8 @@ static void __exit dcdbas_exit(void) */ if (dcdbas_pdev) smi_data_buf_free(); + if (eps_buffer) + memunmap(eps_buffer); platform_device_unregister(dcdbas_pdev_reg); platform_driver_unregister(&dcdbas_driver); } diff --git a/drivers/firmware/dcdbas.h b/drivers/platform/x86/dcdbas.h index ca3cb0a54ab6..52729a494b00 100644 --- a/drivers/firmware/dcdbas.h +++ b/drivers/platform/x86/dcdbas.h @@ -53,6 +53,7 @@ #define EXPIRED_TIMER (0) #define SMI_CMD_MAGIC (0x534D4931) +#define SMM_EPS_SIG "$SCB" #define DCDBAS_DEV_ATTR_RW(_name) \ DEVICE_ATTR(_name,0600,_name##_show,_name##_store); @@ -103,5 +104,14 @@ struct apm_cmd { int dcdbas_smi_request(struct smi_cmd *smi_cmd); +struct smm_eps_table { + char smm_comm_buff_anchor[4]; + u8 length; + u8 checksum; + u8 version; + u64 smm_comm_buff_addr; + u64 num_of_4k_pages; +} __packed; + #endif /* _DCDBAS_H_ */ diff --git a/drivers/platform/x86/dell-smbios-smm.c b/drivers/platform/x86/dell-smbios-smm.c index 97a90bebc360..ab9b822a6dfe 100644 --- a/drivers/platform/x86/dell-smbios-smm.c +++ b/drivers/platform/x86/dell-smbios-smm.c @@ -18,7 +18,7 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/platform_device.h> -#include "../../firmware/dcdbas.h" +#include "dcdbas.h" #include "dell-smbios.h" static int da_command_address; diff --git a/drivers/firmware/dell_rbu.c b/drivers/platform/x86/dell_rbu.c index fb8af5cb7c9b..ccefa84f7305 100644 --- a/drivers/firmware/dell_rbu.c +++ b/drivers/platform/x86/dell_rbu.c @@ -45,6 +45,7 @@ #include <linux/moduleparam.h> #include <linux/firmware.h> #include <linux/dma-mapping.h> +#include <asm/set_memory.h> MODULE_AUTHOR("Abhay Salunke <abhay_salunke@dell.com>"); MODULE_DESCRIPTION("Driver for updating BIOS image on DELL systems"); @@ -181,6 +182,11 @@ static int create_packet(void *data, size_t length) packet_data_temp_buf = NULL; } } + /* + * set to uncachable or it may never get written back before reboot + */ + set_memory_uc((unsigned long)packet_data_temp_buf, 1 << ordernum); + spin_lock(&rbu_data.lock); newpacket->data = packet_data_temp_buf; @@ -349,6 +355,8 @@ static void packet_empty_list(void) * to make sure there are no stale RBU packets left in memory */ memset(newpacket->data, 0, rbu_data.packetsize); + set_memory_wb((unsigned long)newpacket->data, + 1 << newpacket->ordernum); free_pages((unsigned long) newpacket->data, newpacket->ordernum); kfree(newpacket); 
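The dell_rbu hunks just above pair set_memory_uc() after allocation with set_memory_wb() before the pages are freed: data handed to the BIOS must not sit only in the CPU cache, and the mapping attributes have to be restored before the allocator reuses the pages. A minimal sketch of that pairing (hypothetical helper names; return values of the set_memory_*() calls ignored for brevity):

#include <linux/gfp.h>
#include <asm/set_memory.h>

static void *example_alloc_firmware_visible(unsigned int order)
{
        void *buf = (void *)__get_free_pages(GFP_KERNEL, order);

        if (buf)
                set_memory_uc((unsigned long)buf, 1 << order);  /* bypass CPU cache */
        return buf;
}

static void example_free_firmware_visible(void *buf, unsigned int order)
{
        set_memory_wb((unsigned long)buf, 1 << order);  /* restore write-back first */
        free_pages((unsigned long)buf, order);
}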
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index d4f1259ff5a2..b6489cba2985 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -212,7 +212,7 @@ static int read_ec_data(acpi_handle handle, int cmd, unsigned long *data) return 0; } } - pr_err("timeout in read_ec_cmd\n"); + pr_err("timeout in %s\n", __func__); return -1; } @@ -1147,6 +1147,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { }, }, { + .ident = "Lenovo Legion Y530-15ICH", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion Y530-15ICH"), + }, + }, + { .ident = "Lenovo Legion Y720-15IKB", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index 6cf9b7fa5bf0..e28bcf61b126 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -1,19 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Intel HID event & 5 button array driver * * Copyright (C) 2015 Alex Hung <alex.hung@canonical.com> * Copyright (C) 2015 Andrew Lutomirski <luto@kernel.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * */ #include <linux/acpi.h> diff --git a/drivers/platform/x86/intel-rst.c b/drivers/platform/x86/intel-rst.c index 7344d841f4d9..3b81cb896fed 100644 --- a/drivers/platform/x86/intel-rst.c +++ b/drivers/platform/x86/intel-rst.c @@ -1,26 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Copyright 2013 Matthew Garrett <mjg59@srcf.ucam.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
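The ideapad-laptop hunk above adds one more entry to the no_hw_rfkill DMI list; models matched there are known not to have a working hardware rfkill switch, so the driver skips it. A generic sketch of how such a quirk table is declared and consulted (hypothetical names; a list like this is typically checked with dmi_check_system() once at probe time):

#include <linux/dmi.h>

static const struct dmi_system_id example_quirks[] = {
        {
                .ident = "Lenovo Legion Y530-15ICH",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion Y530-15ICH"),
                },
        },
        { }     /* terminator */
};

static bool example_needs_quirk(void)
{
        return dmi_check_system(example_quirks) != 0;
}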
*/ - -#include <linux/init.h> +#include <linux/acpi.h> #include <linux/module.h> #include <linux/slab.h> -#include <linux/acpi.h> MODULE_LICENSE("GPL"); @@ -53,12 +38,10 @@ static ssize_t irst_store_wakeup_events(struct device *dev, acpi = to_acpi_device(dev); error = kstrtoul(buf, 0, &value); - if (error) return error; status = acpi_execute_simple_method(acpi->handle, "SFFS", value); - if (ACPI_FAILURE(status)) return -EINVAL; @@ -99,12 +82,10 @@ static ssize_t irst_store_wakeup_time(struct device *dev, acpi = to_acpi_device(dev); error = kstrtoul(buf, 0, &value); - if (error) return error; status = acpi_execute_simple_method(acpi->handle, "SFTV", value); - if (ACPI_FAILURE(status)) return -EINVAL; diff --git a/drivers/platform/x86/intel-smartconnect.c b/drivers/platform/x86/intel-smartconnect.c index bbe4c06c769f..64c2dc93472f 100644 --- a/drivers/platform/x86/intel-smartconnect.c +++ b/drivers/platform/x86/intel-smartconnect.c @@ -1,25 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Copyright 2013 Matthew Garrett <mjg59@srcf.ucam.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ - -#include <linux/init.h> -#include <linux/module.h> #include <linux/acpi.h> +#include <linux/module.h> MODULE_LICENSE("GPL"); @@ -44,6 +29,7 @@ static const struct acpi_device_id smartconnect_ids[] = { {"INT33A0", 0}, {"", 0} }; +MODULE_DEVICE_TABLE(acpi, smartconnect_ids); static struct acpi_driver smartconnect_driver = { .owner = THIS_MODULE, @@ -56,5 +42,3 @@ static struct acpi_driver smartconnect_driver = { }; module_acpi_driver(smartconnect_driver); - -MODULE_DEVICE_TABLE(acpi, smartconnect_ids); diff --git a/drivers/platform/x86/intel-wmi-thunderbolt.c b/drivers/platform/x86/intel-wmi-thunderbolt.c index c2257bd06f18..9ded8e2af312 100644 --- a/drivers/platform/x86/intel-wmi-thunderbolt.c +++ b/drivers/platform/x86/intel-wmi-thunderbolt.c @@ -1,16 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * WMI Thunderbolt driver * * Copyright (C) 2017 Dell Inc. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
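Besides the SPDX conversion, the intel-smartconnect hunk above moves MODULE_DEVICE_TABLE() up so it sits next to the ID table it describes. The placement is cosmetic as long as the table is in scope; what matters is that the macro emits the modalias information udev/modprobe use to autoload the module when a matching ACPI device appears. A minimal example (the ID string is the one from the driver above):

#include <linux/acpi.h>
#include <linux/module.h>

static const struct acpi_device_id example_ids[] = {
        { "INT33A0", 0 },
        { }
};
MODULE_DEVICE_TABLE(acpi, example_ids); /* exported as modalias data for autoloading */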
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -38,12 +30,16 @@ static ssize_t force_power_store(struct device *dev, input.length = sizeof(u8); input.pointer = &mode; mode = hex_to_bin(buf[0]); + dev_dbg(dev, "force_power: storing %#x\n", mode); if (mode == 0 || mode == 1) { status = wmi_evaluate_method(INTEL_WMI_THUNDERBOLT_GUID, 0, 1, &input, NULL); - if (ACPI_FAILURE(status)) + if (ACPI_FAILURE(status)) { + dev_dbg(dev, "force_power: failed to evaluate ACPI method\n"); return -ENODEV; + } } else { + dev_dbg(dev, "force_power: unsupported mode\n"); return -EINVAL; } return count; @@ -95,4 +91,4 @@ module_wmi_driver(intel_wmi_thunderbolt_driver); MODULE_ALIAS("wmi:" INTEL_WMI_THUNDERBOLT_GUID); MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>"); MODULE_DESCRIPTION("Intel WMI Thunderbolt force power driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel_atomisp2_pm.c b/drivers/platform/x86/intel_atomisp2_pm.c new file mode 100644 index 000000000000..9371603a0ac9 --- /dev/null +++ b/drivers/platform/x86/intel_atomisp2_pm.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Dummy driver for Intel's Image Signal Processor found on Bay and Cherry + * Trail devices. The sole purpose of this driver is to allow the ISP to + * be put in D3. + * + * Copyright (C) 2018 Hans de Goede <hdegoede@redhat.com> + * + * Based on various non upstream patches for ISP support: + * Copyright (C) 2010-2017 Intel Corporation. All rights reserved. + * Copyright (c) 2010 Silicon Hive www.siliconhive.com. + */ + +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/pci.h> +#include <linux/pm_runtime.h> +#include <asm/iosf_mbi.h> + +/* PCI configuration regs */ +#define PCI_INTERRUPT_CTRL 0x9c + +#define PCI_CSI_CONTROL 0xe8 +#define PCI_CSI_CONTROL_PORTS_OFF_MASK 0x7 + +/* IOSF BT_MBI_UNIT_PMC regs */ +#define ISPSSPM0 0x39 +#define ISPSSPM0_ISPSSC_OFFSET 0 +#define ISPSSPM0_ISPSSC_MASK 0x00000003 +#define ISPSSPM0_ISPSSS_OFFSET 24 +#define ISPSSPM0_ISPSSS_MASK 0x03000000 +#define ISPSSPM0_IUNIT_POWER_ON 0x0 +#define ISPSSPM0_IUNIT_POWER_OFF 0x3 + +static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + unsigned long timeout; + u32 val; + + pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, 0); + + /* + * MRFLD IUNIT DPHY is located in an always-power-on island + * MRFLD HW design need all CSI ports are disabled before + * powering down the IUNIT. + */ + pci_read_config_dword(dev, PCI_CSI_CONTROL, &val); + val |= PCI_CSI_CONTROL_PORTS_OFF_MASK; + pci_write_config_dword(dev, PCI_CSI_CONTROL, val); + + /* Write 0x3 to ISPSSPM0 bit[1:0] to power off the IUNIT */ + iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, + ISPSSPM0_IUNIT_POWER_OFF, ISPSSPM0_ISPSSC_MASK); + + /* + * There should be no IUNIT access while power-down is + * in progress HW sighting: 4567865 + * Wait up to 50 ms for the IUNIT to shut down. 
+ */ + timeout = jiffies + msecs_to_jiffies(50); + while (1) { + /* Wait until ISPSSPM0 bit[25:24] shows 0x3 */ + iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &val); + val = (val & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET; + if (val == ISPSSPM0_IUNIT_POWER_OFF) + break; + + if (time_after(jiffies, timeout)) { + dev_err(&dev->dev, "IUNIT power-off timeout.\n"); + return -EBUSY; + } + usleep_range(1000, 2000); + } + + pm_runtime_allow(&dev->dev); + pm_runtime_put_sync_suspend(&dev->dev); + + return 0; +} + +static void isp_remove(struct pci_dev *dev) +{ + pm_runtime_get_sync(&dev->dev); + pm_runtime_forbid(&dev->dev); +} + +static int isp_pci_suspend(struct device *dev) +{ + return 0; +} + +static int isp_pci_resume(struct device *dev) +{ + return 0; +} + +static UNIVERSAL_DEV_PM_OPS(isp_pm_ops, isp_pci_suspend, + isp_pci_resume, NULL); + +static const struct pci_device_id isp_id_table[] = { + { PCI_VDEVICE(INTEL, 0x22b8), }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, isp_id_table); + +static struct pci_driver isp_pci_driver = { + .name = "intel_atomisp2_pm", + .id_table = isp_id_table, + .probe = isp_probe, + .remove = isp_remove, + .driver.pm = &isp_pm_ops, +}; + +module_pci_driver(isp_pci_driver); + +MODULE_DESCRIPTION("Intel AtomISP2 dummy / power-management drv (for suspend)"); +MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel_bxtwc_tmu.c b/drivers/platform/x86/intel_bxtwc_tmu.c index 227943a20212..951c105bafc1 100644 --- a/drivers/platform/x86/intel_bxtwc_tmu.c +++ b/drivers/platform/x86/intel_bxtwc_tmu.c @@ -1,21 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * intel_bxtwc_tmu.c - Intel BXT Whiskey Cove PMIC TMU driver + * Intel BXT Whiskey Cove PMIC TMU driver * * Copyright (C) 2016 Intel Corporation. All rights reserved. * * This driver adds TMU (Time Management Unit) support for Intel BXT platform. * It enables the alarm wake-up functionality in the TMU unit of Whiskey Cove * PMIC. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * */ #include <linux/module.h> diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe.c index f40b1c192106..464fe93657b5 100644 --- a/drivers/platform/x86/intel_cht_int33fe.c +++ b/drivers/platform/x86/intel_cht_int33fe.c @@ -1,12 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Intel Cherry Trail ACPI INT33FE pseudo device driver * * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
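intel_atomisp2_pm above is deliberately a do-nothing driver: probe() powers the ISP off and hands the device to runtime PM with pm_runtime_allow() plus pm_runtime_put_sync_suspend(), while remove() takes the reference back and calls pm_runtime_forbid(). A stripped-down sketch of that "park the device in D3" pattern for a hypothetical PCI device; the device-specific power-off handshake is omitted:

#include <linux/pci.h>
#include <linux/pm_runtime.h>

static int example_park_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        /* device-specific power-down sequencing would go here */

        pm_runtime_allow(&pdev->dev);            /* let the PM core manage the device */
        pm_runtime_put_sync_suspend(&pdev->dev); /* drop probe's reference, suspend now */
        return 0;
}

static void example_park_remove(struct pci_dev *pdev)
{
        pm_runtime_get_sync(&pdev->dev);        /* resume before unbinding */
        pm_runtime_forbid(&pdev->dev);          /* restore default runtime-PM policy */
}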
- * * Some Intel Cherry Trail based device which ship with Windows 10, have * this weird INT33FE ACPI device with a CRS table with 4 I2cSerialBusV2 * resources, for 4 different chips attached to various i2c busses: @@ -257,4 +254,4 @@ module_platform_driver(cht_int33fe_driver); MODULE_DESCRIPTION("Intel Cherry Trail ACPI INT33FE pseudo device driver"); MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel_chtdc_ti_pwrbtn.c b/drivers/platform/x86/intel_chtdc_ti_pwrbtn.c index 38b8e7cfe88c..0df2e82dd249 100644 --- a/drivers/platform/x86/intel_chtdc_ti_pwrbtn.c +++ b/drivers/platform/x86/intel_chtdc_ti_pwrbtn.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Power-button driver for Dollar Cove TI PMIC * Copyright (C) 2014 Intel Corp diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c index e89ad4964dc1..4b8f7305fc8a 100644 --- a/drivers/platform/x86/intel_int0002_vgpio.c +++ b/drivers/platform/x86/intel_int0002_vgpio.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Intel INT0002 "Virtual GPIO" driver * @@ -9,10 +10,6 @@ * * Author: Dyut Kumar Sil <dyut.k.sil@intel.com> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Some peripherals on Bay Trail and Cherry Trail platforms signal a Power * Management Event (PME) to the Power Management Controller (PMC) to wakeup * the system. When this happens software needs to clear the PME bus 0 status @@ -57,11 +54,7 @@ #define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } static const struct x86_cpu_id int0002_cpu_ids[] = { -/* - * Limit ourselves to Cherry Trail for now, until testing shows we - * need to handle the INT0002 device on Baytrail too. 
- * ICPU(INTEL_FAM6_ATOM_SILVERMONT), * Valleyview, Bay Trail * - */ + ICPU(INTEL_FAM6_ATOM_SILVERMONT), /* Valleyview, Bay Trail */ ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */ {} }; @@ -110,6 +103,21 @@ static void int0002_irq_mask(struct irq_data *data) outl(gpe_en_reg, GPE0A_EN_PORT); } +static int int0002_irq_set_wake(struct irq_data *data, unsigned int on) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(data); + struct platform_device *pdev = to_platform_device(chip->parent); + int irq = platform_get_irq(pdev, 0); + + /* Propagate to parent irq */ + if (on) + enable_irq_wake(irq); + else + disable_irq_wake(irq); + + return 0; +} + static irqreturn_t int0002_irq(int irq, void *data) { struct gpio_chip *chip = data; @@ -132,6 +140,7 @@ static struct irq_chip int0002_irqchip = { .irq_ack = int0002_irq_ack, .irq_mask = int0002_irq_mask, .irq_unmask = int0002_irq_unmask, + .irq_set_wake = int0002_irq_set_wake, }; static int int0002_probe(struct platform_device *pdev) @@ -216,4 +225,4 @@ module_platform_driver(int0002_driver); MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_DESCRIPTION("Intel INT0002 Virtual GPIO driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index c5ece7ef08c6..225638a1b09e 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2009-2010 Intel Corporation * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * * Authors: * Jesse Barnes <jbarnes@virtuousgeek.org> */ @@ -1697,6 +1686,6 @@ static struct pci_driver ips_pci_driver = { module_pci_driver(ips_pci_driver); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Jesse Barnes <jbarnes@virtuousgeek.org>"); MODULE_DESCRIPTION("Intelligent Power Sharing Driver"); diff --git a/drivers/platform/x86/intel_ips.h b/drivers/platform/x86/intel_ips.h index 60f4e3ddbe9f..512ad234ad0d 100644 --- a/drivers/platform/x86/intel_ips.h +++ b/drivers/platform/x86/intel_ips.h @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2010 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". 
*/ void ips_link_to_i915_driver(void); diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c index ef9b0af8cdd3..77eb8709c931 100644 --- a/drivers/platform/x86/intel_menlow.c +++ b/drivers/platform/x86/intel_menlow.c @@ -1,25 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * intel_menlow.c - Intel menlow Driver for thermal management extension + * Intel menlow Driver for thermal management extension * * Copyright (C) 2008 Intel Corp * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com> * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com> - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This driver creates the sys I/F for programming the sensors. * It also implements the driver for intel menlow memory controller (hardware @@ -29,20 +14,19 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/acpi.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/types.h> #include <linux/pci.h> #include <linux/pm.h> +#include <linux/slab.h> #include <linux/thermal.h> -#include <linux/acpi.h> +#include <linux/types.h> MODULE_AUTHOR("Thomas Sujith"); MODULE_AUTHOR("Zhang Rui"); MODULE_DESCRIPTION("Intel Menlow platform specific driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); /* * Memory controller device control diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c index 5ad44204a9c3..292bace83f1e 100644 --- a/drivers/platform/x86/intel_mid_powerbtn.c +++ b/drivers/platform/x86/intel_mid_powerbtn.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Power button driver for Intel MID platforms. * @@ -5,18 +6,8 @@ * * Author: Hong Liu <hong.liu@intel.com> * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
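Most of the remaining hunks are the same mechanical relicensing cleanup seen above: the GPL boilerplate comment is replaced by a single SPDX line, and MODULE_LICENSE() is tightened to match it, "GPL v2" for GPL-2.0-only code and plain "GPL" where the license is GPL-2.0-or-later. A minimal example of how the two must agree:

// SPDX-License-Identifier: GPL-2.0
/* example.c - module header after the conversion */

#include <linux/module.h>

static int __init example_init(void)
{
        return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
}
module_exit(example_exit);

MODULE_LICENSE("GPL v2");       /* "GPL v2" = v2 only, matching the SPDX tag above */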
*/ -#include <linux/init.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/mfd/intel_msic.h> @@ -121,12 +112,9 @@ static const struct mid_pb_ddata mrfld_ddata = { .setup = mrfld_setup, }; -#define ICPU(model, ddata) \ - { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (kernel_ulong_t)&ddata } - static const struct x86_cpu_id mid_pb_cpu_ids[] = { - ICPU(INTEL_FAM6_ATOM_SALTWELL_MID, mfld_ddata), - ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, mrfld_ddata), + INTEL_CPU_FAM6(ATOM_SALTWELL_MID, mfld_ddata), + INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, mrfld_ddata), {} }; diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c index 008a76903cbf..f402e2e74a38 100644 --- a/drivers/platform/x86/intel_mid_thermal.c +++ b/drivers/platform/x86/intel_mid_thermal.c @@ -1,39 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * intel_mid_thermal.c - Intel MID platform thermal driver + * Intel MID platform thermal driver * * Copyright (C) 2011 Intel Corporation * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Author: Durgadoss R <durgadoss.r@intel.com> */ #define pr_fmt(fmt) "intel_mid_thermal: " fmt -#include <linux/module.h> -#include <linux/init.h> +#include <linux/device.h> #include <linux/err.h> +#include <linux/mfd/intel_msic.h> +#include <linux/module.h> #include <linux/param.h> -#include <linux/device.h> #include <linux/platform_device.h> -#include <linux/slab.h> #include <linux/pm.h> +#include <linux/slab.h> #include <linux/thermal.h> -#include <linux/mfd/intel_msic.h> /* Number of thermal sensors */ #define MSIC_THERMAL_SENSORS 4 @@ -567,4 +551,4 @@ module_platform_driver(mid_thermal_driver); MODULE_AUTHOR("Durgadoss R <durgadoss.r@intel.com>"); MODULE_DESCRIPTION("Intel Medfield Platform Thermal Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c index 5747f63c8d9f..3c0438ba385e 100644 --- a/drivers/platform/x86/intel_oaktrail.c +++ b/drivers/platform/x86/intel_oaktrail.c @@ -1,5 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0+ /* - * intel_oaktrail.c - Intel OakTrail Platform support. + * Intel OakTrail Platform support * * Copyright (C) 2010-2011 Intel Corporation * Author: Yin Kangkai (kangkai.yin@intel.com) @@ -8,21 +9,6 @@ * <cezary.jackiewicz (at) gmail.com>, based on MSI driver * Copyright (C) 2006 Lennart Poettering <mzxreary (at) 0pointer (dot) de> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
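The intel_mid_powerbtn hunk above (and the telemetry one further down) replaces a locally defined ICPU()/TELEM_DEBUGFS_CPU() wrapper with INTEL_CPU_FAM6() from the x86 cpu-id headers, which fills in the vendor/family/feature fields and stores a pointer to the per-model data in driver_data. A hedged sketch of how such a table is usually matched and its data retrieved (example_ddata and example_match are hypothetical):

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

struct example_ddata {
        int flags;
};

static const struct example_ddata mfld_example = { .flags = 1 };

static const struct x86_cpu_id example_cpu_ids[] = {
        INTEL_CPU_FAM6(ATOM_SALTWELL_MID, mfld_example),
        {}
};

static const struct example_ddata *example_match(void)
{
        const struct x86_cpu_id *id = x86_match_cpu(example_cpu_ids);

        return id ? (const struct example_ddata *)id->driver_data : NULL;
}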
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - * * This driver does below things: * 1. registers itself in the Linux backlight control in * /sys/class/backlight/intel_oaktrail/ @@ -38,18 +24,18 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/init.h> #include <linux/acpi.h> -#include <linux/fb.h> -#include <linux/mutex.h> +#include <linux/backlight.h> +#include <linux/dmi.h> #include <linux/err.h> +#include <linux/fb.h> #include <linux/i2c.h> -#include <linux/backlight.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> #include <linux/platform_device.h> -#include <linux/dmi.h> #include <linux/rfkill.h> + #include <acpi/video.h> #define DRIVER_NAME "intel_oaktrail" diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index 2d272a3e0176..6b31d410cb09 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Intel Core SoC Power Management Controller Driver * @@ -6,16 +7,6 @@ * * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com> * Vishwanath Somayaji <vishwanath.somayaji@intel.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h index 93a7e99e1f8b..be045348ad86 100644 --- a/drivers/platform/x86/intel_pmc_core.h +++ b/drivers/platform/x86/intel_pmc_core.h @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Intel Core SoC Power Management Controller Header File * @@ -6,16 +7,6 @@ * * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com> * Vishwanath Somayaji <vishwanath.somayaji@intel.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * */ #ifndef PMC_CORE_H diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c index e7edc8c63936..7964ba22ef8d 100644 --- a/drivers/platform/x86/intel_pmc_ipc.c +++ b/drivers/platform/x86/intel_pmc_ipc.c @@ -1,39 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * intel_pmc_ipc.c: Driver for the Intel PMC IPC mechanism + * Driver for the Intel PMC IPC mechanism * * (C) Copyright 2014-2015 Intel Corporation * - * This driver is based on Intel SCU IPC driver(intel_scu_opc.c) by + * This driver is based on Intel SCU IPC driver(intel_scu_ipc.c) by * Sreedhara DS <sreedhara.ds@intel.com> * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; version 2 - * of the License. - * * PMC running in ARC processor communicates with other entity running in IA * core through IPC mechanism which in turn messaging between IA core ad PMC. */ -#include <linux/module.h> +#include <linux/acpi.h> +#include <linux/atomic.h> +#include <linux/bitops.h> #include <linux/delay.h> -#include <linux/errno.h> -#include <linux/init.h> #include <linux/device.h> -#include <linux/pm.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/notifier.h> #include <linux/pci.h> #include <linux/platform_device.h> -#include <linux/interrupt.h> +#include <linux/pm.h> #include <linux/pm_qos.h> -#include <linux/kernel.h> -#include <linux/bitops.h> #include <linux/sched.h> -#include <linux/atomic.h> -#include <linux/notifier.h> -#include <linux/suspend.h> -#include <linux/acpi.h> -#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/spinlock.h> +#include <linux/suspend.h> #include <asm/intel_pmc_ipc.h> @@ -1029,7 +1024,7 @@ static void __exit intel_pmc_ipc_exit(void) MODULE_AUTHOR("Zha Qipeng <qipeng.zha@intel.com>"); MODULE_DESCRIPTION("Intel PMC IPC driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); /* Some modules are dependent on this, so init earlier */ fs_initcall(intel_pmc_ipc_init); diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c index 2efeab650345..79671927f4ef 100644 --- a/drivers/platform/x86/intel_punit_ipc.c +++ b/drivers/platform/x86/intel_punit_ipc.c @@ -1,25 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Driver for the Intel P-Unit Mailbox IPC mechanism * * (C) Copyright 2015 Intel Corporation * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * The heart of the P-Unit is the Foxton microcontroller and its firmware, * which provide mailbox interface for power management usage. 
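At the end of the intel_pmc_ipc hunk above the driver keeps registering with fs_initcall() rather than module_init(), with the comment explaining why: other code depends on the PMC IPC at its own init time, so the provider must come up at an earlier initcall level than the default device_initcall(). A small sketch of the idiom for a hypothetical built-in provider:

#include <linux/init.h>
#include <linux/kernel.h>

static int __init example_provider_init(void)
{
        pr_info("example provider ready\n");
        return 0;
}
/*
 * fs_initcall runs before device_initcall (what module_init() maps to
 * for built-in code), so consumers probing at the default level can
 * rely on this provider being initialized.
 */
fs_initcall(example_provider_init);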
*/ -#include <linux/module.h> -#include <linux/mod_devicetable.h> #include <linux/acpi.h> -#include <linux/delay.h> #include <linux/bitops.h> +#include <linux/delay.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> #include <linux/platform_device.h> + #include <asm/intel_punit_ipc.h> /* IPC Mailbox registers */ diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index 75c8fef7a482..cdab916fbf92 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -1,14 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * intel_scu_ipc.c: Driver for the Intel SCU IPC mechanism + * Driver for the Intel SCU IPC mechanism * * (C) Copyright 2008-2010,2015 Intel Corporation * Author: Sreedhara DS (sreedhara.ds@intel.com) * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; version 2 - * of the License. - * * SCU running in ARC processor communicates with other entity running in IA * core through IPC mechanism which in turn messaging between IA core ad SCU. * SCU has two IPC mechanism IPC-1 and IPC-2. IPC-1 is used between IA32 and @@ -16,14 +12,16 @@ * IPC-1 Driver provides an API for power control unit registers (e.g. MSIC) * along with other APIs. */ + #include <linux/delay.h> +#include <linux/device.h> #include <linux/errno.h> #include <linux/init.h> -#include <linux/device.h> -#include <linux/pm.h> -#include <linux/pci.h> #include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/pm.h> #include <linux/sfi.h> + #include <asm/intel-mid.h> #include <asm/intel_scu_ipc.h> diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c index aa454241489c..8afe6fa06d7b 100644 --- a/drivers/platform/x86/intel_scu_ipcutil.c +++ b/drivers/platform/x86/intel_scu_ipcutil.c @@ -1,32 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * intel_scu_ipc.c: Driver for the Intel SCU IPC mechanism + * Driver for the Intel SCU IPC mechanism * * (C) Copyright 2008-2010 Intel Corporation * Author: Sreedhara DS (sreedhara.ds@intel.com) * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; version 2 - * of the License. - * - * This driver provides ioctl interfaces to call intel scu ipc driver api + * This driver provides IOCTL interfaces to call Intel SCU IPC driver API. 
*/ -#include <linux/module.h> -#include <linux/kernel.h> #include <linux/errno.h> -#include <linux/types.h> -#include <linux/fs.h> #include <linux/fcntl.h> +#include <linux/fs.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/sched.h> -#include <linux/uaccess.h> #include <linux/slab.h> -#include <linux/init.h> +#include <linux/types.h> +#include <linux/uaccess.h> + #include <asm/intel_scu_ipc.h> static int major; -/* ioctl commnds */ +/* IOCTL commands */ #define INTE_SCU_IPC_REGISTER_READ 0 #define INTE_SCU_IPC_REGISTER_WRITE 1 #define INTE_SCU_IPC_REGISTER_UPDATE 2 diff --git a/drivers/platform/x86/intel_telemetry_core.c b/drivers/platform/x86/intel_telemetry_core.c index f378621b5fe9..d4040bb222b4 100644 --- a/drivers/platform/x86/intel_telemetry_core.c +++ b/drivers/platform/x86/intel_telemetry_core.c @@ -1,17 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Intel SoC Core Telemetry Driver * Copyright (C) 2015, Intel Corporation. * All Rights Reserved. * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * * Telemetry Framework provides platform related PM and performance statistics. * This file provides the core telemetry API implementation. */ @@ -460,4 +452,4 @@ module_exit(telemetry_module_exit); MODULE_AUTHOR("Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>"); MODULE_DESCRIPTION("Intel SoC Telemetry Interface"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c index cee08f236292..40bce560eb30 100644 --- a/drivers/platform/x86/intel_telemetry_debugfs.c +++ b/drivers/platform/x86/intel_telemetry_debugfs.c @@ -1,17 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Intel SOC Telemetry debugfs Driver: Currently supports APL * Copyright (c) 2015, Intel Corporation. * All Rights Reserved. * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * * This file provides the debugfs interfaces for telemetry. 
* /sys/kernel/debug/telemetry/pss_info: Shows Primary Control Sub-Sys Counters * /sys/kernel/debug/telemetry/ioss_info: Shows IO Sub-System Counters @@ -72,9 +64,6 @@ #define TELEM_IOSS_DX_D0IX_EVTS 25 #define TELEM_IOSS_PG_EVTS 30 -#define TELEM_DEBUGFS_CPU(model, data) \ - { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&data} - #define TELEM_CHECK_AND_PARSE_EVTS(EVTID, EVTNUM, BUF, EVTLOG, EVTDAT, MASK) { \ if (evtlog[index].telem_evtid == (EVTID)) { \ for (idx = 0; idx < (EVTNUM); idx++) \ @@ -319,8 +308,8 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = { }; static const struct x86_cpu_id telemetry_debugfs_cpu_ids[] = { - TELEM_DEBUGFS_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_debugfs_conf), - TELEM_DEBUGFS_CPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, telem_apl_debugfs_conf), + INTEL_CPU_FAM6(ATOM_GOLDMONT, telem_apl_debugfs_conf), + INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, telem_apl_debugfs_conf), {} }; @@ -951,12 +940,16 @@ static int __init telemetry_debugfs_init(void) debugfs_conf = (struct telemetry_debugfs_conf *)id->driver_data; err = telemetry_pltconfig_valid(); - if (err < 0) + if (err < 0) { + pr_info("Invalid pltconfig, ensure IPC1 device is enabled in BIOS\n"); return -ENODEV; + } err = telemetry_debugfs_check_evts(); - if (err < 0) + if (err < 0) { + pr_info("telemetry_debugfs_check_evts failed\n"); return -EINVAL; + } register_pm_notifier(&pm_notifier); @@ -1037,4 +1030,4 @@ module_exit(telemetry_debugfs_exit); MODULE_AUTHOR("Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>"); MODULE_DESCRIPTION("Intel SoC Telemetry debugfs Interface"); MODULE_VERSION(DRIVER_VERSION); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c index fcc6bee51a42..df8565bad595 100644 --- a/drivers/platform/x86/intel_telemetry_pltdrv.c +++ b/drivers/platform/x86/intel_telemetry_pltdrv.c @@ -1,17 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Intel SOC Telemetry Platform Driver: Currently supports APL * Copyright (c) 2015, Intel Corporation. * All Rights Reserved. * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * * This file provides the platform specific telemetry implementation for APL. * It used the PUNIT and PMC IPC interfaces for configuring the counters. * The accumulated results are fetched from SRAM. @@ -1242,4 +1234,4 @@ module_exit(telemetry_module_exit); MODULE_AUTHOR("Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>"); MODULE_DESCRIPTION("Intel SoC Telemetry Platform Driver"); MODULE_VERSION(DRIVER_VERSION); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel_turbo_max_3.c b/drivers/platform/x86/intel_turbo_max_3.c index a6d5aa0c3c47..7b9cc841ab65 100644 --- a/drivers/platform/x86/intel_turbo_max_3.c +++ b/drivers/platform/x86/intel_turbo_max_3.c @@ -1,28 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Intel Turbo Boost Max Technology 3.0 legacy (non HWP) enumeration driver * Copyright (c) 2017, Intel Corporation. * All rights reserved. 
* * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/kernel.h> +#include <linux/cpufeature.h> +#include <linux/cpuhotplug.h> #include <linux/init.h> +#include <linux/kernel.h> #include <linux/topology.h> #include <linux/workqueue.h> -#include <linux/cpuhotplug.h> -#include <linux/cpufeature.h> + #include <asm/cpu_device_id.h> #include <asm/intel-family.h> diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c new file mode 100644 index 000000000000..c0bb1f864dfe --- /dev/null +++ b/drivers/platform/x86/lg-laptop.c @@ -0,0 +1,700 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * lg-laptop.c - LG Gram ACPI features and hotkeys Driver + * + * Copyright (C) 2018 Matan Ziv-Av <matan@svgalib.org> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/acpi.h> +#include <linux/input.h> +#include <linux/input/sparse-keymap.h> +#include <linux/kernel.h> +#include <linux/leds.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/types.h> + +#define LED_DEVICE(_name, max) struct led_classdev _name = { \ + .name = __stringify(_name), \ + .max_brightness = max, \ + .brightness_set = _name##_set, \ + .brightness_get = _name##_get, \ +} + +MODULE_AUTHOR("Matan Ziv-Av"); +MODULE_DESCRIPTION("LG WMI Hotkey Driver"); +MODULE_LICENSE("GPL"); + +#define WMI_EVENT_GUID0 "E4FB94F9-7F2B-4173-AD1A-CD1D95086248" +#define WMI_EVENT_GUID1 "023B133E-49D1-4E10-B313-698220140DC2" +#define WMI_EVENT_GUID2 "37BE1AC0-C3F2-4B1F-BFBE-8FDEAF2814D6" +#define WMI_EVENT_GUID3 "911BAD44-7DF8-4FBB-9319-BABA1C4B293B" +#define WMI_METHOD_WMAB "C3A72B38-D3EF-42D3-8CBB-D5A57049F66D" +#define WMI_METHOD_WMBB "2B4F501A-BD3C-4394-8DCF-00A7D2BC8210" +#define WMI_EVENT_GUID WMI_EVENT_GUID0 + +#define WMAB_METHOD "\\XINI.WMAB" +#define WMBB_METHOD "\\XINI.WMBB" +#define SB_GGOV_METHOD "\\_SB.GGOV" +#define GOV_TLED 0x2020008 +#define WM_GET 1 +#define WM_SET 2 +#define WM_KEY_LIGHT 0x400 +#define WM_TLED 0x404 +#define WM_FN_LOCK 0x407 +#define WM_BATT_LIMIT 0x61 +#define WM_READER_MODE 0xBF +#define WM_FAN_MODE 0x33 +#define WMBB_USB_CHARGE 0x10B +#define WMBB_BATT_LIMIT 0x10C + +#define PLATFORM_NAME "lg-laptop" + +MODULE_ALIAS("wmi:" WMI_EVENT_GUID0); +MODULE_ALIAS("wmi:" WMI_EVENT_GUID1); +MODULE_ALIAS("wmi:" WMI_EVENT_GUID2); +MODULE_ALIAS("wmi:" WMI_EVENT_GUID3); +MODULE_ALIAS("wmi:" WMI_METHOD_WMAB); +MODULE_ALIAS("wmi:" WMI_METHOD_WMBB); +MODULE_ALIAS("acpi*:LGEX0815:*"); + +static struct platform_device *pf_device; +static struct input_dev *wmi_input_dev; + +static u32 inited; +#define INIT_INPUT_WMI_0 0x01 +#define INIT_INPUT_WMI_2 0x02 +#define INIT_INPUT_ACPI 0x04 +#define INIT_TPAD_LED 0x08 +#define INIT_KBD_LED 0x10 +#define INIT_SPARSE_KEYMAP 0x80 + +static const struct key_entry wmi_keymap[] = { + {KE_KEY, 0x70, {KEY_F15} }, /* LG control panel (F1) */ + {KE_KEY, 0x74, {KEY_F13} }, /* Touchpad toggle (F5) */ + {KE_KEY, 0xf020000, {KEY_F14} }, /* Read mode (F9) */ + {KE_KEY, 0x10000000, {KEY_F16} },/* Keyboard backlight 
(F8) - pressing + * this key both sends an event and + * changes backlight level. + */ + {KE_KEY, 0x80, {KEY_RFKILL} }, + {KE_END, 0} +}; + +static int ggov(u32 arg0) +{ + union acpi_object args[1]; + union acpi_object *r; + acpi_status status; + acpi_handle handle; + struct acpi_object_list arg; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + int res; + + args[0].type = ACPI_TYPE_INTEGER; + args[0].integer.value = arg0; + + status = acpi_get_handle(NULL, (acpi_string) SB_GGOV_METHOD, &handle); + if (ACPI_FAILURE(status)) { + pr_err("Cannot get handle"); + return -ENODEV; + } + + arg.count = 1; + arg.pointer = args; + + status = acpi_evaluate_object(handle, NULL, &arg, &buffer); + if (ACPI_FAILURE(status)) { + acpi_handle_err(handle, "GGOV: call failed.\n"); + return -EINVAL; + } + + r = buffer.pointer; + if (r->type != ACPI_TYPE_INTEGER) { + kfree(r); + return -EINVAL; + } + + res = r->integer.value; + kfree(r); + + return res; +} + +static union acpi_object *lg_wmab(u32 method, u32 arg1, u32 arg2) +{ + union acpi_object args[3]; + acpi_status status; + acpi_handle handle; + struct acpi_object_list arg; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + + args[0].type = ACPI_TYPE_INTEGER; + args[0].integer.value = method; + args[1].type = ACPI_TYPE_INTEGER; + args[1].integer.value = arg1; + args[2].type = ACPI_TYPE_INTEGER; + args[2].integer.value = arg2; + + status = acpi_get_handle(NULL, (acpi_string) WMAB_METHOD, &handle); + if (ACPI_FAILURE(status)) { + pr_err("Cannot get handle"); + return NULL; + } + + arg.count = 3; + arg.pointer = args; + + status = acpi_evaluate_object(handle, NULL, &arg, &buffer); + if (ACPI_FAILURE(status)) { + acpi_handle_err(handle, "WMAB: call failed.\n"); + return NULL; + } + + return buffer.pointer; +} + +static union acpi_object *lg_wmbb(u32 method_id, u32 arg1, u32 arg2) +{ + union acpi_object args[3]; + acpi_status status; + acpi_handle handle; + struct acpi_object_list arg; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + u8 buf[32]; + + *(u32 *)buf = method_id; + *(u32 *)(buf + 4) = arg1; + *(u32 *)(buf + 16) = arg2; + args[0].type = ACPI_TYPE_INTEGER; + args[0].integer.value = 0; /* ignored */ + args[1].type = ACPI_TYPE_INTEGER; + args[1].integer.value = 1; /* Must be 1 or 2. 
Does not matter which */ + args[2].type = ACPI_TYPE_BUFFER; + args[2].buffer.length = 32; + args[2].buffer.pointer = buf; + + status = acpi_get_handle(NULL, (acpi_string)WMBB_METHOD, &handle); + if (ACPI_FAILURE(status)) { + pr_err("Cannot get handle"); + return NULL; + } + + arg.count = 3; + arg.pointer = args; + + status = acpi_evaluate_object(handle, NULL, &arg, &buffer); + if (ACPI_FAILURE(status)) { + acpi_handle_err(handle, "WMAB: call failed.\n"); + return NULL; + } + + return (union acpi_object *)buffer.pointer; +} + +static void wmi_notify(u32 value, void *context) +{ + struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + acpi_status status; + long data = (long)context; + + pr_debug("event guid %li\n", data); + status = wmi_get_event_data(value, &response); + if (ACPI_FAILURE(status)) { + pr_err("Bad event status 0x%x\n", status); + return; + } + + obj = (union acpi_object *)response.pointer; + if (!obj) + return; + + if (obj->type == ACPI_TYPE_INTEGER) { + int eventcode = obj->integer.value; + struct key_entry *key; + + key = + sparse_keymap_entry_from_scancode(wmi_input_dev, eventcode); + if (key && key->type == KE_KEY) + sparse_keymap_report_entry(wmi_input_dev, key, 1, true); + } + + pr_debug("Type: %i Eventcode: 0x%llx\n", obj->type, + obj->integer.value); + kfree(response.pointer); +} + +static void wmi_input_setup(void) +{ + acpi_status status; + + wmi_input_dev = input_allocate_device(); + if (wmi_input_dev) { + wmi_input_dev->name = "LG WMI hotkeys"; + wmi_input_dev->phys = "wmi/input0"; + wmi_input_dev->id.bustype = BUS_HOST; + + if (sparse_keymap_setup(wmi_input_dev, wmi_keymap, NULL) || + input_register_device(wmi_input_dev)) { + pr_info("Cannot initialize input device"); + input_free_device(wmi_input_dev); + return; + } + + inited |= INIT_SPARSE_KEYMAP; + status = wmi_install_notify_handler(WMI_EVENT_GUID0, wmi_notify, + (void *)0); + if (ACPI_SUCCESS(status)) + inited |= INIT_INPUT_WMI_0; + + status = wmi_install_notify_handler(WMI_EVENT_GUID2, wmi_notify, + (void *)2); + if (ACPI_SUCCESS(status)) + inited |= INIT_INPUT_WMI_2; + } else { + pr_info("Cannot allocate input device"); + } +} + +static void acpi_notify(struct acpi_device *device, u32 event) +{ + struct key_entry *key; + + acpi_handle_debug(device->handle, "notify: %d\n", event); + if (inited & INIT_SPARSE_KEYMAP) { + key = sparse_keymap_entry_from_scancode(wmi_input_dev, 0x80); + if (key && key->type == KE_KEY) + sparse_keymap_report_entry(wmi_input_dev, key, 1, true); + } +} + +static ssize_t fan_mode_store(struct device *dev, + struct device_attribute *attr, + const char *buffer, size_t count) +{ + bool value; + union acpi_object *r; + u32 m; + int ret; + + ret = kstrtobool(buffer, &value); + if (ret) + return ret; + + r = lg_wmab(WM_FAN_MODE, WM_GET, 0); + if (!r) + return -EIO; + + if (r->type != ACPI_TYPE_INTEGER) { + kfree(r); + return -EIO; + } + + m = r->integer.value; + kfree(r); + r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4)); + kfree(r); + r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value); + kfree(r); + + return count; +} + +static ssize_t fan_mode_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + unsigned int status; + union acpi_object *r; + + r = lg_wmab(WM_FAN_MODE, WM_GET, 0); + if (!r) + return -EIO; + + if (r->type != ACPI_TYPE_INTEGER) { + kfree(r); + return -EIO; + } + + status = r->integer.value & 0x01; + kfree(r); + + return snprintf(buffer, PAGE_SIZE, "%d\n", status); +} + +static ssize_t 
usb_charge_store(struct device *dev, + struct device_attribute *attr, + const char *buffer, size_t count) +{ + bool value; + union acpi_object *r; + int ret; + + ret = kstrtobool(buffer, &value); + if (ret) + return ret; + + r = lg_wmbb(WMBB_USB_CHARGE, WM_SET, value); + if (!r) + return -EIO; + + kfree(r); + return count; +} + +static ssize_t usb_charge_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + unsigned int status; + union acpi_object *r; + + r = lg_wmbb(WMBB_USB_CHARGE, WM_GET, 0); + if (!r) + return -EIO; + + if (r->type != ACPI_TYPE_BUFFER) { + kfree(r); + return -EIO; + } + + status = !!r->buffer.pointer[0x10]; + + kfree(r); + + return snprintf(buffer, PAGE_SIZE, "%d\n", status); +} + +static ssize_t reader_mode_store(struct device *dev, + struct device_attribute *attr, + const char *buffer, size_t count) +{ + bool value; + union acpi_object *r; + int ret; + + ret = kstrtobool(buffer, &value); + if (ret) + return ret; + + r = lg_wmab(WM_READER_MODE, WM_SET, value); + if (!r) + return -EIO; + + kfree(r); + return count; +} + +static ssize_t reader_mode_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + unsigned int status; + union acpi_object *r; + + r = lg_wmab(WM_READER_MODE, WM_GET, 0); + if (!r) + return -EIO; + + if (r->type != ACPI_TYPE_INTEGER) { + kfree(r); + return -EIO; + } + + status = !!r->integer.value; + + kfree(r); + + return snprintf(buffer, PAGE_SIZE, "%d\n", status); +} + +static ssize_t fn_lock_store(struct device *dev, + struct device_attribute *attr, + const char *buffer, size_t count) +{ + bool value; + union acpi_object *r; + int ret; + + ret = kstrtobool(buffer, &value); + if (ret) + return ret; + + r = lg_wmab(WM_FN_LOCK, WM_SET, value); + if (!r) + return -EIO; + + kfree(r); + return count; +} + +static ssize_t fn_lock_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + unsigned int status; + union acpi_object *r; + + r = lg_wmab(WM_FN_LOCK, WM_GET, 0); + if (!r) + return -EIO; + + if (r->type != ACPI_TYPE_BUFFER) { + kfree(r); + return -EIO; + } + + status = !!r->buffer.pointer[0]; + kfree(r); + + return snprintf(buffer, PAGE_SIZE, "%d\n", status); +} + +static ssize_t battery_care_limit_store(struct device *dev, + struct device_attribute *attr, + const char *buffer, size_t count) +{ + unsigned long value; + int ret; + + ret = kstrtoul(buffer, 10, &value); + if (ret) + return ret; + + if (value == 100 || value == 80) { + union acpi_object *r; + + r = lg_wmab(WM_BATT_LIMIT, WM_SET, value); + if (!r) + return -EIO; + + kfree(r); + return count; + } + + return -EINVAL; +} + +static ssize_t battery_care_limit_show(struct device *dev, + struct device_attribute *attr, + char *buffer) +{ + unsigned int status; + union acpi_object *r; + + r = lg_wmab(WM_BATT_LIMIT, WM_GET, 0); + if (!r) + return -EIO; + + if (r->type != ACPI_TYPE_INTEGER) { + kfree(r); + return -EIO; + } + + status = r->integer.value; + kfree(r); + if (status != 80 && status != 100) + status = 0; + + return snprintf(buffer, PAGE_SIZE, "%d\n", status); +} + +static DEVICE_ATTR_RW(fan_mode); +static DEVICE_ATTR_RW(usb_charge); +static DEVICE_ATTR_RW(reader_mode); +static DEVICE_ATTR_RW(fn_lock); +static DEVICE_ATTR_RW(battery_care_limit); + +static struct attribute *dev_attributes[] = { + &dev_attr_fan_mode.attr, + &dev_attr_usb_charge.attr, + &dev_attr_reader_mode.attr, + &dev_attr_fn_lock.attr, + &dev_attr_battery_care_limit.attr, + NULL +}; + +static const struct attribute_group dev_attribute_group = { + .attrs = 
dev_attributes, +}; + +static void tpad_led_set(struct led_classdev *cdev, + enum led_brightness brightness) +{ + union acpi_object *r; + + r = lg_wmab(WM_TLED, WM_SET, brightness > LED_OFF); + kfree(r); +} + +static enum led_brightness tpad_led_get(struct led_classdev *cdev) +{ + return ggov(GOV_TLED) > 0 ? LED_ON : LED_OFF; +} + +static LED_DEVICE(tpad_led, 1); + +static void kbd_backlight_set(struct led_classdev *cdev, + enum led_brightness brightness) +{ + u32 val; + union acpi_object *r; + + val = 0x22; + if (brightness <= LED_OFF) + val = 0; + if (brightness >= LED_FULL) + val = 0x24; + r = lg_wmab(WM_KEY_LIGHT, WM_SET, val); + kfree(r); +} + +static enum led_brightness kbd_backlight_get(struct led_classdev *cdev) +{ + union acpi_object *r; + int val; + + r = lg_wmab(WM_KEY_LIGHT, WM_GET, 0); + + if (!r) + return LED_OFF; + + if (r->type != ACPI_TYPE_BUFFER || r->buffer.pointer[1] != 0x05) { + kfree(r); + return LED_OFF; + } + + switch (r->buffer.pointer[0] & 0x27) { + case 0x24: + val = LED_FULL; + break; + case 0x22: + val = LED_HALF; + break; + default: + val = LED_OFF; + } + + kfree(r); + + return val; +} + +static LED_DEVICE(kbd_backlight, 255); + +static void wmi_input_destroy(void) +{ + if (inited & INIT_INPUT_WMI_2) + wmi_remove_notify_handler(WMI_EVENT_GUID2); + + if (inited & INIT_INPUT_WMI_0) + wmi_remove_notify_handler(WMI_EVENT_GUID0); + + if (inited & INIT_SPARSE_KEYMAP) + input_unregister_device(wmi_input_dev); + + inited &= ~(INIT_INPUT_WMI_0 | INIT_INPUT_WMI_2 | INIT_SPARSE_KEYMAP); +} + +static struct platform_driver pf_driver = { + .driver = { + .name = PLATFORM_NAME, + } +}; + +static int acpi_add(struct acpi_device *device) +{ + int ret; + + if (pf_device) + return 0; + + ret = platform_driver_register(&pf_driver); + if (ret) + return ret; + + pf_device = platform_device_register_simple(PLATFORM_NAME, + PLATFORM_DEVID_NONE, + NULL, 0); + if (IS_ERR(pf_device)) { + ret = PTR_ERR(pf_device); + pf_device = NULL; + pr_err("unable to register platform device\n"); + goto out_platform_registered; + } + + ret = sysfs_create_group(&pf_device->dev.kobj, &dev_attribute_group); + if (ret) + goto out_platform_device; + + if (!led_classdev_register(&pf_device->dev, &kbd_backlight)) + inited |= INIT_KBD_LED; + + if (!led_classdev_register(&pf_device->dev, &tpad_led)) + inited |= INIT_TPAD_LED; + + wmi_input_setup(); + + return 0; + +out_platform_device: + platform_device_unregister(pf_device); +out_platform_registered: + platform_driver_unregister(&pf_driver); + return ret; +} + +static int acpi_remove(struct acpi_device *device) +{ + sysfs_remove_group(&pf_device->dev.kobj, &dev_attribute_group); + if (inited & INIT_KBD_LED) + led_classdev_unregister(&kbd_backlight); + + if (inited & INIT_TPAD_LED) + led_classdev_unregister(&tpad_led); + + wmi_input_destroy(); + platform_device_unregister(pf_device); + pf_device = NULL; + platform_driver_unregister(&pf_driver); + + return 0; +} + +static const struct acpi_device_id device_ids[] = { + {"LGEX0815", 0}, + {"", 0} +}; +MODULE_DEVICE_TABLE(acpi, device_ids); + +static struct acpi_driver acpi_driver = { + .name = "LG Gram Laptop Support", + .class = "lg-laptop", + .ids = device_ids, + .ops = { + .add = acpi_add, + .remove = acpi_remove, + .notify = acpi_notify, + }, + .owner = THIS_MODULE, +}; + +static int __init acpi_init(void) +{ + int result; + + result = acpi_bus_register_driver(&acpi_driver); + if (result < 0) { + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error registering driver\n")); + return -ENODEV; + } + + return 0; +} + +static 
void __exit acpi_exit(void) +{ + acpi_bus_unregister_driver(&acpi_driver); +} + +module_init(acpi_init); +module_exit(acpi_exit); diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c index d89936c93ba0..c2c3a1a19879 100644 --- a/drivers/platform/x86/mlx-platform.c +++ b/drivers/platform/x86/mlx-platform.c @@ -575,7 +575,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_msn201x_items[] = { static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_msn201x_data = { - .items = mlxplat_mlxcpld_msn21xx_items, + .items = mlxplat_mlxcpld_msn201x_items, .counter = ARRAY_SIZE(mlxplat_mlxcpld_msn201x_items), .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET, .mask = MLXPLAT_CPLD_AGGR_MASK_DEF, diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index cb204f973491..5f2d7ea912b5 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -42,10 +42,13 @@ static const struct ts_dmi_data chuwi_hi8_data = { }; static const struct property_entry chuwi_hi8_pro_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 6), + PROPERTY_ENTRY_U32("touchscreen-min-y", 3), PROPERTY_ENTRY_U32("touchscreen-size-x", 1728), PROPERTY_ENTRY_U32("touchscreen-size-y", 1148), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-hi8-pro.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -56,6 +59,8 @@ static const struct ts_dmi_data chuwi_hi8_pro_data = { }; static const struct property_entry chuwi_vi8_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 4), + PROPERTY_ENTRY_U32("touchscreen-min-y", 6), PROPERTY_ENTRY_U32("touchscreen-size-x", 1724), PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), @@ -88,9 +93,9 @@ static const struct ts_dmi_data chuwi_vi10_data = { static const struct property_entry connect_tablet9_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-x", 9), - PROPERTY_ENTRY_U32("touchscreen-min-y", 8), + PROPERTY_ENTRY_U32("touchscreen-min-y", 10), PROPERTY_ENTRY_U32("touchscreen-size-x", 1664), - PROPERTY_ENTRY_U32("touchscreen-size-y", 878), + PROPERTY_ENTRY_U32("touchscreen-size-y", 880), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-connect-tablet9.fw"), @@ -104,8 +109,10 @@ static const struct ts_dmi_data connect_tablet9_data = { }; static const struct property_entry cube_iwork8_air_props[] = { - PROPERTY_ENTRY_U32("touchscreen-size-x", 1660), - PROPERTY_ENTRY_U32("touchscreen-size-y", 900), + PROPERTY_ENTRY_U32("touchscreen-min-x", 1), + PROPERTY_ENTRY_U32("touchscreen-min-y", 3), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1664), + PROPERTY_ENTRY_U32("touchscreen-size-y", 896), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-cube-iwork8-air.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), @@ -179,11 +186,14 @@ static const struct ts_dmi_data gp_electronic_t701_data = { }; static const struct property_entry itworks_tw891_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 1), + PROPERTY_ENTRY_U32("touchscreen-min-y", 5), PROPERTY_ENTRY_U32("touchscreen-size-x", 1600), - PROPERTY_ENTRY_U32("touchscreen-size-y", 890), + PROPERTY_ENTRY_U32("touchscreen-size-y", 896), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", 
"gsl3670-itworks-tw891.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -207,8 +217,10 @@ static const struct ts_dmi_data jumper_ezpad_6_pro_data = { }; static const struct property_entry jumper_ezpad_mini3_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 23), + PROPERTY_ENTRY_U32("touchscreen-min-y", 16), PROPERTY_ENTRY_U32("touchscreen-size-x", 1700), - PROPERTY_ENTRY_U32("touchscreen-size-y", 1150), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1138), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-jumper-ezpad-mini3.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), @@ -237,6 +249,24 @@ static const struct ts_dmi_data onda_obook_20_plus_data = { .properties = onda_obook_20_plus_props, }; +static const struct property_entry onda_v80_plus_v3_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 22), + PROPERTY_ENTRY_U32("touchscreen-min-y", 15), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1698), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), + PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), + PROPERTY_ENTRY_STRING("firmware-name", + "gsl3676-onda-v80-plus-v3.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + PROPERTY_ENTRY_BOOL("silead,home-button"), + { } +}; + +static const struct ts_dmi_data onda_v80_plus_v3_data = { + .acpi_name = "MSSL1680:00", + .properties = onda_v80_plus_v3_props, +}; + static const struct property_entry onda_v820w_32g_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1665), PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), @@ -322,11 +352,14 @@ static const struct ts_dmi_data pov_mobii_wintab_p800w_v20_data = { }; static const struct property_entry pov_mobii_wintab_p800w_v21_props[] = { - PROPERTY_ENTRY_U32("touchscreen-size-x", 1800), - PROPERTY_ENTRY_U32("touchscreen-size-y", 1150), + PROPERTY_ENTRY_U32("touchscreen-min-x", 1), + PROPERTY_ENTRY_U32("touchscreen-min-y", 8), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1794), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1148), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p800w.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -366,6 +399,22 @@ static const struct ts_dmi_data teclast_x98plus2_data = { .properties = teclast_x98plus2_props, }; +static const struct property_entry trekstor_primebook_c11_props[] = { + PROPERTY_ENTRY_U32("touchscreen-size-x", 1970), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1530), + PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), + PROPERTY_ENTRY_STRING("firmware-name", + "gsl1680-trekstor-primebook-c11.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + PROPERTY_ENTRY_BOOL("silead,home-button"), + { } +}; + +static const struct ts_dmi_data trekstor_primebook_c11_data = { + .acpi_name = "MSSL1680:00", + .properties = trekstor_primebook_c11_props, +}; + static const struct property_entry trekstor_primebook_c13_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 2624), PROPERTY_ENTRY_U32("touchscreen-size-y", 1920), @@ -381,6 +430,22 @@ static const struct ts_dmi_data trekstor_primebook_c13_data = { .properties = trekstor_primebook_c13_props, }; +static const struct property_entry trekstor_primetab_t13b_props[] = { + PROPERTY_ENTRY_U32("touchscreen-size-x", 2500), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1900), + PROPERTY_ENTRY_STRING("firmware-name", + "gsl1680-trekstor-primetab-t13b.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + PROPERTY_ENTRY_BOOL("silead,home-button"), + 
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), + { } +}; + +static const struct ts_dmi_data trekstor_primetab_t13b_data = { + .acpi_name = "MSSL1680:00", + .properties = trekstor_primetab_t13b_props, +}; + static const struct property_entry trekstor_surftab_twin_10_1_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1900), PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), @@ -397,6 +462,8 @@ static const struct ts_dmi_data trekstor_surftab_twin_10_1_data = { }; static const struct property_entry trekstor_surftab_wintron70_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 12), + PROPERTY_ENTRY_U32("touchscreen-min-y", 8), PROPERTY_ENTRY_U32("touchscreen-size-x", 884), PROPERTY_ENTRY_U32("touchscreen-size-y", 632), PROPERTY_ENTRY_STRING("firmware-name", @@ -556,6 +623,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = { }, }, { + /* ONDA V80 plus v3 (P80PSBG9V3A01501) */ + .driver_data = (void *)&onda_v80_plus_v3_data, + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ONDA"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "V80 PLUS") + }, + }, + { /* ONDA V820w DualOS */ .driver_data = (void *)&onda_v820w_32g_data, .matches = { @@ -641,6 +716,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = { }, }, { + /* Trekstor Primebook C11 */ + .driver_data = (void *)&trekstor_primebook_c11_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TREKSTOR"), + DMI_MATCH(DMI_PRODUCT_NAME, "Primebook C11"), + }, + }, + { /* Trekstor Primebook C13 */ .driver_data = (void *)&trekstor_primebook_c13_data, .matches = { @@ -649,6 +732,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = { }, }, { + /* Trekstor Primetab T13B */ + .driver_data = (void *)&trekstor_primetab_t13b_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TREKSTOR"), + DMI_MATCH(DMI_PRODUCT_NAME, "Primetab T13B"), + }, + }, + { /* TrekStor SurfTab twin 10.1 ST10432-8 */ .driver_data = (void *)&trekstor_surftab_twin_10_1_data, .matches = { diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 04791ea5d97b..bea35be68706 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -987,19 +987,19 @@ static struct bus_type wmi_bus_type = { .remove = wmi_dev_remove, }; -static struct device_type wmi_type_event = { +static const struct device_type wmi_type_event = { .name = "event", .groups = wmi_event_groups, .release = wmi_dev_release, }; -static struct device_type wmi_type_method = { +static const struct device_type wmi_type_method = { .name = "method", .groups = wmi_method_groups, .release = wmi_dev_release, }; -static struct device_type wmi_type_data = { +static const struct device_type wmi_type_data = { .name = "data", .groups = wmi_data_groups, .release = wmi_dev_release, diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 504d252716f2..27e5dd47a01f 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -447,10 +447,9 @@ config PWM_TEGRA config PWM_TIECAP tristate "ECAP PWM support" - depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE + depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE || ARCH_K3 help - PWM driver support for the ECAP APWM controller found on AM33XX - TI SOC + PWM driver support for the ECAP APWM controller found on TI SOCs To compile this driver as a module, choose M here: the module will be called pwm-tiecap. 
diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c index 5561b9e190f8..757230e1f575 100644 --- a/drivers/pwm/pwm-lpss-platform.c +++ b/drivers/pwm/pwm-lpss-platform.c @@ -30,6 +30,7 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bsw_info = { .clk_rate = 19200000, .npwm = 1, .base_unit_bits = 16, + .other_devices_aml_touches_pwm_regs = true, }; /* Broxton */ @@ -60,6 +61,7 @@ static int pwm_lpss_probe_platform(struct platform_device *pdev) platform_set_drvdata(pdev, lpwm); + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); @@ -74,13 +76,29 @@ static int pwm_lpss_remove_platform(struct platform_device *pdev) return pwm_lpss_remove(lpwm); } -static SIMPLE_DEV_PM_OPS(pwm_lpss_platform_pm_ops, - pwm_lpss_suspend, - pwm_lpss_resume); +static int pwm_lpss_prepare(struct device *dev) +{ + struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev); + + /* + * If other device's AML code touches the PWM regs on suspend/resume + * force runtime-resume the PWM controller to allow this. + */ + if (lpwm->info->other_devices_aml_touches_pwm_regs) + return 0; /* Force runtime-resume */ + + return 1; /* If runtime-suspended leave as is */ +} + +static const struct dev_pm_ops pwm_lpss_platform_pm_ops = { + .prepare = pwm_lpss_prepare, + SET_SYSTEM_SLEEP_PM_OPS(pwm_lpss_suspend, pwm_lpss_resume) +}; static const struct acpi_device_id pwm_lpss_acpi_match[] = { { "80860F09", (unsigned long)&pwm_lpss_byt_info }, { "80862288", (unsigned long)&pwm_lpss_bsw_info }, + { "80862289", (unsigned long)&pwm_lpss_bsw_info }, { "80865AC8", (unsigned long)&pwm_lpss_bxt_info }, { }, }; diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c index 4721a264bac2..2ac3a2aa9e53 100644 --- a/drivers/pwm/pwm-lpss.c +++ b/drivers/pwm/pwm-lpss.c @@ -32,15 +32,6 @@ /* Size of each PWM register space if multiple */ #define PWM_SIZE 0x400 -#define MAX_PWMS 4 - -struct pwm_lpss_chip { - struct pwm_chip chip; - void __iomem *regs; - const struct pwm_lpss_boardinfo *info; - u32 saved_ctrl[MAX_PWMS]; -}; - static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip) { return container_of(chip, struct pwm_lpss_chip, chip); @@ -97,7 +88,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm, unsigned long long on_time_div; unsigned long c = lpwm->info->clk_rate, base_unit_range; unsigned long long base_unit, freq = NSEC_PER_SEC; - u32 ctrl; + u32 orig_ctrl, ctrl; do_div(freq, period_ns); @@ -114,13 +105,17 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm, do_div(on_time_div, period_ns); on_time_div = 255ULL - on_time_div; - ctrl = pwm_lpss_read(pwm); + orig_ctrl = ctrl = pwm_lpss_read(pwm); ctrl &= ~PWM_ON_TIME_DIV_MASK; ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT); base_unit &= base_unit_range; ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT; ctrl |= on_time_div; - pwm_lpss_write(pwm, ctrl); + + if (orig_ctrl != ctrl) { + pwm_lpss_write(pwm, ctrl); + pwm_lpss_write(pwm, ctrl | PWM_SW_UPDATE); + } } static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond) @@ -144,7 +139,6 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, return ret; } pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); - pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE); pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false); ret = pwm_lpss_wait_for_update(pwm); if (ret) { @@ -157,7 +151,6 @@ static int pwm_lpss_apply(struct pwm_chip *chip, 
struct pwm_device *pwm, if (ret) return ret; pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); - pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE); return pwm_lpss_wait_for_update(pwm); } } else if (pwm_is_enabled(pwm)) { @@ -168,8 +161,42 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } +/* This function gets called once from pwmchip_add to get the initial state */ +static void pwm_lpss_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct pwm_lpss_chip *lpwm = to_lpwm(chip); + unsigned long base_unit_range; + unsigned long long base_unit, freq, on_time_div; + u32 ctrl; + + base_unit_range = BIT(lpwm->info->base_unit_bits); + + ctrl = pwm_lpss_read(pwm); + on_time_div = 255 - (ctrl & PWM_ON_TIME_DIV_MASK); + base_unit = (ctrl >> PWM_BASE_UNIT_SHIFT) & (base_unit_range - 1); + + freq = base_unit * lpwm->info->clk_rate; + do_div(freq, base_unit_range); + if (freq == 0) + state->period = NSEC_PER_SEC; + else + state->period = NSEC_PER_SEC / (unsigned long)freq; + + on_time_div *= state->period; + do_div(on_time_div, 255); + state->duty_cycle = on_time_div; + + state->polarity = PWM_POLARITY_NORMAL; + state->enabled = !!(ctrl & PWM_ENABLE); + + if (state->enabled) + pm_runtime_get(chip->dev); +} + static const struct pwm_ops pwm_lpss_ops = { .apply = pwm_lpss_apply, + .get_state = pwm_lpss_get_state, .owner = THIS_MODULE, }; @@ -214,6 +241,12 @@ EXPORT_SYMBOL_GPL(pwm_lpss_probe); int pwm_lpss_remove(struct pwm_lpss_chip *lpwm) { + int i; + + for (i = 0; i < lpwm->info->npwm; i++) { + if (pwm_is_enabled(&lpwm->chip.pwms[i])) + pm_runtime_put(lpwm->chip.dev); + } return pwmchip_remove(&lpwm->chip); } EXPORT_SYMBOL_GPL(pwm_lpss_remove); diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h index 7a4238ad1fcb..3236be835bd9 100644 --- a/drivers/pwm/pwm-lpss.h +++ b/drivers/pwm/pwm-lpss.h @@ -16,13 +16,25 @@ #include <linux/device.h> #include <linux/pwm.h> -struct pwm_lpss_chip; +#define MAX_PWMS 4 + +struct pwm_lpss_chip { + struct pwm_chip chip; + void __iomem *regs; + const struct pwm_lpss_boardinfo *info; + u32 saved_ctrl[MAX_PWMS]; +}; struct pwm_lpss_boardinfo { unsigned long clk_rate; unsigned int npwm; unsigned long base_unit_bits; bool bypass; + /* + * On some devices the _PS0/_PS3 AML code of the GPU (GFX0) device + * messes with the PWM0 controllers state, + */ + bool other_devices_aml_touches_pwm_regs; }; struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c index 748f614d5375..a41812fc6f95 100644 --- a/drivers/pwm/pwm-rcar.c +++ b/drivers/pwm/pwm-rcar.c @@ -1,11 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * R-Car PWM Timer driver * * Copyright (C) 2015 Renesas Electronics Corporation - * - * This is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. */ #include <linux/clk.h> diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c index 29267d12fb4c..4a855a21b782 100644 --- a/drivers/pwm/pwm-renesas-tpu.c +++ b/drivers/pwm/pwm-renesas-tpu.c @@ -1,16 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * R-Mobile TPU PWM driver * * Copyright (C) 2012 Renesas Solutions Corp. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/clk.h> diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c index f8ebbece57b7..48c4595a0ffc 100644 --- a/drivers/pwm/pwm-tegra.c +++ b/drivers/pwm/pwm-tegra.c @@ -300,7 +300,6 @@ static const struct of_device_id tegra_pwm_of_match[] = { { .compatible = "nvidia,tegra186-pwm", .data = &tegra186_pwm_soc }, { } }; - MODULE_DEVICE_TABLE(of, tegra_pwm_of_match); static const struct dev_pm_ops tegra_pwm_pm_ops = { diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c index 7c71cdb8a9d8..ceb233dd6048 100644 --- a/drivers/pwm/sysfs.c +++ b/drivers/pwm/sysfs.c @@ -249,6 +249,7 @@ static void pwm_export_release(struct device *child) static int pwm_export_child(struct device *parent, struct pwm_device *pwm) { struct pwm_export *export; + char *pwm_prop[2]; int ret; if (test_and_set_bit(PWMF_EXPORTED, &pwm->flags)) @@ -263,7 +264,6 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm) export->pwm = pwm; mutex_init(&export->lock); - export->child.class = parent->class; export->child.release = pwm_export_release; export->child.parent = parent; export->child.devt = MKDEV(0, 0); @@ -277,6 +277,10 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm) export = NULL; return ret; } + pwm_prop[0] = kasprintf(GFP_KERNEL, "EXPORT=pwm%u", pwm->hwpwm); + pwm_prop[1] = NULL; + kobject_uevent_env(&parent->kobj, KOBJ_CHANGE, pwm_prop); + kfree(pwm_prop[0]); return 0; } @@ -289,6 +293,7 @@ static int pwm_unexport_match(struct device *child, void *data) static int pwm_unexport_child(struct device *parent, struct pwm_device *pwm) { struct device *child; + char *pwm_prop[2]; if (!test_and_clear_bit(PWMF_EXPORTED, &pwm->flags)) return -ENODEV; @@ -297,6 +302,11 @@ static int pwm_unexport_child(struct device *parent, struct pwm_device *pwm) if (!child) return -ENODEV; + pwm_prop[0] = kasprintf(GFP_KERNEL, "UNEXPORT=pwm%u", pwm->hwpwm); + pwm_prop[1] = NULL; + kobject_uevent_env(&parent->kobj, KOBJ_CHANGE, pwm_prop); + kfree(pwm_prop[0]); + /* for device_find_child() */ put_device(child); device_unregister(child); diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 05293babb031..2d655a97b959 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -143,7 +143,9 @@ static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int secon static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal); static int twa_reset_device_extension(TW_Device_Extension *tw_dev); static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset); -static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg); +static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, + unsigned char *cdb, int use_sg, + TW_SG_Entry *sglistarg); static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id); static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code); @@ -278,7 +280,7 @@ out: static int 
twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset) { int request_id = 0; - char cdb[TW_MAX_CDB_LEN]; + unsigned char cdb[TW_MAX_CDB_LEN]; TW_SG_Entry sglist[1]; int finished = 0, count = 0; TW_Command_Full *full_command_packet; @@ -423,7 +425,7 @@ static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_H /* This function will read the aen queue from the isr */ static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id) { - char cdb[TW_MAX_CDB_LEN]; + unsigned char cdb[TW_MAX_CDB_LEN]; TW_SG_Entry sglist[1]; TW_Command_Full *full_command_packet; int retval = 1; @@ -1798,7 +1800,9 @@ out: static DEF_SCSI_QCMD(twa_scsi_queue) /* This function hands scsi cdb's to the firmware */ -static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg) +static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, + unsigned char *cdb, int use_sg, + TW_SG_Entry *sglistarg) { TW_Command_Full *full_command_packet; TW_Command_Apache *command_packet; diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index 266bdac75304..480cf82700e9 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -287,7 +287,9 @@ static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id) } /* End twl_post_command_packet() */ /* This function hands scsi cdb's to the firmware */ -static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg) +static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, + unsigned char *cdb, int use_sg, + TW_SG_Entry_ISO *sglistarg) { TW_Command_Full *full_command_packet; TW_Command_Apache *command_packet; @@ -372,7 +374,7 @@ out: /* This function will read the aen queue from the isr */ static int twl_aen_read_queue(TW_Device_Extension *tw_dev, int request_id) { - char cdb[TW_MAX_CDB_LEN]; + unsigned char cdb[TW_MAX_CDB_LEN]; TW_SG_Entry_ISO sglist[1]; TW_Command_Full *full_command_packet; int retval = 1; @@ -554,7 +556,7 @@ out: static int twl_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset) { int request_id = 0; - char cdb[TW_MAX_CDB_LEN]; + unsigned char cdb[TW_MAX_CDB_LEN]; TW_SG_Entry_ISO sglist[1]; int finished = 0, count = 0; TW_Command_Full *full_command_packet; diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 70988c381268..f07444d30b21 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -538,7 +538,7 @@ config SCSI_HPTIOP config SCSI_BUSLOGIC tristate "BusLogic SCSI support" - depends on (PCI || ISA || MCA) && SCSI && ISA_DMA_API && VIRT_TO_BUS + depends on (PCI || ISA) && SCSI && ISA_DMA_API && VIRT_TO_BUS ---help--- This is support for BusLogic MultiMaster and FlashPoint SCSI Host Adapters. Consult the SCSI-HOWTO, available from @@ -1175,12 +1175,12 @@ config SCSI_LPFC_DEBUG_FS config SCSI_SIM710 tristate "Simple 53c710 SCSI support (Compaq, NCR machines)" - depends on (EISA || MCA) && SCSI + depends on EISA && SCSI select SCSI_SPI_ATTRS ---help--- This driver is for NCR53c710 based SCSI host adapters. - It currently supports Compaq EISA cards and NCR MCA cards + It currently supports Compaq EISA cards. 
config SCSI_DC395x tristate "Tekram DC395(U/UW/F) and DC315(U) SCSI support" diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index 4d7b0e0adbf7..301b3cad15f8 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c @@ -269,7 +269,7 @@ static LIST_HEAD(aha152x_host_list); /* DEFINES */ /* For PCMCIA cards, always use AUTOCONF */ -#if defined(PCMCIA) || defined(MODULE) +#if defined(AHA152X_PCMCIA) || defined(MODULE) #if !defined(AUTOCONF) #define AUTOCONF #endif @@ -297,7 +297,7 @@ CMD_INC_RESID(struct scsi_cmnd *cmd, int inc) #define DELAY_DEFAULT 1000 -#if defined(PCMCIA) +#if defined(AHA152X_PCMCIA) #define IRQ_MIN 0 #define IRQ_MAX 16 #else @@ -328,7 +328,7 @@ MODULE_AUTHOR("Jürgen Fischer"); MODULE_DESCRIPTION(AHA152X_REVID); MODULE_LICENSE("GPL"); -#if !defined(PCMCIA) +#if !defined(AHA152X_PCMCIA) #if defined(MODULE) static int io[] = {0, 0}; module_param_hw_array(io, int, ioport, NULL, 0); @@ -391,7 +391,7 @@ static struct isapnp_device_id id_table[] = { MODULE_DEVICE_TABLE(isapnp, id_table); #endif /* ISAPNP */ -#endif /* !PCMCIA */ +#endif /* !AHA152X_PCMCIA */ static struct scsi_host_template aha152x_driver_template; @@ -863,7 +863,7 @@ void aha152x_release(struct Scsi_Host *shpnt) if (shpnt->irq) free_irq(shpnt->irq, shpnt); -#if !defined(PCMCIA) +#if !defined(AHA152X_PCMCIA) if (shpnt->io_port) release_region(shpnt->io_port, IO_RANGE); #endif @@ -2924,7 +2924,7 @@ static struct scsi_host_template aha152x_driver_template = { .slave_alloc = aha152x_adjust_queue, }; -#if !defined(PCMCIA) +#if !defined(AHA152X_PCMCIA) static int setup_count; static struct aha152x_setup setup[2]; @@ -3392,4 +3392,4 @@ static int __init aha152x_setup(char *str) __setup("aha152x=", aha152x_setup); #endif -#endif /* !PCMCIA */ +#endif /* !AHA152X_PCMCIA */ diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 3df1428df317..311d23c727ce 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -790,12 +790,11 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf slot->n_elem = n_elem; slot->slot_tag = tag; - slot->buf = dma_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma); + slot->buf = dma_pool_zalloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma); if (!slot->buf) { rc = -ENOMEM; goto err_out_tag; } - memset(slot->buf, 0, MVS_SLOT_BUF_SZ); tei.task = task; tei.hdr = &mvi->slot[tag]; @@ -1906,8 +1905,7 @@ static void mvs_work_queue(struct work_struct *work) if (phy->phy_event & PHY_PLUG_OUT) { u32 tmp; - struct sas_identify_frame *id; - id = (struct sas_identify_frame *)phy->frame_rcvd; + tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no); phy->phy_event &= ~PHY_PLUG_OUT; if (!(tmp & PHY_READY_MASK)) { diff --git a/drivers/scsi/pcmcia/aha152x_core.c b/drivers/scsi/pcmcia/aha152x_core.c index dba3716511c5..24b89228b241 100644 --- a/drivers/scsi/pcmcia/aha152x_core.c +++ b/drivers/scsi/pcmcia/aha152x_core.c @@ -1,3 +1,3 @@ -#define PCMCIA 1 +#define AHA152X_PCMCIA 1 #define AHA152X_STAT 1 #include "aha152x.c" diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index b28f159fdaee..0bb9ac6ece92 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -218,7 +218,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj, mutex_lock(&ha->optrom_mutex); if (qla2x00_chip_is_down(vha)) { - mutex_unlock(&vha->hw->optrom_mutex); + mutex_unlock(&ha->optrom_mutex); return -EAGAIN; } diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c 
index c72d8012fe2a..6fe20c27acc1 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -425,7 +425,7 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea) __qla24xx_handle_gpdb_event(vha, ea); } -int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport) +static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport) { struct qla_work_evt *e; @@ -680,7 +680,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, fcport); break; } - /* drop through */ + /* fall through */ default: if (fcport_is_smaller(fcport)) { /* local adapter is bigger */ @@ -1551,7 +1551,8 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, } -void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea) +static void qla_handle_els_plogi_done(scsi_qla_host_t *vha, + struct event_arg *ea) { ql_dbg(ql_dbg_disc, vha, 0x2118, "%s %d %8phC post PRLI\n", diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 86fb8b21aa71..032635321ad6 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -1195,8 +1195,8 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, * @sp: SRB command to process * @cmd_pkt: Command type 3 IOCB * @tot_dsds: Total number of segments to transfer - * @tot_prot_dsds: - * @fw_prot_opts: + * @tot_prot_dsds: Total number of segments with protection information + * @fw_prot_opts: Protection options to be passed to firmware */ inline int qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index d73b04e40590..30d3090842f8 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -25,7 +25,7 @@ static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, /** * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. - * @irq: + * @irq: interrupt number * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. @@ -144,7 +144,7 @@ qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg) /** * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. - * @irq: + * @irq: interrupt number * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. @@ -3109,7 +3109,7 @@ done: /** * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx. - * @irq: + * @irq: interrupt number * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. 
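
Editor's note: several of the qla2xxx hunks here and below do nothing more than fill in empty kernel-doc parameter lines ("@irq:", "@mb0:", "@ha:") so that the generated documentation has a description for every parameter. A minimal sketch of the comment shape these changes converge on (the function below is hypothetical, not part of the driver):

/**
 * example_intr_handler() - Process interrupts for a hypothetical HBA.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by the system whenever the host adapter generates an interrupt.
 *
 * Return: IRQ_HANDLED if the interrupt was serviced, IRQ_NONE otherwise.
 */
static irqreturn_t example_intr_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

Each "@parameter:" line carries a short description, and the optional "Return:" section documents the return value; leaving the descriptions empty is what the kernel-doc tooling complains about.
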
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 2f3e5075ae76..191b6b7c8747 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -3478,9 +3478,9 @@ qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) /** * qla2x00_set_serdes_params() - * @vha: HA context - * @sw_em_1g: - * @sw_em_2g: - * @sw_em_4g: + * @sw_em_1g: serial link options + * @sw_em_2g: serial link options + * @sw_em_4g: serial link options * * Returns */ diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 521a51370554..60f964c53c01 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -2212,7 +2212,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req, struct bsg_job *bsg_job; struct fc_bsg_reply *bsg_reply; struct srb_iocb *iocb_job; - int res; + int res = 0; struct qla_mt_iocb_rsp_fx00 fstatus; uint8_t *fw_sts_ptr; @@ -2624,7 +2624,7 @@ qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) * qlafx00_multistatus_entry() - Process Multi response queue entries. * @vha: SCSI driver HA context * @rsp: response queue - * @pkt: + * @pkt: received packet */ static void qlafx00_multistatus_entry(struct scsi_qla_host *vha, @@ -2681,12 +2681,10 @@ qlafx00_multistatus_entry(struct scsi_qla_host *vha, * @vha: SCSI driver HA context * @rsp: response queue * @pkt: Entry pointer - * @estatus: - * @etype: */ static void qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, - struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype) + struct sts_entry_fx00 *pkt) { srb_t *sp; struct qla_hw_data *ha = vha->hw; @@ -2695,9 +2693,6 @@ qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, struct req_que *req = NULL; int res = DID_ERROR << 16; - ql_dbg(ql_dbg_async, vha, 0x507f, - "type of error status in response: 0x%x\n", estatus); - req = ha->req_q_map[que]; sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); @@ -2745,9 +2740,11 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha, if (pkt->entry_status != 0 && pkt->entry_type != IOCTL_IOSB_TYPE_FX00) { + ql_dbg(ql_dbg_async, vha, 0x507f, + "type of error status in response: 0x%x\n", + pkt->entry_status); qlafx00_error_entry(vha, rsp, - (struct sts_entry_fx00 *)pkt, pkt->entry_status, - pkt->entry_type); + (struct sts_entry_fx00 *)pkt); continue; } @@ -2867,7 +2864,7 @@ qlafx00_async_event(scsi_qla_host_t *vha) /** * qlafx00x_mbx_completion() - Process mailbox command completions. * @vha: SCSI driver HA context - * @mb0: + * @mb0: value to be written into mailbox register 0 */ static void qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0) @@ -2893,7 +2890,7 @@ qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0) /** * qlafx00_intr_handler() - Process interrupts for the ISPFX00. - * @irq: + * @irq: interrupt number * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 121e18b3b9f8..f2f54806f4da 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -2010,7 +2010,7 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) /** * qla82xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. - * @irq: + * @irq: interrupt number * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. 
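
Editor's note: the "/* drop through */" to "/* fall through */" rewrites in qla_init.c above (and in qla_target.c below) are not purely cosmetic: with its default settings, GCC's -Wimplicit-fallthrough only suppresses the switch fall-through warning when the comment wording matches its expected patterns, and "fall through" does while "drop through" does not. A stand-alone illustration of the annotated pattern (STATE_PREPARE, STATE_RUN and the helpers are placeholders):

	switch (state) {
	case STATE_PREPARE:
		prepare_request();
		/* fall through */
	case STATE_RUN:
		run_request();
		break;
	default:
		break;
	}

The fall-through comment sits immediately before the case label being fallen into, which is where the compiler heuristics look for it.
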
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c index 3a2b0282df14..fe856b602e03 100644 --- a/drivers/scsi/qla2xxx/qla_nx2.c +++ b/drivers/scsi/qla2xxx/qla_nx2.c @@ -3878,7 +3878,7 @@ out: #define PF_BITS_MASK (0xF << 16) /** * qla8044_intr_handler() - Process interrupts for the ISP8044 - * @irq: + * @irq: interrupt number * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 8794e54f43a9..518f15141170 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1749,7 +1749,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) static void __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) { - int cnt, status; + int cnt; unsigned long flags; srb_t *sp; scsi_qla_host_t *vha = qp->vha; @@ -1799,8 +1799,8 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) if (!sp_get(sp)) { spin_unlock_irqrestore (qp->qp_lock_ptr, flags); - status = qla2xxx_eh_abort( - GET_CMD_SP(sp)); + qla2xxx_eh_abort( + GET_CMD_SP(sp)); spin_lock_irqsave (qp->qp_lock_ptr, flags); } diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 4499c787165f..2a3055c799fb 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -2229,7 +2229,7 @@ qla2x00_erase_flash_sector(struct qla_hw_data *ha, uint32_t addr, /** * qla2x00_get_flash_manufacturer() - Read manufacturer ID from flash chip. - * @ha: + * @ha: host adapter * @man_id: Flash manufacturer ID * @flash_id: Flash ID */ diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 39828207bc1d..c4504740f0e2 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -4540,7 +4540,7 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, case QLA_TGT_CLEAR_TS: case QLA_TGT_ABORT_TS: abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); - /* drop through */ + /* fall through */ case QLA_TGT_CLEAR_ACA: h = qlt_find_qphint(vha, mcmd->unpacked_lun); mcmd->qpair = h->qpair; @@ -6598,9 +6598,9 @@ static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, * qla_tgt_lport_register - register lport with external module * * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data - * @phys_wwpn: - * @npiv_wwpn: - * @npiv_wwnn: + * @phys_wwpn: physical port WWPN + * @npiv_wwpn: NPIV WWPN + * @npiv_wwnn: NPIV WWNN * @callback: lport initialization callback for tcm_qla2xxx code */ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn, diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h index 7c128132799e..4c28fa938ac7 100644 --- a/drivers/soc/ti/knav_qmss.h +++ b/drivers/soc/ti/knav_qmss.h @@ -329,8 +329,8 @@ struct knav_range_ops { }; struct knav_irq_info { - int irq; - u32 cpu_map; + int irq; + struct cpumask *cpu_mask; }; struct knav_range_info { diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c index 316e82e46f6c..2f7fb2dcc1d6 100644 --- a/drivers/soc/ti/knav_qmss_acc.c +++ b/drivers/soc/ti/knav_qmss_acc.c @@ -205,18 +205,18 @@ static int knav_range_setup_acc_irq(struct knav_range_info *range, { struct knav_device *kdev = range->kdev; struct knav_acc_channel *acc; - unsigned long cpu_map; + struct cpumask *cpu_mask; int ret = 0, irq; u32 old, new; if (range->flags & RANGE_MULTI_QUEUE) { acc = range->acc; irq = range->irqs[0].irq; - cpu_map = range->irqs[0].cpu_map; + cpu_mask = range->irqs[0].cpu_mask; } else { acc = 
range->acc + queue; irq = range->irqs[queue].irq; - cpu_map = range->irqs[queue].cpu_map; + cpu_mask = range->irqs[queue].cpu_mask; } old = acc->open_mask; @@ -239,8 +239,8 @@ static int knav_range_setup_acc_irq(struct knav_range_info *range, acc->name, acc->name); ret = request_irq(irq, knav_acc_int_handler, 0, acc->name, range); - if (!ret && cpu_map) { - ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map)); + if (!ret && cpu_mask) { + ret = irq_set_affinity_hint(irq, cpu_mask); if (ret) { dev_warn(range->kdev->dev, "Failed to set IRQ affinity\n"); diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index b5d5673c255c..8b418379272d 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c @@ -118,19 +118,17 @@ static int knav_queue_setup_irq(struct knav_range_info *range, struct knav_queue_inst *inst) { unsigned queue = inst->id - range->queue_base; - unsigned long cpu_map; int ret = 0, irq; if (range->flags & RANGE_HAS_IRQ) { irq = range->irqs[queue].irq; - cpu_map = range->irqs[queue].cpu_map; ret = request_irq(irq, knav_queue_int_handler, 0, inst->irq_name, inst); if (ret) return ret; disable_irq(irq); - if (cpu_map) { - ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map)); + if (range->irqs[queue].cpu_mask) { + ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask); if (ret) { dev_warn(range->kdev->dev, "Failed to set IRQ affinity\n"); @@ -1262,9 +1260,19 @@ static int knav_setup_queue_range(struct knav_device *kdev, range->num_irqs++; - if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) - range->irqs[i].cpu_map = - (oirq.args[2] & 0x0000ff00) >> 8; + if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) { + unsigned long mask; + int bit; + + range->irqs[i].cpu_mask = devm_kzalloc(dev, + cpumask_size(), GFP_KERNEL); + if (!range->irqs[i].cpu_mask) + return -ENOMEM; + + mask = (oirq.args[2] & 0x0000ff00) >> 8; + for_each_set_bit(bit, &mask, BITS_PER_LONG) + cpumask_set_cpu(bit, range->irqs[i].cpu_mask); + } } range->num_irqs = min(range->num_irqs, range->num_queues); diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 1227872227dc..36b742932c72 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -1245,8 +1245,7 @@ static int iscsit_do_rx_data( return -1; memset(&msg, 0, sizeof(struct msghdr)); - iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, - count->iov, count->iov_count, data); + iov_iter_kvec(&msg.msg_iter, READ, count->iov, count->iov_count, data); while (msg_data_left(&msg)) { rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL); @@ -1302,8 +1301,7 @@ int tx_data( memset(&msg, 0, sizeof(struct msghdr)); - iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, - iov, iov_count, data); + iov_iter_kvec(&msg.msg_iter, WRITE, iov, iov_count, data); while (msg_data_left(&msg)) { int tx_loop = sock_sendmsg(conn->sock, &msg); diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index e46ca968009c..4f134b0c3e29 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -268,7 +268,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) } transport_kunmap_data_sg(cmd); - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, rd_len + 4); return 0; } diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 16751ae55d7b..49b110d1b972 100644 --- a/drivers/target/target_core_file.c +++ 
b/drivers/target/target_core_file.c @@ -303,7 +303,7 @@ fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, len += sg->length; } - iov_iter_bvec(&iter, ITER_BVEC | is_write, bvec, sgl_nents, len); + iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len); aio_cmd->cmd = cmd; aio_cmd->len = len; @@ -353,7 +353,7 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd, len += sg->length; } - iov_iter_bvec(&iter, ITER_BVEC, bvec, sgl_nents, len); + iov_iter_bvec(&iter, READ, bvec, sgl_nents, len); if (is_write) ret = vfs_iter_write(fd, &iter, &pos, 0); else @@ -490,7 +490,7 @@ fd_execute_write_same(struct se_cmd *cmd) len += se_dev->dev_attrib.block_size; } - iov_iter_bvec(&iter, ITER_BVEC, bvec, nolb, len); + iov_iter_bvec(&iter, READ, bvec, nolb, len); ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0); kfree(bvec); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 4cf33e2cc705..e31e4fc31aa1 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -205,19 +205,19 @@ void transport_subsystem_check_init(void) if (sub_api_initialized) return; - ret = request_module("target_core_iblock"); + ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock"); if (ret != 0) pr_err("Unable to load target_core_iblock\n"); - ret = request_module("target_core_file"); + ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file"); if (ret != 0) pr_err("Unable to load target_core_file\n"); - ret = request_module("target_core_pscsi"); + ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi"); if (ret != 0) pr_err("Unable to load target_core_pscsi\n"); - ret = request_module("target_core_user"); + ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user"); if (ret != 0) pr_err("Unable to load target_core_user\n"); diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 6ab982309e6a..d6ebc1cf6aa9 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -290,10 +290,12 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz, int delay) { if (delay > 1000) - mod_delayed_work(system_freezable_wq, &tz->poll_queue, + mod_delayed_work(system_freezable_power_efficient_wq, + &tz->poll_queue, round_jiffies(msecs_to_jiffies(delay))); else if (delay) - mod_delayed_work(system_freezable_wq, &tz->poll_queue, + mod_delayed_work(system_freezable_power_efficient_wq, + &tz->poll_queue, msecs_to_jiffies(delay)); else cancel_delayed_work(&tz->poll_queue); @@ -1102,8 +1104,9 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev) mutex_unlock(&thermal_list_lock); ida_simple_remove(&thermal_cdev_ida, cdev->id); - device_unregister(&cdev->device); + device_del(&cdev->device); thermal_cooling_device_destroy_sysfs(cdev); + put_device(&cdev->device); } EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister); diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index 9756752c0681..45da3e01c7b0 100644 --- a/drivers/usb/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c @@ -309,7 +309,7 @@ int usbip_recv(struct socket *sock, void *buf, int size) if (!sock || !buf || !size) return -EINVAL; - iov_iter_kvec(&msg.msg_iter, READ|ITER_KVEC, &iov, 1, size); + iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, size); usbip_dbg_xmit("enter\n"); diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig index c84333eb5eb5..9de5ed38da83 100644 --- 
a/drivers/vfio/Kconfig +++ b/drivers/vfio/Kconfig @@ -21,7 +21,7 @@ config VFIO_VIRQFD menuconfig VFIO tristate "VFIO Non-Privileged userspace driver framework" depends on IOMMU_API - select VFIO_IOMMU_TYPE1 if (X86 || S390 || ARM_SMMU || ARM_SMMU_V3) + select VFIO_IOMMU_TYPE1 if (X86 || S390 || ARM || ARM64) select ANON_INODES help VFIO provides a framework for secure userspace device drivers. diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index cddb453a1ba5..50cdedfca9fe 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -434,10 +434,14 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type) { if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) { u8 pin; + + if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || + vdev->nointx || vdev->pdev->is_virtfn) + return 0; + pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin); - if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && !vdev->nointx && pin) - return 1; + return pin ? 1 : 0; } else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) { u8 pos; u16 flags; diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 115a36f6f403..423ea1f98441 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -1180,8 +1180,10 @@ static int vfio_msi_cap_len(struct vfio_pci_device *vdev, u8 pos) return -ENOMEM; ret = init_pci_cap_msi_perm(vdev->msi_perm, len, flags); - if (ret) + if (ret) { + kfree(vdev->msi_perm); return ret; + } return len; } @@ -1610,6 +1612,15 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev) } /* + * Nag about hardware bugs, hopefully to have vendors fix them, but at least + * to collect a list of dependencies for the VF INTx pin quirk below. + */ +static const struct pci_device_id known_bogus_vf_intx_pin[] = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x270c) }, + {} +}; + +/* * For each device we allocate a pci_config_map that indicates the * capability occupying each dword and thus the struct perm_bits we * use for read and write. We also allocate a virtualized config @@ -1674,6 +1685,24 @@ int vfio_config_init(struct vfio_pci_device *vdev) if (pdev->is_virtfn) { *(__le16 *)&vconfig[PCI_VENDOR_ID] = cpu_to_le16(pdev->vendor); *(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device); + + /* + * Per SR-IOV spec rev 1.1, 3.4.1.18 the interrupt pin register + * does not apply to VFs and VFs must implement this register + * as read-only with value zero. Userspace is not readily able + * to identify whether a device is a VF and thus that the pin + * definition on the device is bogus should it violate this + * requirement. We already virtualize the pin register for + * other purposes, so we simply need to replace the bogus value + * and consider VFs when we determine INTx IRQ count. + */ + if (vconfig[PCI_INTERRUPT_PIN] && + !pci_match_id(known_bogus_vf_intx_pin, pdev)) + pci_warn(pdev, + "Hardware bug: VF reports bogus INTx pin %d\n", + vconfig[PCI_INTERRUPT_PIN]); + + vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */ } if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx) diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index c24bb690680b..50dffe83714c 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -203,6 +203,19 @@ struct vhost_scsi { int vs_events_nr; /* num of pending events, protected by vq->mutex */ }; +/* + * Context for processing request and control queue operations. 
+ */ +struct vhost_scsi_ctx { + int head; + unsigned int out, in; + size_t req_size, rsp_size; + size_t out_size, in_size; + u8 *target, *lunp; + void *req; + struct iov_iter out_iter; +}; + static struct workqueue_struct *vhost_scsi_workqueue; /* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */ @@ -800,24 +813,120 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs, pr_err("Faulted on virtio_scsi_cmd_resp\n"); } +static int +vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq, + struct vhost_scsi_ctx *vc) +{ + int ret = -ENXIO; + + vc->head = vhost_get_vq_desc(vq, vq->iov, + ARRAY_SIZE(vq->iov), &vc->out, &vc->in, + NULL, NULL); + + pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n", + vc->head, vc->out, vc->in); + + /* On error, stop handling until the next kick. */ + if (unlikely(vc->head < 0)) + goto done; + + /* Nothing new? Wait for eventfd to tell us they refilled. */ + if (vc->head == vq->num) { + if (unlikely(vhost_enable_notify(&vs->dev, vq))) { + vhost_disable_notify(&vs->dev, vq); + ret = -EAGAIN; + } + goto done; + } + + /* + * Get the size of request and response buffers. + * FIXME: Not correct for BIDI operation + */ + vc->out_size = iov_length(vq->iov, vc->out); + vc->in_size = iov_length(&vq->iov[vc->out], vc->in); + + /* + * Copy over the virtio-scsi request header, which for a + * ANY_LAYOUT enabled guest may span multiple iovecs, or a + * single iovec may contain both the header + outgoing + * WRITE payloads. + * + * copy_from_iter() will advance out_iter, so that it will + * point at the start of the outgoing WRITE payload, if + * DMA_TO_DEVICE is set. + */ + iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size); + ret = 0; + +done: + return ret; +} + +static int +vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc) +{ + if (unlikely(vc->in_size < vc->rsp_size)) { + vq_err(vq, + "Response buf too small, need min %zu bytes got %zu", + vc->rsp_size, vc->in_size); + return -EINVAL; + } else if (unlikely(vc->out_size < vc->req_size)) { + vq_err(vq, + "Request buf too small, need min %zu bytes got %zu", + vc->req_size, vc->out_size); + return -EIO; + } + + return 0; +} + +static int +vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc, + struct vhost_scsi_tpg **tpgp) +{ + int ret = -EIO; + + if (unlikely(!copy_from_iter_full(vc->req, vc->req_size, + &vc->out_iter))) { + vq_err(vq, "Faulted on copy_from_iter\n"); + } else if (unlikely(*vc->lunp != 1)) { + /* virtio-scsi spec requires byte 0 of the lun to be 1 */ + vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp); + } else { + struct vhost_scsi_tpg **vs_tpg, *tpg; + + vs_tpg = vq->private_data; /* validated at handler entry */ + + tpg = READ_ONCE(vs_tpg[*vc->target]); + if (unlikely(!tpg)) { + vq_err(vq, "Target 0x%x does not exist\n", *vc->target); + } else { + if (tpgp) + *tpgp = tpg; + ret = 0; + } + } + + return ret; +} + static void vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) { struct vhost_scsi_tpg **vs_tpg, *tpg; struct virtio_scsi_cmd_req v_req; struct virtio_scsi_cmd_req_pi v_req_pi; + struct vhost_scsi_ctx vc; struct vhost_scsi_cmd *cmd; - struct iov_iter out_iter, in_iter, prot_iter, data_iter; + struct iov_iter in_iter, prot_iter, data_iter; u64 tag; u32 exp_data_len, data_direction; - unsigned int out = 0, in = 0; - int head, ret, prot_bytes; - size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp); - size_t out_size, in_size; + int ret, prot_bytes; u16 lun; - u8 
*target, *lunp, task_attr; + u8 task_attr; bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI); - void *req, *cdb; + void *cdb; mutex_lock(&vq->mutex); /* @@ -828,85 +937,47 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) if (!vs_tpg) goto out; + memset(&vc, 0, sizeof(vc)); + vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp); + vhost_disable_notify(&vs->dev, vq); for (;;) { - head = vhost_get_vq_desc(vq, vq->iov, - ARRAY_SIZE(vq->iov), &out, &in, - NULL, NULL); - pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n", - head, out, in); - /* On error, stop handling until the next kick. */ - if (unlikely(head < 0)) - break; - /* Nothing new? Wait for eventfd to tell us they refilled. */ - if (head == vq->num) { - if (unlikely(vhost_enable_notify(&vs->dev, vq))) { - vhost_disable_notify(&vs->dev, vq); - continue; - } - break; - } - /* - * Check for a sane response buffer so we can report early - * errors back to the guest. - */ - if (unlikely(vq->iov[out].iov_len < rsp_size)) { - vq_err(vq, "Expecting at least virtio_scsi_cmd_resp" - " size, got %zu bytes\n", vq->iov[out].iov_len); - break; - } + ret = vhost_scsi_get_desc(vs, vq, &vc); + if (ret) + goto err; + /* * Setup pointers and values based upon different virtio-scsi * request header if T10_PI is enabled in KVM guest. */ if (t10_pi) { - req = &v_req_pi; - req_size = sizeof(v_req_pi); - lunp = &v_req_pi.lun[0]; - target = &v_req_pi.lun[1]; + vc.req = &v_req_pi; + vc.req_size = sizeof(v_req_pi); + vc.lunp = &v_req_pi.lun[0]; + vc.target = &v_req_pi.lun[1]; } else { - req = &v_req; - req_size = sizeof(v_req); - lunp = &v_req.lun[0]; - target = &v_req.lun[1]; + vc.req = &v_req; + vc.req_size = sizeof(v_req); + vc.lunp = &v_req.lun[0]; + vc.target = &v_req.lun[1]; } - /* - * FIXME: Not correct for BIDI operation - */ - out_size = iov_length(vq->iov, out); - in_size = iov_length(&vq->iov[out], in); /* - * Copy over the virtio-scsi request header, which for a - * ANY_LAYOUT enabled guest may span multiple iovecs, or a - * single iovec may contain both the header + outgoing - * WRITE payloads. - * - * copy_from_iter() will advance out_iter, so that it will - * point at the start of the outgoing WRITE payload, if - * DMA_TO_DEVICE is set. + * Validate the size of request and response buffers. + * Check for a sane response buffer so we can report + * early errors back to the guest. */ - iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size); + ret = vhost_scsi_chk_size(vq, &vc); + if (ret) + goto err; - if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) { - vq_err(vq, "Faulted on copy_from_iter\n"); - vhost_scsi_send_bad_target(vs, vq, head, out); - continue; - } - /* virtio-scsi spec requires byte 0 of the lun to be 1 */ - if (unlikely(*lunp != 1)) { - vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp); - vhost_scsi_send_bad_target(vs, vq, head, out); - continue; - } + ret = vhost_scsi_get_req(vq, &vc, &tpg); + if (ret) + goto err; + + ret = -EIO; /* bad target on any error from here on */ - tpg = READ_ONCE(vs_tpg[*target]); - if (unlikely(!tpg)) { - /* Target does not exist, fail the request */ - vhost_scsi_send_bad_target(vs, vq, head, out); - continue; - } /* * Determine data_direction by calculating the total outgoing * iovec sizes + incoming iovec sizes vs. 
virtio-scsi request + @@ -924,17 +995,17 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) */ prot_bytes = 0; - if (out_size > req_size) { + if (vc.out_size > vc.req_size) { data_direction = DMA_TO_DEVICE; - exp_data_len = out_size - req_size; - data_iter = out_iter; - } else if (in_size > rsp_size) { + exp_data_len = vc.out_size - vc.req_size; + data_iter = vc.out_iter; + } else if (vc.in_size > vc.rsp_size) { data_direction = DMA_FROM_DEVICE; - exp_data_len = in_size - rsp_size; + exp_data_len = vc.in_size - vc.rsp_size; - iov_iter_init(&in_iter, READ, &vq->iov[out], in, - rsp_size + exp_data_len); - iov_iter_advance(&in_iter, rsp_size); + iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in, + vc.rsp_size + exp_data_len); + iov_iter_advance(&in_iter, vc.rsp_size); data_iter = in_iter; } else { data_direction = DMA_NONE; @@ -950,21 +1021,20 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) if (data_direction != DMA_TO_DEVICE) { vq_err(vq, "Received non zero pi_bytesout," " but wrong data_direction\n"); - vhost_scsi_send_bad_target(vs, vq, head, out); - continue; + goto err; } prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout); } else if (v_req_pi.pi_bytesin) { if (data_direction != DMA_FROM_DEVICE) { vq_err(vq, "Received non zero pi_bytesin," " but wrong data_direction\n"); - vhost_scsi_send_bad_target(vs, vq, head, out); - continue; + goto err; } prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin); } /* - * Set prot_iter to data_iter, and advance past any + * Set prot_iter to data_iter and truncate it to + * prot_bytes, and advance data_iter past any * preceeding prot_bytes that may be present. * * Also fix up the exp_data_len to reflect only the @@ -973,6 +1043,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) if (prot_bytes) { exp_data_len -= prot_bytes; prot_iter = data_iter; + iov_iter_truncate(&prot_iter, prot_bytes); iov_iter_advance(&data_iter, prot_bytes); } tag = vhost64_to_cpu(vq, v_req_pi.tag); @@ -996,8 +1067,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) vq_err(vq, "Received SCSI CDB with command_size: %d that" " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE); - vhost_scsi_send_bad_target(vs, vq, head, out); - continue; + goto err; } cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr, exp_data_len + prot_bytes, @@ -1005,13 +1075,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) if (IS_ERR(cmd)) { vq_err(vq, "vhost_scsi_get_tag failed %ld\n", PTR_ERR(cmd)); - vhost_scsi_send_bad_target(vs, vq, head, out); - continue; + goto err; } cmd->tvc_vhost = vs; cmd->tvc_vq = vq; - cmd->tvc_resp_iov = vq->iov[out]; - cmd->tvc_in_iovs = in; + cmd->tvc_resp_iov = vq->iov[vc.out]; + cmd->tvc_in_iovs = vc.in; pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", cmd->tvc_cdb[0], cmd->tvc_lun); @@ -1019,14 +1088,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) " %d\n", cmd, exp_data_len, prot_bytes, data_direction); if (data_direction != DMA_NONE) { - ret = vhost_scsi_mapal(cmd, - prot_bytes, &prot_iter, - exp_data_len, &data_iter); - if (unlikely(ret)) { + if (unlikely(vhost_scsi_mapal(cmd, prot_bytes, + &prot_iter, exp_data_len, + &data_iter))) { vq_err(vq, "Failed to map iov to sgl\n"); vhost_scsi_release_cmd(&cmd->tvc_se_cmd); - vhost_scsi_send_bad_target(vs, vq, head, out); - continue; + goto err; } } /* @@ -1034,7 +1101,7 @@ vhost_scsi_handle_vq(struct vhost_scsi 
*vs, struct vhost_virtqueue *vq) * complete the virtio-scsi request in TCM callback context via * vhost_scsi_queue_data_in() and vhost_scsi_queue_status() */ - cmd->tvc_vq_desc = head; + cmd->tvc_vq_desc = vc.head; /* * Dispatch cmd descriptor for cmwq execution in process * context provided by vhost_scsi_workqueue. This also ensures @@ -1043,6 +1110,166 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) */ INIT_WORK(&cmd->work, vhost_scsi_submission_work); queue_work(vhost_scsi_workqueue, &cmd->work); + ret = 0; +err: + /* + * ENXIO: No more requests, or read error, wait for next kick + * EINVAL: Invalid response buffer, drop the request + * EIO: Respond with bad target + * EAGAIN: Pending request + */ + if (ret == -ENXIO) + break; + else if (ret == -EIO) + vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out); + } +out: + mutex_unlock(&vq->mutex); +} + +static void +vhost_scsi_send_tmf_reject(struct vhost_scsi *vs, + struct vhost_virtqueue *vq, + struct vhost_scsi_ctx *vc) +{ + struct virtio_scsi_ctrl_tmf_resp __user *resp; + struct virtio_scsi_ctrl_tmf_resp rsp; + int ret; + + pr_debug("%s\n", __func__); + memset(&rsp, 0, sizeof(rsp)); + rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED; + resp = vq->iov[vc->out].iov_base; + ret = __copy_to_user(resp, &rsp, sizeof(rsp)); + if (!ret) + vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); + else + pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n"); +} + +static void +vhost_scsi_send_an_resp(struct vhost_scsi *vs, + struct vhost_virtqueue *vq, + struct vhost_scsi_ctx *vc) +{ + struct virtio_scsi_ctrl_an_resp __user *resp; + struct virtio_scsi_ctrl_an_resp rsp; + int ret; + + pr_debug("%s\n", __func__); + memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */ + rsp.response = VIRTIO_SCSI_S_OK; + resp = vq->iov[vc->out].iov_base; + ret = __copy_to_user(resp, &rsp, sizeof(rsp)); + if (!ret) + vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); + else + pr_err("Faulted on virtio_scsi_ctrl_an_resp\n"); +} + +static void +vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) +{ + union { + __virtio32 type; + struct virtio_scsi_ctrl_an_req an; + struct virtio_scsi_ctrl_tmf_req tmf; + } v_req; + struct vhost_scsi_ctx vc; + size_t typ_size; + int ret; + + mutex_lock(&vq->mutex); + /* + * We can handle the vq only after the endpoint is setup by calling the + * VHOST_SCSI_SET_ENDPOINT ioctl. + */ + if (!vq->private_data) + goto out; + + memset(&vc, 0, sizeof(vc)); + + vhost_disable_notify(&vs->dev, vq); + + for (;;) { + ret = vhost_scsi_get_desc(vs, vq, &vc); + if (ret) + goto err; + + /* + * Get the request type first in order to setup + * other parameters dependent on the type. + */ + vc.req = &v_req.type; + typ_size = sizeof(v_req.type); + + if (unlikely(!copy_from_iter_full(vc.req, typ_size, + &vc.out_iter))) { + vq_err(vq, "Faulted on copy_from_iter tmf type\n"); + /* + * The size of the response buffer depends on the + * request type and must be validated against it. + * Since the request type is not known, don't send + * a response. 
+ */ + continue; + } + + switch (v_req.type) { + case VIRTIO_SCSI_T_TMF: + vc.req = &v_req.tmf; + vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req); + vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp); + vc.lunp = &v_req.tmf.lun[0]; + vc.target = &v_req.tmf.lun[1]; + break; + case VIRTIO_SCSI_T_AN_QUERY: + case VIRTIO_SCSI_T_AN_SUBSCRIBE: + vc.req = &v_req.an; + vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req); + vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp); + vc.lunp = &v_req.an.lun[0]; + vc.target = NULL; + break; + default: + vq_err(vq, "Unknown control request %d", v_req.type); + continue; + } + + /* + * Validate the size of request and response buffers. + * Check for a sane response buffer so we can report + * early errors back to the guest. + */ + ret = vhost_scsi_chk_size(vq, &vc); + if (ret) + goto err; + + /* + * Get the rest of the request now that its size is known. + */ + vc.req += typ_size; + vc.req_size -= typ_size; + + ret = vhost_scsi_get_req(vq, &vc, NULL); + if (ret) + goto err; + + if (v_req.type == VIRTIO_SCSI_T_TMF) + vhost_scsi_send_tmf_reject(vs, vq, &vc); + else + vhost_scsi_send_an_resp(vs, vq, &vc); +err: + /* + * ENXIO: No more requests, or read error, wait for next kick + * EINVAL: Invalid response buffer, drop the request + * EIO: Respond with bad target + * EAGAIN: Pending request + */ + if (ret == -ENXIO) + break; + else if (ret == -EIO) + vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out); } out: mutex_unlock(&vq->mutex); @@ -1050,7 +1277,12 @@ out: static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) { + struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, + poll.work); + struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); + pr_debug("%s: The handling func for control queue.\n", __func__); + vhost_scsi_ctl_handle_vq(vs, vq); } static void diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index f52008bb8df7..3a5f81a66d34 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -30,6 +30,7 @@ #include <linux/sched/mm.h> #include <linux/sched/signal.h> #include <linux/interval_tree_generic.h> +#include <linux/nospec.h> #include "vhost.h" @@ -1387,6 +1388,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg if (idx >= d->nvqs) return -ENOBUFS; + idx = array_index_nospec(idx, d->nvqs); vq = d->vqs[idx]; mutex_lock(&vq->mutex); diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 591a13a59787..e413f54208f4 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -2,6 +2,12 @@ # fbdev configuration # +config FB_CMDLINE + bool + +config FB_NOTIFY + bool + menuconfig FB tristate "Support for frame buffer devices" select FB_CMDLINE @@ -41,7 +47,6 @@ menuconfig FB config FIRMWARE_EDID bool "Enable firmware EDID" depends on FB - default n ---help--- This enables access to the EDID transferred from the firmware. On the i386, this is from the Video BIOS. Enable this if DDC/I2C @@ -54,23 +59,15 @@ config FIRMWARE_EDID combination with certain motherboards and monitors are known to suffer from this problem. -config FB_CMDLINE - bool - -config FB_NOTIFY - bool - config FB_DDC tristate depends on FB select I2C_ALGOBIT select I2C - default n config FB_BOOT_VESA_SUPPORT bool depends on FB - default n ---help--- If true, at least one selected framebuffer driver can take advantage of VESA video modes set at an early boot stage via the vga= parameter. 
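Stepping back to the vhost-scsi rework earlier in this hunk: both the request queue handler and the new control queue handler now funnel every failure through a single err label and encode the outcome in an errno, as the repeated comment spells out (-ENXIO stops the loop, -EIO sends a bad-target response, -EINVAL and -EAGAIN just move on). A condensed outline of that shape, with locking and the command setup elided and using the helpers added above:

/* Condensed outline of the per-descriptor loop; declarations trimmed. */
static void example_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
        struct vhost_scsi_ctx vc = {};
        struct vhost_scsi_tpg *tpg;
        int ret;

        vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);

        for (;;) {
                ret = vhost_scsi_get_desc(vs, vq, &vc); /* -ENXIO: ring empty */
                if (ret)
                        goto err;
                /* ... point vc.req/req_size/lunp/target at the right header ... */
                ret = vhost_scsi_chk_size(vq, &vc);     /* -EINVAL / -EIO: bad buffers */
                if (ret)
                        goto err;
                ret = vhost_scsi_get_req(vq, &vc, &tpg); /* -EIO: fault or bad target */
                if (ret)
                        goto err;
                /* ... build the command and queue it for execution ... */
                ret = 0;
err:
                if (ret == -ENXIO)
                        break;          /* nothing more until the next kick */
                else if (ret == -EIO)
                        vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
                /* -EAGAIN and -EINVAL: drop this descriptor and continue */
        }
}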
@@ -78,7 +75,6 @@ config FB_BOOT_VESA_SUPPORT config FB_CFB_FILLRECT tristate depends on FB - default n ---help--- Include the cfb_fillrect function for generic software rectangle filling. This is used by drivers that don't provide their own @@ -87,7 +83,6 @@ config FB_CFB_FILLRECT config FB_CFB_COPYAREA tristate depends on FB - default n ---help--- Include the cfb_copyarea function for generic software area copying. This is used by drivers that don't provide their own (accelerated) @@ -96,7 +91,6 @@ config FB_CFB_COPYAREA config FB_CFB_IMAGEBLIT tristate depends on FB - default n ---help--- Include the cfb_imageblit function for generic software image blitting. This is used by drivers that don't provide their own @@ -105,7 +99,6 @@ config FB_CFB_IMAGEBLIT config FB_CFB_REV_PIXELS_IN_BYTE bool depends on FB - default n ---help--- Allow generic frame-buffer functions to work on displays with 1, 2 and 4 bits per pixel depths which has opposite order of pixels in @@ -114,7 +107,6 @@ config FB_CFB_REV_PIXELS_IN_BYTE config FB_SYS_FILLRECT tristate depends on FB - default n ---help--- Include the sys_fillrect function for generic software rectangle filling. This is used by drivers that don't provide their own @@ -123,7 +115,6 @@ config FB_SYS_FILLRECT config FB_SYS_COPYAREA tristate depends on FB - default n ---help--- Include the sys_copyarea function for generic software area copying. This is used by drivers that don't provide their own (accelerated) @@ -132,7 +123,6 @@ config FB_SYS_COPYAREA config FB_SYS_IMAGEBLIT tristate depends on FB - default n ---help--- Include the sys_imageblit function for generic software image blitting. This is used by drivers that don't provide their own @@ -141,7 +131,6 @@ config FB_SYS_IMAGEBLIT config FB_PROVIDE_GET_FB_UNMAPPED_AREA bool depends on FB - default n ---help--- Allow generic frame-buffer to provide get_fb_unmapped_area function. @@ -173,7 +162,6 @@ endchoice config FB_SYS_FOPS tristate depends on FB - default n config FB_DEFERRED_IO bool @@ -187,7 +175,6 @@ config FB_HECUBA config FB_SVGALIB tristate depends on FB - default n ---help--- Common utility functions useful to fbdev drivers of VGA-based cards. @@ -195,19 +182,16 @@ config FB_SVGALIB config FB_MACMODES tristate depends on FB - default n config FB_BACKLIGHT bool depends on FB select BACKLIGHT_LCD_SUPPORT select BACKLIGHT_CLASS_DEVICE - default n config FB_MODE_HELPERS bool "Enable Video Mode Handling Helpers" depends on FB - default n ---help--- This enables functions for handling video modes using the Generalized Timing Formula and the EDID parser. A few drivers rely @@ -218,7 +202,6 @@ config FB_MODE_HELPERS config FB_TILEBLITTING bool "Enable Tile Blitting Support" depends on FB - default n ---help--- This enables tile blitting. Tile blitting is a drawing technique where the screen is divided into rectangular sections (tiles), whereas @@ -329,16 +312,9 @@ config FB_ACORN hardware found in Acorn RISC PCs and other ARM-based machines. If unsure, say N. 
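The FB_CFB_* and FB_SYS_* symbols above exist only to be selected by drivers; their help text describes the generic software drawing helpers a driver plugs into its fb_ops when it has no accelerated path (cfb_* for framebuffers in I/O memory, sys_* for buffers in system RAM). A minimal sketch of how such a driver wires them up (the driver name is hypothetical):

#include <linux/module.h>
#include <linux/fb.h>

/* No 2D engine: fall back to the generic software drawing routines. */
static struct fb_ops example_fb_ops = {
        .owner          = THIS_MODULE,
        .fb_fillrect    = cfb_fillrect,
        .fb_copyarea    = cfb_copyarea,
        .fb_imageblit   = cfb_imageblit,
};

The matching Kconfig entry would then select FB_CFB_FILLRECT, FB_CFB_COPYAREA and FB_CFB_IMAGEBLIT so those helpers are built in.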
-config FB_CLPS711X_OLD - tristate - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT - config FB_CLPS711X tristate "CLPS711X LCD support" depends on FB && (ARCH_CLPS711X || COMPILE_TEST) - select FB_CLPS711X_OLD if ARCH_CLPS711X && !ARCH_MULTIPLATFORM select BACKLIGHT_LCD_SUPPORT select FB_MODE_HELPERS select FB_SYS_FILLRECT @@ -936,7 +912,6 @@ config FB_NVIDIA_I2C config FB_NVIDIA_DEBUG bool "Lots of debug output" depends on FB_NVIDIA - default n help Say Y here if you want the nVidia driver to output all sorts of debugging information to provide to the maintainer when @@ -983,7 +958,6 @@ config FB_RIVA_I2C config FB_RIVA_DEBUG bool "Lots of debug output" depends on FB_RIVA - default n help Say Y here if you want the Riva driver to output all sorts of debugging information to provide to the maintainer when @@ -1266,7 +1240,6 @@ config FB_RADEON_BACKLIGHT config FB_RADEON_DEBUG bool "Lots of debug output from Radeon driver" depends on FB_RADEON - default n help Say Y here if you want the Radeon driver to output all sorts of debugging information to provide to the maintainer when @@ -1399,7 +1372,6 @@ config FB_SAVAGE_I2C config FB_SAVAGE_ACCEL bool "Enable Console Acceleration" depends on FB_SAVAGE - default n help This option will compile in console acceleration support. If the resulting framebuffer console has bothersome glitches, then @@ -1456,8 +1428,6 @@ if FB_VIA config FB_VIA_DIRECT_PROCFS bool "direct hardware access via procfs (DEPRECATED)(DANGEROUS)" - depends on FB_VIA - default n help Allow direct hardware access to some output registers via procfs. This is dangerous but may provide the only chance to get the @@ -1466,8 +1436,6 @@ config FB_VIA_DIRECT_PROCFS config FB_VIA_X_COMPATIBILITY bool "X server compatibility" - depends on FB_VIA - default n help This option reduces the functionality (power saving, ...) of the framebuffer to avoid negative impact on the OpenChrome X server. @@ -1692,7 +1660,6 @@ config FB_WM8505 config FB_WMT_GE_ROPS bool "VT8500/WM8xxx accelerated raster ops support" depends on (FB = y) && (FB_VT8500 || FB_WM8505) - default n help This adds support for accelerated raster operations on the VIA VT8500 and Wondermedia 85xx series SoCs. @@ -1802,17 +1769,14 @@ config FB_PXA config FB_PXA_OVERLAY bool "Support PXA27x/PXA3xx Overlay(s) as framebuffer" - default n depends on FB_PXA && (PXA27x || PXA3xx) config FB_PXA_SMARTPANEL bool "PXA Smartpanel LCD support" - default n depends on FB_PXA config FB_PXA_PARAMETERS bool "PXA LCD command line parameters" - default n depends on FB_PXA ---help--- Enable the use of kernel command line or module parameters @@ -1850,7 +1814,6 @@ config FB_MBX config FB_MBX_DEBUG bool "Enable debugging info via debugfs" depends on FB_MBX && DEBUG_FS - default n ---help--- Enable this if you want debugging information using the debug filesystem (debugfs) @@ -2240,7 +2203,7 @@ config FB_MX3 config FB_BROADSHEET tristate "E-Ink Broadsheet/Epson S1D13521 controller support" - depends on FB + depends on FB && (ARCH_PXA || COMPILE_TEST) select FB_SYS_FILLRECT select FB_SYS_COPYAREA select FB_SYS_IMAGEBLIT @@ -2308,10 +2271,6 @@ config FB_SIMPLE Configuration re: surface address, size, and format must be provided through device tree, or plain old platform data. 
-source "drivers/video/fbdev/omap/Kconfig" -source "drivers/video/fbdev/omap2/Kconfig" -source "drivers/video/fbdev/mmp/Kconfig" - config FB_SSD1307 tristate "Solomon SSD1307 framebuffer support" depends on FB && I2C @@ -2341,3 +2300,7 @@ config FB_SM712 This driver is also available as a module. The module will be called sm712fb. If you want to compile it as a module, say M here and read <file:Documentation/kbuild/modules.txt>. + +source "drivers/video/fbdev/omap/Kconfig" +source "drivers/video/fbdev/omap2/Kconfig" +source "drivers/video/fbdev/mmp/Kconfig" diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile index 13c900320c2c..846b0c9ea9db 100644 --- a/drivers/video/fbdev/Makefile +++ b/drivers/video/fbdev/Makefile @@ -14,7 +14,6 @@ obj-$(CONFIG_FB_WMT_GE_ROPS) += wmt_ge_rops.o obj-$(CONFIG_FB_AMIGA) += amifb.o c2p_planar.o obj-$(CONFIG_FB_ARC) += arcfb.o obj-$(CONFIG_FB_CLPS711X) += clps711x-fb.o -obj-$(CONFIG_FB_CLPS711X_OLD) += clps711xfb.o obj-$(CONFIG_FB_CYBER2000) += cyber2000fb.o obj-$(CONFIG_FB_GRVGA) += grvga.o obj-$(CONFIG_FB_PM2) += pm2fb.o diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c index 7e87d0d61658..a48741aab240 100644 --- a/drivers/video/fbdev/arcfb.c +++ b/drivers/video/fbdev/arcfb.c @@ -419,6 +419,8 @@ static int arcfb_ioctl(struct fb_info *info, schedule(); finish_wait(&arcfb_waitq, &wait); } + /* fall through */ + case FBIO_GETCONTROL2: { unsigned char ctl2; diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index 076d24afbd72..4ed55e6bbb84 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -22,6 +22,7 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> +#include <video/of_videomode.h> #include <video/of_display_timing.h> #include <linux/regulator/consumer.h> #include <video/videomode.h> @@ -1028,11 +1029,11 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo) struct device *dev = &sinfo->pdev->dev; struct device_node *np =dev->of_node; struct device_node *display_np; - struct device_node *timings_np; - struct display_timings *timings; struct atmel_lcdfb_power_ctrl_gpio *og; bool is_gpio_power = false; + struct fb_videomode fb_vm; struct gpio_desc *gpiod; + struct videomode vm; int ret = -ENOENT; int i; @@ -1105,44 +1106,18 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo) pdata->lcdcon_is_backlight = of_property_read_bool(display_np, "atmel,lcdcon-backlight"); pdata->lcdcon_pol_negative = of_property_read_bool(display_np, "atmel,lcdcon-backlight-inverted"); - timings = of_get_display_timings(display_np); - if (!timings) { - dev_err(dev, "failed to get display timings\n"); - ret = -EINVAL; + ret = of_get_videomode(display_np, &vm, OF_USE_NATIVE_MODE); + if (ret) { + dev_err(dev, "failed to get videomode from DT\n"); goto put_display_node; } - timings_np = of_get_child_by_name(display_np, "display-timings"); - if (!timings_np) { - dev_err(dev, "failed to find display-timings node\n"); - ret = -ENODEV; + ret = fb_videomode_from_videomode(&vm, &fb_vm); + if (ret < 0) goto put_display_node; - } - for (i = 0; i < of_get_child_count(timings_np); i++) { - struct videomode vm; - struct fb_videomode fb_vm; - - ret = videomode_from_timings(timings, &vm, i); - if (ret < 0) - goto put_timings_node; - ret = fb_videomode_from_videomode(&vm, &fb_vm); - if (ret < 0) - goto put_timings_node; - - fb_add_videomode(&fb_vm, &info->modelist); - } - - /* - * FIXME: Make sure we are not referencing any fields in 
display_np - * and timings_np and drop our references to them before returning to - * avoid leaking the nodes on probe deferral and driver unbind. - */ - - return 0; + fb_add_videomode(&fb_vm, &info->modelist); -put_timings_node: - of_node_put(timings_np); put_display_node: of_node_put(display_np); return ret; diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h index d09bab3bf224..e5a347c58180 100644 --- a/drivers/video/fbdev/aty/atyfb.h +++ b/drivers/video/fbdev/aty/atyfb.h @@ -147,6 +147,7 @@ struct atyfb_par { u16 pci_id; u32 accel_flags; int blitter_may_be_busy; + unsigned fifo_space; int asleep; int lock_blank; unsigned long res_start; @@ -346,10 +347,13 @@ extern int aty_init_cursor(struct fb_info *info); * Hardware acceleration */ -static inline void wait_for_fifo(u16 entries, const struct atyfb_par *par) +static inline void wait_for_fifo(u16 entries, struct atyfb_par *par) { - while ((aty_ld_le32(FIFO_STAT, par) & 0xffff) > - ((u32) (0x8000 >> entries))); + unsigned fifo_space = par->fifo_space; + while (entries > fifo_space) { + fifo_space = 16 - fls(aty_ld_le32(FIFO_STAT, par) & 0xffff); + } + par->fifo_space = fifo_space - entries; } static inline void wait_for_idle(struct atyfb_par *par) @@ -359,7 +363,7 @@ static inline void wait_for_idle(struct atyfb_par *par) par->blitter_may_be_busy = 0; } -extern void aty_reset_engine(const struct atyfb_par *par); +extern void aty_reset_engine(struct atyfb_par *par); extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info); void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area); diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c index 05111e90f168..b6fe103df145 100644 --- a/drivers/video/fbdev/aty/atyfb_base.c +++ b/drivers/video/fbdev/aty/atyfb_base.c @@ -1480,24 +1480,28 @@ static int atyfb_set_par(struct fb_info *info) base = 0x2000; printk("debug atyfb: Mach64 non-shadow register values:"); for (i = 0; i < 256; i = i+4) { - if (i % 16 == 0) - printk("\ndebug atyfb: 0x%04X: ", base + i); - printk(" %08X", aty_ld_le32(i, par)); + if (i % 16 == 0) { + pr_cont("\n"); + printk("debug atyfb: 0x%04X: ", base + i); + } + pr_cont(" %08X", aty_ld_le32(i, par)); } - printk("\n\n"); + pr_cont("\n\n"); #ifdef CONFIG_FB_ATY_CT /* PLL registers */ base = 0x00; printk("debug atyfb: Mach64 PLL register values:"); for (i = 0; i < 64; i++) { - if (i % 16 == 0) - printk("\ndebug atyfb: 0x%02X: ", base + i); + if (i % 16 == 0) { + pr_cont("\n"); + printk("debug atyfb: 0x%02X: ", base + i); + } if (i % 4 == 0) - printk(" "); - printk("%02X", aty_ld_pll_ct(i, par)); + pr_cont(" "); + pr_cont("%02X", aty_ld_pll_ct(i, par)); } - printk("\n\n"); + pr_cont("\n\n"); #endif /* CONFIG_FB_ATY_CT */ #ifdef CONFIG_FB_ATY_GENERIC_LCD @@ -1509,19 +1513,19 @@ static int atyfb_set_par(struct fb_info *info) for (i = 0; i <= POWER_MANAGEMENT; i++) { if (i == EXT_VERT_STRETCH) continue; - printk("\ndebug atyfb: 0x%04X: ", + pr_cont("\ndebug atyfb: 0x%04X: ", lt_lcd_regs[i]); - printk(" %08X", aty_ld_lcd(i, par)); + pr_cont(" %08X", aty_ld_lcd(i, par)); } } else { for (i = 0; i < 64; i++) { if (i % 4 == 0) - printk("\ndebug atyfb: 0x%02X: ", + pr_cont("\ndebug atyfb: 0x%02X: ", base + i); - printk(" %08X", aty_ld_lcd(i, par)); + pr_cont(" %08X", aty_ld_lcd(i, par)); } } - printk("\n\n"); + pr_cont("\n\n"); } #endif /* CONFIG_FB_ATY_GENERIC_LCD */ } @@ -2597,8 +2601,8 @@ static int aty_init(struct fb_info *info) aty_ld_le32(DSP_ON_OFF, par), aty_ld_le32(CLOCK_CNTL, par)); for (i 
= 0; i < 40; i++) - printk(" %02x", aty_ld_pll_ct(i, par)); - printk("\n"); + pr_cont(" %02x", aty_ld_pll_ct(i, par)); + pr_cont("\n"); } #endif if (par->pll_ops->init_pll) diff --git a/drivers/video/fbdev/aty/mach64_accel.c b/drivers/video/fbdev/aty/mach64_accel.c index 2541a0e0de76..e4b2c89baee2 100644 --- a/drivers/video/fbdev/aty/mach64_accel.c +++ b/drivers/video/fbdev/aty/mach64_accel.c @@ -37,7 +37,7 @@ static u32 rotation24bpp(u32 dx, u32 direction) return ((rotation << 8) | DST_24_ROTATION_ENABLE); } -void aty_reset_engine(const struct atyfb_par *par) +void aty_reset_engine(struct atyfb_par *par) { /* reset engine */ aty_st_le32(GEN_TEST_CNTL, @@ -50,6 +50,8 @@ void aty_reset_engine(const struct atyfb_par *par) /* HOST errors */ aty_st_le32(BUS_CNTL, aty_ld_le32(BUS_CNTL, par) | BUS_HOST_ERR_ACK | BUS_FIFO_ERR_ACK, par); + + par->fifo_space = 0; } static void reset_GTC_3D_engine(const struct atyfb_par *par) @@ -127,7 +129,7 @@ void aty_init_engine(struct atyfb_par *par, struct fb_info *info) /* set host attributes */ wait_for_fifo(13, par); - aty_st_le32(HOST_CNTL, 0, par); + aty_st_le32(HOST_CNTL, HOST_BYTE_ALIGN, par); /* set pattern attributes */ aty_st_le32(PAT_REG0, 0, par); @@ -233,7 +235,8 @@ void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area) rotation = rotation24bpp(dx, direction); } - wait_for_fifo(4, par); + wait_for_fifo(5, par); + aty_st_le32(DP_PIX_WIDTH, par->crtc.dp_pix_width, par); aty_st_le32(DP_SRC, FRGD_SRC_BLIT, par); aty_st_le32(SRC_Y_X, (sx << 16) | sy, par); aty_st_le32(SRC_HEIGHT1_WIDTH1, (width << 16) | area->height, par); @@ -269,7 +272,8 @@ void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) rotation = rotation24bpp(dx, DST_X_LEFT_TO_RIGHT); } - wait_for_fifo(3, par); + wait_for_fifo(4, par); + aty_st_le32(DP_PIX_WIDTH, par->crtc.dp_pix_width, par); aty_st_le32(DP_FRGD_CLR, color, par); aty_st_le32(DP_SRC, BKGD_SRC_BKGD_CLR | FRGD_SRC_FRGD_CLR | MONO_SRC_ONE, @@ -284,7 +288,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image) { struct atyfb_par *par = (struct atyfb_par *) info->par; u32 src_bytes, dx = image->dx, dy = image->dy, width = image->width; - u32 pix_width_save, pix_width, host_cntl, rotation = 0, src, mix; + u32 pix_width, rotation = 0, src, mix; if (par->asleep) return; @@ -296,8 +300,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image) return; } - pix_width = pix_width_save = aty_ld_le32(DP_PIX_WIDTH, par); - host_cntl = aty_ld_le32(HOST_CNTL, par) | HOST_BYTE_ALIGN; + pix_width = par->crtc.dp_pix_width; switch (image->depth) { case 1: @@ -345,7 +348,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image) * since Rage 3D IIc we have DP_HOST_TRIPLE_EN bit * this hwaccelerated triple has an issue with not aligned data */ - if (M64_HAS(HW_TRIPLE) && image->width % 8 == 0) + if (image->depth == 1 && M64_HAS(HW_TRIPLE) && image->width % 8 == 0) pix_width |= DP_HOST_TRIPLE_EN; } @@ -370,19 +373,18 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image) mix = FRGD_MIX_D_XOR_S | BKGD_MIX_D; } - wait_for_fifo(6, par); - aty_st_le32(DP_WRITE_MASK, 0xFFFFFFFF, par); + wait_for_fifo(5, par); aty_st_le32(DP_PIX_WIDTH, pix_width, par); aty_st_le32(DP_MIX, mix, par); aty_st_le32(DP_SRC, src, par); - aty_st_le32(HOST_CNTL, host_cntl, par); + aty_st_le32(HOST_CNTL, HOST_BYTE_ALIGN, par); aty_st_le32(DST_CNTL, DST_Y_TOP_TO_BOTTOM | DST_X_LEFT_TO_RIGHT | rotation, par); draw_rect(dx, dy, width, image->height, par); src_bytes 
= (((image->width * image->depth) + 7) / 8) * image->height; /* manual triple each pixel */ - if (info->var.bits_per_pixel == 24 && !(pix_width & DP_HOST_TRIPLE_EN)) { + if (image->depth == 1 && info->var.bits_per_pixel == 24 && !(pix_width & DP_HOST_TRIPLE_EN)) { int inbit, outbit, mult24, byte_id_in_dword, width; u8 *pbitmapin = (u8*)image->data, *pbitmapout; u32 hostdword; @@ -415,7 +417,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image) } } wait_for_fifo(1, par); - aty_st_le32(HOST_DATA0, hostdword, par); + aty_st_le32(HOST_DATA0, le32_to_cpu(hostdword), par); } } else { u32 *pbitmap, dwords = (src_bytes + 3) / 4; @@ -424,8 +426,4 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image) aty_st_le32(HOST_DATA0, get_unaligned_le32(pbitmap), par); } } - - /* restore pix_width */ - wait_for_fifo(1, par); - aty_st_le32(DP_PIX_WIDTH, pix_width_save, par); } diff --git a/drivers/video/fbdev/cg14.c b/drivers/video/fbdev/cg14.c index 8de88b129b62..9af54c2368fd 100644 --- a/drivers/video/fbdev/cg14.c +++ b/drivers/video/fbdev/cg14.c @@ -355,9 +355,7 @@ static int cg14_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) static void cg14_init_fix(struct fb_info *info, int linebytes, struct device_node *dp) { - const char *name = dp->name; - - strlcpy(info->fix.id, name, sizeof(info->fix.id)); + snprintf(info->fix.id, sizeof(info->fix.id), "%pOFn", dp); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = FB_VISUAL_PSEUDOCOLOR; diff --git a/drivers/video/fbdev/cg3.c b/drivers/video/fbdev/cg3.c index 6c334260cf53..1bd95b02f3aa 100644 --- a/drivers/video/fbdev/cg3.c +++ b/drivers/video/fbdev/cg3.c @@ -246,7 +246,7 @@ static int cg3_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) static void cg3_init_fix(struct fb_info *info, int linebytes, struct device_node *dp) { - strlcpy(info->fix.id, dp->name, sizeof(info->fix.id)); + snprintf(info->fix.id, sizeof(info->fix.id), "%pOFn", dp); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = FB_VISUAL_PSEUDOCOLOR; diff --git a/drivers/video/fbdev/clps711xfb.c b/drivers/video/fbdev/clps711xfb.c deleted file mode 100644 index 7693aea8fb23..000000000000 --- a/drivers/video/fbdev/clps711xfb.c +++ /dev/null @@ -1,314 +0,0 @@ -/* - * linux/drivers/video/clps711xfb.c - * - * Copyright (C) 2000-2001 Deep Blue Solutions Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * Framebuffer driver for the CLPS7111 and EP7212 processors. - */ -#include <linux/mm.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/fb.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/platform_device.h> - -#include <mach/hardware.h> -#include <asm/mach-types.h> -#include <linux/uaccess.h> - -struct fb_info *cfb; - -#define CMAP_MAX_SIZE 16 - -/* - * LCD AC Prescale. 
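On the %pOFn conversions in the cg14/cg3 hunks above (and in several more fbdev drivers further down): instead of dereferencing dp->name directly, the printf format %pOFn asks vsnprintf() to print the device_node's name, in preparation for removing the name pointer from struct device_node. A small sketch of the pattern (the helper name is hypothetical):

#include <linux/kernel.h>
#include <linux/of.h>

static void example_fill_fix_id(char *id, size_t sz, struct device_node *dp)
{
        /* Equivalent to the old strlcpy(id, dp->name, sz), via %pOFn. */
        snprintf(id, sz, "%pOFn", dp);
}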
This comes from the LCD panel manufacturers specifications. - * This determines how many clocks + 1 of CL1 before the M signal toggles. - * The number of lines on the display must not be divisible by this number. - */ -static unsigned int lcd_ac_prescale = 13; - -/* - * Set a single color register. Return != 0 for invalid regno. - */ -static int -clps7111fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, - u_int transp, struct fb_info *info) -{ - unsigned int level, mask, shift, pal; - - if (regno >= (1 << info->var.bits_per_pixel)) - return 1; - - /* gray = 0.30*R + 0.58*G + 0.11*B */ - level = (red * 77 + green * 151 + blue * 28) >> 20; - - /* - * On an LCD, a high value is dark, while a low value is light. - * So we invert the level. - * - * This isn't true on all machines, so we only do it on EDB7211. - * --rmk - */ - if (machine_is_edb7211()) { - level = 15 - level; - } - - shift = 4 * (regno & 7); - level <<= shift; - mask = 15 << shift; - level &= mask; - - regno = regno < 8 ? PALLSW : PALMSW; - - pal = clps_readl(regno); - pal = (pal & ~mask) | level; - clps_writel(pal, regno); - - return 0; -} - -/* - * Validate the purposed mode. - */ -static int -clps7111fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) -{ - var->transp.msb_right = 0; - var->transp.offset = 0; - var->transp.length = 0; - var->red.msb_right = 0; - var->red.offset = 0; - var->red.length = var->bits_per_pixel; - var->green = var->red; - var->blue = var->red; - - if (var->bits_per_pixel > 4) - return -EINVAL; - - return 0; -} - -/* - * Set the hardware state. - */ -static int -clps7111fb_set_par(struct fb_info *info) -{ - unsigned int lcdcon, syscon, pixclock; - - switch (info->var.bits_per_pixel) { - case 1: - info->fix.visual = FB_VISUAL_MONO01; - break; - case 2: - info->fix.visual = FB_VISUAL_PSEUDOCOLOR; - break; - case 4: - info->fix.visual = FB_VISUAL_PSEUDOCOLOR; - break; - } - - info->fix.line_length = info->var.xres_virtual * info->var.bits_per_pixel / 8; - - lcdcon = (info->var.xres_virtual * info->var.yres_virtual * info->var.bits_per_pixel) / 128 - 1; - lcdcon |= ((info->var.xres_virtual / 16) - 1) << 13; - lcdcon |= lcd_ac_prescale << 25; - - /* - * Calculate pixel prescale value from the pixclock. This is: - * 36.864MHz / pixclock_mhz - 1. - * However, pixclock is in picoseconds, so this ends up being: - * 36864000 * pixclock_ps / 10^12 - 1 - * and this will overflow the 32-bit math. We perform this as - * (9 * 4096000 == 36864000): - * pixclock_ps * 9 * (4096000 / 10^12) - 1 - */ - pixclock = 9 * info->var.pixclock / 244140 - 1; - lcdcon |= pixclock << 19; - - if (info->var.bits_per_pixel == 4) - lcdcon |= LCDCON_GSMD; - if (info->var.bits_per_pixel >= 2) - lcdcon |= LCDCON_GSEN; - - /* - * LCDCON must only be changed while the LCD is disabled - */ - syscon = clps_readl(SYSCON1); - clps_writel(syscon & ~SYSCON1_LCDEN, SYSCON1); - clps_writel(lcdcon, LCDCON); - clps_writel(syscon | SYSCON1_LCDEN, SYSCON1); - return 0; -} - -static int clps7111fb_blank(int blank, struct fb_info *info) -{ - /* Enable/Disable LCD controller. 
*/ - if (blank) - clps_writel(clps_readl(SYSCON1) & ~SYSCON1_LCDEN, SYSCON1); - else - clps_writel(clps_readl(SYSCON1) | SYSCON1_LCDEN, SYSCON1); - - return 0; -} - -static struct fb_ops clps7111fb_ops = { - .owner = THIS_MODULE, - .fb_check_var = clps7111fb_check_var, - .fb_set_par = clps7111fb_set_par, - .fb_setcolreg = clps7111fb_setcolreg, - .fb_blank = clps7111fb_blank, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, -}; - -static void clps711x_guess_lcd_params(struct fb_info *info) -{ - unsigned int lcdcon, syscon, size; - unsigned long phys_base = PAGE_OFFSET; - void *virt_base = (void *)PAGE_OFFSET; - - info->var.xres_virtual = 640; - info->var.yres_virtual = 240; - info->var.bits_per_pixel = 4; - info->var.activate = FB_ACTIVATE_NOW; - info->var.height = -1; - info->var.width = -1; - info->var.pixclock = 93006; /* 10.752MHz pixel clock */ - - /* - * If the LCD controller is already running, decode the values - * in LCDCON to xres/yres/bpp/pixclock/acprescale - */ - syscon = clps_readl(SYSCON1); - if (syscon & SYSCON1_LCDEN) { - lcdcon = clps_readl(LCDCON); - - /* - * Decode GSMD and GSEN bits to bits per pixel - */ - switch (lcdcon & (LCDCON_GSMD | LCDCON_GSEN)) { - case LCDCON_GSMD | LCDCON_GSEN: - info->var.bits_per_pixel = 4; - break; - - case LCDCON_GSEN: - info->var.bits_per_pixel = 2; - break; - - default: - info->var.bits_per_pixel = 1; - break; - } - - /* - * Decode xres/yres - */ - info->var.xres_virtual = (((lcdcon >> 13) & 0x3f) + 1) * 16; - info->var.yres_virtual = (((lcdcon & 0x1fff) + 1) * 128) / - (info->var.xres_virtual * - info->var.bits_per_pixel); - - /* - * Calculate pixclock - */ - info->var.pixclock = (((lcdcon >> 19) & 0x3f) + 1) * 244140 / 9; - - /* - * Grab AC prescale - */ - lcd_ac_prescale = (lcdcon >> 25) & 0x1f; - } - - info->var.xres = info->var.xres_virtual; - info->var.yres = info->var.yres_virtual; - info->var.grayscale = info->var.bits_per_pixel > 1; - - size = info->var.xres * info->var.yres * info->var.bits_per_pixel / 8; - - /* - * Might be worth checking to see if we can use the on-board - * RAM if size here... - * CLPS7110 - no on-board SRAM - * EP7212 - 38400 bytes - */ - if (size <= 38400) { - printk(KERN_INFO "CLPS711xFB: could use on-board SRAM?\n"); - } - - if ((syscon & SYSCON1_LCDEN) == 0) { - /* - * The display isn't running. Ensure that - * the display memory is empty. 
- */ - memset(virt_base, 0, size); - } - - info->screen_base = virt_base; - info->fix.smem_start = phys_base; - info->fix.smem_len = PAGE_ALIGN(size); - info->fix.type = FB_TYPE_PACKED_PIXELS; -} - -static int clps711x_fb_probe(struct platform_device *pdev) -{ - int err = -ENOMEM; - - if (fb_get_options("clps711xfb", NULL)) - return -ENODEV; - - cfb = kzalloc(sizeof(*cfb), GFP_KERNEL); - if (!cfb) - goto out; - - strcpy(cfb->fix.id, "clps711x"); - - cfb->fbops = &clps7111fb_ops; - cfb->flags = FBINFO_DEFAULT; - - clps711x_guess_lcd_params(cfb); - - fb_alloc_cmap(&cfb->cmap, CMAP_MAX_SIZE, 0); - - err = register_framebuffer(cfb); - -out: return err; -} - -static int clps711x_fb_remove(struct platform_device *pdev) -{ - unregister_framebuffer(cfb); - kfree(cfb); - - return 0; -} - -static struct platform_driver clps711x_fb_driver = { - .driver = { - .name = "video-clps711x", - }, - .probe = clps711x_fb_probe, - .remove = clps711x_fb_remove, -}; -module_platform_driver(clps711x_fb_driver); - -MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); -MODULE_DESCRIPTION("CLPS711X framebuffer driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c index 852d86c1c527..dd3128990776 100644 --- a/drivers/video/fbdev/core/fbmon.c +++ b/drivers/video/fbdev/core/fbmon.c @@ -1480,8 +1480,8 @@ int of_get_fb_videomode(struct device_node *np, struct fb_videomode *fb, if (ret) return ret; - pr_debug("%pOF: got %dx%d display mode from %s\n", - np, vm.hactive, vm.vactive, np->name); + pr_debug("%pOF: got %dx%d display mode\n", + np, vm.hactive, vm.vactive); dump_fb_videomode(fb); return 0; diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c index ecdcf358ad5e..901ca4ed10e9 100644 --- a/drivers/video/fbdev/imsttfb.c +++ b/drivers/video/fbdev/imsttfb.c @@ -1473,7 +1473,7 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dp = pci_device_to_OF_node(pdev); if(dp) - printk(KERN_INFO "%s: OF name %s\n",__func__, dp->name); + printk(KERN_INFO "%s: OF name %pOFn\n",__func__, dp); else if (IS_ENABLED(CONFIG_OF)) printk(KERN_ERR "imsttfb: no OF node for pci device\n"); diff --git a/drivers/video/fbdev/leo.c b/drivers/video/fbdev/leo.c index 71862188f528..446ac3364bad 100644 --- a/drivers/video/fbdev/leo.c +++ b/drivers/video/fbdev/leo.c @@ -434,7 +434,7 @@ static int leo_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) static void leo_init_fix(struct fb_info *info, struct device_node *dp) { - strlcpy(info->fix.id, dp->name, sizeof(info->fix.id)); + snprintf(info->fix.id, sizeof(info->fix.id), "%pOFn", dp); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = FB_VISUAL_TRUECOLOR; diff --git a/drivers/video/fbdev/mmp/hw/Kconfig b/drivers/video/fbdev/mmp/hw/Kconfig index c735d133895c..fcb711143fb2 100644 --- a/drivers/video/fbdev/mmp/hw/Kconfig +++ b/drivers/video/fbdev/mmp/hw/Kconfig @@ -3,7 +3,6 @@ if MMP_DISP config MMP_DISP_CONTROLLER bool "mmp display controller hw support" depends on CPU_PXA910 || CPU_MMP2 - default n help Marvell MMP display hw controller support this controller is used on Marvell PXA910 and diff --git a/drivers/video/fbdev/mmp/panel/Kconfig b/drivers/video/fbdev/mmp/panel/Kconfig index 808890f7064b..f58558795f39 100644 --- a/drivers/video/fbdev/mmp/panel/Kconfig +++ b/drivers/video/fbdev/mmp/panel/Kconfig @@ -2,6 +2,5 @@ config MMP_PANEL_TPOHVGA bool "tpohvga panel TJ032MD01BW support" depends on SPI_MASTER - default n help tpohvga panel support diff --git 
a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c index 77c0a2f45b3b..31f769d67195 100644 --- a/drivers/video/fbdev/offb.c +++ b/drivers/video/fbdev/offb.c @@ -419,9 +419,13 @@ static void __init offb_init_fb(const char *name, var = &info->var; info->par = par; - strcpy(fix->id, "OFfb "); - strncat(fix->id, name, sizeof(fix->id) - sizeof("OFfb ")); - fix->id[sizeof(fix->id) - 1] = '\0'; + if (name) { + strcpy(fix->id, "OFfb "); + strncat(fix->id, name, sizeof(fix->id) - sizeof("OFfb ")); + fix->id[sizeof(fix->id) - 1] = '\0'; + } else + snprintf(fix->id, sizeof(fix->id), "OFfb %pOFn", dp); + var->xres = var->xres_virtual = width; var->yres = var->yres_virtual = height; @@ -644,7 +648,7 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node) /* kludge for valkyrie */ if (strcmp(dp->name, "valkyrie") == 0) address += 0x1000; - offb_init_fb(no_real_node ? "bootx" : dp->name, + offb_init_fb(no_real_node ? "bootx" : NULL, width, height, depth, pitch, address, foreign_endian, no_real_node ? NULL : dp); } diff --git a/drivers/video/fbdev/omap/lcd_ams_delta.c b/drivers/video/fbdev/omap/lcd_ams_delta.c index e8c748a0dfe2..cddbd00cbf9f 100644 --- a/drivers/video/fbdev/omap/lcd_ams_delta.c +++ b/drivers/video/fbdev/omap/lcd_ams_delta.c @@ -24,11 +24,10 @@ #include <linux/platform_device.h> #include <linux/io.h> #include <linux/delay.h> +#include <linux/gpio/consumer.h> #include <linux/lcd.h> -#include <linux/gpio.h> #include <mach/hardware.h> -#include <mach/board-ams-delta.h> #include "omapfb.h" @@ -41,6 +40,8 @@ /* LCD class device section */ static int ams_delta_lcd; +static struct gpio_desc *gpiod_vblen; +static struct gpio_desc *gpiod_ndisp; static int ams_delta_lcd_set_power(struct lcd_device *dev, int power) { @@ -99,41 +100,17 @@ static struct lcd_ops ams_delta_lcd_ops = { /* omapfb panel section */ -static const struct gpio _gpios[] = { - { - .gpio = AMS_DELTA_GPIO_PIN_LCD_VBLEN, - .flags = GPIOF_OUT_INIT_LOW, - .label = "lcd_vblen", - }, - { - .gpio = AMS_DELTA_GPIO_PIN_LCD_NDISP, - .flags = GPIOF_OUT_INIT_LOW, - .label = "lcd_ndisp", - }, -}; - -static int ams_delta_panel_init(struct lcd_panel *panel, - struct omapfb_device *fbdev) -{ - return gpio_request_array(_gpios, ARRAY_SIZE(_gpios)); -} - -static void ams_delta_panel_cleanup(struct lcd_panel *panel) -{ - gpio_free_array(_gpios, ARRAY_SIZE(_gpios)); -} - static int ams_delta_panel_enable(struct lcd_panel *panel) { - gpio_set_value(AMS_DELTA_GPIO_PIN_LCD_NDISP, 1); - gpio_set_value(AMS_DELTA_GPIO_PIN_LCD_VBLEN, 1); + gpiod_set_value(gpiod_ndisp, 1); + gpiod_set_value(gpiod_vblen, 1); return 0; } static void ams_delta_panel_disable(struct lcd_panel *panel) { - gpio_set_value(AMS_DELTA_GPIO_PIN_LCD_VBLEN, 0); - gpio_set_value(AMS_DELTA_GPIO_PIN_LCD_NDISP, 0); + gpiod_set_value(gpiod_vblen, 0); + gpiod_set_value(gpiod_ndisp, 0); } static struct lcd_panel ams_delta_panel = { @@ -154,8 +131,6 @@ static struct lcd_panel ams_delta_panel = { .pcd = 0, .acb = 37, - .init = ams_delta_panel_init, - .cleanup = ams_delta_panel_cleanup, .enable = ams_delta_panel_enable, .disable = ams_delta_panel_disable, }; @@ -166,9 +141,23 @@ static struct lcd_panel ams_delta_panel = { static int ams_delta_panel_probe(struct platform_device *pdev) { struct lcd_device *lcd_device = NULL; -#ifdef CONFIG_LCD_CLASS_DEVICE int ret; + gpiod_vblen = devm_gpiod_get(&pdev->dev, "vblen", GPIOD_OUT_LOW); + if (IS_ERR(gpiod_vblen)) { + ret = PTR_ERR(gpiod_vblen); + dev_err(&pdev->dev, "VBLEN GPIO request failed (%d)\n", ret); + return ret; 
+ } + + gpiod_ndisp = devm_gpiod_get(&pdev->dev, "ndisp", GPIOD_OUT_LOW); + if (IS_ERR(gpiod_ndisp)) { + ret = PTR_ERR(gpiod_ndisp); + dev_err(&pdev->dev, "NDISP GPIO request failed (%d)\n", ret); + return ret; + } + +#ifdef CONFIG_LCD_CLASS_DEVICE lcd_device = lcd_device_register("omapfb", &pdev->dev, NULL, &ams_delta_lcd_ops); diff --git a/drivers/video/fbdev/omap2/omapfb/dss/Kconfig b/drivers/video/fbdev/omap2/omapfb/dss/Kconfig index 6d0bb27e4f85..356b89b378d4 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/Kconfig +++ b/drivers/video/fbdev/omap2/omapfb/dss/Kconfig @@ -10,7 +10,6 @@ config FB_OMAP2_DSS config FB_OMAP2_DSS_DEBUG bool "Debug support" - default n help This enables printing of debug messages. Alternatively, debug messages can also be enabled by setting CONFIG_DYNAMIC_DEBUG and then setting @@ -19,7 +18,6 @@ config FB_OMAP2_DSS_DEBUG config FB_OMAP2_DSS_DEBUGFS bool "Debugfs filesystem support" depends on DEBUG_FS - default n help This enables debugfs for OMAPDSS at <debugfs>/omapdss. This enables querying about clock configuration and register configuration of dss, @@ -28,7 +26,6 @@ config FB_OMAP2_DSS_DEBUGFS config FB_OMAP2_DSS_COLLECT_IRQ_STATS bool "Collect DSS IRQ statistics" depends on FB_OMAP2_DSS_DEBUGFS - default n help Collect DSS IRQ statistics, printable via debugfs. @@ -45,7 +42,6 @@ config FB_OMAP2_DSS_DPI config FB_OMAP2_DSS_RFBI bool "RFBI support" depends on BROKEN - default n help MIPI DBI support (RFBI, Remote Framebuffer Interface, in Texas Instrument's terminology). @@ -73,7 +69,6 @@ config FB_OMAP4_DSS_HDMI config FB_OMAP5_DSS_HDMI bool "HDMI support for OMAP5" - default n select FB_OMAP2_DSS_HDMI_COMMON help HDMI Interface for OMAP5 and similar cores. This adds the High @@ -82,7 +77,6 @@ config FB_OMAP5_DSS_HDMI config FB_OMAP2_DSS_SDI bool "SDI support" - default n help SDI (Serial Display Interface) support. @@ -91,7 +85,6 @@ config FB_OMAP2_DSS_SDI config FB_OMAP2_DSS_DSI bool "DSI support" - default n help MIPI DSI (Display Serial Interface) support. diff --git a/drivers/video/fbdev/p9100.c b/drivers/video/fbdev/p9100.c index 64de5cda541d..c4283e9e95af 100644 --- a/drivers/video/fbdev/p9100.c +++ b/drivers/video/fbdev/p9100.c @@ -239,7 +239,7 @@ static int p9100_ioctl(struct fb_info *info, unsigned int cmd, static void p9100_init_fix(struct fb_info *info, int linebytes, struct device_node *dp) { - strlcpy(info->fix.id, dp->name, sizeof(info->fix.id)); + snprintf(info->fix.id, sizeof(info->fix.id), "%pOFn", dp); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = FB_VISUAL_PSEUDOCOLOR; diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c index d059d04c63ac..e31340fad3c7 100644 --- a/drivers/video/fbdev/pxa168fb.c +++ b/drivers/video/fbdev/pxa168fb.c @@ -405,9 +405,6 @@ static int pxa168fb_set_par(struct fb_info *info) struct fb_var_screeninfo *var = &info->var; struct fb_videomode mode; u32 x; - struct pxa168fb_mach_info *mi; - - mi = dev_get_platdata(fbi->dev); /* * Set additional mode info. 
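The lcd_ams_delta change above is the standard conversion from legacy GPIO numbers to GPIO descriptors: devm_gpiod_get() looks the line up by its function name ("vblen", "ndisp"), requests it with an initial output state, and releases it automatically on driver detach, which is why the explicit init/cleanup panel hooks could be dropped. A minimal sketch of the pattern (driver and variable names hypothetical):

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static struct gpio_desc *example_vblen;

static int example_probe(struct platform_device *pdev)
{
        /* Requested driven low; no gpio_free() needed thanks to devm. */
        example_vblen = devm_gpiod_get(&pdev->dev, "vblen", GPIOD_OUT_LOW);
        if (IS_ERR(example_vblen))
                return PTR_ERR(example_vblen);

        gpiod_set_value(example_vblen, 1);      /* logical level, polarity-aware */
        return 0;
}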
diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c index a436d44f1b7f..01a7110e61a7 100644 --- a/drivers/video/fbdev/sbuslib.c +++ b/drivers/video/fbdev/sbuslib.c @@ -106,11 +106,11 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, struct fbtype __user *f = (struct fbtype __user *) arg; if (put_user(type, &f->fb_type) || - __put_user(info->var.yres, &f->fb_height) || - __put_user(info->var.xres, &f->fb_width) || - __put_user(fb_depth, &f->fb_depth) || - __put_user(0, &f->fb_cmsize) || - __put_user(fb_size, &f->fb_cmsize)) + put_user(info->var.yres, &f->fb_height) || + put_user(info->var.xres, &f->fb_width) || + put_user(fb_depth, &f->fb_depth) || + put_user(0, &f->fb_cmsize) || + put_user(fb_size, &f->fb_cmsize)) return -EFAULT; return 0; } @@ -125,10 +125,10 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, unsigned int index, count, i; if (get_user(index, &c->index) || - __get_user(count, &c->count) || - __get_user(ured, &c->red) || - __get_user(ugreen, &c->green) || - __get_user(ublue, &c->blue)) + get_user(count, &c->count) || + get_user(ured, &c->red) || + get_user(ugreen, &c->green) || + get_user(ublue, &c->blue)) return -EFAULT; cmap.len = 1; @@ -165,13 +165,13 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, u8 red, green, blue; if (get_user(index, &c->index) || - __get_user(count, &c->count) || - __get_user(ured, &c->red) || - __get_user(ugreen, &c->green) || - __get_user(ublue, &c->blue)) + get_user(count, &c->count) || + get_user(ured, &c->red) || + get_user(ugreen, &c->green) || + get_user(ublue, &c->blue)) return -EFAULT; - if (index + count > cmap->len) + if (index > cmap->len || count > cmap->len - index) return -EINVAL; for (i = 0; i < count; i++) { diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c index 27a2b72e50e8..a8fb41f1a258 100644 --- a/drivers/video/fbdev/sis/init301.c +++ b/drivers/video/fbdev/sis/init301.c @@ -848,9 +848,7 @@ SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime) SiS_DDC2Delay(SiS_Pr, 0x4000); } - } else if((SiS_Pr->SiS_IF_DEF_LVDS == 1) /* || - (SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) || - (SiS_Pr->SiS_CustomT == CUT_CLEVO1400) */ ) { /* 315 series, LVDS; Special */ + } else if (SiS_Pr->SiS_IF_DEF_LVDS == 1) { /* 315 series, LVDS; Special */ if(SiS_Pr->SiS_IF_DEF_CH70xx == 0) { PanelID = SiS_GetReg(SiS_Pr->SiS_P3d4,0x36); diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c index 6439231f2db2..4061a20cfe24 100644 --- a/drivers/video/fbdev/ssd1307fb.c +++ b/drivers/video/fbdev/ssd1307fb.c @@ -667,9 +667,9 @@ static int ssd1307fb_probe(struct i2c_client *client, if (par->reset) { /* Reset the screen */ - gpiod_set_value(par->reset, 0); + gpiod_set_value_cansleep(par->reset, 0); udelay(4); - gpiod_set_value(par->reset, 1); + gpiod_set_value_cansleep(par->reset, 1); udelay(4); } diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index afbd6101c78e..070026a7e55a 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -916,8 +916,6 @@ static int dlfb_ops_open(struct fb_info *info, int user) dlfb->fb_count++; - kref_get(&dlfb->kref); - if (fb_defio && (info->fbdefio == NULL)) { /* enable defio at last moment if not disabled by client */ @@ -940,14 +938,17 @@ static int dlfb_ops_open(struct fb_info *info, int user) return 0; } -/* - * Called when all client interfaces to start transactions have been disabled, - * and all references to our device instance (dlfb_data) are 
released. - * Every transaction must have a reference, so we know are fully spun down - */ -static void dlfb_free(struct kref *kref) +static void dlfb_ops_destroy(struct fb_info *info) { - struct dlfb_data *dlfb = container_of(kref, struct dlfb_data, kref); + struct dlfb_data *dlfb = info->par; + + if (info->cmap.len != 0) + fb_dealloc_cmap(&info->cmap); + if (info->monspecs.modedb) + fb_destroy_modedb(info->monspecs.modedb); + vfree(info->screen_base); + + fb_destroy_modelist(&info->modelist); while (!list_empty(&dlfb->deferred_free)) { struct dlfb_deferred_free *d = list_entry(dlfb->deferred_free.next, struct dlfb_deferred_free, list); @@ -957,40 +958,13 @@ static void dlfb_free(struct kref *kref) } vfree(dlfb->backing_buffer); kfree(dlfb->edid); + usb_put_dev(dlfb->udev); kfree(dlfb); -} - -static void dlfb_free_framebuffer(struct dlfb_data *dlfb) -{ - struct fb_info *info = dlfb->info; - - if (info) { - unregister_framebuffer(info); - - if (info->cmap.len != 0) - fb_dealloc_cmap(&info->cmap); - if (info->monspecs.modedb) - fb_destroy_modedb(info->monspecs.modedb); - vfree(info->screen_base); - - fb_destroy_modelist(&info->modelist); - - dlfb->info = NULL; - - /* Assume info structure is freed after this point */ - framebuffer_release(info); - } - /* ref taken in probe() as part of registering framebfufer */ - kref_put(&dlfb->kref, dlfb_free); + /* Assume info structure is freed after this point */ + framebuffer_release(info); } -static void dlfb_free_framebuffer_work(struct work_struct *work) -{ - struct dlfb_data *dlfb = container_of(work, struct dlfb_data, - free_framebuffer_work.work); - dlfb_free_framebuffer(dlfb); -} /* * Assumes caller is holding info->lock mutex (for open and release at least) */ @@ -1000,10 +974,6 @@ static int dlfb_ops_release(struct fb_info *info, int user) dlfb->fb_count--; - /* We can't free fb_info here - fbmem will touch it when we return */ - if (dlfb->virtualized && (dlfb->fb_count == 0)) - schedule_delayed_work(&dlfb->free_framebuffer_work, HZ); - if ((dlfb->fb_count == 0) && (info->fbdefio)) { fb_deferred_io_cleanup(info); kfree(info->fbdefio); @@ -1013,8 +983,6 @@ static int dlfb_ops_release(struct fb_info *info, int user) dev_dbg(info->dev, "release, user=%d count=%d\n", user, dlfb->fb_count); - kref_put(&dlfb->kref, dlfb_free); - return 0; } @@ -1172,6 +1140,7 @@ static struct fb_ops dlfb_ops = { .fb_blank = dlfb_ops_blank, .fb_check_var = dlfb_ops_check_var, .fb_set_par = dlfb_ops_set_par, + .fb_destroy = dlfb_ops_destroy, }; @@ -1615,12 +1584,13 @@ success: return true; } -static void dlfb_init_framebuffer_work(struct work_struct *work); - static int dlfb_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { + int i; + const struct device_attribute *attr; struct dlfb_data *dlfb; + struct fb_info *info; int retval = -ENOMEM; struct usb_device *usbdev = interface_to_usbdev(intf); @@ -1631,10 +1601,9 @@ static int dlfb_usb_probe(struct usb_interface *intf, goto error; } - kref_init(&dlfb->kref); /* matching kref_put in usb .disconnect fn */ INIT_LIST_HEAD(&dlfb->deferred_free); - dlfb->udev = usbdev; + dlfb->udev = usb_get_dev(usbdev); usb_set_intfdata(intf, dlfb); dev_dbg(&intf->dev, "console enable=%d\n", console); @@ -1657,42 +1626,6 @@ static int dlfb_usb_probe(struct usb_interface *intf, } - if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) { - retval = -ENOMEM; - dev_err(&intf->dev, "unable to allocate urb list\n"); - goto error; - } - - kref_get(&dlfb->kref); /* matching kref_put in free_framebuffer_work */ - - 
/* We don't register a new USB class. Our client interface is dlfbev */ - - /* Workitem keep things fast & simple during USB enumeration */ - INIT_DELAYED_WORK(&dlfb->init_framebuffer_work, - dlfb_init_framebuffer_work); - schedule_delayed_work(&dlfb->init_framebuffer_work, 0); - - return 0; - -error: - if (dlfb) { - - kref_put(&dlfb->kref, dlfb_free); /* last ref from kref_init */ - - /* dev has been deallocated. Do not dereference */ - } - - return retval; -} - -static void dlfb_init_framebuffer_work(struct work_struct *work) -{ - int i, retval; - struct fb_info *info; - const struct device_attribute *attr; - struct dlfb_data *dlfb = container_of(work, struct dlfb_data, - init_framebuffer_work.work); - /* allocates framebuffer driver structure, not framebuffer memory */ info = framebuffer_alloc(0, &dlfb->udev->dev); if (!info) { @@ -1706,17 +1639,22 @@ static void dlfb_init_framebuffer_work(struct work_struct *work) dlfb->ops = dlfb_ops; info->fbops = &dlfb->ops; + INIT_LIST_HEAD(&info->modelist); + + if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) { + retval = -ENOMEM; + dev_err(&intf->dev, "unable to allocate urb list\n"); + goto error; + } + + /* We don't register a new USB class. Our client interface is dlfbev */ + retval = fb_alloc_cmap(&info->cmap, 256, 0); if (retval < 0) { dev_err(info->device, "cmap allocation failed: %d\n", retval); goto error; } - INIT_DELAYED_WORK(&dlfb->free_framebuffer_work, - dlfb_free_framebuffer_work); - - INIT_LIST_HEAD(&info->modelist); - retval = dlfb_setup_modes(dlfb, info, NULL, 0); if (retval != 0) { dev_err(info->device, @@ -1760,10 +1698,16 @@ static void dlfb_init_framebuffer_work(struct work_struct *work) dev_name(info->dev), info->var.xres, info->var.yres, ((dlfb->backing_buffer) ? info->fix.smem_len * 2 : info->fix.smem_len) >> 10); - return; + return 0; error: - dlfb_free_framebuffer(dlfb); + if (dlfb->info) { + dlfb_ops_destroy(dlfb->info); + } else if (dlfb) { + usb_put_dev(dlfb->udev); + kfree(dlfb); + } + return retval; } static void dlfb_usb_disconnect(struct usb_interface *intf) @@ -1791,20 +1735,9 @@ static void dlfb_usb_disconnect(struct usb_interface *intf) for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) device_remove_file(info->dev, &fb_device_attrs[i]); device_remove_bin_file(info->dev, &edid_attr); - unlink_framebuffer(info); } - usb_set_intfdata(intf, NULL); - dlfb->udev = NULL; - - /* if clients still have us open, will be freed on last close */ - if (dlfb->fb_count == 0) - schedule_delayed_work(&dlfb->free_framebuffer_work, 0); - - /* release reference taken by kref_init in probe() */ - kref_put(&dlfb->kref, dlfb_free); - - /* consider dlfb_data freed */ + unregister_framebuffer(info); } static struct usb_driver dlfb_driver = { diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c index 5244e93ceafc..c2e7aa103fa5 100644 --- a/drivers/video/of_display_timing.c +++ b/drivers/video/of_display_timing.c @@ -170,7 +170,7 @@ struct display_timings *of_get_display_timings(const struct device_node *np) goto entryfail; } - pr_debug("%pOF: using %s as default timing\n", np, entry->name); + pr_debug("%pOF: using %pOFn as default timing\n", np, entry); native_mode = entry; diff --git a/drivers/video/vgastate.c b/drivers/video/vgastate.c index 548c751d2415..122fb3c3ec9d 100644 --- a/drivers/video/vgastate.c +++ b/drivers/video/vgastate.c @@ -455,7 +455,7 @@ int save_vga(struct vgastate *state) return 0; } -int restore_vga (struct vgastate *state) +int restore_vga(struct vgastate *state) { if 
(state->vidstate == NULL) return 1; diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index d1c1f6283729..728ecd1eea30 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -41,13 +41,34 @@ #define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256 #define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80 +#define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \ + __GFP_NOMEMALLOC) +/* The order of free page blocks to report to host */ +#define VIRTIO_BALLOON_FREE_PAGE_ORDER (MAX_ORDER - 1) +/* The size of a free page block in bytes */ +#define VIRTIO_BALLOON_FREE_PAGE_SIZE \ + (1 << (VIRTIO_BALLOON_FREE_PAGE_ORDER + PAGE_SHIFT)) + #ifdef CONFIG_BALLOON_COMPACTION static struct vfsmount *balloon_mnt; #endif +enum virtio_balloon_vq { + VIRTIO_BALLOON_VQ_INFLATE, + VIRTIO_BALLOON_VQ_DEFLATE, + VIRTIO_BALLOON_VQ_STATS, + VIRTIO_BALLOON_VQ_FREE_PAGE, + VIRTIO_BALLOON_VQ_MAX +}; + struct virtio_balloon { struct virtio_device *vdev; - struct virtqueue *inflate_vq, *deflate_vq, *stats_vq; + struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq; + + /* Balloon's own wq for cpu-intensive work items */ + struct workqueue_struct *balloon_wq; + /* The free page reporting work item submitted to the balloon wq */ + struct work_struct report_free_page_work; /* The balloon servicing is delegated to a freezable workqueue. */ struct work_struct update_balloon_stats_work; @@ -57,6 +78,18 @@ struct virtio_balloon { spinlock_t stop_update_lock; bool stop_update; + /* The list of allocated free pages, waiting to be given back to mm */ + struct list_head free_page_list; + spinlock_t free_page_list_lock; + /* The number of free page blocks on the above list */ + unsigned long num_free_page_blocks; + /* The cmd id received from host */ + u32 cmd_id_received; + /* The cmd id that is actively in use */ + __virtio32 cmd_id_active; + /* Buffer to store the stop sign */ + __virtio32 cmd_id_stop; + /* Waiting for host to ack the pages we released. */ wait_queue_head_t acked; @@ -320,17 +353,6 @@ static void stats_handle_request(struct virtio_balloon *vb) virtqueue_kick(vq); } -static void virtballoon_changed(struct virtio_device *vdev) -{ - struct virtio_balloon *vb = vdev->priv; - unsigned long flags; - - spin_lock_irqsave(&vb->stop_update_lock, flags); - if (!vb->stop_update) - queue_work(system_freezable_wq, &vb->update_balloon_size_work); - spin_unlock_irqrestore(&vb->stop_update_lock, flags); -} - static inline s64 towards_target(struct virtio_balloon *vb) { s64 target; @@ -347,6 +369,60 @@ static inline s64 towards_target(struct virtio_balloon *vb) return target - vb->num_pages; } +/* Gives back @num_to_return blocks of free pages to mm. 
*/ +static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb, + unsigned long num_to_return) +{ + struct page *page; + unsigned long num_returned; + + spin_lock_irq(&vb->free_page_list_lock); + for (num_returned = 0; num_returned < num_to_return; num_returned++) { + page = balloon_page_pop(&vb->free_page_list); + if (!page) + break; + free_pages((unsigned long)page_address(page), + VIRTIO_BALLOON_FREE_PAGE_ORDER); + } + vb->num_free_page_blocks -= num_returned; + spin_unlock_irq(&vb->free_page_list_lock); + + return num_returned; +} + +static void virtballoon_changed(struct virtio_device *vdev) +{ + struct virtio_balloon *vb = vdev->priv; + unsigned long flags; + s64 diff = towards_target(vb); + + if (diff) { + spin_lock_irqsave(&vb->stop_update_lock, flags); + if (!vb->stop_update) + queue_work(system_freezable_wq, + &vb->update_balloon_size_work); + spin_unlock_irqrestore(&vb->stop_update_lock, flags); + } + + if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { + virtio_cread(vdev, struct virtio_balloon_config, + free_page_report_cmd_id, &vb->cmd_id_received); + if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) { + /* Pass ULONG_MAX to give back all the free pages */ + return_free_pages_to_mm(vb, ULONG_MAX); + } else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP && + vb->cmd_id_received != + virtio32_to_cpu(vdev, vb->cmd_id_active)) { + spin_lock_irqsave(&vb->stop_update_lock, flags); + if (!vb->stop_update) { + queue_work(vb->balloon_wq, + &vb->report_free_page_work); + } + spin_unlock_irqrestore(&vb->stop_update_lock, flags); + } + } +} + static void update_balloon_size(struct virtio_balloon *vb) { u32 actual = vb->num_pages; @@ -389,26 +465,44 @@ static void update_balloon_size_func(struct work_struct *work) static int init_vqs(struct virtio_balloon *vb) { - struct virtqueue *vqs[3]; - vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request }; - static const char * const names[] = { "inflate", "deflate", "stats" }; - int err, nvqs; + struct virtqueue *vqs[VIRTIO_BALLOON_VQ_MAX]; + vq_callback_t *callbacks[VIRTIO_BALLOON_VQ_MAX]; + const char *names[VIRTIO_BALLOON_VQ_MAX]; + int err; /* - * We expect two virtqueues: inflate and deflate, and - * optionally stat. + * Inflateq and deflateq are used unconditionally. The names[] + * will be NULL if the related feature is not enabled, which will + * cause no allocation for the corresponding virtqueue in find_vqs. */ - nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 
3 : 2; - err = virtio_find_vqs(vb->vdev, nvqs, vqs, callbacks, names, NULL); + callbacks[VIRTIO_BALLOON_VQ_INFLATE] = balloon_ack; + names[VIRTIO_BALLOON_VQ_INFLATE] = "inflate"; + callbacks[VIRTIO_BALLOON_VQ_DEFLATE] = balloon_ack; + names[VIRTIO_BALLOON_VQ_DEFLATE] = "deflate"; + names[VIRTIO_BALLOON_VQ_STATS] = NULL; + names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL; + + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { + names[VIRTIO_BALLOON_VQ_STATS] = "stats"; + callbacks[VIRTIO_BALLOON_VQ_STATS] = stats_request; + } + + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { + names[VIRTIO_BALLOON_VQ_FREE_PAGE] = "free_page_vq"; + callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL; + } + + err = vb->vdev->config->find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX, + vqs, callbacks, names, NULL, NULL); if (err) return err; - vb->inflate_vq = vqs[0]; - vb->deflate_vq = vqs[1]; + vb->inflate_vq = vqs[VIRTIO_BALLOON_VQ_INFLATE]; + vb->deflate_vq = vqs[VIRTIO_BALLOON_VQ_DEFLATE]; if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { struct scatterlist sg; unsigned int num_stats; - vb->stats_vq = vqs[2]; + vb->stats_vq = vqs[VIRTIO_BALLOON_VQ_STATS]; /* * Prime this virtqueue with one buffer so the hypervisor can @@ -426,9 +520,145 @@ static int init_vqs(struct virtio_balloon *vb) } virtqueue_kick(vb->stats_vq); } + + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) + vb->free_page_vq = vqs[VIRTIO_BALLOON_VQ_FREE_PAGE]; + + return 0; +} + +static int send_cmd_id_start(struct virtio_balloon *vb) +{ + struct scatterlist sg; + struct virtqueue *vq = vb->free_page_vq; + int err, unused; + + /* Detach all the used buffers from the vq */ + while (virtqueue_get_buf(vq, &unused)) + ; + + vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received); + sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active)); + err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL); + if (!err) + virtqueue_kick(vq); + return err; +} + +static int send_cmd_id_stop(struct virtio_balloon *vb) +{ + struct scatterlist sg; + struct virtqueue *vq = vb->free_page_vq; + int err, unused; + + /* Detach all the used buffers from the vq */ + while (virtqueue_get_buf(vq, &unused)) + ; + + sg_init_one(&sg, &vb->cmd_id_stop, sizeof(vb->cmd_id_stop)); + err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_stop, GFP_KERNEL); + if (!err) + virtqueue_kick(vq); + return err; +} + +static int get_free_page_and_send(struct virtio_balloon *vb) +{ + struct virtqueue *vq = vb->free_page_vq; + struct page *page; + struct scatterlist sg; + int err, unused; + void *p; + + /* Detach all the used buffers from the vq */ + while (virtqueue_get_buf(vq, &unused)) + ; + + page = alloc_pages(VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG, + VIRTIO_BALLOON_FREE_PAGE_ORDER); + /* + * When the allocation returns NULL, it indicates that we have got all + * the possible free pages, so return -EINTR to stop. + */ + if (!page) + return -EINTR; + + p = page_address(page); + sg_init_one(&sg, p, VIRTIO_BALLOON_FREE_PAGE_SIZE); + /* There is always 1 entry reserved for the cmd id to use. 
*/ + if (vq->num_free > 1) { + err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL); + if (unlikely(err)) { + free_pages((unsigned long)p, + VIRTIO_BALLOON_FREE_PAGE_ORDER); + return err; + } + virtqueue_kick(vq); + spin_lock_irq(&vb->free_page_list_lock); + balloon_page_push(&vb->free_page_list, page); + vb->num_free_page_blocks++; + spin_unlock_irq(&vb->free_page_list_lock); + } else { + /* + * The vq has no available entry to add this page block, so + * just free it. + */ + free_pages((unsigned long)p, VIRTIO_BALLOON_FREE_PAGE_ORDER); + } + + return 0; +} + +static int send_free_pages(struct virtio_balloon *vb) +{ + int err; + u32 cmd_id_active; + + while (1) { + /* + * If a stop id or a new cmd id was just received from host, + * stop the reporting. + */ + cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active); + if (cmd_id_active != vb->cmd_id_received) + break; + + /* + * The free page blocks are allocated and sent to host one by + * one. + */ + err = get_free_page_and_send(vb); + if (err == -EINTR) + break; + else if (unlikely(err)) + return err; + } + return 0; } +static void report_free_page_func(struct work_struct *work) +{ + int err; + struct virtio_balloon *vb = container_of(work, struct virtio_balloon, + report_free_page_work); + struct device *dev = &vb->vdev->dev; + + /* Start by sending the received cmd id to host with an outbuf. */ + err = send_cmd_id_start(vb); + if (unlikely(err)) + dev_err(dev, "Failed to send a start id, err = %d\n", err); + + err = send_free_pages(vb); + if (unlikely(err)) + dev_err(dev, "Failed to send a free page, err = %d\n", err); + + /* End by sending a stop id to host with an outbuf. */ + err = send_cmd_id_stop(vb); + if (unlikely(err)) + dev_err(dev, "Failed to send a stop id, err = %d\n", err); +} + #ifdef CONFIG_BALLOON_COMPACTION /* * virtballoon_migratepage - perform the balloon page migration on behalf of @@ -512,14 +742,23 @@ static struct file_system_type balloon_fs = { #endif /* CONFIG_BALLOON_COMPACTION */ -static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker, - struct shrink_control *sc) +static unsigned long shrink_free_pages(struct virtio_balloon *vb, + unsigned long pages_to_free) { - unsigned long pages_to_free, pages_freed = 0; - struct virtio_balloon *vb = container_of(shrinker, - struct virtio_balloon, shrinker); + unsigned long blocks_to_free, blocks_freed; - pages_to_free = sc->nr_to_scan * VIRTIO_BALLOON_PAGES_PER_PAGE; + pages_to_free = round_up(pages_to_free, + 1 << VIRTIO_BALLOON_FREE_PAGE_ORDER); + blocks_to_free = pages_to_free >> VIRTIO_BALLOON_FREE_PAGE_ORDER; + blocks_freed = return_free_pages_to_mm(vb, blocks_to_free); + + return blocks_freed << VIRTIO_BALLOON_FREE_PAGE_ORDER; +} + +static unsigned long shrink_balloon_pages(struct virtio_balloon *vb, + unsigned long pages_to_free) +{ + unsigned long pages_freed = 0; /* * One invocation of leak_balloon can deflate at most @@ -527,12 +766,33 @@ static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker, * multiple times to deflate pages till reaching pages_to_free. 
*/ while (vb->num_pages && pages_to_free) { + pages_freed += leak_balloon(vb, pages_to_free) / + VIRTIO_BALLOON_PAGES_PER_PAGE; pages_to_free -= pages_freed; - pages_freed += leak_balloon(vb, pages_to_free); } update_balloon_size(vb); - return pages_freed / VIRTIO_BALLOON_PAGES_PER_PAGE; + return pages_freed; +} + +static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker, + struct shrink_control *sc) +{ + unsigned long pages_to_free, pages_freed = 0; + struct virtio_balloon *vb = container_of(shrinker, + struct virtio_balloon, shrinker); + + pages_to_free = sc->nr_to_scan * VIRTIO_BALLOON_PAGES_PER_PAGE; + + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) + pages_freed = shrink_free_pages(vb, pages_to_free); + + if (pages_freed >= pages_to_free) + return pages_freed; + + pages_freed += shrink_balloon_pages(vb, pages_to_free - pages_freed); + + return pages_freed; } static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker, @@ -540,8 +800,12 @@ static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker, { struct virtio_balloon *vb = container_of(shrinker, struct virtio_balloon, shrinker); + unsigned long count; - return vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE; + count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE; + count += vb->num_free_page_blocks >> VIRTIO_BALLOON_FREE_PAGE_ORDER; + + return count; } static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb) @@ -561,6 +825,7 @@ static int virtio_balloon_register_shrinker(struct virtio_balloon *vb) static int virtballoon_probe(struct virtio_device *vdev) { struct virtio_balloon *vb; + __u32 poison_val; int err; if (!vdev->config->get) { @@ -604,6 +869,36 @@ static int virtballoon_probe(struct virtio_device *vdev) } vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops; #endif + if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { + /* + * There is always one entry reserved for cmd id, so the ring + * size needs to be at least two to report free page hints. + */ + if (virtqueue_get_vring_size(vb->free_page_vq) < 2) { + err = -ENOSPC; + goto out_del_vqs; + } + vb->balloon_wq = alloc_workqueue("balloon-wq", + WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0); + if (!vb->balloon_wq) { + err = -ENOMEM; + goto out_del_vqs; + } + INIT_WORK(&vb->report_free_page_work, report_free_page_func); + vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP; + vb->cmd_id_active = cpu_to_virtio32(vb->vdev, + VIRTIO_BALLOON_CMD_ID_STOP); + vb->cmd_id_stop = cpu_to_virtio32(vb->vdev, + VIRTIO_BALLOON_CMD_ID_STOP); + vb->num_free_page_blocks = 0; + spin_lock_init(&vb->free_page_list_lock); + INIT_LIST_HEAD(&vb->free_page_list); + if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) { + memset(&poison_val, PAGE_POISON, sizeof(poison_val)); + virtio_cwrite(vb->vdev, struct virtio_balloon_config, + poison_val, &poison_val); + } + } /* * We continue to use VIRTIO_BALLOON_F_DEFLATE_ON_OOM to decide if a * shrinker needs to be registered to relieve memory pressure. 
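[Editorial aside, not part of the patch] The virtio_balloon hunks above add a free-page-hinting handshake: the guest reads a command id from the device config, echoes it on the free_page_vq, streams MAX_ORDER-1 sized blocks as hints until the host changes the id or the allocation fails, and finally sends a stop id. The standalone C program below is only an illustrative model of that control flow under stated assumptions; host_cmd_id(), fake_alloc_block() and the three-block budget are invented stand-ins for the config-space read, alloc_pages() and real memory pressure, and printf() stands in for queueing buffers on the virtqueue.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define MODEL_CMD_ID_STOP   0U   /* stands in for VIRTIO_BALLOON_CMD_ID_STOP */
#define MODEL_PAGE_ORDER    9    /* stands in for VIRTIO_BALLOON_FREE_PAGE_ORDER */
#define MODEL_PAGE_SHIFT    12

static uint32_t host_cmd = 42;   /* pretend free_page_report_cmd_id from config space */

static uint32_t host_cmd_id(void)
{
	/* The driver reads this with virtio_cread() in virtballoon_changed(). */
	return host_cmd;
}

static void *fake_alloc_block(int *budget)
{
	/* In the driver, alloc_pages(__GFP_NORETRY | __GFP_NOWARN |
	 * __GFP_NOMEMALLOC, order) returning NULL ends the scan (-EINTR). */
	if (*budget == 0)
		return NULL;
	(*budget)--;
	return malloc((size_t)1 << (MODEL_PAGE_ORDER + MODEL_PAGE_SHIFT));
}

static void report_free_pages(uint32_t cmd_id_active)
{
	int budget = 3;          /* pretend only three free blocks exist */
	void *block;

	/* send_cmd_id_start(): echo the host's id on the free_page_vq */
	printf("start, cmd id %u\n", (unsigned)cmd_id_active);

	/* send_free_pages(): keep hinting while the host's id is unchanged */
	while (host_cmd_id() == cmd_id_active &&
	       cmd_id_active != MODEL_CMD_ID_STOP) {
		block = fake_alloc_block(&budget);
		if (!block)
			break;
		printf("hint block at %p\n", block);
		free(block);     /* the driver instead parks it on free_page_list */
	}

	/* send_cmd_id_stop(): terminate the report */
	printf("stop\n");
}

int main(void)
{
	report_free_pages(host_cmd_id());
	return 0;
}

In the real driver the start/stop ids travel as outbufs and the hinted blocks as inbufs on the same virtqueue, which is why virtballoon_probe() above rejects a free_page_vq ring smaller than two entries.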
@@ -611,7 +906,7 @@ static int virtballoon_probe(struct virtio_device *vdev) if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) { err = virtio_balloon_register_shrinker(vb); if (err) - goto out_del_vqs; + goto out_del_balloon_wq; } virtio_device_ready(vdev); @@ -619,6 +914,9 @@ static int virtballoon_probe(struct virtio_device *vdev) virtballoon_changed(vdev); return 0; +out_del_balloon_wq: + if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) + destroy_workqueue(vb->balloon_wq); out_del_vqs: vdev->config->del_vqs(vdev); out_free_vb: @@ -652,6 +950,11 @@ static void virtballoon_remove(struct virtio_device *vdev) cancel_work_sync(&vb->update_balloon_size_work); cancel_work_sync(&vb->update_balloon_stats_work); + if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { + cancel_work_sync(&vb->report_free_page_work); + destroy_workqueue(vb->balloon_wq); + } + remove_common(vb); #ifdef CONFIG_BALLOON_COMPACTION if (vb->vb_dev_info.inode) @@ -695,6 +998,9 @@ static int virtballoon_restore(struct virtio_device *vdev) static int virtballoon_validate(struct virtio_device *vdev) { + if (!page_poisoning_enabled()) + __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON); + __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM); return 0; } @@ -703,6 +1009,8 @@ static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST, VIRTIO_BALLOON_F_STATS_VQ, VIRTIO_BALLOON_F_DEFLATE_ON_OOM, + VIRTIO_BALLOON_F_FREE_PAGE_HINT, + VIRTIO_BALLOON_F_PAGE_POISON, }; static struct virtio_driver virtio_balloon_driver = { diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index b1092fbefa63..2e5d845b5091 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c @@ -137,13 +137,13 @@ static void pvcalls_conn_back_read(void *opaque) if (masked_prod < masked_cons) { vec[0].iov_base = data->in + masked_prod; vec[0].iov_len = wanted; - iov_iter_kvec(&msg.msg_iter, ITER_KVEC|WRITE, vec, 1, wanted); + iov_iter_kvec(&msg.msg_iter, WRITE, vec, 1, wanted); } else { vec[0].iov_base = data->in + masked_prod; vec[0].iov_len = array_size - masked_prod; vec[1].iov_base = data->in; vec[1].iov_len = wanted - vec[0].iov_len; - iov_iter_kvec(&msg.msg_iter, ITER_KVEC|WRITE, vec, 2, wanted); + iov_iter_kvec(&msg.msg_iter, WRITE, vec, 2, wanted); } atomic_set(&map->read, 0); @@ -195,13 +195,13 @@ static void pvcalls_conn_back_write(struct sock_mapping *map) if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) { vec[0].iov_base = data->out + pvcalls_mask(cons, array_size); vec[0].iov_len = size; - iov_iter_kvec(&msg.msg_iter, ITER_KVEC|READ, vec, 1, size); + iov_iter_kvec(&msg.msg_iter, READ, vec, 1, size); } else { vec[0].iov_base = data->out + pvcalls_mask(cons, array_size); vec[0].iov_len = array_size - pvcalls_mask(cons, array_size); vec[1].iov_base = data->out; vec[1].iov_len = size - vec[0].iov_len; - iov_iter_kvec(&msg.msg_iter, ITER_KVEC|READ, vec, 2, size); + iov_iter_kvec(&msg.msg_iter, READ, vec, 2, size); } atomic_set(&map->write, 0); diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index e1cbdfdb7c68..0bcbcc20f769 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -65,7 +65,7 @@ static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page) if (retval == 0) return retval; - iov_iter_bvec(&to, ITER_BVEC | READ, &bvec, 1, PAGE_SIZE); + iov_iter_bvec(&to, READ, &bvec, 1, PAGE_SIZE); retval = p9_client_read(fid, page_offset(page), &to, &err); if (err) { @@ -175,7 +175,7 @@ static int v9fs_vfs_writepage_locked(struct page *page) bvec.bv_page 
= page; bvec.bv_offset = 0; bvec.bv_len = len; - iov_iter_bvec(&from, ITER_BVEC | WRITE, &bvec, 1, len); + iov_iter_bvec(&from, WRITE, &bvec, 1, len); /* We should have writeback_fid always set */ BUG_ON(!v9inode->writeback_fid); diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c index cb6c4031af55..00745147329d 100644 --- a/fs/9p/vfs_dir.c +++ b/fs/9p/vfs_dir.c @@ -123,7 +123,7 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx) if (rdir->tail == rdir->head) { struct iov_iter to; int n; - iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buflen); + iov_iter_kvec(&to, READ, &kvec, 1, buflen); n = p9_client_read(file->private_data, ctx->pos, &to, &err); if (err) diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c index 352abc39e891..ac8ff8ca4c11 100644 --- a/fs/9p/xattr.c +++ b/fs/9p/xattr.c @@ -32,7 +32,7 @@ ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name, struct iov_iter to; int err; - iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buffer_size); + iov_iter_kvec(&to, READ, &kvec, 1, buffer_size); attr_fid = p9_client_xattrwalk(fid, name, &attr_size); if (IS_ERR(attr_fid)) { @@ -107,7 +107,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name, struct iov_iter from; int retval, err; - iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len); + iov_iter_kvec(&from, WRITE, &kvec, 1, value_len); p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n", name, value_len, flags); diff --git a/fs/afs/Kconfig b/fs/afs/Kconfig index ebba3b18e5da..701aaa9b1899 100644 --- a/fs/afs/Kconfig +++ b/fs/afs/Kconfig @@ -27,3 +27,15 @@ config AFS_FSCACHE help Say Y here if you want AFS data to be cached locally on disk through the generic filesystem cache manager + +config AFS_DEBUG_CURSOR + bool "AFS server cursor debugging" + depends on AFS_FS + help + Say Y here to cause the contents of a server cursor to be dumped to + the dmesg log if the server rotation algorithm fails to successfully + contact a server. + + See <file:Documentation/filesystems/afs.txt> for more information. + + If unsure, say N. diff --git a/fs/afs/Makefile b/fs/afs/Makefile index 546874057bd3..0738e2bf5193 100644 --- a/fs/afs/Makefile +++ b/fs/afs/Makefile @@ -17,6 +17,7 @@ kafs-y := \ file.o \ flock.o \ fsclient.o \ + fs_probe.o \ inode.o \ main.o \ misc.o \ @@ -29,9 +30,13 @@ kafs-y := \ super.o \ netdevices.o \ vlclient.o \ + vl_list.o \ + vl_probe.o \ + vl_rotate.o \ volume.o \ write.o \ - xattr.o + xattr.o \ + yfsclient.o kafs-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_AFS_FS) := kafs.o diff --git a/fs/afs/addr_list.c b/fs/afs/addr_list.c index 55a756c60746..967db336d11a 100644 --- a/fs/afs/addr_list.c +++ b/fs/afs/addr_list.c @@ -64,19 +64,25 @@ struct afs_addr_list *afs_alloc_addrlist(unsigned int nr, /* * Parse a text string consisting of delimited addresses. 
*/ -struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len, - char delim, - unsigned short service, - unsigned short port) +struct afs_vlserver_list *afs_parse_text_addrs(struct afs_net *net, + const char *text, size_t len, + char delim, + unsigned short service, + unsigned short port) { + struct afs_vlserver_list *vllist; struct afs_addr_list *alist; const char *p, *end = text + len; + const char *problem; unsigned int nr = 0; + int ret = -ENOMEM; _enter("%*.*s,%c", (int)len, (int)len, text, delim); - if (!len) + if (!len) { + _leave(" = -EDESTADDRREQ [empty]"); return ERR_PTR(-EDESTADDRREQ); + } if (delim == ':' && (memchr(text, ',', len) || !memchr(text, '.', len))) delim = ','; @@ -84,18 +90,24 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len, /* Count the addresses */ p = text; do { - if (!*p) - return ERR_PTR(-EINVAL); + if (!*p) { + problem = "nul"; + goto inval; + } if (*p == delim) continue; nr++; if (*p == '[') { p++; - if (p == end) - return ERR_PTR(-EINVAL); + if (p == end) { + problem = "brace1"; + goto inval; + } p = memchr(p, ']', end - p); - if (!p) - return ERR_PTR(-EINVAL); + if (!p) { + problem = "brace2"; + goto inval; + } p++; if (p >= end) break; @@ -109,10 +121,19 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len, _debug("%u/%u addresses", nr, AFS_MAX_ADDRESSES); - alist = afs_alloc_addrlist(nr, service, port); - if (!alist) + vllist = afs_alloc_vlserver_list(1); + if (!vllist) return ERR_PTR(-ENOMEM); + vllist->nr_servers = 1; + vllist->servers[0].server = afs_alloc_vlserver("<dummy>", 7, AFS_VL_PORT); + if (!vllist->servers[0].server) + goto error_vl; + + alist = afs_alloc_addrlist(nr, service, AFS_VL_PORT); + if (!alist) + goto error; + /* Extract the addresses */ p = text; do { @@ -135,17 +156,21 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len, break; } - if (in4_pton(p, q - p, (u8 *)&x[0], -1, &stop)) + if (in4_pton(p, q - p, (u8 *)&x[0], -1, &stop)) { family = AF_INET; - else if (in6_pton(p, q - p, (u8 *)x, -1, &stop)) + } else if (in6_pton(p, q - p, (u8 *)x, -1, &stop)) { family = AF_INET6; - else + } else { + problem = "family"; goto bad_address; + } - if (stop != q) + p = q; + if (stop != p) { + problem = "nostop"; goto bad_address; + } - p = q; if (q < end && *q == ']') p++; @@ -154,18 +179,23 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len, /* Port number specification "+1234" */ xport = 0; p++; - if (p >= end || !isdigit(*p)) + if (p >= end || !isdigit(*p)) { + problem = "port"; goto bad_address; + } do { xport *= 10; xport += *p - '0'; - if (xport > 65535) + if (xport > 65535) { + problem = "pval"; goto bad_address; + } p++; } while (p < end && isdigit(*p)); } else if (*p == delim) { p++; } else { + problem = "weird"; goto bad_address; } } @@ -177,12 +207,23 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len, } while (p < end); + rcu_assign_pointer(vllist->servers[0].server->addresses, alist); _leave(" = [nr %u]", alist->nr_addrs); - return alist; + return vllist; -bad_address: - kfree(alist); +inval: + _leave(" = -EINVAL [%s %zu %*.*s]", + problem, p - text, (int)len, (int)len, text); return ERR_PTR(-EINVAL); +bad_address: + _leave(" = -EINVAL [%s %zu %*.*s]", + problem, p - text, (int)len, (int)len, text); + ret = -EINVAL; +error: + afs_put_addrlist(alist); +error_vl: + afs_put_vlserverlist(net, vllist); + return ERR_PTR(ret); } /* @@ -201,30 +242,34 @@ static int afs_cmp_addr_list(const struct afs_addr_list 
*a1, /* * Perform a DNS query for VL servers and build a up an address list. */ -struct afs_addr_list *afs_dns_query(struct afs_cell *cell, time64_t *_expiry) +struct afs_vlserver_list *afs_dns_query(struct afs_cell *cell, time64_t *_expiry) { - struct afs_addr_list *alist; - char *vllist = NULL; + struct afs_vlserver_list *vllist; + char *result = NULL; int ret; _enter("%s", cell->name); - ret = dns_query("afsdb", cell->name, cell->name_len, - "", &vllist, _expiry); - if (ret < 0) + ret = dns_query("afsdb", cell->name, cell->name_len, "srv=1", + &result, _expiry); + if (ret < 0) { + _leave(" = %d [dns]", ret); return ERR_PTR(ret); - - alist = afs_parse_text_addrs(vllist, strlen(vllist), ',', - VL_SERVICE, AFS_VL_PORT); - if (IS_ERR(alist)) { - kfree(vllist); - if (alist != ERR_PTR(-ENOMEM)) - pr_err("Failed to parse DNS data\n"); - return alist; } - kfree(vllist); - return alist; + if (*_expiry == 0) + *_expiry = ktime_get_real_seconds() + 60; + + if (ret > 1 && result[0] == 0) + vllist = afs_extract_vlserver_list(cell, result, ret); + else + vllist = afs_parse_text_addrs(cell->net, result, ret, ',', + VL_SERVICE, AFS_VL_PORT); + kfree(result); + if (IS_ERR(vllist) && vllist != ERR_PTR(-ENOMEM)) + pr_err("Failed to parse DNS data %ld\n", PTR_ERR(vllist)); + + return vllist; } /* @@ -258,6 +303,8 @@ void afs_merge_fs_addr4(struct afs_addr_list *alist, __be32 xdr, u16 port) sizeof(alist->addrs[0]) * (alist->nr_addrs - i)); srx = &alist->addrs[i]; + srx->srx_family = AF_RXRPC; + srx->transport_type = SOCK_DGRAM; srx->transport_len = sizeof(srx->transport.sin); srx->transport.sin.sin_family = AF_INET; srx->transport.sin.sin_port = htons(port); @@ -296,6 +343,8 @@ void afs_merge_fs_addr6(struct afs_addr_list *alist, __be32 *xdr, u16 port) sizeof(alist->addrs[0]) * (alist->nr_addrs - i)); srx = &alist->addrs[i]; + srx->srx_family = AF_RXRPC; + srx->transport_type = SOCK_DGRAM; srx->transport_len = sizeof(srx->transport.sin6); srx->transport.sin6.sin6_family = AF_INET6; srx->transport.sin6.sin6_port = htons(port); @@ -308,25 +357,33 @@ void afs_merge_fs_addr6(struct afs_addr_list *alist, __be32 *xdr, u16 port) */ bool afs_iterate_addresses(struct afs_addr_cursor *ac) { - _enter("%hu+%hd", ac->start, (short)ac->index); + unsigned long set, failed; + int index; if (!ac->alist) return false; - if (ac->begun) { - ac->index++; - if (ac->index == ac->alist->nr_addrs) - ac->index = 0; + set = ac->alist->responded; + failed = ac->alist->failed; + _enter("%lx-%lx-%lx,%d", set, failed, ac->tried, ac->index); - if (ac->index == ac->start) { - ac->error = -EDESTADDRREQ; - return false; - } - } + ac->nr_iterations++; + + set &= ~(failed | ac->tried); + + if (!set) + return false; - ac->begun = true; + index = READ_ONCE(ac->alist->preferred); + if (test_bit(index, &set)) + goto selected; + + index = __ffs(set); + +selected: + ac->index = index; + set_bit(index, &ac->tried); ac->responded = false; - ac->addr = &ac->alist->addrs[ac->index]; return true; } @@ -339,53 +396,13 @@ int afs_end_cursor(struct afs_addr_cursor *ac) alist = ac->alist; if (alist) { - if (ac->responded && ac->index != ac->start) - WRITE_ONCE(alist->index, ac->index); + if (ac->responded && + ac->index != alist->preferred && + test_bit(ac->alist->preferred, &ac->tried)) + WRITE_ONCE(alist->preferred, ac->index); afs_put_addrlist(alist); + ac->alist = NULL; } - ac->addr = NULL; - ac->alist = NULL; - ac->begun = false; return ac->error; } - -/* - * Set the address cursor for iterating over VL servers. 
- */ -int afs_set_vl_cursor(struct afs_addr_cursor *ac, struct afs_cell *cell) -{ - struct afs_addr_list *alist; - int ret; - - if (!rcu_access_pointer(cell->vl_addrs)) { - ret = wait_on_bit(&cell->flags, AFS_CELL_FL_NO_LOOKUP_YET, - TASK_INTERRUPTIBLE); - if (ret < 0) - return ret; - - if (!rcu_access_pointer(cell->vl_addrs) && - ktime_get_real_seconds() < cell->dns_expiry) - return cell->error; - } - - read_lock(&cell->vl_addrs_lock); - alist = rcu_dereference_protected(cell->vl_addrs, - lockdep_is_held(&cell->vl_addrs_lock)); - if (alist->nr_addrs > 0) - afs_get_addrlist(alist); - else - alist = NULL; - read_unlock(&cell->vl_addrs_lock); - - if (!alist) - return -EDESTADDRREQ; - - ac->alist = alist; - ac->addr = NULL; - ac->start = READ_ONCE(alist->index); - ac->index = ac->start; - ac->error = 0; - ac->begun = false; - return 0; -} diff --git a/fs/afs/afs.h b/fs/afs/afs.h index b4ff1f7ae4ab..d12ffb457e47 100644 --- a/fs/afs/afs.h +++ b/fs/afs/afs.h @@ -23,9 +23,9 @@ #define AFSPATHMAX 1024 /* Maximum length of a pathname plus NUL */ #define AFSOPAQUEMAX 1024 /* Maximum length of an opaque field */ -typedef unsigned afs_volid_t; -typedef unsigned afs_vnodeid_t; -typedef unsigned long long afs_dataversion_t; +typedef u64 afs_volid_t; +typedef u64 afs_vnodeid_t; +typedef u64 afs_dataversion_t; typedef enum { AFSVL_RWVOL, /* read/write volume */ @@ -52,8 +52,9 @@ typedef enum { */ struct afs_fid { afs_volid_t vid; /* volume ID */ - afs_vnodeid_t vnode; /* file index within volume */ - unsigned unique; /* unique ID number (file index version) */ + afs_vnodeid_t vnode; /* Lower 64-bits of file index within volume */ + u32 vnode_hi; /* Upper 32-bits of file index */ + u32 unique; /* unique ID number (file index version) */ }; /* @@ -67,14 +68,14 @@ typedef enum { } afs_callback_type_t; struct afs_callback { + time64_t expires_at; /* Time at which expires */ unsigned version; /* Callback version */ - unsigned expiry; /* Time at which expires */ afs_callback_type_t type; /* Type of callback */ }; struct afs_callback_break { struct afs_fid fid; /* File identifier */ - struct afs_callback cb; /* Callback details */ + //struct afs_callback cb; /* Callback details */ }; #define AFSCBMAX 50 /* maximum callbacks transferred per bulk op */ @@ -129,19 +130,18 @@ typedef u32 afs_access_t; struct afs_file_status { u64 size; /* file size */ afs_dataversion_t data_version; /* current data version */ - time_t mtime_client; /* last time client changed data */ - time_t mtime_server; /* last time server changed data */ - unsigned abort_code; /* Abort if bulk-fetching this failed */ - - afs_file_type_t type; /* file type */ - unsigned nlink; /* link count */ - u32 author; /* author ID */ - u32 owner; /* owner ID */ - u32 group; /* group ID */ + struct timespec64 mtime_client; /* Last time client changed data */ + struct timespec64 mtime_server; /* Last time server changed data */ + s64 author; /* author ID */ + s64 owner; /* owner ID */ + s64 group; /* group ID */ afs_access_t caller_access; /* access rights for authenticated caller */ afs_access_t anon_access; /* access rights for unauthenticated caller */ umode_t mode; /* UNIX mode */ + afs_file_type_t type; /* file type */ + u32 nlink; /* link count */ s32 lock_count; /* file lock count (0=UNLK -1=WRLCK +ve=#RDLCK */ + u32 abort_code; /* Abort if bulk-fetching this failed */ }; /* @@ -158,25 +158,27 @@ struct afs_file_status { * AFS volume synchronisation information */ struct afs_volsync { - time_t creation; /* volume creation time */ + time64_t creation; 
/* volume creation time */ }; /* * AFS volume status record */ struct afs_volume_status { - u32 vid; /* volume ID */ - u32 parent_id; /* parent volume ID */ + afs_volid_t vid; /* volume ID */ + afs_volid_t parent_id; /* parent volume ID */ u8 online; /* true if volume currently online and available */ u8 in_service; /* true if volume currently in service */ u8 blessed; /* same as in_service */ u8 needs_salvage; /* true if consistency checking required */ u32 type; /* volume type (afs_voltype_t) */ - u32 min_quota; /* minimum space set aside (blocks) */ - u32 max_quota; /* maximum space this volume may occupy (blocks) */ - u32 blocks_in_use; /* space this volume currently occupies (blocks) */ - u32 part_blocks_avail; /* space available in volume's partition */ - u32 part_max_blocks; /* size of volume's partition */ + u64 min_quota; /* minimum space set aside (blocks) */ + u64 max_quota; /* maximum space this volume may occupy (blocks) */ + u64 blocks_in_use; /* space this volume currently occupies (blocks) */ + u64 part_blocks_avail; /* space available in volume's partition */ + u64 part_max_blocks; /* size of volume's partition */ + s64 vol_copy_date; + s64 vol_backup_date; }; #define AFS_BLOCK_SIZE 1024 diff --git a/fs/afs/cache.c b/fs/afs/cache.c index b1c31ec4523a..f6d0a21e8052 100644 --- a/fs/afs/cache.c +++ b/fs/afs/cache.c @@ -49,7 +49,7 @@ static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data, struct afs_vnode *vnode = cookie_netfs_data; struct afs_vnode_cache_aux aux; - _enter("{%x,%x,%llx},%p,%u", + _enter("{%llx,%x,%llx},%p,%u", vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version, buffer, buflen); diff --git a/fs/afs/callback.c b/fs/afs/callback.c index 5f261fbf2182..1c7955f5cdaf 100644 --- a/fs/afs/callback.c +++ b/fs/afs/callback.c @@ -210,12 +210,10 @@ void afs_init_callback_state(struct afs_server *server) /* * actually break a callback */ -void afs_break_callback(struct afs_vnode *vnode) +void __afs_break_callback(struct afs_vnode *vnode) { _enter(""); - write_seqlock(&vnode->cb_lock); - clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags); if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) { vnode->cb_break++; @@ -230,7 +228,12 @@ void afs_break_callback(struct afs_vnode *vnode) afs_lock_may_be_available(vnode); spin_unlock(&vnode->lock); } +} +void afs_break_callback(struct afs_vnode *vnode) +{ + write_seqlock(&vnode->cb_lock); + __afs_break_callback(vnode); write_sequnlock(&vnode->cb_lock); } @@ -310,14 +313,10 @@ void afs_break_callbacks(struct afs_server *server, size_t count, /* TODO: Sort the callback break list by volume ID */ for (; count > 0; callbacks++, count--) { - _debug("- Fid { vl=%08x n=%u u=%u } CB { v=%u x=%u t=%u }", + _debug("- Fid { vl=%08llx n=%llu u=%u }", callbacks->fid.vid, callbacks->fid.vnode, - callbacks->fid.unique, - callbacks->cb.version, - callbacks->cb.expiry, - callbacks->cb.type - ); + callbacks->fid.unique); afs_break_one_callback(server, &callbacks->fid); } diff --git a/fs/afs/cell.c b/fs/afs/cell.c index 6127f0fcd62c..cf445dbd5f2e 100644 --- a/fs/afs/cell.c +++ b/fs/afs/cell.c @@ -20,6 +20,8 @@ #include "internal.h" static unsigned __read_mostly afs_cell_gc_delay = 10; +static unsigned __read_mostly afs_cell_min_ttl = 10 * 60; +static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60; static void afs_manage_cell(struct work_struct *); @@ -119,7 +121,7 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net, */ static struct afs_cell *afs_alloc_cell(struct afs_net *net, const char 
*name, unsigned int namelen, - const char *vllist) + const char *addresses) { struct afs_cell *cell; int i, ret; @@ -134,7 +136,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net, if (namelen == 5 && memcmp(name, "@cell", 5) == 0) return ERR_PTR(-EINVAL); - _enter("%*.*s,%s", namelen, namelen, name, vllist); + _enter("%*.*s,%s", namelen, namelen, name, addresses); cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL); if (!cell) { @@ -153,23 +155,26 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net, (1 << AFS_CELL_FL_NO_LOOKUP_YET)); INIT_LIST_HEAD(&cell->proc_volumes); rwlock_init(&cell->proc_lock); - rwlock_init(&cell->vl_addrs_lock); + rwlock_init(&cell->vl_servers_lock); /* Fill in the VL server list if we were given a list of addresses to * use. */ - if (vllist) { - struct afs_addr_list *alist; - - alist = afs_parse_text_addrs(vllist, strlen(vllist), ':', - VL_SERVICE, AFS_VL_PORT); - if (IS_ERR(alist)) { - ret = PTR_ERR(alist); + if (addresses) { + struct afs_vlserver_list *vllist; + + vllist = afs_parse_text_addrs(net, + addresses, strlen(addresses), ':', + VL_SERVICE, AFS_VL_PORT); + if (IS_ERR(vllist)) { + ret = PTR_ERR(vllist); goto parse_failed; } - rcu_assign_pointer(cell->vl_addrs, alist); + rcu_assign_pointer(cell->vl_servers, vllist); cell->dns_expiry = TIME64_MAX; + } else { + cell->dns_expiry = ktime_get_real_seconds(); } _leave(" = %p", cell); @@ -356,26 +361,40 @@ int afs_cell_init(struct afs_net *net, const char *rootcell) */ static void afs_update_cell(struct afs_cell *cell) { - struct afs_addr_list *alist, *old; - time64_t now, expiry; + struct afs_vlserver_list *vllist, *old; + unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl); + unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl); + time64_t now, expiry = 0; _enter("%s", cell->name); - alist = afs_dns_query(cell, &expiry); - if (IS_ERR(alist)) { - switch (PTR_ERR(alist)) { + vllist = afs_dns_query(cell, &expiry); + + now = ktime_get_real_seconds(); + if (min_ttl > max_ttl) + max_ttl = min_ttl; + if (expiry < now + min_ttl) + expiry = now + min_ttl; + else if (expiry > now + max_ttl) + expiry = now + max_ttl; + + if (IS_ERR(vllist)) { + switch (PTR_ERR(vllist)) { case -ENODATA: - /* The DNS said that the cell does not exist */ + case -EDESTADDRREQ: + /* The DNS said that the cell does not exist or there + * weren't any addresses to be had. + */ set_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags); clear_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags); - cell->dns_expiry = ktime_get_real_seconds() + 61; + cell->dns_expiry = expiry; break; case -EAGAIN: case -ECONNREFUSED: default: set_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags); - cell->dns_expiry = ktime_get_real_seconds() + 10; + cell->dns_expiry = now + 10; break; } @@ -387,12 +406,12 @@ static void afs_update_cell(struct afs_cell *cell) /* Exclusion on changing vl_addrs is achieved by a * non-reentrant work item. 
*/ - old = rcu_dereference_protected(cell->vl_addrs, true); - rcu_assign_pointer(cell->vl_addrs, alist); + old = rcu_dereference_protected(cell->vl_servers, true); + rcu_assign_pointer(cell->vl_servers, vllist); cell->dns_expiry = expiry; if (old) - afs_put_addrlist(old); + afs_put_vlserverlist(cell->net, old); } if (test_and_clear_bit(AFS_CELL_FL_NO_LOOKUP_YET, &cell->flags)) @@ -414,7 +433,7 @@ static void afs_cell_destroy(struct rcu_head *rcu) ASSERTCMP(atomic_read(&cell->usage), ==, 0); - afs_put_addrlist(rcu_access_pointer(cell->vl_addrs)); + afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers)); key_put(cell->anonymous_key); kfree(cell); diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index 9e51d6fe7e8f..8ee5972893ed 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -16,6 +16,7 @@ #include <linux/ip.h> #include "internal.h" #include "afs_cm.h" +#include "protocol_yfs.h" static int afs_deliver_cb_init_call_back_state(struct afs_call *); static int afs_deliver_cb_init_call_back_state3(struct afs_call *); @@ -30,6 +31,8 @@ static void SRXAFSCB_Probe(struct work_struct *); static void SRXAFSCB_ProbeUuid(struct work_struct *); static void SRXAFSCB_TellMeAboutYourself(struct work_struct *); +static int afs_deliver_yfs_cb_callback(struct afs_call *); + #define CM_NAME(name) \ const char afs_SRXCB##name##_name[] __tracepoint_string = \ "CB." #name @@ -101,12 +104,25 @@ static const struct afs_call_type afs_SRXCBTellMeAboutYourself = { }; /* + * YFS CB.CallBack operation type + */ +static CM_NAME(YFS_CallBack); +static const struct afs_call_type afs_SRXYFSCB_CallBack = { + .name = afs_SRXCBYFS_CallBack_name, + .deliver = afs_deliver_yfs_cb_callback, + .destructor = afs_cm_destructor, + .work = SRXAFSCB_CallBack, +}; + +/* * route an incoming cache manager call * - return T if supported, F if not */ bool afs_cm_incoming_call(struct afs_call *call) { - _enter("{CB.OP %u}", call->operation_ID); + _enter("{%u, CB.OP %u}", call->service_id, call->operation_ID); + + call->epoch = rxrpc_kernel_get_epoch(call->net->socket, call->rxcall); switch (call->operation_ID) { case CBCallBack: @@ -127,12 +143,102 @@ bool afs_cm_incoming_call(struct afs_call *call) case CBTellMeAboutYourself: call->type = &afs_SRXCBTellMeAboutYourself; return true; + case YFSCBCallBack: + if (call->service_id != YFS_CM_SERVICE) + return false; + call->type = &afs_SRXYFSCB_CallBack; + return true; default: return false; } } /* + * Record a probe to the cache manager from a server. 
+ */ +static int afs_record_cm_probe(struct afs_call *call, struct afs_server *server) +{ + _enter(""); + + if (test_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags) && + !test_bit(AFS_SERVER_FL_PROBING, &server->flags)) { + if (server->cm_epoch == call->epoch) + return 0; + + if (!server->probe.said_rebooted) { + pr_notice("kAFS: FS rebooted %pU\n", &server->uuid); + server->probe.said_rebooted = true; + } + } + + spin_lock(&server->probe_lock); + + if (!test_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags)) { + server->cm_epoch = call->epoch; + server->probe.cm_epoch = call->epoch; + goto out; + } + + if (server->probe.cm_probed && + call->epoch != server->probe.cm_epoch && + !server->probe.said_inconsistent) { + pr_notice("kAFS: FS endpoints inconsistent %pU\n", + &server->uuid); + server->probe.said_inconsistent = true; + } + + if (!server->probe.cm_probed || call->epoch == server->cm_epoch) + server->probe.cm_epoch = server->cm_epoch; + +out: + server->probe.cm_probed = true; + spin_unlock(&server->probe_lock); + return 0; +} + +/* + * Find the server record by peer address and record a probe to the cache + * manager from a server. + */ +static int afs_find_cm_server_by_peer(struct afs_call *call) +{ + struct sockaddr_rxrpc srx; + struct afs_server *server; + + rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx); + + server = afs_find_server(call->net, &srx); + if (!server) { + trace_afs_cm_no_server(call, &srx); + return 0; + } + + call->cm_server = server; + return afs_record_cm_probe(call, server); +} + +/* + * Find the server record by server UUID and record a probe to the cache + * manager from a server. + */ +static int afs_find_cm_server_by_uuid(struct afs_call *call, + struct afs_uuid *uuid) +{ + struct afs_server *server; + + rcu_read_lock(); + server = afs_find_server_by_uuid(call->net, call->request); + rcu_read_unlock(); + if (!server) { + trace_afs_cm_no_server_u(call, call->request); + return 0; + } + + call->cm_server = server; + return afs_record_cm_probe(call, server); +} + +/* * Clean up a cache manager call. 
*/ static void afs_cm_destructor(struct afs_call *call) @@ -168,7 +274,6 @@ static void SRXAFSCB_CallBack(struct work_struct *work) static int afs_deliver_cb_callback(struct afs_call *call) { struct afs_callback_break *cb; - struct sockaddr_rxrpc srx; __be32 *bp; int ret, loop; @@ -176,32 +281,32 @@ static int afs_deliver_cb_callback(struct afs_call *call) switch (call->unmarshall) { case 0: - call->offset = 0; + afs_extract_to_tmp(call); call->unmarshall++; /* extract the FID array and its count in two steps */ case 1: _debug("extract FID count"); - ret = afs_extract_data(call, &call->tmp, 4, true); + ret = afs_extract_data(call, true); if (ret < 0) return ret; call->count = ntohl(call->tmp); _debug("FID count: %u", call->count); if (call->count > AFSCBMAX) - return afs_protocol_error(call, -EBADMSG); + return afs_protocol_error(call, -EBADMSG, + afs_eproto_cb_fid_count); call->buffer = kmalloc(array3_size(call->count, 3, 4), GFP_KERNEL); if (!call->buffer) return -ENOMEM; - call->offset = 0; + afs_extract_to_buf(call, call->count * 3 * 4); call->unmarshall++; case 2: _debug("extract FID array"); - ret = afs_extract_data(call, call->buffer, - call->count * 3 * 4, true); + ret = afs_extract_data(call, true); if (ret < 0) return ret; @@ -218,59 +323,46 @@ static int afs_deliver_cb_callback(struct afs_call *call) cb->fid.vid = ntohl(*bp++); cb->fid.vnode = ntohl(*bp++); cb->fid.unique = ntohl(*bp++); - cb->cb.type = AFSCM_CB_UNTYPED; } - call->offset = 0; + afs_extract_to_tmp(call); call->unmarshall++; /* extract the callback array and its count in two steps */ case 3: _debug("extract CB count"); - ret = afs_extract_data(call, &call->tmp, 4, true); + ret = afs_extract_data(call, true); if (ret < 0) return ret; call->count2 = ntohl(call->tmp); _debug("CB count: %u", call->count2); if (call->count2 != call->count && call->count2 != 0) - return afs_protocol_error(call, -EBADMSG); - call->offset = 0; + return afs_protocol_error(call, -EBADMSG, + afs_eproto_cb_count); + call->_iter = &call->iter; + iov_iter_discard(&call->iter, READ, call->count2 * 3 * 4); call->unmarshall++; case 4: - _debug("extract CB array"); - ret = afs_extract_data(call, call->buffer, - call->count2 * 3 * 4, false); + _debug("extract discard %zu/%u", + iov_iter_count(&call->iter), call->count2 * 3 * 4); + + ret = afs_extract_data(call, false); if (ret < 0) return ret; - _debug("unmarshall CB array"); - cb = call->request; - bp = call->buffer; - for (loop = call->count2; loop > 0; loop--, cb++) { - cb->cb.version = ntohl(*bp++); - cb->cb.expiry = ntohl(*bp++); - cb->cb.type = ntohl(*bp++); - } - - call->offset = 0; call->unmarshall++; case 5: break; } if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING)) - return -EIO; + return afs_io_error(call, afs_io_error_cm_reply); /* we'll need the file server record as that tells us which set of * vnodes to operate upon */ - rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx); - call->cm_server = afs_find_server(call->net, &srx); - if (!call->cm_server) - trace_afs_cm_no_server(call, &srx); - - return afs_queue_call_work(call); + return afs_find_cm_server_by_peer(call); } /* @@ -294,24 +386,18 @@ static void SRXAFSCB_InitCallBackState(struct work_struct *work) */ static int afs_deliver_cb_init_call_back_state(struct afs_call *call) { - struct sockaddr_rxrpc srx; int ret; _enter(""); - rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx); - - ret = afs_extract_data(call, NULL, 0, false); + afs_extract_discard(call, 0); + ret = afs_extract_data(call, false); if (ret < 
0) return ret; /* we'll need the file server record as that tells us which set of * vnodes to operate upon */ - call->cm_server = afs_find_server(call->net, &srx); - if (!call->cm_server) - trace_afs_cm_no_server(call, &srx); - - return afs_queue_call_work(call); + return afs_find_cm_server_by_peer(call); } /* @@ -330,16 +416,15 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call) switch (call->unmarshall) { case 0: - call->offset = 0; call->buffer = kmalloc_array(11, sizeof(__be32), GFP_KERNEL); if (!call->buffer) return -ENOMEM; + afs_extract_to_buf(call, 11 * sizeof(__be32)); call->unmarshall++; case 1: _debug("extract UUID"); - ret = afs_extract_data(call, call->buffer, - 11 * sizeof(__be32), false); + ret = afs_extract_data(call, false); switch (ret) { case 0: break; case -EAGAIN: return 0; @@ -362,7 +447,6 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call) for (loop = 0; loop < 6; loop++) r->node[loop] = ntohl(b[loop + 5]); - call->offset = 0; call->unmarshall++; case 2: @@ -370,17 +454,11 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call) } if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING)) - return -EIO; + return afs_io_error(call, afs_io_error_cm_reply); /* we'll need the file server record as that tells us which set of * vnodes to operate upon */ - rcu_read_lock(); - call->cm_server = afs_find_server_by_uuid(call->net, call->request); - rcu_read_unlock(); - if (!call->cm_server) - trace_afs_cm_no_server_u(call, call->request); - - return afs_queue_call_work(call); + return afs_find_cm_server_by_uuid(call, call->request); } /* @@ -405,14 +483,14 @@ static int afs_deliver_cb_probe(struct afs_call *call) _enter(""); - ret = afs_extract_data(call, NULL, 0, false); + afs_extract_discard(call, 0); + ret = afs_extract_data(call, false); if (ret < 0) return ret; if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING)) - return -EIO; - - return afs_queue_call_work(call); + return afs_io_error(call, afs_io_error_cm_reply); + return afs_find_cm_server_by_peer(call); } /* @@ -453,16 +531,15 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call) switch (call->unmarshall) { case 0: - call->offset = 0; call->buffer = kmalloc_array(11, sizeof(__be32), GFP_KERNEL); if (!call->buffer) return -ENOMEM; + afs_extract_to_buf(call, 11 * sizeof(__be32)); call->unmarshall++; case 1: _debug("extract UUID"); - ret = afs_extract_data(call, call->buffer, - 11 * sizeof(__be32), false); + ret = afs_extract_data(call, false); switch (ret) { case 0: break; case -EAGAIN: return 0; @@ -485,7 +562,6 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call) for (loop = 0; loop < 6; loop++) r->node[loop] = ntohl(b[loop + 5]); - call->offset = 0; call->unmarshall++; case 2: @@ -493,9 +569,8 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call) } if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING)) - return -EIO; - - return afs_queue_call_work(call); + return afs_io_error(call, afs_io_error_cm_reply); + return afs_find_cm_server_by_uuid(call, call->request); } /* @@ -570,12 +645,88 @@ static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call) _enter(""); - ret = afs_extract_data(call, NULL, 0, false); + afs_extract_discard(call, 0); + ret = afs_extract_data(call, false); if (ret < 0) return ret; if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING)) - return -EIO; + return afs_io_error(call, afs_io_error_cm_reply); + return afs_find_cm_server_by_peer(call); +} + +/* + * deliver request data to a YFS CB.CallBack 
call + */ +static int afs_deliver_yfs_cb_callback(struct afs_call *call) +{ + struct afs_callback_break *cb; + struct yfs_xdr_YFSFid *bp; + size_t size; + int ret, loop; + + _enter("{%u}", call->unmarshall); + + switch (call->unmarshall) { + case 0: + afs_extract_to_tmp(call); + call->unmarshall++; + + /* extract the FID array and its count in two steps */ + case 1: + _debug("extract FID count"); + ret = afs_extract_data(call, true); + if (ret < 0) + return ret; + + call->count = ntohl(call->tmp); + _debug("FID count: %u", call->count); + if (call->count > YFSCBMAX) + return afs_protocol_error(call, -EBADMSG, + afs_eproto_cb_fid_count); + + size = array_size(call->count, sizeof(struct yfs_xdr_YFSFid)); + call->buffer = kmalloc(size, GFP_KERNEL); + if (!call->buffer) + return -ENOMEM; + afs_extract_to_buf(call, size); + call->unmarshall++; + + case 2: + _debug("extract FID array"); + ret = afs_extract_data(call, false); + if (ret < 0) + return ret; + + _debug("unmarshall FID array"); + call->request = kcalloc(call->count, + sizeof(struct afs_callback_break), + GFP_KERNEL); + if (!call->request) + return -ENOMEM; + + cb = call->request; + bp = call->buffer; + for (loop = call->count; loop > 0; loop--, cb++) { + cb->fid.vid = xdr_to_u64(bp->volume); + cb->fid.vnode = xdr_to_u64(bp->vnode.lo); + cb->fid.vnode_hi = ntohl(bp->vnode.hi); + cb->fid.unique = ntohl(bp->vnode.unique); + bp++; + } + + afs_extract_to_tmp(call); + call->unmarshall++; + + case 3: + break; + } + + if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING)) + return afs_io_error(call, afs_io_error_cm_reply); - return afs_queue_call_work(call); + /* We'll need the file server record as that tells us which set of + * vnodes to operate upon. + */ + return afs_find_cm_server_by_peer(call); } diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 855bf2b79fed..43dea3b00c29 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -138,6 +138,7 @@ static bool afs_dir_check_page(struct afs_vnode *dvnode, struct page *page, ntohs(dbuf->blocks[tmp].hdr.magic)); trace_afs_dir_check_failed(dvnode, off, i_size); kunmap(page); + trace_afs_file_error(dvnode, -EIO, afs_file_error_dir_bad_magic); goto error; } @@ -190,9 +191,11 @@ static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key) retry: i_size = i_size_read(&dvnode->vfs_inode); if (i_size < 2048) - return ERR_PTR(-EIO); - if (i_size > 2048 * 1024) + return ERR_PTR(afs_bad(dvnode, afs_file_error_dir_small)); + if (i_size > 2048 * 1024) { + trace_afs_file_error(dvnode, -EFBIG, afs_file_error_dir_big); return ERR_PTR(-EFBIG); + } _enter("%llu", i_size); @@ -315,7 +318,8 @@ content_has_grown: /* * deal with one block in an AFS directory */ -static int afs_dir_iterate_block(struct dir_context *ctx, +static int afs_dir_iterate_block(struct afs_vnode *dvnode, + struct dir_context *ctx, union afs_xdr_dir_block *block, unsigned blkoff) { @@ -365,7 +369,7 @@ static int afs_dir_iterate_block(struct dir_context *ctx, " (len %u/%zu)", blkoff / sizeof(union afs_xdr_dir_block), offset, next, tmp, nlen); - return -EIO; + return afs_bad(dvnode, afs_file_error_dir_over_end); } if (!(block->hdr.bitmap[next / 8] & (1 << (next % 8)))) { @@ -373,7 +377,7 @@ static int afs_dir_iterate_block(struct dir_context *ctx, " %u unmarked extension (len %u/%zu)", blkoff / sizeof(union afs_xdr_dir_block), offset, next, tmp, nlen); - return -EIO; + return afs_bad(dvnode, afs_file_error_dir_unmarked_ext); } _debug("ENT[%zu.%u]: ext %u/%zu", @@ -442,7 +446,7 @@ static int afs_dir_iterate(struct inode *dir, struct 
dir_context *ctx, */ page = req->pages[blkoff / PAGE_SIZE]; if (!page) { - ret = -EIO; + ret = afs_bad(dvnode, afs_file_error_dir_missing_page); break; } mark_page_accessed(page); @@ -455,7 +459,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, do { dblock = &dbuf->blocks[(blkoff % PAGE_SIZE) / sizeof(union afs_xdr_dir_block)]; - ret = afs_dir_iterate_block(ctx, dblock, blkoff); + ret = afs_dir_iterate_block(dvnode, ctx, dblock, blkoff); if (ret != 1) { kunmap(page); goto out; @@ -548,7 +552,7 @@ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry, } *fid = cookie.fid; - _leave(" = 0 { vn=%u u=%u }", fid->vnode, fid->unique); + _leave(" = 0 { vn=%llu u=%u }", fid->vnode, fid->unique); return 0; } @@ -826,7 +830,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, struct key *key; int ret; - _enter("{%x:%u},%p{%pd},", + _enter("{%llx:%llu},%p{%pd},", dvnode->fid.vid, dvnode->fid.vnode, dentry, dentry); ASSERTCMP(d_inode(dentry), ==, NULL); @@ -896,7 +900,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) if (d_really_is_positive(dentry)) { vnode = AFS_FS_I(d_inode(dentry)); - _enter("{v={%x:%u} n=%pd fl=%lx},", + _enter("{v={%llx:%llu} n=%pd fl=%lx},", vnode->fid.vid, vnode->fid.vnode, dentry, vnode->flags); } else { @@ -965,7 +969,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) /* if the vnode ID has changed, then the dirent points to a * different file */ if (fid.vnode != vnode->fid.vnode) { - _debug("%pd: dirent changed [%u != %u]", + _debug("%pd: dirent changed [%llu != %llu]", dentry, fid.vnode, vnode->fid.vnode); goto not_found; @@ -1085,6 +1089,7 @@ static void afs_vnode_new_inode(struct afs_fs_cursor *fc, vnode = AFS_FS_I(inode); set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags); + afs_vnode_commit_status(fc, vnode, 0); d_add(new_dentry, inode); } @@ -1104,7 +1109,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) mode |= S_IFDIR; - _enter("{%x:%u},{%pd},%ho", + _enter("{%llx:%llu},{%pd},%ho", dvnode->fid.vid, dvnode->fid.vnode, dentry, mode); key = afs_request_key(dvnode->volume->cell); @@ -1169,12 +1174,12 @@ static void afs_dir_remove_subdir(struct dentry *dentry) static int afs_rmdir(struct inode *dir, struct dentry *dentry) { struct afs_fs_cursor fc; - struct afs_vnode *dvnode = AFS_FS_I(dir); + struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode = NULL; struct key *key; u64 data_version = dvnode->status.data_version; int ret; - _enter("{%x:%u},{%pd}", + _enter("{%llx:%llu},{%pd}", dvnode->fid.vid, dvnode->fid.vnode, dentry); key = afs_request_key(dvnode->volume->cell); @@ -1183,11 +1188,19 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry) goto error; } + /* Try to make sure we have a callback promise on the victim. 
*/ + if (d_really_is_positive(dentry)) { + vnode = AFS_FS_I(d_inode(dentry)); + ret = afs_validate(vnode, key); + if (ret < 0) + goto error_key; + } + ret = -ERESTARTSYS; if (afs_begin_vnode_operation(&fc, dvnode, key)) { while (afs_select_fileserver(&fc)) { fc.cb_break = afs_calc_vnode_cb_break(dvnode); - afs_fs_remove(&fc, dentry->d_name.name, true, + afs_fs_remove(&fc, vnode, dentry->d_name.name, true, data_version); } @@ -1201,6 +1214,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry) } } +error_key: key_put(key); error: return ret; @@ -1231,7 +1245,9 @@ static int afs_dir_remove_link(struct dentry *dentry, struct key *key, if (d_really_is_positive(dentry)) { struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry)); - if (dir_valid) { + if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { + /* Already done */ + } else if (dir_valid) { drop_nlink(&vnode->vfs_inode); if (vnode->vfs_inode.i_nlink == 0) { set_bit(AFS_VNODE_DELETED, &vnode->flags); @@ -1260,13 +1276,13 @@ static int afs_dir_remove_link(struct dentry *dentry, struct key *key, static int afs_unlink(struct inode *dir, struct dentry *dentry) { struct afs_fs_cursor fc; - struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode; + struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode = NULL; struct key *key; unsigned long d_version = (unsigned long)dentry->d_fsdata; u64 data_version = dvnode->status.data_version; int ret; - _enter("{%x:%u},{%pd}", + _enter("{%llx:%llu},{%pd}", dvnode->fid.vid, dvnode->fid.vnode, dentry); if (dentry->d_name.len >= AFSNAMEMAX) @@ -1290,7 +1306,18 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry) if (afs_begin_vnode_operation(&fc, dvnode, key)) { while (afs_select_fileserver(&fc)) { fc.cb_break = afs_calc_vnode_cb_break(dvnode); - afs_fs_remove(&fc, dentry->d_name.name, false, + + if (test_bit(AFS_SERVER_FL_IS_YFS, &fc.cbi->server->flags) && + !test_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags)) { + yfs_fs_remove_file2(&fc, vnode, dentry->d_name.name, + data_version); + if (fc.ac.error != -ECONNABORTED || + fc.ac.abort_code != RXGEN_OPCODE) + continue; + set_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags); + } + + afs_fs_remove(&fc, vnode, dentry->d_name.name, false, data_version); } @@ -1330,7 +1357,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode, mode |= S_IFREG; - _enter("{%x:%u},{%pd},%ho,", + _enter("{%llx:%llu},{%pd},%ho,", dvnode->fid.vid, dvnode->fid.vnode, dentry, mode); ret = -ENAMETOOLONG; @@ -1393,7 +1420,7 @@ static int afs_link(struct dentry *from, struct inode *dir, dvnode = AFS_FS_I(dir); data_version = dvnode->status.data_version; - _enter("{%x:%u},{%x:%u},{%pd}", + _enter("{%llx:%llu},{%llx:%llu},{%pd}", vnode->fid.vid, vnode->fid.vnode, dvnode->fid.vid, dvnode->fid.vnode, dentry); @@ -1464,7 +1491,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry, u64 data_version = dvnode->status.data_version; int ret; - _enter("{%x:%u},{%pd},%s", + _enter("{%llx:%llu},{%pd},%s", dvnode->fid.vid, dvnode->fid.vnode, dentry, content); @@ -1540,7 +1567,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, orig_data_version = orig_dvnode->status.data_version; new_data_version = new_dvnode->status.data_version; - _enter("{%x:%u},{%x:%u},{%x:%u},{%pd}", + _enter("{%llx:%llu},{%llx:%llu},{%llx:%llu},{%pd}", orig_dvnode->fid.vid, orig_dvnode->fid.vnode, vnode->fid.vid, vnode->fid.vnode, new_dvnode->fid.vid, new_dvnode->fid.vnode, @@ -1607,7 +1634,7 @@ static int afs_dir_releasepage(struct page *page, gfp_t 
gfp_flags) { struct afs_vnode *dvnode = AFS_FS_I(page->mapping->host); - _enter("{{%x:%u}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, page->index); + _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, page->index); set_page_private(page, 0); ClearPagePrivate(page); diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c index f29c6dade7f6..a9ba81ddf154 100644 --- a/fs/afs/dynroot.c +++ b/fs/afs/dynroot.c @@ -46,7 +46,7 @@ static int afs_probe_cell_name(struct dentry *dentry) return 0; } - ret = dns_query("afsdb", name, len, "", NULL, NULL); + ret = dns_query("afsdb", name, len, "srv=1", NULL, NULL); if (ret == -ENODATA) ret = -EDESTADDRREQ; return ret; @@ -62,7 +62,7 @@ struct inode *afs_try_auto_mntpt(struct dentry *dentry, struct inode *dir) struct inode *inode; int ret = -ENOENT; - _enter("%p{%pd}, {%x:%u}", + _enter("%p{%pd}, {%llx:%llu}", dentry, dentry, vnode->fid.vid, vnode->fid.vnode); if (!test_bit(AFS_VNODE_AUTOCELL, &vnode->flags)) diff --git a/fs/afs/file.c b/fs/afs/file.c index 7d4f26198573..d6bc3f5d784b 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -121,7 +121,7 @@ int afs_open(struct inode *inode, struct file *file) struct key *key; int ret; - _enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode); + _enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode); key = afs_request_key(vnode->volume->cell); if (IS_ERR(key)) { @@ -170,7 +170,7 @@ int afs_release(struct inode *inode, struct file *file) struct afs_vnode *vnode = AFS_FS_I(inode); struct afs_file *af = file->private_data; - _enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode); + _enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode); if ((file->f_mode & FMODE_WRITE)) return vfs_fsync(file, 0); @@ -228,7 +228,7 @@ int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *de struct afs_fs_cursor fc; int ret; - _enter("%s{%x:%u.%u},%x,,,", + _enter("%s{%llx:%llu.%u},%x,,,", vnode->volume->name, vnode->fid.vid, vnode->fid.vnode, @@ -634,7 +634,7 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags) struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); unsigned long priv; - _enter("{{%x:%u}[%lu],%lx},%x", + _enter("{{%llx:%llu}[%lu],%lx},%x", vnode->fid.vid, vnode->fid.vnode, page->index, page->flags, gfp_flags); diff --git a/fs/afs/flock.c b/fs/afs/flock.c index dc62d15a964b..0568fd986821 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c @@ -29,7 +29,7 @@ static const struct file_lock_operations afs_lock_ops = { */ void afs_lock_may_be_available(struct afs_vnode *vnode) { - _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode); + _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode); queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0); } @@ -76,7 +76,7 @@ static int afs_set_lock(struct afs_vnode *vnode, struct key *key, struct afs_fs_cursor fc; int ret; - _enter("%s{%x:%u.%u},%x,%u", + _enter("%s{%llx:%llu.%u},%x,%u", vnode->volume->name, vnode->fid.vid, vnode->fid.vnode, @@ -107,7 +107,7 @@ static int afs_extend_lock(struct afs_vnode *vnode, struct key *key) struct afs_fs_cursor fc; int ret; - _enter("%s{%x:%u.%u},%x", + _enter("%s{%llx:%llu.%u},%x", vnode->volume->name, vnode->fid.vid, vnode->fid.vnode, @@ -138,7 +138,7 @@ static int afs_release_lock(struct afs_vnode *vnode, struct key *key) struct afs_fs_cursor fc; int ret; - _enter("%s{%x:%u.%u},%x", + _enter("%s{%llx:%llu.%u},%x", vnode->volume->name, vnode->fid.vid, vnode->fid.vnode, @@ -175,7 +175,7 @@ void afs_lock_work(struct work_struct *work) struct key *key; int ret; - _enter("{%x:%u}", vnode->fid.vid, 
vnode->fid.vnode); + _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode); spin_lock(&vnode->lock); @@ -192,7 +192,7 @@ again: ret = afs_release_lock(vnode, vnode->lock_key); if (ret < 0) printk(KERN_WARNING "AFS:" - " Failed to release lock on {%x:%x} error %d\n", + " Failed to release lock on {%llx:%llx} error %d\n", vnode->fid.vid, vnode->fid.vnode, ret); spin_lock(&vnode->lock); @@ -229,7 +229,7 @@ again: key_put(key); if (ret < 0) - pr_warning("AFS: Failed to extend lock on {%x:%x} error %d\n", + pr_warning("AFS: Failed to extend lock on {%llx:%llx} error %d\n", vnode->fid.vid, vnode->fid.vnode, ret); spin_lock(&vnode->lock); @@ -430,7 +430,7 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl) struct key *key = afs_file_key(file); int ret; - _enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type); + _enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type); /* only whole-file locks are supported */ if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX) @@ -582,7 +582,7 @@ static int afs_do_unlk(struct file *file, struct file_lock *fl) struct afs_vnode *vnode = AFS_FS_I(locks_inode(file)); int ret; - _enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type); + _enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type); /* Flush all pending writes before doing anything with locks. */ vfs_fsync(file, 0); @@ -639,7 +639,7 @@ int afs_lock(struct file *file, int cmd, struct file_lock *fl) { struct afs_vnode *vnode = AFS_FS_I(locks_inode(file)); - _enter("{%x:%u},%d,{t=%x,fl=%x,r=%Ld:%Ld}", + _enter("{%llx:%llu},%d,{t=%x,fl=%x,r=%Ld:%Ld}", vnode->fid.vid, vnode->fid.vnode, cmd, fl->fl_type, fl->fl_flags, (long long) fl->fl_start, (long long) fl->fl_end); @@ -662,7 +662,7 @@ int afs_flock(struct file *file, int cmd, struct file_lock *fl) { struct afs_vnode *vnode = AFS_FS_I(locks_inode(file)); - _enter("{%x:%u},%d,{t=%x,fl=%x}", + _enter("{%llx:%llu},%d,{t=%x,fl=%x}", vnode->fid.vid, vnode->fid.vnode, cmd, fl->fl_type, fl->fl_flags); diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c new file mode 100644 index 000000000000..d049cb459742 --- /dev/null +++ b/fs/afs/fs_probe.c @@ -0,0 +1,270 @@ +/* AFS fileserver probing + * + * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#include <linux/sched.h> +#include <linux/slab.h> +#include "afs_fs.h" +#include "internal.h" +#include "protocol_yfs.h" + +static bool afs_fs_probe_done(struct afs_server *server) +{ + if (!atomic_dec_and_test(&server->probe_outstanding)) + return false; + + wake_up_var(&server->probe_outstanding); + clear_bit_unlock(AFS_SERVER_FL_PROBING, &server->flags); + wake_up_bit(&server->flags, AFS_SERVER_FL_PROBING); + return true; +} + +/* + * Process the result of probing a fileserver. This is called after successful + * or failed delivery of an FS.GetCapabilities operation. 
+ */ +void afs_fileserver_probe_result(struct afs_call *call) +{ + struct afs_addr_list *alist = call->alist; + struct afs_server *server = call->reply[0]; + unsigned int server_index = (long)call->reply[1]; + unsigned int index = call->addr_ix; + unsigned int rtt = UINT_MAX; + bool have_result = false; + u64 _rtt; + int ret = call->error; + + _enter("%pU,%u", &server->uuid, index); + + spin_lock(&server->probe_lock); + + switch (ret) { + case 0: + server->probe.error = 0; + goto responded; + case -ECONNABORTED: + if (!server->probe.responded) { + server->probe.abort_code = call->abort_code; + server->probe.error = ret; + } + goto responded; + case -ENOMEM: + case -ENONET: + server->probe.local_failure = true; + afs_io_error(call, afs_io_error_fs_probe_fail); + goto out; + case -ECONNRESET: /* Responded, but call expired. */ + case -ENETUNREACH: + case -EHOSTUNREACH: + case -ECONNREFUSED: + case -ETIMEDOUT: + case -ETIME: + default: + clear_bit(index, &alist->responded); + set_bit(index, &alist->failed); + if (!server->probe.responded && + (server->probe.error == 0 || + server->probe.error == -ETIMEDOUT || + server->probe.error == -ETIME)) + server->probe.error = ret; + afs_io_error(call, afs_io_error_fs_probe_fail); + goto out; + } + +responded: + set_bit(index, &alist->responded); + clear_bit(index, &alist->failed); + + if (call->service_id == YFS_FS_SERVICE) { + server->probe.is_yfs = true; + set_bit(AFS_SERVER_FL_IS_YFS, &server->flags); + alist->addrs[index].srx_service = call->service_id; + } else { + server->probe.not_yfs = true; + if (!server->probe.is_yfs) { + clear_bit(AFS_SERVER_FL_IS_YFS, &server->flags); + alist->addrs[index].srx_service = call->service_id; + } + } + + /* Get the RTT and scale it to fit into a 32-bit value that represents + * over a minute of time so that we can access it with one instruction + * on a 32-bit system. + */ + _rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall); + _rtt /= 64; + rtt = (_rtt > UINT_MAX) ? UINT_MAX : _rtt; + if (rtt < server->probe.rtt) { + server->probe.rtt = rtt; + alist->preferred = index; + have_result = true; + } + + smp_wmb(); /* Set rtt before responded. */ + server->probe.responded = true; + set_bit(AFS_SERVER_FL_PROBED, &server->flags); +out: + spin_unlock(&server->probe_lock); + + _debug("probe [%u][%u] %pISpc rtt=%u ret=%d", + server_index, index, &alist->addrs[index].transport, + (unsigned int)rtt, ret); + + have_result |= afs_fs_probe_done(server); + if (have_result) { + server->probe.have_result = true; + wake_up_var(&server->probe.have_result); + wake_up_all(&server->probe_wq); + } +} + +/* + * Probe all of a fileserver's addresses to find out the best route and to + * query its capabilities. 
+ */ +static int afs_do_probe_fileserver(struct afs_net *net, + struct afs_server *server, + struct key *key, + unsigned int server_index) +{ + struct afs_addr_cursor ac = { + .index = 0, + }; + int ret; + + _enter("%pU", &server->uuid); + + read_lock(&server->fs_lock); + ac.alist = rcu_dereference_protected(server->addresses, + lockdep_is_held(&server->fs_lock)); + read_unlock(&server->fs_lock); + + atomic_set(&server->probe_outstanding, ac.alist->nr_addrs); + memset(&server->probe, 0, sizeof(server->probe)); + server->probe.rtt = UINT_MAX; + + for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) { + ret = afs_fs_get_capabilities(net, server, &ac, key, server_index, + true); + if (ret != -EINPROGRESS) { + afs_fs_probe_done(server); + return ret; + } + } + + return 0; +} + +/* + * Send off probes to all unprobed servers. + */ +int afs_probe_fileservers(struct afs_net *net, struct key *key, + struct afs_server_list *list) +{ + struct afs_server *server; + int i, ret; + + for (i = 0; i < list->nr_servers; i++) { + server = list->servers[i].server; + if (test_bit(AFS_SERVER_FL_PROBED, &server->flags)) + continue; + + if (!test_and_set_bit_lock(AFS_SERVER_FL_PROBING, &server->flags)) { + ret = afs_do_probe_fileserver(net, server, key, i); + if (ret) + return ret; + } + } + + return 0; +} + +/* + * Wait for the first as-yet untried fileserver to respond. + */ +int afs_wait_for_fs_probes(struct afs_server_list *slist, unsigned long untried) +{ + struct wait_queue_entry *waits; + struct afs_server *server; + unsigned int rtt = UINT_MAX; + bool have_responders = false; + int pref = -1, i; + + _enter("%u,%lx", slist->nr_servers, untried); + + /* Only wait for servers that have a probe outstanding. */ + for (i = 0; i < slist->nr_servers; i++) { + if (test_bit(i, &untried)) { + server = slist->servers[i].server; + if (!test_bit(AFS_SERVER_FL_PROBING, &server->flags)) + __clear_bit(i, &untried); + if (server->probe.responded) + have_responders = true; + } + } + if (have_responders || !untried) + return 0; + + waits = kmalloc(array_size(slist->nr_servers, sizeof(*waits)), GFP_KERNEL); + if (!waits) + return -ENOMEM; + + for (i = 0; i < slist->nr_servers; i++) { + if (test_bit(i, &untried)) { + server = slist->servers[i].server; + init_waitqueue_entry(&waits[i], current); + add_wait_queue(&server->probe_wq, &waits[i]); + } + } + + for (;;) { + bool still_probing = false; + + set_current_state(TASK_INTERRUPTIBLE); + for (i = 0; i < slist->nr_servers; i++) { + if (test_bit(i, &untried)) { + server = slist->servers[i].server; + if (server->probe.responded) + goto stop; + if (test_bit(AFS_SERVER_FL_PROBING, &server->flags)) + still_probing = true; + } + } + + if (!still_probing || unlikely(signal_pending(current))) + goto stop; + schedule(); + } + +stop: + set_current_state(TASK_RUNNING); + + for (i = 0; i < slist->nr_servers; i++) { + if (test_bit(i, &untried)) { + server = slist->servers[i].server; + if (server->probe.responded && + server->probe.rtt < rtt) { + pref = i; + rtt = server->probe.rtt; + } + + remove_wait_queue(&server->probe_wq, &waits[i]); + } + } + + kfree(waits); + + if (pref == -1 && signal_pending(current)) + return -ERESTARTSYS; + + if (pref >= 0) + slist->preferred = pref; + return 0; +} diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index 50929cb91732..ca08c83168f5 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -17,15 +17,10 @@ #include "internal.h" #include "afs_fs.h" #include "xdr_fs.h" +#include "protocol_yfs.h" static const struct afs_fid afs_zero_fid; -/* 
- * We need somewhere to discard into in case the server helpfully returns more - * than we asked for in FS.FetchData{,64}. - */ -static u8 afs_discard_buffer[64]; - static inline void afs_use_fs_server(struct afs_call *call, struct afs_cb_interest *cbi) { call->cbi = afs_get_cb_interest(cbi); @@ -75,8 +70,7 @@ void afs_update_inode_from_status(struct afs_vnode *vnode, struct timespec64 t; umode_t mode; - t.tv_sec = status->mtime_client; - t.tv_nsec = 0; + t = status->mtime_client; vnode->vfs_inode.i_ctime = t; vnode->vfs_inode.i_mtime = t; vnode->vfs_inode.i_atime = t; @@ -96,7 +90,7 @@ void afs_update_inode_from_status(struct afs_vnode *vnode, if (!(flags & AFS_VNODE_NOT_YET_SET)) { if (expected_version && *expected_version != status->data_version) { - _debug("vnode modified %llx on {%x:%u} [exp %llx]", + _debug("vnode modified %llx on {%llx:%llu} [exp %llx]", (unsigned long long) status->data_version, vnode->fid.vid, vnode->fid.vnode, (unsigned long long) *expected_version); @@ -170,7 +164,7 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call, if (type != status->type && vnode && !test_bit(AFS_VNODE_UNSET, &vnode->flags)) { - pr_warning("Vnode %x:%x:%x changed type %u to %u\n", + pr_warning("Vnode %llx:%llx:%x changed type %u to %u\n", vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique, @@ -200,8 +194,10 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call, EXTRACT_M(mode); EXTRACT_M(group); - status->mtime_client = ntohl(xdr->mtime_client); - status->mtime_server = ntohl(xdr->mtime_server); + status->mtime_client.tv_sec = ntohl(xdr->mtime_client); + status->mtime_client.tv_nsec = 0; + status->mtime_server.tv_sec = ntohl(xdr->mtime_server); + status->mtime_server.tv_nsec = 0; status->lock_count = ntohl(xdr->lock_count); size = (u64)ntohl(xdr->size_lo); @@ -233,7 +229,7 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call, bad: xdr_dump_bad(*_bp); - return afs_protocol_error(call, -EBADMSG); + return afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status); } /* @@ -273,7 +269,7 @@ static void xdr_decode_AFSCallBack(struct afs_call *call, write_seqlock(&vnode->cb_lock); - if (call->cb_break == afs_cb_break_sum(vnode, cbi)) { + if (!afs_cb_is_broken(call->cb_break, vnode, cbi)) { vnode->cb_version = ntohl(*bp++); cb_expiry = ntohl(*bp++); vnode->cb_type = ntohl(*bp++); @@ -293,13 +289,19 @@ static void xdr_decode_AFSCallBack(struct afs_call *call, *_bp = bp; } -static void xdr_decode_AFSCallBack_raw(const __be32 **_bp, +static ktime_t xdr_decode_expiry(struct afs_call *call, u32 expiry) +{ + return ktime_add_ns(call->reply_time, expiry * NSEC_PER_SEC); +} + +static void xdr_decode_AFSCallBack_raw(struct afs_call *call, + const __be32 **_bp, struct afs_callback *cb) { const __be32 *bp = *_bp; cb->version = ntohl(*bp++); - cb->expiry = ntohl(*bp++); + cb->expires_at = xdr_decode_expiry(call, ntohl(*bp++)); cb->type = ntohl(*bp++); *_bp = bp; } @@ -311,14 +313,18 @@ static void xdr_decode_AFSVolSync(const __be32 **_bp, struct afs_volsync *volsync) { const __be32 *bp = *_bp; + u32 creation; - volsync->creation = ntohl(*bp++); + creation = ntohl(*bp++); bp++; /* spare2 */ bp++; /* spare3 */ bp++; /* spare4 */ bp++; /* spare5 */ bp++; /* spare6 */ *_bp = bp; + + if (volsync) + volsync->creation = creation; } /* @@ -379,6 +385,8 @@ static void xdr_decode_AFSFetchVolumeStatus(const __be32 **_bp, vs->blocks_in_use = ntohl(*bp++); vs->part_blocks_avail = ntohl(*bp++); vs->part_max_blocks = ntohl(*bp++); + vs->vol_copy_date = 0; + vs->vol_backup_date = 0; *_bp = bp; } 
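The hunks above replace the raw relative callback expiry with an absolute expires_at computed by xdr_decode_expiry(), which anchors the server-supplied lifetime (whole seconds) to the time the reply arrived (call->reply_time): ktime_add_ns(call->reply_time, expiry * NSEC_PER_SEC). A minimal userspace sketch of the same conversion follows, assuming a monotonic nanosecond clock stands in for ktime; the helper names below are illustrative only and are not part of the patch.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000LL

/* Absolute "now" in nanoseconds, standing in for call->reply_time. */
static int64_t reply_time_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

/* Turn a relative expiry (seconds from the reply) into an absolute deadline. */
static int64_t decode_expiry(int64_t reply_time, uint32_t expiry_secs)
{
	return reply_time + (int64_t)expiry_secs * NSEC_PER_SEC;
}

int main(void)
{
	int64_t reply = reply_time_ns();
	int64_t expires_at = decode_expiry(reply, 300);	/* 5-minute callback */

	printf("callback valid for another %lld s\n",
	       (long long)((expires_at - reply) / NSEC_PER_SEC));
	return 0;
}

Storing the absolute deadline rather than the relative count means later validity checks only need a comparison against the current time, with no record of when the reply was received.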
@@ -395,16 +403,16 @@ static int afs_deliver_fs_fetch_status_vnode(struct afs_call *call) if (ret < 0) return ret; - _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode); + _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode); /* unmarshall the reply once we've received all of it */ bp = call->buffer; - if (afs_decode_status(call, &bp, &vnode->status, vnode, - &call->expected_version, NULL) < 0) - return afs_protocol_error(call, -EBADMSG); + ret = afs_decode_status(call, &bp, &vnode->status, vnode, + &call->expected_version, NULL); + if (ret < 0) + return ret; xdr_decode_AFSCallBack(call, vnode, &bp); - if (call->reply[1]) - xdr_decode_AFSVolSync(&bp, call->reply[1]); + xdr_decode_AFSVolSync(&bp, call->reply[1]); _leave(" = 0 [done]"); return 0; @@ -431,7 +439,10 @@ int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsy struct afs_net *net = afs_v2net(vnode); __be32 *bp; - _enter(",%x,{%x:%u},,", + if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) + return yfs_fs_fetch_file_status(fc, volsync, new_inode); + + _enter(",%x,{%llx:%llu},,", key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); call = afs_alloc_flat_call(net, &afs_RXFSFetchStatus_vnode, @@ -445,6 +456,7 @@ int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsy call->reply[0] = vnode; call->reply[1] = volsync; call->expected_version = new_inode ? 1 : vnode->status.data_version; + call->want_reply_time = true; /* marshall the parameters */ bp = call->request; @@ -468,139 +480,117 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) struct afs_read *req = call->reply[2]; const __be32 *bp; unsigned int size; - void *buffer; int ret; - _enter("{%u,%zu/%u;%llu/%llu}", - call->unmarshall, call->offset, call->count, - req->remain, req->actual_len); + _enter("{%u,%zu/%llu}", + call->unmarshall, iov_iter_count(&call->iter), req->actual_len); switch (call->unmarshall) { case 0: req->actual_len = 0; - call->offset = 0; + req->index = 0; + req->offset = req->pos & (PAGE_SIZE - 1); call->unmarshall++; - if (call->operation_ID != FSFETCHDATA64) { - call->unmarshall++; - goto no_msw; + if (call->operation_ID == FSFETCHDATA64) { + afs_extract_to_tmp64(call); + } else { + call->tmp_u = htonl(0); + afs_extract_to_tmp(call); } - /* extract the upper part of the returned data length of an - * FSFETCHDATA64 op (which should always be 0 using this - * client) */ - case 1: - _debug("extract data length (MSW)"); - ret = afs_extract_data(call, &call->tmp, 4, true); - if (ret < 0) - return ret; - - req->actual_len = ntohl(call->tmp); - req->actual_len <<= 32; - call->offset = 0; - call->unmarshall++; - - no_msw: /* extract the returned data length */ - case 2: + case 1: _debug("extract data length"); - ret = afs_extract_data(call, &call->tmp, 4, true); + ret = afs_extract_data(call, true); if (ret < 0) return ret; - req->actual_len |= ntohl(call->tmp); + req->actual_len = be64_to_cpu(call->tmp64); _debug("DATA length: %llu", req->actual_len); - - req->remain = req->actual_len; - call->offset = req->pos & (PAGE_SIZE - 1); - req->index = 0; - if (req->actual_len == 0) + req->remain = min(req->len, req->actual_len); + if (req->remain == 0) goto no_more_data; + call->unmarshall++; begin_page: ASSERTCMP(req->index, <, req->nr_pages); - if (req->remain > PAGE_SIZE - call->offset) - size = PAGE_SIZE - call->offset; + if (req->remain > PAGE_SIZE - req->offset) + size = PAGE_SIZE - req->offset; else size = req->remain; - call->count = call->offset + size; - ASSERTCMP(call->count, <=, 
PAGE_SIZE); - req->remain -= size; + call->bvec[0].bv_len = size; + call->bvec[0].bv_offset = req->offset; + call->bvec[0].bv_page = req->pages[req->index]; + iov_iter_bvec(&call->iter, READ, call->bvec, 1, size); + ASSERTCMP(size, <=, PAGE_SIZE); /* extract the returned data */ - case 3: - _debug("extract data %llu/%llu %zu/%u", - req->remain, req->actual_len, call->offset, call->count); + case 2: + _debug("extract data %zu/%llu", + iov_iter_count(&call->iter), req->remain); - buffer = kmap(req->pages[req->index]); - ret = afs_extract_data(call, buffer, call->count, true); - kunmap(req->pages[req->index]); + ret = afs_extract_data(call, true); if (ret < 0) return ret; - if (call->offset == PAGE_SIZE) { + req->remain -= call->bvec[0].bv_len; + req->offset += call->bvec[0].bv_len; + ASSERTCMP(req->offset, <=, PAGE_SIZE); + if (req->offset == PAGE_SIZE) { + req->offset = 0; if (req->page_done) req->page_done(call, req); req->index++; - if (req->remain > 0) { - call->offset = 0; - if (req->index >= req->nr_pages) { - call->unmarshall = 4; - goto begin_discard; - } + if (req->remain > 0) goto begin_page; - } } - goto no_more_data; + + ASSERTCMP(req->remain, ==, 0); + if (req->actual_len <= req->len) + goto no_more_data; /* Discard any excess data the server gave us */ - begin_discard: - case 4: - size = min_t(loff_t, sizeof(afs_discard_buffer), req->remain); - call->count = size; - _debug("extract discard %llu/%llu %zu/%u", - req->remain, req->actual_len, call->offset, call->count); - - call->offset = 0; - ret = afs_extract_data(call, afs_discard_buffer, call->count, true); - req->remain -= call->offset; + iov_iter_discard(&call->iter, READ, req->actual_len - req->len); + call->unmarshall = 3; + case 3: + _debug("extract discard %zu/%llu", + iov_iter_count(&call->iter), req->actual_len - req->len); + + ret = afs_extract_data(call, true); if (ret < 0) return ret; - if (req->remain > 0) - goto begin_discard; no_more_data: - call->offset = 0; - call->unmarshall = 5; + call->unmarshall = 4; + afs_extract_to_buf(call, (21 + 3 + 6) * 4); /* extract the metadata */ - case 5: - ret = afs_extract_data(call, call->buffer, - (21 + 3 + 6) * 4, false); + case 4: + ret = afs_extract_data(call, false); if (ret < 0) return ret; bp = call->buffer; - if (afs_decode_status(call, &bp, &vnode->status, vnode, - &vnode->status.data_version, req) < 0) - return afs_protocol_error(call, -EBADMSG); + ret = afs_decode_status(call, &bp, &vnode->status, vnode, + &vnode->status.data_version, req); + if (ret < 0) + return ret; xdr_decode_AFSCallBack(call, vnode, &bp); - if (call->reply[1]) - xdr_decode_AFSVolSync(&bp, call->reply[1]); + xdr_decode_AFSVolSync(&bp, call->reply[1]); - call->offset = 0; call->unmarshall++; - case 6: + case 5: break; } for (; req->index < req->nr_pages; req->index++) { - if (call->count < PAGE_SIZE) + if (req->offset < PAGE_SIZE) zero_user_segment(req->pages[req->index], - call->count, PAGE_SIZE); + req->offset, PAGE_SIZE); if (req->page_done) req->page_done(call, req); - call->count = 0; + req->offset = 0; } _leave(" = 0 [done]"); @@ -653,6 +643,7 @@ static int afs_fs_fetch_data64(struct afs_fs_cursor *fc, struct afs_read *req) call->reply[1] = NULL; /* volsync */ call->reply[2] = req; call->expected_version = vnode->status.data_version; + call->want_reply_time = true; /* marshall the parameters */ bp = call->request; @@ -682,6 +673,9 @@ int afs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_read *req) struct afs_net *net = afs_v2net(vnode); __be32 *bp; + if (test_bit(AFS_SERVER_FL_IS_YFS, 
&fc->cbi->server->flags)) + return yfs_fs_fetch_data(fc, req); + if (upper_32_bits(req->pos) || upper_32_bits(req->len) || upper_32_bits(req->pos + req->len)) @@ -698,6 +692,7 @@ int afs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_read *req) call->reply[1] = NULL; /* volsync */ call->reply[2] = req; call->expected_version = vnode->status.data_version; + call->want_reply_time = true; /* marshall the parameters */ bp = call->request; @@ -733,11 +728,14 @@ static int afs_deliver_fs_create_vnode(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; xdr_decode_AFSFid(&bp, call->reply[1]); - if (afs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL) < 0 || - afs_decode_status(call, &bp, &vnode->status, vnode, - &call->expected_version, NULL) < 0) - return afs_protocol_error(call, -EBADMSG); - xdr_decode_AFSCallBack_raw(&bp, call->reply[3]); + ret = afs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL); + if (ret < 0) + return ret; + ret = afs_decode_status(call, &bp, &vnode->status, vnode, + &call->expected_version, NULL); + if (ret < 0) + return ret; + xdr_decode_AFSCallBack_raw(call, &bp, call->reply[3]); /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ _leave(" = 0 [done]"); @@ -778,6 +776,15 @@ int afs_fs_create(struct afs_fs_cursor *fc, size_t namesz, reqsz, padsz; __be32 *bp; + if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)){ + if (S_ISDIR(mode)) + return yfs_fs_make_dir(fc, name, mode, current_data_version, + newfid, newstatus, newcb); + else + return yfs_fs_create_file(fc, name, mode, current_data_version, + newfid, newstatus, newcb); + } + _enter(""); namesz = strlen(name); @@ -796,6 +803,7 @@ int afs_fs_create(struct afs_fs_cursor *fc, call->reply[2] = newstatus; call->reply[3] = newcb; call->expected_version = current_data_version + 1; + call->want_reply_time = true; /* marshall the parameters */ bp = call->request; @@ -839,9 +847,10 @@ static int afs_deliver_fs_remove(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - if (afs_decode_status(call, &bp, &vnode->status, vnode, - &call->expected_version, NULL) < 0) - return afs_protocol_error(call, -EBADMSG); + ret = afs_decode_status(call, &bp, &vnode->status, vnode, + &call->expected_version, NULL); + if (ret < 0) + return ret; /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ _leave(" = 0 [done]"); @@ -868,15 +877,18 @@ static const struct afs_call_type afs_RXFSRemoveDir = { /* * remove a file or directory */ -int afs_fs_remove(struct afs_fs_cursor *fc, const char *name, bool isdir, - u64 current_data_version) +int afs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode, + const char *name, bool isdir, u64 current_data_version) { - struct afs_vnode *vnode = fc->vnode; + struct afs_vnode *dvnode = fc->vnode; struct afs_call *call; - struct afs_net *net = afs_v2net(vnode); + struct afs_net *net = afs_v2net(dvnode); size_t namesz, reqsz, padsz; __be32 *bp; + if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) + return yfs_fs_remove(fc, vnode, name, isdir, current_data_version); + _enter(""); namesz = strlen(name); @@ -890,15 +902,16 @@ int afs_fs_remove(struct afs_fs_cursor *fc, const char *name, bool isdir, return -ENOMEM; call->key = fc->key; - call->reply[0] = vnode; + call->reply[0] = dvnode; + call->reply[1] = vnode; call->expected_version = current_data_version + 1; /* marshall the parameters */ bp = call->request; *bp++ = htonl(isdir ? 
FSREMOVEDIR : FSREMOVEFILE); - *bp++ = htonl(vnode->fid.vid); - *bp++ = htonl(vnode->fid.vnode); - *bp++ = htonl(vnode->fid.unique); + *bp++ = htonl(dvnode->fid.vid); + *bp++ = htonl(dvnode->fid.vnode); + *bp++ = htonl(dvnode->fid.unique); *bp++ = htonl(namesz); memcpy(bp, name, namesz); bp = (void *) bp + namesz; @@ -908,7 +921,7 @@ int afs_fs_remove(struct afs_fs_cursor *fc, const char *name, bool isdir, } afs_use_fs_server(call, fc->cbi); - trace_afs_make_fs_call(call, &vnode->fid); + trace_afs_make_fs_call(call, &dvnode->fid); return afs_make_call(&fc->ac, call, GFP_NOFS, false); } @@ -929,10 +942,13 @@ static int afs_deliver_fs_link(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - if (afs_decode_status(call, &bp, &vnode->status, vnode, NULL, NULL) < 0 || - afs_decode_status(call, &bp, &dvnode->status, dvnode, - &call->expected_version, NULL) < 0) - return afs_protocol_error(call, -EBADMSG); + ret = afs_decode_status(call, &bp, &vnode->status, vnode, NULL, NULL); + if (ret < 0) + return ret; + ret = afs_decode_status(call, &bp, &dvnode->status, dvnode, + &call->expected_version, NULL); + if (ret < 0) + return ret; /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ _leave(" = 0 [done]"); @@ -961,6 +977,9 @@ int afs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode, size_t namesz, reqsz, padsz; __be32 *bp; + if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) + return yfs_fs_link(fc, vnode, name, current_data_version); + _enter(""); namesz = strlen(name); @@ -1016,10 +1035,13 @@ static int afs_deliver_fs_symlink(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; xdr_decode_AFSFid(&bp, call->reply[1]); - if (afs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL) || - afs_decode_status(call, &bp, &vnode->status, vnode, - &call->expected_version, NULL) < 0) - return afs_protocol_error(call, -EBADMSG); + ret = afs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL); + if (ret < 0) + return ret; + ret = afs_decode_status(call, &bp, &vnode->status, vnode, + &call->expected_version, NULL); + if (ret < 0) + return ret; /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ _leave(" = 0 [done]"); @@ -1052,6 +1074,10 @@ int afs_fs_symlink(struct afs_fs_cursor *fc, size_t namesz, reqsz, padsz, c_namesz, c_padsz; __be32 *bp; + if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) + return yfs_fs_symlink(fc, name, contents, current_data_version, + newfid, newstatus); + _enter(""); namesz = strlen(name); @@ -1122,13 +1148,16 @@ static int afs_deliver_fs_rename(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - if (afs_decode_status(call, &bp, &orig_dvnode->status, orig_dvnode, - &call->expected_version, NULL) < 0) - return afs_protocol_error(call, -EBADMSG); - if (new_dvnode != orig_dvnode && - afs_decode_status(call, &bp, &new_dvnode->status, new_dvnode, - &call->expected_version_2, NULL) < 0) - return afs_protocol_error(call, -EBADMSG); + ret = afs_decode_status(call, &bp, &orig_dvnode->status, orig_dvnode, + &call->expected_version, NULL); + if (ret < 0) + return ret; + if (new_dvnode != orig_dvnode) { + ret = afs_decode_status(call, &bp, &new_dvnode->status, new_dvnode, + &call->expected_version_2, NULL); + if (ret < 0) + return ret; + } /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ _leave(" = 0 [done]"); @@ -1161,6 +1190,12 @@ int afs_fs_rename(struct afs_fs_cursor *fc, size_t reqsz, o_namesz, o_padsz, n_namesz, 
n_padsz; __be32 *bp; + if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) + return yfs_fs_rename(fc, orig_name, + new_dvnode, new_name, + current_orig_data_version, + current_new_data_version); + _enter(""); o_namesz = strlen(orig_name); @@ -1231,9 +1266,10 @@ static int afs_deliver_fs_store_data(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - if (afs_decode_status(call, &bp, &vnode->status, vnode, - &call->expected_version, NULL) < 0) - return afs_protocol_error(call, -EBADMSG); + ret = afs_decode_status(call, &bp, &vnode->status, vnode, + &call->expected_version, NULL); + if (ret < 0) + return ret; /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ afs_pages_written_back(vnode, call); @@ -1273,7 +1309,7 @@ static int afs_fs_store_data64(struct afs_fs_cursor *fc, struct afs_net *net = afs_v2net(vnode); __be32 *bp; - _enter(",%x,{%x:%u},,", + _enter(",%x,{%llx:%llu},,", key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); call = afs_alloc_flat_call(net, &afs_RXFSStoreData64, @@ -1330,7 +1366,10 @@ int afs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping, loff_t size, pos, i_size; __be32 *bp; - _enter(",%x,{%x:%u},,", + if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) + return yfs_fs_store_data(fc, mapping, first, last, offset, to); + + _enter(",%x,{%llx:%llu},,", key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); size = (loff_t)to - (loff_t)offset; @@ -1407,9 +1446,10 @@ static int afs_deliver_fs_store_status(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - if (afs_decode_status(call, &bp, &vnode->status, vnode, - &call->expected_version, NULL) < 0) - return afs_protocol_error(call, -EBADMSG); + ret = afs_decode_status(call, &bp, &vnode->status, vnode, + &call->expected_version, NULL); + if (ret < 0) + return ret; /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */ _leave(" = 0 [done]"); @@ -1451,7 +1491,7 @@ static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr) struct afs_net *net = afs_v2net(vnode); __be32 *bp; - _enter(",%x,{%x:%u},,", + _enter(",%x,{%llx:%llu},,", key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); ASSERT(attr->ia_valid & ATTR_SIZE); @@ -1498,7 +1538,7 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr) struct afs_net *net = afs_v2net(vnode); __be32 *bp; - _enter(",%x,{%x:%u},,", + _enter(",%x,{%llx:%llu},,", key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); ASSERT(attr->ia_valid & ATTR_SIZE); @@ -1544,10 +1584,13 @@ int afs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr) struct afs_net *net = afs_v2net(vnode); __be32 *bp; + if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) + return yfs_fs_setattr(fc, attr); + if (attr->ia_valid & ATTR_SIZE) return afs_fs_setattr_size(fc, attr); - _enter(",%x,{%x:%u},,", + _enter(",%x,{%llx:%llu},,", key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); call = afs_alloc_flat_call(net, &afs_RXFSStoreStatus, @@ -1581,164 +1624,114 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) { const __be32 *bp; char *p; + u32 size; int ret; _enter("{%u}", call->unmarshall); switch (call->unmarshall) { case 0: - call->offset = 0; call->unmarshall++; + afs_extract_to_buf(call, 12 * 4); /* extract the returned status record */ case 1: _debug("extract status"); - ret = afs_extract_data(call, call->buffer, - 12 * 4, true); + ret = afs_extract_data(call, true); if (ret < 0) return ret; bp = 
call->buffer; xdr_decode_AFSFetchVolumeStatus(&bp, call->reply[1]); - call->offset = 0; call->unmarshall++; + afs_extract_to_tmp(call); /* extract the volume name length */ case 2: - ret = afs_extract_data(call, &call->tmp, 4, true); + ret = afs_extract_data(call, true); if (ret < 0) return ret; call->count = ntohl(call->tmp); _debug("volname length: %u", call->count); if (call->count >= AFSNAMEMAX) - return afs_protocol_error(call, -EBADMSG); - call->offset = 0; + return afs_protocol_error(call, -EBADMSG, + afs_eproto_volname_len); + size = (call->count + 3) & ~3; /* It's padded */ + afs_extract_begin(call, call->reply[2], size); call->unmarshall++; /* extract the volume name */ case 3: _debug("extract volname"); - if (call->count > 0) { - ret = afs_extract_data(call, call->reply[2], - call->count, true); - if (ret < 0) - return ret; - } + ret = afs_extract_data(call, true); + if (ret < 0) + return ret; p = call->reply[2]; p[call->count] = 0; _debug("volname '%s'", p); - - call->offset = 0; + afs_extract_to_tmp(call); call->unmarshall++; - /* extract the volume name padding */ - if ((call->count & 3) == 0) { - call->unmarshall++; - goto no_volname_padding; - } - call->count = 4 - (call->count & 3); - - case 4: - ret = afs_extract_data(call, call->buffer, - call->count, true); - if (ret < 0) - return ret; - - call->offset = 0; - call->unmarshall++; - no_volname_padding: - /* extract the offline message length */ - case 5: - ret = afs_extract_data(call, &call->tmp, 4, true); + case 4: + ret = afs_extract_data(call, true); if (ret < 0) return ret; call->count = ntohl(call->tmp); _debug("offline msg length: %u", call->count); if (call->count >= AFSNAMEMAX) - return afs_protocol_error(call, -EBADMSG); - call->offset = 0; + return afs_protocol_error(call, -EBADMSG, + afs_eproto_offline_msg_len); + size = (call->count + 3) & ~3; /* It's padded */ + afs_extract_begin(call, call->reply[2], size); call->unmarshall++; /* extract the offline message */ - case 6: + case 5: _debug("extract offline"); - if (call->count > 0) { - ret = afs_extract_data(call, call->reply[2], - call->count, true); - if (ret < 0) - return ret; - } + ret = afs_extract_data(call, true); + if (ret < 0) + return ret; p = call->reply[2]; p[call->count] = 0; _debug("offline '%s'", p); - call->offset = 0; + afs_extract_to_tmp(call); call->unmarshall++; - /* extract the offline message padding */ - if ((call->count & 3) == 0) { - call->unmarshall++; - goto no_offline_padding; - } - call->count = 4 - (call->count & 3); - - case 7: - ret = afs_extract_data(call, call->buffer, - call->count, true); - if (ret < 0) - return ret; - - call->offset = 0; - call->unmarshall++; - no_offline_padding: - /* extract the message of the day length */ - case 8: - ret = afs_extract_data(call, &call->tmp, 4, true); + case 6: + ret = afs_extract_data(call, true); if (ret < 0) return ret; call->count = ntohl(call->tmp); _debug("motd length: %u", call->count); if (call->count >= AFSNAMEMAX) - return afs_protocol_error(call, -EBADMSG); - call->offset = 0; + return afs_protocol_error(call, -EBADMSG, + afs_eproto_motd_len); + size = (call->count + 3) & ~3; /* It's padded */ + afs_extract_begin(call, call->reply[2], size); call->unmarshall++; /* extract the message of the day */ - case 9: + case 7: _debug("extract motd"); - if (call->count > 0) { - ret = afs_extract_data(call, call->reply[2], - call->count, true); - if (ret < 0) - return ret; - } + ret = afs_extract_data(call, false); + if (ret < 0) + return ret; p = call->reply[2]; p[call->count] = 0; 
_debug("motd '%s'", p); - call->offset = 0; call->unmarshall++; - /* extract the message of the day padding */ - call->count = (4 - (call->count & 3)) & 3; - - case 10: - ret = afs_extract_data(call, call->buffer, - call->count, false); - if (ret < 0) - return ret; - - call->offset = 0; - call->unmarshall++; - case 11: + case 8: break; } @@ -1778,6 +1771,9 @@ int afs_fs_get_volume_status(struct afs_fs_cursor *fc, __be32 *bp; void *tmpbuf; + if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) + return yfs_fs_get_volume_status(fc, vs); + _enter(""); tmpbuf = kmalloc(AFSOPAQUEMAX, GFP_KERNEL); @@ -1867,6 +1863,9 @@ int afs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type) struct afs_net *net = afs_v2net(vnode); __be32 *bp; + if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) + return yfs_fs_set_lock(fc, type); + _enter(""); call = afs_alloc_flat_call(net, &afs_RXFSSetLock, 5 * 4, 6 * 4); @@ -1899,6 +1898,9 @@ int afs_fs_extend_lock(struct afs_fs_cursor *fc) struct afs_net *net = afs_v2net(vnode); __be32 *bp; + if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) + return yfs_fs_extend_lock(fc); + _enter(""); call = afs_alloc_flat_call(net, &afs_RXFSExtendLock, 4 * 4, 6 * 4); @@ -1930,6 +1932,9 @@ int afs_fs_release_lock(struct afs_fs_cursor *fc) struct afs_net *net = afs_v2net(vnode); __be32 *bp; + if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) + return yfs_fs_release_lock(fc); + _enter(""); call = afs_alloc_flat_call(net, &afs_RXFSReleaseLock, 4 * 4, 6 * 4); @@ -2004,19 +2009,16 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call) u32 count; int ret; - _enter("{%u,%zu/%u}", call->unmarshall, call->offset, call->count); + _enter("{%u,%zu}", call->unmarshall, iov_iter_count(&call->iter)); -again: switch (call->unmarshall) { case 0: - call->offset = 0; + afs_extract_to_tmp(call); call->unmarshall++; /* Extract the capabilities word count */ case 1: - ret = afs_extract_data(call, &call->tmp, - 1 * sizeof(__be32), - true); + ret = afs_extract_data(call, true); if (ret < 0) return ret; @@ -2024,24 +2026,17 @@ again: call->count = count; call->count2 = count; - call->offset = 0; + iov_iter_discard(&call->iter, READ, count * sizeof(__be32)); call->unmarshall++; /* Extract capabilities words */ case 2: - count = min(call->count, 16U); - ret = afs_extract_data(call, call->buffer, - count * sizeof(__be32), - call->count > 16); + ret = afs_extract_data(call, false); if (ret < 0) return ret; /* TODO: Examine capabilities */ - call->count -= count; - if (call->count > 0) - goto again; - call->offset = 0; call->unmarshall++; break; } @@ -2050,6 +2045,14 @@ again: return 0; } +static void afs_destroy_fs_get_capabilities(struct afs_call *call) +{ + struct afs_server *server = call->reply[0]; + + afs_put_server(call->net, server); + afs_flat_call_destructor(call); +} + /* * FS.GetCapabilities operation type */ @@ -2057,7 +2060,8 @@ static const struct afs_call_type afs_RXFSGetCapabilities = { .name = "FS.GetCapabilities", .op = afs_FS_GetCapabilities, .deliver = afs_deliver_fs_get_capabilities, - .destructor = afs_flat_call_destructor, + .done = afs_fileserver_probe_result, + .destructor = afs_destroy_fs_get_capabilities, }; /* @@ -2067,7 +2071,9 @@ static const struct afs_call_type afs_RXFSGetCapabilities = { int afs_fs_get_capabilities(struct afs_net *net, struct afs_server *server, struct afs_addr_cursor *ac, - struct key *key) + struct key *key, + unsigned int server_index, + bool async) { struct afs_call *call; __be32 *bp; @@ -2079,6 
+2085,10 @@ int afs_fs_get_capabilities(struct afs_net *net, return -ENOMEM; call->key = key; + call->reply[0] = afs_get_server(server); + call->reply[1] = (void *)(long)server_index; + call->upgrade = true; + call->want_reply_time = true; /* marshall the parameters */ bp = call->request; @@ -2086,7 +2096,7 @@ int afs_fs_get_capabilities(struct afs_net *net, /* Can't take a ref on server */ trace_afs_make_fs_call(call, NULL); - return afs_make_call(ac, call, GFP_NOFS, false); + return afs_make_call(ac, call, GFP_NOFS, async); } /* @@ -2097,7 +2107,7 @@ static int afs_deliver_fs_fetch_status(struct afs_call *call) struct afs_file_status *status = call->reply[1]; struct afs_callback *callback = call->reply[2]; struct afs_volsync *volsync = call->reply[3]; - struct afs_vnode *vnode = call->reply[0]; + struct afs_fid *fid = call->reply[0]; const __be32 *bp; int ret; @@ -2105,21 +2115,16 @@ static int afs_deliver_fs_fetch_status(struct afs_call *call) if (ret < 0) return ret; - _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode); + _enter("{%llx:%llu}", fid->vid, fid->vnode); /* unmarshall the reply once we've received all of it */ bp = call->buffer; - afs_decode_status(call, &bp, status, vnode, - &call->expected_version, NULL); - callback[call->count].version = ntohl(bp[0]); - callback[call->count].expiry = ntohl(bp[1]); - callback[call->count].type = ntohl(bp[2]); - if (vnode) - xdr_decode_AFSCallBack(call, vnode, &bp); - else - bp += 3; - if (volsync) - xdr_decode_AFSVolSync(&bp, volsync); + ret = afs_decode_status(call, &bp, status, NULL, + &call->expected_version, NULL); + if (ret < 0) + return ret; + xdr_decode_AFSCallBack_raw(call, &bp, callback); + xdr_decode_AFSVolSync(&bp, volsync); _leave(" = 0 [done]"); return 0; @@ -2148,7 +2153,10 @@ int afs_fs_fetch_status(struct afs_fs_cursor *fc, struct afs_call *call; __be32 *bp; - _enter(",%x,{%x:%u},,", + if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) + return yfs_fs_fetch_status(fc, net, fid, status, callback, volsync); + + _enter(",%x,{%llx:%llu},,", key_serial(fc->key), fid->vid, fid->vnode); call = afs_alloc_flat_call(net, &afs_RXFSFetchStatus, 16, (21 + 3 + 6) * 4); @@ -2158,11 +2166,12 @@ int afs_fs_fetch_status(struct afs_fs_cursor *fc, } call->key = fc->key; - call->reply[0] = NULL; /* vnode for fid[0] */ + call->reply[0] = fid; call->reply[1] = status; call->reply[2] = callback; call->reply[3] = volsync; call->expected_version = 1; /* vnode->status.data_version */ + call->want_reply_time = true; /* marshall the parameters */ bp = call->request; @@ -2193,38 +2202,40 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) switch (call->unmarshall) { case 0: - call->offset = 0; + afs_extract_to_tmp(call); call->unmarshall++; /* Extract the file status count and array in two steps */ case 1: _debug("extract status count"); - ret = afs_extract_data(call, &call->tmp, 4, true); + ret = afs_extract_data(call, true); if (ret < 0) return ret; tmp = ntohl(call->tmp); _debug("status count: %u/%u", tmp, call->count2); if (tmp != call->count2) - return afs_protocol_error(call, -EBADMSG); + return afs_protocol_error(call, -EBADMSG, + afs_eproto_ibulkst_count); call->count = 0; call->unmarshall++; more_counts: - call->offset = 0; + afs_extract_to_buf(call, 21 * sizeof(__be32)); case 2: _debug("extract status array %u", call->count); - ret = afs_extract_data(call, call->buffer, 21 * 4, true); + ret = afs_extract_data(call, true); if (ret < 0) return ret; bp = call->buffer; statuses = call->reply[1]; - if 
(afs_decode_status(call, &bp, &statuses[call->count], - call->count == 0 ? vnode : NULL, - NULL, NULL) < 0) - return afs_protocol_error(call, -EBADMSG); + ret = afs_decode_status(call, &bp, &statuses[call->count], + call->count == 0 ? vnode : NULL, + NULL, NULL); + if (ret < 0) + return ret; call->count++; if (call->count < call->count2) @@ -2232,27 +2243,28 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) call->count = 0; call->unmarshall++; - call->offset = 0; + afs_extract_to_tmp(call); /* Extract the callback count and array in two steps */ case 3: _debug("extract CB count"); - ret = afs_extract_data(call, &call->tmp, 4, true); + ret = afs_extract_data(call, true); if (ret < 0) return ret; tmp = ntohl(call->tmp); _debug("CB count: %u", tmp); if (tmp != call->count2) - return afs_protocol_error(call, -EBADMSG); + return afs_protocol_error(call, -EBADMSG, + afs_eproto_ibulkst_cb_count); call->count = 0; call->unmarshall++; more_cbs: - call->offset = 0; + afs_extract_to_buf(call, 3 * sizeof(__be32)); case 4: _debug("extract CB array"); - ret = afs_extract_data(call, call->buffer, 3 * 4, true); + ret = afs_extract_data(call, true); if (ret < 0) return ret; @@ -2260,7 +2272,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) bp = call->buffer; callbacks = call->reply[2]; callbacks[call->count].version = ntohl(bp[0]); - callbacks[call->count].expiry = ntohl(bp[1]); + callbacks[call->count].expires_at = xdr_decode_expiry(call, ntohl(bp[1])); callbacks[call->count].type = ntohl(bp[2]); statuses = call->reply[1]; if (call->count == 0 && vnode && statuses[0].abort_code == 0) @@ -2269,19 +2281,17 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) if (call->count < call->count2) goto more_cbs; - call->offset = 0; + afs_extract_to_buf(call, 6 * sizeof(__be32)); call->unmarshall++; case 5: - ret = afs_extract_data(call, call->buffer, 6 * 4, false); + ret = afs_extract_data(call, false); if (ret < 0) return ret; bp = call->buffer; - if (call->reply[3]) - xdr_decode_AFSVolSync(&bp, call->reply[3]); + xdr_decode_AFSVolSync(&bp, call->reply[3]); - call->offset = 0; call->unmarshall++; case 6: @@ -2317,7 +2327,11 @@ int afs_fs_inline_bulk_status(struct afs_fs_cursor *fc, __be32 *bp; int i; - _enter(",%x,{%x:%u},%u", + if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) + return yfs_fs_inline_bulk_status(fc, net, fids, statuses, callbacks, + nr_fids, volsync); + + _enter(",%x,{%llx:%llu},%u", key_serial(fc->key), fids[0].vid, fids[1].vnode, nr_fids); call = afs_alloc_flat_call(net, &afs_RXFSInlineBulkStatus, @@ -2334,6 +2348,7 @@ int afs_fs_inline_bulk_status(struct afs_fs_cursor *fc, call->reply[2] = callbacks; call->reply[3] = volsync; call->count2 = nr_fids; + call->want_reply_time = true; /* marshall the parameters */ bp = call->request; diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 479b7fdda124..4c6d8e1112c2 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -82,7 +82,7 @@ static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key) default: printk("kAFS: AFS vnode with undefined type\n"); read_sequnlock_excl(&vnode->cb_lock); - return afs_protocol_error(NULL, -EBADMSG); + return afs_protocol_error(NULL, -EBADMSG, afs_eproto_file_type); } inode->i_blocks = 0; @@ -100,7 +100,7 @@ int afs_fetch_status(struct afs_vnode *vnode, struct key *key, bool new_inode) struct afs_fs_cursor fc; int ret; - _enter("%s,{%x:%u.%u,S=%lx}", + _enter("%s,{%llx:%llu.%u,S=%lx}", vnode->volume->name, vnode->fid.vid, 
vnode->fid.vnode, vnode->fid.unique, vnode->flags); @@ -127,9 +127,9 @@ int afs_fetch_status(struct afs_vnode *vnode, struct key *key, bool new_inode) int afs_iget5_test(struct inode *inode, void *opaque) { struct afs_iget_data *data = opaque; + struct afs_vnode *vnode = AFS_FS_I(inode); - return inode->i_ino == data->fid.vnode && - inode->i_generation == data->fid.unique; + return memcmp(&vnode->fid, &data->fid, sizeof(data->fid)) == 0; } /* @@ -150,11 +150,14 @@ static int afs_iget5_set(struct inode *inode, void *opaque) struct afs_iget_data *data = opaque; struct afs_vnode *vnode = AFS_FS_I(inode); - inode->i_ino = data->fid.vnode; - inode->i_generation = data->fid.unique; vnode->fid = data->fid; vnode->volume = data->volume; + /* YFS supports 96-bit vnode IDs, but Linux only supports + * 64-bit inode numbers. + */ + inode->i_ino = data->fid.vnode; + inode->i_generation = data->fid.unique; return 0; } @@ -193,7 +196,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root) return ERR_PTR(-ENOMEM); } - _debug("GOT INODE %p { ino=%lu, vl=%x, vn=%x, u=%x }", + _debug("GOT INODE %p { ino=%lu, vl=%llx, vn=%llx, u=%x }", inode, inode->i_ino, data.fid.vid, data.fid.vnode, data.fid.unique); @@ -252,8 +255,8 @@ static void afs_get_inode_cache(struct afs_vnode *vnode) key.vnode_id = vnode->fid.vnode; key.unique = vnode->fid.unique; - key.vnode_id_ext[0] = 0; - key.vnode_id_ext[1] = 0; + key.vnode_id_ext[0] = vnode->fid.vnode >> 32; + key.vnode_id_ext[1] = vnode->fid.vnode_hi; aux.data_version = vnode->status.data_version; vnode->cache = fscache_acquire_cookie(vnode->volume->cache, @@ -277,7 +280,7 @@ struct inode *afs_iget(struct super_block *sb, struct key *key, struct inode *inode; int ret; - _enter(",{%x:%u.%u},,", fid->vid, fid->vnode, fid->unique); + _enter(",{%llx:%llu.%u},,", fid->vid, fid->vnode, fid->unique); as = sb->s_fs_info; data.volume = as->volume; @@ -289,7 +292,7 @@ struct inode *afs_iget(struct super_block *sb, struct key *key, return ERR_PTR(-ENOMEM); } - _debug("GOT INODE %p { vl=%x vn=%x, u=%x }", + _debug("GOT INODE %p { vl=%llx vn=%llx, u=%x }", inode, fid->vid, fid->vnode, fid->unique); vnode = AFS_FS_I(inode); @@ -314,11 +317,11 @@ struct inode *afs_iget(struct super_block *sb, struct key *key, * didn't give us a callback) */ vnode->cb_version = 0; vnode->cb_type = 0; - vnode->cb_expires_at = 0; + vnode->cb_expires_at = ktime_get(); } else { vnode->cb_version = cb->version; vnode->cb_type = cb->type; - vnode->cb_expires_at = cb->expiry; + vnode->cb_expires_at = cb->expires_at; vnode->cb_interest = afs_get_cb_interest(cbi); set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); } @@ -352,7 +355,7 @@ bad_inode: */ void afs_zap_data(struct afs_vnode *vnode) { - _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode); + _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode); #ifdef CONFIG_AFS_FSCACHE fscache_invalidate(vnode->cache); @@ -382,7 +385,7 @@ int afs_validate(struct afs_vnode *vnode, struct key *key) bool valid = false; int ret; - _enter("{v={%x:%u} fl=%lx},%x", + _enter("{v={%llx:%llu} fl=%lx},%x", vnode->fid.vid, vnode->fid.vnode, vnode->flags, key_serial(key)); @@ -501,7 +504,7 @@ void afs_evict_inode(struct inode *inode) vnode = AFS_FS_I(inode); - _enter("{%x:%u.%d}", + _enter("{%llx:%llu.%d}", vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); @@ -550,7 +553,7 @@ int afs_setattr(struct dentry *dentry, struct iattr *attr) struct key *key; int ret; - _enter("{%x:%u},{n=%pd},%x", + _enter("{%llx:%llu},{n=%pd},%x", vnode->fid.vid, vnode->fid.vnode, dentry, 
attr->ia_valid); diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 72de1f157d20..5da3b09b7518 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -22,6 +22,7 @@ #include <linux/backing-dev.h> #include <linux/uuid.h> #include <linux/mm_types.h> +#include <linux/dns_resolver.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/sock.h> @@ -75,10 +76,13 @@ struct afs_addr_list { u32 version; /* Version */ unsigned char max_addrs; unsigned char nr_addrs; - unsigned char index; /* Address currently in use */ + unsigned char preferred; /* Preferred address */ unsigned char nr_ipv4; /* Number of IPv4 addresses */ + enum dns_record_source source:8; + enum dns_lookup_status status:8; unsigned long probed; /* Mask of servers that have been probed */ - unsigned long yfs; /* Mask of servers that are YFS */ + unsigned long failed; /* Mask of addrs that failed locally/ICMP */ + unsigned long responded; /* Mask of addrs that responded */ struct sockaddr_rxrpc addrs[]; #define AFS_MAX_ADDRESSES ((unsigned int)(sizeof(unsigned long) * 8)) }; @@ -88,6 +92,7 @@ struct afs_addr_list { */ struct afs_call { const struct afs_call_type *type; /* type of call */ + struct afs_addr_list *alist; /* Address is alist[addr_ix] */ wait_queue_head_t waitq; /* processes awaiting completion */ struct work_struct async_work; /* async I/O processor */ struct work_struct work; /* actual work processor */ @@ -98,16 +103,22 @@ struct afs_call { struct afs_cb_interest *cbi; /* Callback interest for server used */ void *request; /* request data (first part) */ struct address_space *mapping; /* Pages being written from */ + struct iov_iter iter; /* Buffer iterator */ + struct iov_iter *_iter; /* Iterator currently in use */ + union { /* Convenience for ->iter */ + struct kvec kvec[1]; + struct bio_vec bvec[1]; + }; void *buffer; /* reply receive buffer */ void *reply[4]; /* Where to put the reply */ pgoff_t first; /* first page in mapping to deal with */ pgoff_t last; /* last page in mapping to deal with */ - size_t offset; /* offset into received data store */ atomic_t usage; enum afs_call_state state; spinlock_t state_lock; int error; /* error code */ u32 abort_code; /* Remote abort ID or 0 */ + u32 epoch; unsigned request_size; /* size of request data */ unsigned reply_max; /* maximum size of reply */ unsigned first_offset; /* offset into mapping[first] */ @@ -117,19 +128,28 @@ struct afs_call { unsigned count2; /* count used in unmarshalling */ }; unsigned char unmarshall; /* unmarshalling phase */ + unsigned char addr_ix; /* Address in ->alist */ bool incoming; /* T if incoming call */ bool send_pages; /* T if data from mapping should be sent */ bool need_attention; /* T if RxRPC poked us */ bool async; /* T if asynchronous */ bool ret_reply0; /* T if should return reply[0] on success */ bool upgrade; /* T to request service upgrade */ + bool want_reply_time; /* T if want reply_time */ u16 service_id; /* Actual service ID (after upgrade) */ unsigned int debug_id; /* Trace ID */ u32 operation_ID; /* operation ID for an incoming call */ u32 count; /* count for use in unmarshalling */ - __be32 tmp; /* place to extract temporary data */ + union { /* place to extract temporary data */ + struct { + __be32 tmp_u; + __be32 tmp; + } __attribute__((packed)); + __be64 tmp64; + }; afs_dataversion_t expected_version; /* Updated version expected from store */ afs_dataversion_t expected_version_2; /* 2nd updated version expected from store */ + ktime_t reply_time; /* Time of first reply packet */ }; 
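The tmp_u/tmp vs. tmp64 union added to struct afs_call above is what lets the FetchData deliverer handle the 32-bit and 64-bit length encodings through one code path: the 32-bit case pre-loads tmp_u with htonl(0) and extracts the four wire bytes into tmp, so be64_to_cpu(call->tmp64) then yields the correct value for either opcode. A standalone illustration of that overlay is sketched below, using glibc's byte-order helpers rather than the kernel's; the union and variable names are for demonstration only.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of the tmp_u/tmp vs. tmp64 overlay in struct afs_call. */
union len_buf {
	struct {
		uint32_t tmp_u;		/* big-endian high word */
		uint32_t tmp;		/* big-endian low word */
	} __attribute__((packed));
	uint64_t tmp64;			/* the same eight bytes, viewed as a be64 */
};

int main(void)
{
	union len_buf b;

	/* 32-bit op: zero the high word, then "extract" 4 wire bytes into tmp. */
	b.tmp_u = htobe32(0);
	b.tmp = htobe32(0x1234);
	printf("32-bit length: %llu\n", (unsigned long long)be64toh(b.tmp64));

	/* 64-bit op: all 8 wire bytes land directly in tmp64. */
	b.tmp64 = htobe64(0x123456789ULL);
	printf("64-bit length: %llu\n", (unsigned long long)be64toh(b.tmp64));
	return 0;
}

The packed attribute, as in the patch, guarantees that tmp_u and tmp together occupy exactly the eight bytes aliased by tmp64, so the trick does not depend on structure padding.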
struct afs_call_type { @@ -146,6 +166,9 @@ struct afs_call_type { /* Work function */ void (*work)(struct work_struct *work); + + /* Call done function (gets called immediately on success or failure) */ + void (*done)(struct afs_call *call); }; /* @@ -185,6 +208,7 @@ struct afs_read { refcount_t usage; unsigned int index; /* Which page we're reading into */ unsigned int nr_pages; + unsigned int offset; /* offset into current page */ void (*page_done)(struct afs_call *, struct afs_read *); struct page **pages; struct page *array[]; @@ -343,13 +367,70 @@ struct afs_cell { rwlock_t proc_lock; /* VL server list. */ - rwlock_t vl_addrs_lock; /* Lock on vl_addrs */ - struct afs_addr_list __rcu *vl_addrs; /* List of VL servers */ + rwlock_t vl_servers_lock; /* Lock on vl_servers */ + struct afs_vlserver_list __rcu *vl_servers; + u8 name_len; /* Length of name */ char name[64 + 1]; /* Cell name, case-flattened and NUL-padded */ }; /* + * Volume Location server record. + */ +struct afs_vlserver { + struct rcu_head rcu; + struct afs_addr_list __rcu *addresses; /* List of addresses for this VL server */ + unsigned long flags; +#define AFS_VLSERVER_FL_PROBED 0 /* The VL server has been probed */ +#define AFS_VLSERVER_FL_PROBING 1 /* VL server is being probed */ +#define AFS_VLSERVER_FL_IS_YFS 2 /* Server is YFS not AFS */ + rwlock_t lock; /* Lock on addresses */ + atomic_t usage; + + /* Probe state */ + wait_queue_head_t probe_wq; + atomic_t probe_outstanding; + spinlock_t probe_lock; + struct { + unsigned int rtt; /* RTT as ktime/64 */ + u32 abort_code; + short error; + bool have_result; + bool responded:1; + bool is_yfs:1; + bool not_yfs:1; + bool local_failure:1; + } probe; + + u16 port; + u16 name_len; /* Length of name */ + char name[]; /* Server name, case-flattened */ +}; + +/* + * Weighted list of Volume Location servers. + */ +struct afs_vlserver_entry { + u16 priority; /* Preference (as SRV) */ + u16 weight; /* Weight (as SRV) */ + enum dns_record_source source:8; + enum dns_lookup_status status:8; + struct afs_vlserver *server; +}; + +struct afs_vlserver_list { + struct rcu_head rcu; + atomic_t usage; + u8 nr_servers; + u8 index; /* Server currently in use */ + u8 preferred; /* Preferred server */ + enum dns_record_source source:8; + enum dns_lookup_status status:8; + rwlock_t lock; + struct afs_vlserver_entry servers[]; +}; + +/* * Cached VLDB entry. * * This is pointed to by cell->vldb_entries, indexed by name. @@ -403,8 +484,12 @@ struct afs_server { #define AFS_SERVER_FL_PROBING 6 /* Fileserver is being probed */ #define AFS_SERVER_FL_NO_IBULK 7 /* Fileserver doesn't support FS.InlineBulkStatus */ #define AFS_SERVER_FL_MAY_HAVE_CB 8 /* May have callbacks on this fileserver */ +#define AFS_SERVER_FL_IS_YFS 9 /* Server is YFS not AFS */ +#define AFS_SERVER_FL_NO_RM2 10 /* Fileserver doesn't support YFS.RemoveFile2 */ +#define AFS_SERVER_FL_HAVE_EPOCH 11 /* ->epoch is valid */ atomic_t usage; u32 addr_version; /* Address list version */ + u32 cm_epoch; /* Server RxRPC epoch */ /* file service access */ rwlock_t fs_lock; /* access lock */ @@ -413,6 +498,26 @@ struct afs_server { struct hlist_head cb_volumes; /* List of volume interests on this server */ unsigned cb_s_break; /* Break-everything counter. 
*/ rwlock_t cb_break_lock; /* Volume finding lock */ + + /* Probe state */ + wait_queue_head_t probe_wq; + atomic_t probe_outstanding; + spinlock_t probe_lock; + struct { + unsigned int rtt; /* RTT as ktime/64 */ + u32 abort_code; + u32 cm_epoch; + short error; + bool have_result; + bool responded:1; + bool is_yfs:1; + bool not_yfs:1; + bool local_failure:1; + bool no_epoch:1; + bool cm_probed:1; + bool said_rebooted:1; + bool said_inconsistent:1; + } probe; }; /* @@ -447,8 +552,8 @@ struct afs_server_entry { struct afs_server_list { refcount_t usage; - unsigned short nr_servers; - unsigned short index; /* Server currently in use */ + unsigned char nr_servers; + unsigned char preferred; /* Preferred server */ unsigned short vnovol_mask; /* Servers to be skipped due to VNOVOL */ unsigned int seq; /* Set to ->servers_seq when installed */ rwlock_t lock; @@ -550,6 +655,15 @@ struct afs_vnode { afs_callback_type_t cb_type; /* type of callback */ }; +static inline struct fscache_cookie *afs_vnode_cache(struct afs_vnode *vnode) +{ +#ifdef CONFIG_AFS_FSCACHE + return vnode->cache; +#else + return NULL; +#endif +} + /* * cached security record for one user's attempt to access a vnode */ @@ -586,13 +700,31 @@ struct afs_interface { */ struct afs_addr_cursor { struct afs_addr_list *alist; /* Current address list (pins ref) */ - struct sockaddr_rxrpc *addr; + unsigned long tried; /* Tried addresses */ + signed char index; /* Current address */ + bool responded; /* T if the current address responded */ + unsigned short nr_iterations; /* Number of address iterations */ + short error; u32 abort_code; - unsigned short start; /* Starting point in alist->addrs[] */ - unsigned short index; /* Wrapping offset from start to current addr */ +}; + +/* + * Cursor for iterating over a set of volume location servers. 
+ */ +struct afs_vl_cursor { + struct afs_addr_cursor ac; + struct afs_cell *cell; /* The cell we're querying */ + struct afs_vlserver_list *server_list; /* Current server list (pins ref) */ + struct afs_vlserver *server; /* Server on which this resides */ + struct key *key; /* Key for the server */ + unsigned long untried; /* Bitmask of untried servers */ + short index; /* Current server */ short error; - bool begun; /* T if we've begun iteration */ - bool responded; /* T if the current address responded */ + unsigned short flags; +#define AFS_VL_CURSOR_STOP 0x0001 /* Set to cease iteration */ +#define AFS_VL_CURSOR_RETRY 0x0002 /* Set to do a retry */ +#define AFS_VL_CURSOR_RETRIED 0x0004 /* Set if started a retry */ + unsigned short nr_iterations; /* Number of server iterations */ }; /* @@ -604,10 +736,11 @@ struct afs_fs_cursor { struct afs_server_list *server_list; /* Current server list (pins ref) */ struct afs_cb_interest *cbi; /* Server on which this resides (pins ref) */ struct key *key; /* Key for the server */ + unsigned long untried; /* Bitmask of untried servers */ unsigned int cb_break; /* cb_break + cb_s_break before the call */ unsigned int cb_break_2; /* cb_break + cb_s_break (2nd vnode) */ - unsigned char start; /* Initial index in server list */ - unsigned char index; /* Number of servers tried beyond start */ + short index; /* Current server */ + short error; unsigned short flags; #define AFS_FS_CURSOR_STOP 0x0001 /* Set to cease iteration */ #define AFS_FS_CURSOR_VBUSY 0x0002 /* Set if seen VBUSY */ @@ -615,6 +748,7 @@ struct afs_fs_cursor { #define AFS_FS_CURSOR_VNOVOL 0x0008 /* Set if seen VNOVOL */ #define AFS_FS_CURSOR_CUR_ONLY 0x0010 /* Set if current server only (file lock held) */ #define AFS_FS_CURSOR_NO_VSLEEP 0x0020 /* Set to prevent sleep on VBUSY, VOFFLINE, ... 
*/ + unsigned short nr_iterations; /* Number of server iterations */ }; /* @@ -640,12 +774,12 @@ extern struct afs_addr_list *afs_alloc_addrlist(unsigned int, unsigned short, unsigned short); extern void afs_put_addrlist(struct afs_addr_list *); -extern struct afs_addr_list *afs_parse_text_addrs(const char *, size_t, char, - unsigned short, unsigned short); -extern struct afs_addr_list *afs_dns_query(struct afs_cell *, time64_t *); +extern struct afs_vlserver_list *afs_parse_text_addrs(struct afs_net *, + const char *, size_t, char, + unsigned short, unsigned short); +extern struct afs_vlserver_list *afs_dns_query(struct afs_cell *, time64_t *); extern bool afs_iterate_addresses(struct afs_addr_cursor *); extern int afs_end_cursor(struct afs_addr_cursor *); -extern int afs_set_vl_cursor(struct afs_addr_cursor *, struct afs_cell *); extern void afs_merge_fs_addr4(struct afs_addr_list *, __be32, u16); extern void afs_merge_fs_addr6(struct afs_addr_list *, __be32 *, u16); @@ -668,6 +802,7 @@ extern struct fscache_cookie_def afs_vnode_cache_index_def; * callback.c */ extern void afs_init_callback_state(struct afs_server *); +extern void __afs_break_callback(struct afs_vnode *); extern void afs_break_callback(struct afs_vnode *); extern void afs_break_callbacks(struct afs_server *, size_t, struct afs_callback_break*); @@ -688,10 +823,13 @@ static inline unsigned int afs_calc_vnode_cb_break(struct afs_vnode *vnode) return vnode->cb_break + vnode->cb_s_break + vnode->cb_v_break; } -static inline unsigned int afs_cb_break_sum(struct afs_vnode *vnode, - struct afs_cb_interest *cbi) +static inline bool afs_cb_is_broken(unsigned int cb_break, + const struct afs_vnode *vnode, + const struct afs_cb_interest *cbi) { - return vnode->cb_break + cbi->server->cb_s_break + vnode->volume->cb_v_break; + return !cbi || cb_break != (vnode->cb_break + + cbi->server->cb_s_break + + vnode->volume->cb_v_break); } /* @@ -781,7 +919,7 @@ extern int afs_fs_give_up_callbacks(struct afs_net *, struct afs_server *); extern int afs_fs_fetch_data(struct afs_fs_cursor *, struct afs_read *); extern int afs_fs_create(struct afs_fs_cursor *, const char *, umode_t, u64, struct afs_fid *, struct afs_file_status *, struct afs_callback *); -extern int afs_fs_remove(struct afs_fs_cursor *, const char *, bool, u64); +extern int afs_fs_remove(struct afs_fs_cursor *, struct afs_vnode *, const char *, bool, u64); extern int afs_fs_link(struct afs_fs_cursor *, struct afs_vnode *, const char *, u64); extern int afs_fs_symlink(struct afs_fs_cursor *, const char *, const char *, u64, struct afs_fid *, struct afs_file_status *); @@ -797,7 +935,7 @@ extern int afs_fs_release_lock(struct afs_fs_cursor *); extern int afs_fs_give_up_all_callbacks(struct afs_net *, struct afs_server *, struct afs_addr_cursor *, struct key *); extern int afs_fs_get_capabilities(struct afs_net *, struct afs_server *, - struct afs_addr_cursor *, struct key *); + struct afs_addr_cursor *, struct key *, unsigned int, bool); extern int afs_fs_inline_bulk_status(struct afs_fs_cursor *, struct afs_net *, struct afs_fid *, struct afs_file_status *, struct afs_callback *, unsigned int, @@ -807,6 +945,13 @@ extern int afs_fs_fetch_status(struct afs_fs_cursor *, struct afs_net *, struct afs_callback *, struct afs_volsync *); /* + * fs_probe.c + */ +extern void afs_fileserver_probe_result(struct afs_call *); +extern int afs_probe_fileservers(struct afs_net *, struct key *, struct afs_server_list *); +extern int afs_wait_for_fs_probes(struct afs_server_list *, unsigned long); 
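The fs_probe.c entry points above follow a probe/wait/select pattern that the rotation code later in this patch relies on. Below is a minimal, hypothetical sketch of that pattern; the caller name and error handling are assumptions, while the structures and fields are those declared in this header.

/* Hypothetical caller: probe all servers, wait for an untried one to
 * answer, then pick the responder with the lowest RTT.
 */
static int example_pick_fileserver(struct afs_net *net, struct key *key,
                                   struct afs_server_list *slist,
                                   unsigned long untried)
{
        unsigned int best_rtt = U32_MAX;
        int i, best = -1, ret;

        ret = afs_probe_fileservers(net, key, slist);
        if (ret < 0)
                return ret;
        ret = afs_wait_for_fs_probes(slist, untried);
        if (ret < 0)
                return ret;

        for (i = 0; i < slist->nr_servers; i++) {
                struct afs_server *s = slist->servers[i].server;

                if (!test_bit(i, &untried) || !s->probe.responded)
                        continue;
                if (s->probe.rtt < best_rtt) {
                        best = i;
                        best_rtt = s->probe.rtt;
                }
        }
        return best;    /* index of chosen server, or -1 if none responded */
}

afs_select_fileserver() in rotate.c performs the same selection inline, additionally sticking with the server that already holds the vnode's callback interest when it can.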
+ +/* * inode.c */ extern int afs_fetch_status(struct afs_vnode *, struct key *, bool); @@ -922,7 +1067,6 @@ extern int __net_init afs_open_socket(struct afs_net *); extern void __net_exit afs_close_socket(struct afs_net *); extern void afs_charge_preallocation(struct work_struct *); extern void afs_put_call(struct afs_call *); -extern int afs_queue_call_work(struct afs_call *); extern long afs_make_call(struct afs_addr_cursor *, struct afs_call *, gfp_t, bool); extern struct afs_call *afs_alloc_flat_call(struct afs_net *, const struct afs_call_type *, @@ -930,12 +1074,39 @@ extern struct afs_call *afs_alloc_flat_call(struct afs_net *, extern void afs_flat_call_destructor(struct afs_call *); extern void afs_send_empty_reply(struct afs_call *); extern void afs_send_simple_reply(struct afs_call *, const void *, size_t); -extern int afs_extract_data(struct afs_call *, void *, size_t, bool); -extern int afs_protocol_error(struct afs_call *, int); +extern int afs_extract_data(struct afs_call *, bool); +extern int afs_protocol_error(struct afs_call *, int, enum afs_eproto_cause); + +static inline void afs_extract_begin(struct afs_call *call, void *buf, size_t size) +{ + call->kvec[0].iov_base = buf; + call->kvec[0].iov_len = size; + iov_iter_kvec(&call->iter, READ, call->kvec, 1, size); +} + +static inline void afs_extract_to_tmp(struct afs_call *call) +{ + afs_extract_begin(call, &call->tmp, sizeof(call->tmp)); +} + +static inline void afs_extract_to_tmp64(struct afs_call *call) +{ + afs_extract_begin(call, &call->tmp64, sizeof(call->tmp64)); +} + +static inline void afs_extract_discard(struct afs_call *call, size_t size) +{ + iov_iter_discard(&call->iter, READ, size); +} + +static inline void afs_extract_to_buf(struct afs_call *call, size_t size) +{ + afs_extract_begin(call, call->buffer, size); +} static inline int afs_transfer_reply(struct afs_call *call) { - return afs_extract_data(call, call->buffer, call->reply_max, false); + return afs_extract_data(call, false); } static inline bool afs_check_call_state(struct afs_call *call, @@ -1012,7 +1183,6 @@ extern void afs_put_server(struct afs_net *, struct afs_server *); extern void afs_manage_servers(struct work_struct *); extern void afs_servers_timer(struct timer_list *); extern void __net_exit afs_purge_servers(struct afs_net *); -extern bool afs_probe_fileserver(struct afs_fs_cursor *); extern bool afs_check_server_record(struct afs_fs_cursor *, struct afs_server *); /* @@ -1039,14 +1209,51 @@ extern void afs_fs_exit(void); /* * vlclient.c */ -extern struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_net *, - struct afs_addr_cursor *, - struct key *, const char *, int); -extern struct afs_addr_list *afs_vl_get_addrs_u(struct afs_net *, struct afs_addr_cursor *, - struct key *, const uuid_t *); -extern int afs_vl_get_capabilities(struct afs_net *, struct afs_addr_cursor *, struct key *); -extern struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_net *, struct afs_addr_cursor *, - struct key *, const uuid_t *); +extern struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_vl_cursor *, + const char *, int); +extern struct afs_addr_list *afs_vl_get_addrs_u(struct afs_vl_cursor *, const uuid_t *); +extern int afs_vl_get_capabilities(struct afs_net *, struct afs_addr_cursor *, struct key *, + struct afs_vlserver *, unsigned int, bool); +extern struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_vl_cursor *, const uuid_t *); + +/* + * vl_probe.c + */ +extern void afs_vlserver_probe_result(struct afs_call *); +extern int 
afs_send_vl_probes(struct afs_net *, struct key *, struct afs_vlserver_list *); +extern int afs_wait_for_vl_probes(struct afs_vlserver_list *, unsigned long); + +/* + * vl_rotate.c + */ +extern bool afs_begin_vlserver_operation(struct afs_vl_cursor *, + struct afs_cell *, struct key *); +extern bool afs_select_vlserver(struct afs_vl_cursor *); +extern bool afs_select_current_vlserver(struct afs_vl_cursor *); +extern int afs_end_vlserver_operation(struct afs_vl_cursor *); + +/* + * vlserver_list.c + */ +static inline struct afs_vlserver *afs_get_vlserver(struct afs_vlserver *vlserver) +{ + atomic_inc(&vlserver->usage); + return vlserver; +} + +static inline struct afs_vlserver_list *afs_get_vlserverlist(struct afs_vlserver_list *vllist) +{ + if (vllist) + atomic_inc(&vllist->usage); + return vllist; +} + +extern struct afs_vlserver *afs_alloc_vlserver(const char *, size_t, unsigned short); +extern void afs_put_vlserver(struct afs_net *, struct afs_vlserver *); +extern struct afs_vlserver_list *afs_alloc_vlserver_list(unsigned int); +extern void afs_put_vlserverlist(struct afs_net *, struct afs_vlserver_list *); +extern struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *, + const void *, size_t); /* * volume.c @@ -1089,6 +1296,36 @@ extern int afs_launder_page(struct page *); extern const struct xattr_handler *afs_xattr_handlers[]; extern ssize_t afs_listxattr(struct dentry *, char *, size_t); +/* + * yfsclient.c + */ +extern int yfs_fs_fetch_file_status(struct afs_fs_cursor *, struct afs_volsync *, bool); +extern int yfs_fs_fetch_data(struct afs_fs_cursor *, struct afs_read *); +extern int yfs_fs_create_file(struct afs_fs_cursor *, const char *, umode_t, u64, + struct afs_fid *, struct afs_file_status *, struct afs_callback *); +extern int yfs_fs_make_dir(struct afs_fs_cursor *, const char *, umode_t, u64, + struct afs_fid *, struct afs_file_status *, struct afs_callback *); +extern int yfs_fs_remove_file2(struct afs_fs_cursor *, struct afs_vnode *, const char *, u64); +extern int yfs_fs_remove(struct afs_fs_cursor *, struct afs_vnode *, const char *, bool, u64); +extern int yfs_fs_link(struct afs_fs_cursor *, struct afs_vnode *, const char *, u64); +extern int yfs_fs_symlink(struct afs_fs_cursor *, const char *, const char *, u64, + struct afs_fid *, struct afs_file_status *); +extern int yfs_fs_rename(struct afs_fs_cursor *, const char *, + struct afs_vnode *, const char *, u64, u64); +extern int yfs_fs_store_data(struct afs_fs_cursor *, struct address_space *, + pgoff_t, pgoff_t, unsigned, unsigned); +extern int yfs_fs_setattr(struct afs_fs_cursor *, struct iattr *); +extern int yfs_fs_get_volume_status(struct afs_fs_cursor *, struct afs_volume_status *); +extern int yfs_fs_set_lock(struct afs_fs_cursor *, afs_lock_type_t); +extern int yfs_fs_extend_lock(struct afs_fs_cursor *); +extern int yfs_fs_release_lock(struct afs_fs_cursor *); +extern int yfs_fs_fetch_status(struct afs_fs_cursor *, struct afs_net *, + struct afs_fid *, struct afs_file_status *, + struct afs_callback *, struct afs_volsync *); +extern int yfs_fs_inline_bulk_status(struct afs_fs_cursor *, struct afs_net *, + struct afs_fid *, struct afs_file_status *, + struct afs_callback *, unsigned int, + struct afs_volsync *); /* * Miscellaneous inline functions. 
@@ -1120,6 +1357,17 @@ static inline void afs_check_for_remote_deletion(struct afs_fs_cursor *fc, } } +static inline int afs_io_error(struct afs_call *call, enum afs_io_error where) +{ + trace_afs_io_error(call->debug_id, -EIO, where); + return -EIO; +} + +static inline int afs_bad(struct afs_vnode *vnode, enum afs_file_error where) +{ + trace_afs_file_error(vnode, -EIO, where); + return -EIO; +} /*****************************************************************************/ /* diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index 99fd13500a97..2e51c6994148 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c @@ -130,9 +130,10 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) goto error_no_page; } - ret = -EIO; - if (PageError(page)) + if (PageError(page)) { + ret = afs_bad(AFS_FS_I(d_inode(mntpt)), afs_file_error_mntpt); goto error; + } buf = kmap_atomic(page); memcpy(devname, buf, size); diff --git a/fs/afs/proc.c b/fs/afs/proc.c index 9101f62707af..be2ee3bbd0a9 100644 --- a/fs/afs/proc.c +++ b/fs/afs/proc.c @@ -17,6 +17,11 @@ #include <linux/uaccess.h> #include "internal.h" +struct afs_vl_seq_net_private { + struct seq_net_private seq; /* Must be first */ + struct afs_vlserver_list *vllist; +}; + static inline struct afs_net *afs_seq2net(struct seq_file *m) { return afs_net(seq_file_net(m)); @@ -32,16 +37,24 @@ static inline struct afs_net *afs_seq2net_single(struct seq_file *m) */ static int afs_proc_cells_show(struct seq_file *m, void *v) { - struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link); + struct afs_vlserver_list *vllist; + struct afs_cell *cell; if (v == SEQ_START_TOKEN) { /* display header on line 1 */ - seq_puts(m, "USE NAME\n"); + seq_puts(m, "USE TTL SV NAME\n"); return 0; } + cell = list_entry(v, struct afs_cell, proc_link); + vllist = rcu_dereference(cell->vl_servers); + /* display one cell per line on subsequent lines */ - seq_printf(m, "%3u %s\n", atomic_read(&cell->usage), cell->name); + seq_printf(m, "%3u %6lld %2u %s\n", + atomic_read(&cell->usage), + cell->dns_expiry - ktime_get_real_seconds(), + vllist ? vllist->nr_servers : 0, + cell->name); return 0; } @@ -208,7 +221,7 @@ static int afs_proc_cell_volumes_show(struct seq_file *m, void *v) return 0; } - seq_printf(m, "%3d %08x %s\n", + seq_printf(m, "%3d %08llx %s\n", atomic_read(&vol->usage), vol->vid, afs_vol_types[vol->type]); @@ -247,61 +260,102 @@ static const struct seq_operations afs_proc_cell_volumes_ops = { .show = afs_proc_cell_volumes_show, }; +static const char *const dns_record_sources[NR__dns_record_source + 1] = { + [DNS_RECORD_UNAVAILABLE] = "unav", + [DNS_RECORD_FROM_CONFIG] = "cfg", + [DNS_RECORD_FROM_DNS_A] = "A", + [DNS_RECORD_FROM_DNS_AFSDB] = "AFSDB", + [DNS_RECORD_FROM_DNS_SRV] = "SRV", + [DNS_RECORD_FROM_NSS] = "nss", + [NR__dns_record_source] = "[weird]" +}; + +static const char *const dns_lookup_statuses[NR__dns_lookup_status + 1] = { + [DNS_LOOKUP_NOT_DONE] = "no-lookup", + [DNS_LOOKUP_GOOD] = "good", + [DNS_LOOKUP_GOOD_WITH_BAD] = "good/bad", + [DNS_LOOKUP_BAD] = "bad", + [DNS_LOOKUP_GOT_NOT_FOUND] = "not-found", + [DNS_LOOKUP_GOT_LOCAL_FAILURE] = "local-failure", + [DNS_LOOKUP_GOT_TEMP_FAILURE] = "temp-failure", + [DNS_LOOKUP_GOT_NS_FAILURE] = "ns-failure", + [NR__dns_lookup_status] = "[weird]" +}; + /* * Display the list of Volume Location servers we're using for a cell. 
*/ static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v) { - struct sockaddr_rxrpc *addr = v; + const struct afs_vl_seq_net_private *priv = m->private; + const struct afs_vlserver_list *vllist = priv->vllist; + const struct afs_vlserver_entry *entry; + const struct afs_vlserver *vlserver; + const struct afs_addr_list *alist; + int i; - /* display header on line 1 */ - if (v == (void *)1) { - seq_puts(m, "ADDRESS\n"); + if (v == SEQ_START_TOKEN) { + seq_printf(m, "# source %s, status %s\n", + dns_record_sources[vllist->source], + dns_lookup_statuses[vllist->status]); return 0; } - /* display one cell per line on subsequent lines */ - seq_printf(m, "%pISp\n", &addr->transport); + entry = v; + vlserver = entry->server; + alist = rcu_dereference(vlserver->addresses); + + seq_printf(m, "%s [p=%hu w=%hu s=%s,%s]:\n", + vlserver->name, entry->priority, entry->weight, + dns_record_sources[alist ? alist->source : entry->source], + dns_lookup_statuses[alist ? alist->status : entry->status]); + if (alist) { + for (i = 0; i < alist->nr_addrs; i++) + seq_printf(m, " %c %pISpc\n", + alist->preferred == i ? '>' : '-', + &alist->addrs[i].transport); + } return 0; } static void *afs_proc_cell_vlservers_start(struct seq_file *m, loff_t *_pos) __acquires(rcu) { - struct afs_addr_list *alist; + struct afs_vl_seq_net_private *priv = m->private; + struct afs_vlserver_list *vllist; struct afs_cell *cell = PDE_DATA(file_inode(m->file)); loff_t pos = *_pos; rcu_read_lock(); - alist = rcu_dereference(cell->vl_addrs); + vllist = rcu_dereference(cell->vl_servers); + priv->vllist = vllist; - /* allow for the header line */ - if (!pos) - return (void *) 1; - pos--; + if (pos < 0) + *_pos = pos = 0; + if (pos == 0) + return SEQ_START_TOKEN; - if (!alist || pos >= alist->nr_addrs) + if (!vllist || pos - 1 >= vllist->nr_servers) return NULL; - return alist->addrs + pos; + return &vllist->servers[pos - 1]; } static void *afs_proc_cell_vlservers_next(struct seq_file *m, void *v, loff_t *_pos) { - struct afs_addr_list *alist; - struct afs_cell *cell = PDE_DATA(file_inode(m->file)); + struct afs_vl_seq_net_private *priv = m->private; + struct afs_vlserver_list *vllist = priv->vllist; loff_t pos; - alist = rcu_dereference(cell->vl_addrs); - pos = *_pos; - (*_pos)++; - if (!alist || pos >= alist->nr_addrs) + pos++; + *_pos = pos; + if (!vllist || pos - 1 >= vllist->nr_servers) return NULL; - return alist->addrs + pos; + return &vllist->servers[pos - 1]; } static void afs_proc_cell_vlservers_stop(struct seq_file *m, void *v) @@ -337,11 +391,11 @@ static int afs_proc_servers_show(struct seq_file *m, void *v) &server->uuid, atomic_read(&server->usage), &alist->addrs[0].transport, - alist->index == 0 ? "*" : ""); + alist->preferred == 0 ? "*" : ""); for (i = 1; i < alist->nr_addrs; i++) seq_printf(m, " %pISpc%s\n", &alist->addrs[i].transport, - alist->index == i ? "*" : ""); + alist->preferred == i ? "*" : ""); return 0; } @@ -562,7 +616,7 @@ int afs_proc_cell_setup(struct afs_cell *cell) if (!proc_create_net_data("vlservers", 0444, dir, &afs_proc_cell_vlservers_ops, - sizeof(struct seq_net_private), + sizeof(struct afs_vl_seq_net_private), cell) || !proc_create_net_data("volumes", 0444, dir, &afs_proc_cell_volumes_ops, diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h new file mode 100644 index 000000000000..07bc10f076aa --- /dev/null +++ b/fs/afs/protocol_yfs.h @@ -0,0 +1,163 @@ +/* YFS protocol bits + * + * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#define YFS_FS_SERVICE 2500 +#define YFS_CM_SERVICE 2501 + +#define YFSCBMAX 1024 + +enum YFS_CM_Operations { + YFSCBProbe = 206, /* probe client */ + YFSCBGetLock = 207, /* get contents of CM lock table */ + YFSCBXStatsVersion = 209, /* get version of extended statistics */ + YFSCBGetXStats = 210, /* get contents of extended statistics data */ + YFSCBInitCallBackState3 = 213, /* initialise callback state, version 3 */ + YFSCBProbeUuid = 214, /* check the client hasn't rebooted */ + YFSCBGetServerPrefs = 215, + YFSCBGetCellServDV = 216, + YFSCBGetLocalCell = 217, + YFSCBGetCacheConfig = 218, + YFSCBGetCellByNum = 65537, + YFSCBTellMeAboutYourself = 65538, /* get client capabilities */ + YFSCBCallBack = 64204, +}; + +enum YFS_FS_Operations { + YFSFETCHACL = 64131, /* YFS Fetch file ACL */ + YFSFETCHSTATUS = 64132, /* YFS Fetch file status */ + YFSSTOREACL = 64134, /* YFS Store file ACL */ + YFSSTORESTATUS = 64135, /* YFS Store file status */ + YFSREMOVEFILE = 64136, /* YFS Remove a file */ + YFSCREATEFILE = 64137, /* YFS Create a file */ + YFSRENAME = 64138, /* YFS Rename or move a file or directory */ + YFSSYMLINK = 64139, /* YFS Create a symbolic link */ + YFSLINK = 64140, /* YFS Create a hard link */ + YFSMAKEDIR = 64141, /* YFS Create a directory */ + YFSREMOVEDIR = 64142, /* YFS Remove a directory */ + YFSGETVOLUMESTATUS = 64149, /* YFS Get volume status information */ + YFSSETVOLUMESTATUS = 64150, /* YFS Set volume status information */ + YFSSETLOCK = 64156, /* YFS Request a file lock */ + YFSEXTENDLOCK = 64157, /* YFS Extend a file lock */ + YFSRELEASELOCK = 64158, /* YFS Release a file lock */ + YFSLOOKUP = 64161, /* YFS lookup file in directory */ + YFSFLUSHCPS = 64165, + YFSFETCHOPAQUEACL = 64168, + YFSWHOAMI = 64170, + YFSREMOVEACL = 64171, + YFSREMOVEFILE2 = 64173, + YFSSTOREOPAQUEACL2 = 64174, + YFSINLINEBULKSTATUS = 64536, /* YFS Fetch multiple file statuses with errors */ + YFSFETCHDATA64 = 64537, /* YFS Fetch file data */ + YFSSTOREDATA64 = 64538, /* YFS Store file data */ + YFSUPDATESYMLINK = 64540, +}; + +struct yfs_xdr_u64 { + __be32 msw; + __be32 lsw; +} __packed; + +static inline u64 xdr_to_u64(const struct yfs_xdr_u64 x) +{ + return ((u64)ntohl(x.msw) << 32) | ntohl(x.lsw); +} + +static inline struct yfs_xdr_u64 u64_to_xdr(const u64 x) +{ + return (struct yfs_xdr_u64){ .msw = htonl(x >> 32), .lsw = htonl(x) }; +} + +struct yfs_xdr_vnode { + struct yfs_xdr_u64 lo; + __be32 hi; + __be32 unique; +} __packed; + +struct yfs_xdr_YFSFid { + struct yfs_xdr_u64 volume; + struct yfs_xdr_vnode vnode; +} __packed; + + +struct yfs_xdr_YFSFetchStatus { + __be32 type; + __be32 nlink; + struct yfs_xdr_u64 size; + struct yfs_xdr_u64 data_version; + struct yfs_xdr_u64 author; + struct yfs_xdr_u64 owner; + struct yfs_xdr_u64 group; + __be32 mode; + __be32 caller_access; + __be32 anon_access; + struct yfs_xdr_vnode parent; + __be32 data_access_protocol; + struct yfs_xdr_u64 mtime_client; + struct yfs_xdr_u64 mtime_server; + __be32 lock_count; + __be32 abort_code; +} __packed; + +struct yfs_xdr_YFSCallBack { + __be32 version; + struct yfs_xdr_u64 expiration_time; + __be32 type; +} __packed; + +struct yfs_xdr_YFSStoreStatus { + __be32 mask; + __be32 mode; + struct yfs_xdr_u64 
mtime_client; + struct yfs_xdr_u64 owner; + struct yfs_xdr_u64 group; +} __packed; + +struct yfs_xdr_RPCFlags { + __be32 rpc_flags; +} __packed; + +struct yfs_xdr_YFSVolSync { + struct yfs_xdr_u64 vol_creation_date; + struct yfs_xdr_u64 vol_update_date; + struct yfs_xdr_u64 max_quota; + struct yfs_xdr_u64 blocks_in_use; + struct yfs_xdr_u64 blocks_avail; +} __packed; + +enum yfs_volume_type { + yfs_volume_type_ro = 0, + yfs_volume_type_rw = 1, +}; + +#define yfs_FVSOnline 0x1 +#define yfs_FVSInservice 0x2 +#define yfs_FVSBlessed 0x4 +#define yfs_FVSNeedsSalvage 0x8 + +struct yfs_xdr_YFSFetchVolumeStatus { + struct yfs_xdr_u64 vid; + struct yfs_xdr_u64 parent_id; + __be32 flags; + __be32 type; + struct yfs_xdr_u64 max_quota; + struct yfs_xdr_u64 blocks_in_use; + struct yfs_xdr_u64 part_blocks_avail; + struct yfs_xdr_u64 part_max_blocks; + struct yfs_xdr_u64 vol_copy_date; + struct yfs_xdr_u64 vol_backup_date; +} __packed; + +struct yfs_xdr_YFSStoreVolumeStatus { + __be32 mask; + struct yfs_xdr_u64 min_quota; + struct yfs_xdr_u64 max_quota; + struct yfs_xdr_u64 file_quota; +} __packed; diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c index 1faef56b12bd..00504254c1c2 100644 --- a/fs/afs/rotate.c +++ b/fs/afs/rotate.c @@ -19,14 +19,6 @@ #include "afs_fs.h" /* - * Initialise a filesystem server cursor for iterating over FS servers. - */ -static void afs_init_fs_cursor(struct afs_fs_cursor *fc, struct afs_vnode *vnode) -{ - memset(fc, 0, sizeof(*fc)); -} - -/* * Begin an operation on the fileserver. * * Fileserver operations are serialised on the server by vnode, so we serialise @@ -35,13 +27,14 @@ static void afs_init_fs_cursor(struct afs_fs_cursor *fc, struct afs_vnode *vnode bool afs_begin_vnode_operation(struct afs_fs_cursor *fc, struct afs_vnode *vnode, struct key *key) { - afs_init_fs_cursor(fc, vnode); + memset(fc, 0, sizeof(*fc)); fc->vnode = vnode; fc->key = key; fc->ac.error = SHRT_MAX; + fc->error = -EDESTADDRREQ; if (mutex_lock_interruptible(&vnode->io_lock) < 0) { - fc->ac.error = -EINTR; + fc->error = -EINTR; fc->flags |= AFS_FS_CURSOR_STOP; return false; } @@ -65,12 +58,15 @@ static bool afs_start_fs_iteration(struct afs_fs_cursor *fc, fc->server_list = afs_get_serverlist(vnode->volume->servers); read_unlock(&vnode->volume->servers_lock); + fc->untried = (1UL << fc->server_list->nr_servers) - 1; + fc->index = READ_ONCE(fc->server_list->preferred); + cbi = vnode->cb_interest; if (cbi) { /* See if the vnode's preferred record is still available */ for (i = 0; i < fc->server_list->nr_servers; i++) { if (fc->server_list->servers[i].cb_interest == cbi) { - fc->start = i; + fc->index = i; goto found_interest; } } @@ -80,7 +76,7 @@ static bool afs_start_fs_iteration(struct afs_fs_cursor *fc, * and have to return an error. 
*/ if (fc->flags & AFS_FS_CURSOR_CUR_ONLY) { - fc->ac.error = -ESTALE; + fc->error = -ESTALE; return false; } @@ -94,12 +90,9 @@ static bool afs_start_fs_iteration(struct afs_fs_cursor *fc, afs_put_cb_interest(afs_v2net(vnode), cbi); cbi = NULL; - } else { - fc->start = READ_ONCE(fc->server_list->index); } found_interest: - fc->index = fc->start; return true; } @@ -117,7 +110,7 @@ static void afs_busy(struct afs_volume *volume, u32 abort_code) default: m = "busy"; break; } - pr_notice("kAFS: Volume %u '%s' is %s\n", volume->vid, volume->name, m); + pr_notice("kAFS: Volume %llu '%s' is %s\n", volume->vid, volume->name, m); } /* @@ -127,7 +120,7 @@ static bool afs_sleep_and_retry(struct afs_fs_cursor *fc) { msleep_interruptible(1000); if (signal_pending(current)) { - fc->ac.error = -ERESTARTSYS; + fc->error = -ERESTARTSYS; return false; } @@ -143,27 +136,32 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) struct afs_addr_list *alist; struct afs_server *server; struct afs_vnode *vnode = fc->vnode; + u32 rtt, abort_code; + int error = fc->ac.error, i; - _enter("%u/%u,%u/%u,%d,%d", - fc->index, fc->start, - fc->ac.index, fc->ac.start, - fc->ac.error, fc->ac.abort_code); + _enter("%lx[%d],%lx[%d],%d,%d", + fc->untried, fc->index, + fc->ac.tried, fc->ac.index, + error, fc->ac.abort_code); if (fc->flags & AFS_FS_CURSOR_STOP) { _leave(" = f [stopped]"); return false; } + fc->nr_iterations++; + /* Evaluate the result of the previous operation, if there was one. */ - switch (fc->ac.error) { + switch (error) { case SHRT_MAX: goto start; case 0: default: /* Success or local failure. Stop. */ + fc->error = error; fc->flags |= AFS_FS_CURSOR_STOP; - _leave(" = f [okay/local %d]", fc->ac.error); + _leave(" = f [okay/local %d]", error); return false; case -ECONNABORTED: @@ -178,7 +176,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) * - May indicate that the fileserver couldn't attach to the vol. */ if (fc->flags & AFS_FS_CURSOR_VNOVOL) { - fc->ac.error = -EREMOTEIO; + fc->error = -EREMOTEIO; goto next_server; } @@ -187,12 +185,12 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) write_unlock(&vnode->volume->servers_lock); set_bit(AFS_VOLUME_NEEDS_UPDATE, &vnode->volume->flags); - fc->ac.error = afs_check_volume_status(vnode->volume, fc->key); - if (fc->ac.error < 0) - goto failed; + error = afs_check_volume_status(vnode->volume, fc->key); + if (error < 0) + goto failed_set_error; if (test_bit(AFS_VOLUME_DELETED, &vnode->volume->flags)) { - fc->ac.error = -ENOMEDIUM; + fc->error = -ENOMEDIUM; goto failed; } @@ -200,7 +198,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) * it's the fileserver having trouble. */ if (vnode->volume->servers == fc->server_list) { - fc->ac.error = -EREMOTEIO; + fc->error = -EREMOTEIO; goto next_server; } @@ -215,7 +213,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) case VONLINE: case VDISKFULL: case VOVERQUOTA: - fc->ac.error = afs_abort_to_error(fc->ac.abort_code); + fc->error = afs_abort_to_error(fc->ac.abort_code); goto next_server; case VOFFLINE: @@ -224,11 +222,11 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) clear_bit(AFS_VOLUME_BUSY, &vnode->volume->flags); } if (fc->flags & AFS_FS_CURSOR_NO_VSLEEP) { - fc->ac.error = -EADV; + fc->error = -EADV; goto failed; } if (fc->flags & AFS_FS_CURSOR_CUR_ONLY) { - fc->ac.error = -ESTALE; + fc->error = -ESTALE; goto failed; } goto busy; @@ -240,7 +238,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) * have a file lock we need to maintain. 
*/ if (fc->flags & AFS_FS_CURSOR_NO_VSLEEP) { - fc->ac.error = -EBUSY; + fc->error = -EBUSY; goto failed; } if (!test_and_set_bit(AFS_VOLUME_BUSY, &vnode->volume->flags)) { @@ -269,16 +267,16 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) * honour, just in case someone sets up a loop. */ if (fc->flags & AFS_FS_CURSOR_VMOVED) { - fc->ac.error = -EREMOTEIO; + fc->error = -EREMOTEIO; goto failed; } fc->flags |= AFS_FS_CURSOR_VMOVED; set_bit(AFS_VOLUME_WAIT, &vnode->volume->flags); set_bit(AFS_VOLUME_NEEDS_UPDATE, &vnode->volume->flags); - fc->ac.error = afs_check_volume_status(vnode->volume, fc->key); - if (fc->ac.error < 0) - goto failed; + error = afs_check_volume_status(vnode->volume, fc->key); + if (error < 0) + goto failed_set_error; /* If the server list didn't change, then the VLDB is * out of sync with the fileservers. This is hopefully @@ -290,7 +288,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) * TODO: Retry a few times with sleeps. */ if (vnode->volume->servers == fc->server_list) { - fc->ac.error = -ENOMEDIUM; + fc->error = -ENOMEDIUM; goto failed; } @@ -299,20 +297,25 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc) default: clear_bit(AFS_VOLUME_OFFLINE, &vnode->volume->flags); clear_bit(AFS_VOLUME_BUSY, &vnode->volume->flags); - fc->ac.error = afs_abort_to_error(fc->ac.abort_code); + fc->error = afs_abort_to_error(fc->ac.abort_code); goto failed; } + case -ETIMEDOUT: + case -ETIME: + if (fc->error != -EDESTADDRREQ) + goto iterate_address; + /* Fall through */ case -ENETUNREACH: case -EHOSTUNREACH: case -ECONNREFUSED: - case -ETIMEDOUT: - case -ETIME: _debug("no conn"); + fc->error = error; goto iterate_address; case -ECONNRESET: _debug("call reset"); + fc->error = error; goto failed; } @@ -328,15 +331,57 @@ start: /* See if we need to do an update of the volume record. Note that the * volume may have moved or even have been deleted. */ - fc->ac.error = afs_check_volume_status(vnode->volume, fc->key); - if (fc->ac.error < 0) - goto failed; + error = afs_check_volume_status(vnode->volume, fc->key); + if (error < 0) + goto failed_set_error; if (!afs_start_fs_iteration(fc, vnode)) goto failed; -use_server: - _debug("use"); + _debug("__ VOL %llx __", vnode->volume->vid); + error = afs_probe_fileservers(afs_v2net(vnode), fc->key, fc->server_list); + if (error < 0) + goto failed_set_error; + +pick_server: + _debug("pick [%lx]", fc->untried); + + error = afs_wait_for_fs_probes(fc->server_list, fc->untried); + if (error < 0) + goto failed_set_error; + + /* Pick the untried server with the lowest RTT. If we have outstanding + * callbacks, we stick with the server we're already using if we can. + */ + if (fc->cbi) { + _debug("cbi %u", fc->index); + if (test_bit(fc->index, &fc->untried)) + goto selected_server; + afs_put_cb_interest(afs_v2net(vnode), fc->cbi); + fc->cbi = NULL; + _debug("nocbi"); + } + + fc->index = -1; + rtt = U32_MAX; + for (i = 0; i < fc->server_list->nr_servers; i++) { + struct afs_server *s = fc->server_list->servers[i].server; + + if (!test_bit(i, &fc->untried) || !s->probe.responded) + continue; + if (s->probe.rtt < rtt) { + fc->index = i; + rtt = s->probe.rtt; + } + } + + if (fc->index == -1) + goto no_more_servers; + +selected_server: + _debug("use %d", fc->index); + __clear_bit(fc->index, &fc->untried); + /* We're starting on a different fileserver from the list. We need to * check it, create a callback intercept, find its address list and * probe its capabilities before we use it. 
@@ -354,10 +399,10 @@ use_server: * break request before we've finished decoding the reply and * installing the vnode. */ - fc->ac.error = afs_register_server_cb_interest(vnode, fc->server_list, - fc->index); - if (fc->ac.error < 0) - goto failed; + error = afs_register_server_cb_interest(vnode, fc->server_list, + fc->index); + if (error < 0) + goto failed_set_error; fc->cbi = afs_get_cb_interest(vnode->cb_interest); @@ -369,66 +414,88 @@ use_server: memset(&fc->ac, 0, sizeof(fc->ac)); - /* Probe the current fileserver if we haven't done so yet. */ - if (!test_bit(AFS_SERVER_FL_PROBED, &server->flags)) { - fc->ac.alist = afs_get_addrlist(alist); - - if (!afs_probe_fileserver(fc)) { - switch (fc->ac.error) { - case -ENOMEM: - case -ERESTARTSYS: - case -EINTR: - goto failed; - default: - goto next_server; - } - } - } - if (!fc->ac.alist) fc->ac.alist = alist; else afs_put_addrlist(alist); - fc->ac.start = READ_ONCE(alist->index); - fc->ac.index = fc->ac.start; + fc->ac.index = -1; iterate_address: ASSERT(fc->ac.alist); - _debug("iterate %d/%d", fc->ac.index, fc->ac.alist->nr_addrs); /* Iterate over the current server's address list to try and find an * address on which it will respond to us. */ if (!afs_iterate_addresses(&fc->ac)) goto next_server; + _debug("address [%u] %u/%u", fc->index, fc->ac.index, fc->ac.alist->nr_addrs); + _leave(" = t"); return true; next_server: _debug("next"); afs_end_cursor(&fc->ac); - afs_put_cb_interest(afs_v2net(vnode), fc->cbi); - fc->cbi = NULL; - fc->index++; - if (fc->index >= fc->server_list->nr_servers) - fc->index = 0; - if (fc->index != fc->start) - goto use_server; + goto pick_server; +no_more_servers: /* That's all the servers poked to no good effect. Try again if some * of them were busy. */ if (fc->flags & AFS_FS_CURSOR_VBUSY) goto restart_from_beginning; - fc->ac.error = -EDESTADDRREQ; - goto failed; + abort_code = 0; + error = -EDESTADDRREQ; + for (i = 0; i < fc->server_list->nr_servers; i++) { + struct afs_server *s = fc->server_list->servers[i].server; + int probe_error = READ_ONCE(s->probe.error); + + switch (probe_error) { + case 0: + continue; + default: + if (error == -ETIMEDOUT || + error == -ETIME) + continue; + case -ETIMEDOUT: + case -ETIME: + if (error == -ENOMEM || + error == -ENONET) + continue; + case -ENOMEM: + case -ENONET: + if (error == -ENETUNREACH) + continue; + case -ENETUNREACH: + if (error == -EHOSTUNREACH) + continue; + case -EHOSTUNREACH: + if (error == -ECONNREFUSED) + continue; + case -ECONNREFUSED: + if (error == -ECONNRESET) + continue; + case -ECONNRESET: /* Responded, but call expired. 
*/ + if (error == -ECONNABORTED) + continue; + case -ECONNABORTED: + abort_code = s->probe.abort_code; + error = probe_error; + continue; + } + } + + if (error == -ECONNABORTED) + error = afs_abort_to_error(abort_code); +failed_set_error: + fc->error = error; failed: fc->flags |= AFS_FS_CURSOR_STOP; afs_end_cursor(&fc->ac); - _leave(" = f [failed %d]", fc->ac.error); + _leave(" = f [failed %d]", fc->error); return false; } @@ -442,13 +509,14 @@ bool afs_select_current_fileserver(struct afs_fs_cursor *fc) struct afs_vnode *vnode = fc->vnode; struct afs_cb_interest *cbi = vnode->cb_interest; struct afs_addr_list *alist; + int error = fc->ac.error; _enter(""); - switch (fc->ac.error) { + switch (error) { case SHRT_MAX: if (!cbi) { - fc->ac.error = -ESTALE; + fc->error = -ESTALE; fc->flags |= AFS_FS_CURSOR_STOP; return false; } @@ -461,25 +529,26 @@ bool afs_select_current_fileserver(struct afs_fs_cursor *fc) afs_get_addrlist(alist); read_unlock(&cbi->server->fs_lock); if (!alist) { - fc->ac.error = -ESTALE; + fc->error = -ESTALE; fc->flags |= AFS_FS_CURSOR_STOP; return false; } memset(&fc->ac, 0, sizeof(fc->ac)); fc->ac.alist = alist; - fc->ac.start = READ_ONCE(alist->index); - fc->ac.index = fc->ac.start; + fc->ac.index = -1; goto iterate_address; case 0: default: /* Success or local failure. Stop. */ + fc->error = error; fc->flags |= AFS_FS_CURSOR_STOP; - _leave(" = f [okay/local %d]", fc->ac.error); + _leave(" = f [okay/local %d]", error); return false; case -ECONNABORTED: + fc->error = afs_abort_to_error(fc->ac.abort_code); fc->flags |= AFS_FS_CURSOR_STOP; _leave(" = f [abort]"); return false; @@ -490,6 +559,7 @@ bool afs_select_current_fileserver(struct afs_fs_cursor *fc) case -ETIMEDOUT: case -ETIME: _debug("no conn"); + fc->error = error; goto iterate_address; } @@ -507,12 +577,65 @@ iterate_address: } /* + * Dump cursor state in the case of the error being EDESTADDRREQ. + */ +static void afs_dump_edestaddrreq(const struct afs_fs_cursor *fc) +{ + static int count; + int i; + + if (!IS_ENABLED(CONFIG_AFS_DEBUG_CURSOR) || count > 3) + return; + count++; + + rcu_read_lock(); + + pr_notice("EDESTADDR occurred\n"); + pr_notice("FC: cbb=%x cbb2=%x fl=%hx err=%hd\n", + fc->cb_break, fc->cb_break_2, fc->flags, fc->error); + pr_notice("FC: ut=%lx ix=%d ni=%u\n", + fc->untried, fc->index, fc->nr_iterations); + + if (fc->server_list) { + const struct afs_server_list *sl = fc->server_list; + pr_notice("FC: SL nr=%u pr=%u vnov=%hx\n", + sl->nr_servers, sl->preferred, sl->vnovol_mask); + for (i = 0; i < sl->nr_servers; i++) { + const struct afs_server *s = sl->servers[i].server; + pr_notice("FC: server fl=%lx av=%u %pU\n", + s->flags, s->addr_version, &s->uuid); + if (s->addresses) { + const struct afs_addr_list *a = + rcu_dereference(s->addresses); + pr_notice("FC: - av=%u nr=%u/%u/%u pr=%u\n", + a->version, + a->nr_ipv4, a->nr_addrs, a->max_addrs, + a->preferred); + pr_notice("FC: - pr=%lx R=%lx F=%lx\n", + a->probed, a->responded, a->failed); + if (a == fc->ac.alist) + pr_notice("FC: - current\n"); + } + } + } + + pr_notice("AC: t=%lx ax=%u ac=%d er=%d r=%u ni=%u\n", + fc->ac.tried, fc->ac.index, fc->ac.abort_code, fc->ac.error, + fc->ac.responded, fc->ac.nr_iterations); + rcu_read_unlock(); +} + +/* * Tidy up a filesystem cursor and unlock the vnode. 
*/ int afs_end_vnode_operation(struct afs_fs_cursor *fc) { struct afs_net *net = afs_v2net(fc->vnode); - int ret; + + if (fc->error == -EDESTADDRREQ || + fc->error == -ENETUNREACH || + fc->error == -EHOSTUNREACH) + afs_dump_edestaddrreq(fc); mutex_unlock(&fc->vnode->io_lock); @@ -520,9 +643,8 @@ int afs_end_vnode_operation(struct afs_fs_cursor *fc) afs_put_cb_interest(net, fc->cbi); afs_put_serverlist(net, fc->server_list); - ret = fc->ac.error; - if (ret == -ECONNABORTED) - afs_abort_to_error(fc->ac.abort_code); + if (fc->error == -ECONNABORTED) + fc->error = afs_abort_to_error(fc->ac.abort_code); - return fc->ac.error; + return fc->error; } diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 77a83790a31f..59970886690f 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -16,6 +16,7 @@ #include <net/af_rxrpc.h> #include "internal.h" #include "afs_cm.h" +#include "protocol_yfs.h" struct workqueue_struct *afs_async_calls; @@ -75,6 +76,18 @@ int afs_open_socket(struct afs_net *net) if (ret < 0) goto error_2; + srx.srx_service = YFS_CM_SERVICE; + ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx)); + if (ret < 0) + goto error_2; + + /* Ideally, we'd turn on service upgrade here, but we can't because + * OpenAFS is buggy and leaks the userStatus field from packet to + * packet and between FS packets and CB packets - so if we try to do an + * upgrade on an FS packet, OpenAFS will leak that into the CB packet + * it sends back to us. + */ + rxrpc_kernel_new_call_notification(socket, afs_rx_new_call, afs_rx_discard_new_call); @@ -143,6 +156,7 @@ static struct afs_call *afs_alloc_call(struct afs_net *net, INIT_WORK(&call->async_work, afs_process_async_call); init_waitqueue_head(&call->waitq); spin_lock_init(&call->state_lock); + call->_iter = &call->iter; o = atomic_inc_return(&net->nr_outstanding_calls); trace_afs_call(call, afs_call_trace_alloc, 1, o, @@ -176,6 +190,7 @@ void afs_put_call(struct afs_call *call) afs_put_server(call->net, call->cm_server); afs_put_cb_interest(call->net, call->cbi); + afs_put_addrlist(call->alist); kfree(call->request); trace_afs_call(call, afs_call_trace_free, 0, o, @@ -189,21 +204,22 @@ void afs_put_call(struct afs_call *call) } /* - * Queue the call for actual work. Returns 0 unconditionally for convenience. + * Queue the call for actual work. 
*/ -int afs_queue_call_work(struct afs_call *call) +static void afs_queue_call_work(struct afs_call *call) { - int u = atomic_inc_return(&call->usage); + if (call->type->work) { + int u = atomic_inc_return(&call->usage); - trace_afs_call(call, afs_call_trace_work, u, - atomic_read(&call->net->nr_outstanding_calls), - __builtin_return_address(0)); + trace_afs_call(call, afs_call_trace_work, u, + atomic_read(&call->net->nr_outstanding_calls), + __builtin_return_address(0)); - INIT_WORK(&call->work, call->type->work); + INIT_WORK(&call->work, call->type->work); - if (!queue_work(afs_wq, &call->work)) - afs_put_call(call); - return 0; + if (!queue_work(afs_wq, &call->work)) + afs_put_call(call); + } } /* @@ -233,6 +249,7 @@ struct afs_call *afs_alloc_flat_call(struct afs_net *net, goto nomem_free; } + afs_extract_to_buf(call, call->reply_max); call->operation_ID = type->op; init_waitqueue_head(&call->waitq); return call; @@ -286,7 +303,7 @@ static void afs_load_bvec(struct afs_call *call, struct msghdr *msg, offset = 0; } - iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC, bv, nr, bytes); + iov_iter_bvec(&msg->msg_iter, WRITE, bv, nr, bytes); } /* @@ -342,7 +359,7 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg) long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp, bool async) { - struct sockaddr_rxrpc *srx = ac->addr; + struct sockaddr_rxrpc *srx = &ac->alist->addrs[ac->index]; struct rxrpc_call *rxcall; struct msghdr msg; struct kvec iov[1]; @@ -359,6 +376,8 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, atomic_read(&call->net->nr_outstanding_calls)); call->async = async; + call->addr_ix = ac->index; + call->alist = afs_get_addrlist(ac->alist); /* Work out the length we're going to transmit. This is awkward for * calls such as FS.StoreData where there's an extra injection of data @@ -390,6 +409,7 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, call->debug_id); if (IS_ERR(rxcall)) { ret = PTR_ERR(rxcall); + call->error = ret; goto error_kill_call; } @@ -401,8 +421,7 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, msg.msg_name = NULL; msg.msg_namelen = 0; - iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1, - call->request_size); + iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, call->request_size); msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = MSG_WAITALL | (call->send_pages ? 
MSG_MORE : 0); @@ -432,7 +451,7 @@ error_do_abort: rxrpc_kernel_abort_call(call->net->socket, rxcall, RX_USER_ABORT, ret, "KSD"); } else { - iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, NULL, 0, 0); + iov_iter_kvec(&msg.msg_iter, READ, NULL, 0, 0); rxrpc_kernel_recv_data(call->net->socket, rxcall, &msg.msg_iter, false, &call->abort_code, &call->service_id); @@ -442,6 +461,8 @@ error_do_abort: call->error = ret; trace_afs_call_done(call); error_kill_call: + if (call->type->done) + call->type->done(call); afs_put_call(call); ac->error = ret; _leave(" = %d", ret); @@ -466,14 +487,12 @@ static void afs_deliver_to_call(struct afs_call *call) state == AFS_CALL_SV_AWAIT_ACK ) { if (state == AFS_CALL_SV_AWAIT_ACK) { - struct iov_iter iter; - - iov_iter_kvec(&iter, READ | ITER_KVEC, NULL, 0, 0); + iov_iter_kvec(&call->iter, READ, NULL, 0, 0); ret = rxrpc_kernel_recv_data(call->net->socket, - call->rxcall, &iter, false, - &remote_abort, + call->rxcall, &call->iter, + false, &remote_abort, &call->service_id); - trace_afs_recv_data(call, 0, 0, false, ret); + trace_afs_receive_data(call, &call->iter, false, ret); if (ret == -EINPROGRESS || ret == -EAGAIN) return; @@ -485,10 +504,17 @@ static void afs_deliver_to_call(struct afs_call *call) return; } + if (call->want_reply_time && + rxrpc_kernel_get_reply_time(call->net->socket, + call->rxcall, + &call->reply_time)) + call->want_reply_time = false; + ret = call->type->deliver(call); state = READ_ONCE(call->state); switch (ret) { case 0: + afs_queue_call_work(call); if (state == AFS_CALL_CL_PROC_REPLY) { if (call->cbi) set_bit(AFS_SERVER_FL_MAY_HAVE_CB, @@ -500,7 +526,6 @@ static void afs_deliver_to_call(struct afs_call *call) case -EINPROGRESS: case -EAGAIN: goto out; - case -EIO: case -ECONNABORTED: ASSERTCMP(state, ==, AFS_CALL_COMPLETE); goto done; @@ -509,6 +534,10 @@ static void afs_deliver_to_call(struct afs_call *call) rxrpc_kernel_abort_call(call->net->socket, call->rxcall, abort_code, ret, "KIV"); goto local_abort; + case -EIO: + pr_err("kAFS: Call %u in bad state %u\n", + call->debug_id, state); + /* Fall through */ case -ENODATA: case -EBADMSG: case -EMSGSIZE: @@ -517,12 +546,14 @@ static void afs_deliver_to_call(struct afs_call *call) if (state != AFS_CALL_CL_AWAIT_REPLY) abort_code = RXGEN_SS_UNMARSHAL; rxrpc_kernel_abort_call(call->net->socket, call->rxcall, - abort_code, -EBADMSG, "KUM"); + abort_code, ret, "KUM"); goto local_abort; } } done: + if (call->type->done) + call->type->done(call); if (state == AFS_CALL_COMPLETE && call->incoming) afs_put_call(call); out: @@ -728,6 +759,7 @@ void afs_charge_preallocation(struct work_struct *work) call->async = true; call->state = AFS_CALL_SV_AWAIT_OP_ID; init_waitqueue_head(&call->waitq); + afs_extract_to_tmp(call); } if (rxrpc_kernel_charge_accept(net->socket, @@ -773,18 +805,15 @@ static int afs_deliver_cm_op_id(struct afs_call *call) { int ret; - _enter("{%zu}", call->offset); - - ASSERTCMP(call->offset, <, 4); + _enter("{%zu}", iov_iter_count(call->_iter)); /* the operation ID forms the first four bytes of the request data */ - ret = afs_extract_data(call, &call->tmp, 4, true); + ret = afs_extract_data(call, true); if (ret < 0) return ret; call->operation_ID = ntohl(call->tmp); afs_set_call_state(call, AFS_CALL_SV_AWAIT_OP_ID, AFS_CALL_SV_AWAIT_REQUEST); - call->offset = 0; /* ask the cache manager to route the call (it'll change the call type * if successful) */ @@ -825,7 +854,7 @@ void afs_send_empty_reply(struct afs_call *call) msg.msg_name = NULL; msg.msg_namelen = 0; - 
iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, NULL, 0, 0); + iov_iter_kvec(&msg.msg_iter, WRITE, NULL, 0, 0); msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; @@ -864,7 +893,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len) iov[0].iov_len = len; msg.msg_name = NULL; msg.msg_namelen = 0; - iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1, len); + iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len); msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; @@ -888,30 +917,19 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len) /* * Extract a piece of data from the received data socket buffers. */ -int afs_extract_data(struct afs_call *call, void *buf, size_t count, - bool want_more) +int afs_extract_data(struct afs_call *call, bool want_more) { struct afs_net *net = call->net; - struct iov_iter iter; - struct kvec iov; + struct iov_iter *iter = call->_iter; enum afs_call_state state; u32 remote_abort = 0; int ret; - _enter("{%s,%zu},,%zu,%d", - call->type->name, call->offset, count, want_more); - - ASSERTCMP(call->offset, <=, count); - - iov.iov_base = buf + call->offset; - iov.iov_len = count - call->offset; - iov_iter_kvec(&iter, ITER_KVEC | READ, &iov, 1, count - call->offset); + _enter("{%s,%zu},%d", call->type->name, iov_iter_count(iter), want_more); - ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, &iter, + ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, iter, want_more, &remote_abort, &call->service_id); - call->offset += (count - call->offset) - iov_iter_count(&iter); - trace_afs_recv_data(call, count, call->offset, want_more, ret); if (ret == 0 || ret == -EAGAIN) return ret; @@ -926,7 +944,7 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count, break; case AFS_CALL_COMPLETE: kdebug("prem complete %d", call->error); - return -EIO; + return afs_io_error(call, afs_io_error_extract); default: break; } @@ -940,8 +958,9 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count, /* * Log protocol error production. 
*/ -noinline int afs_protocol_error(struct afs_call *call, int error) +noinline int afs_protocol_error(struct afs_call *call, int error, + enum afs_eproto_cause cause) { - trace_afs_protocol_error(call, error, __builtin_return_address(0)); + trace_afs_protocol_error(call, error, cause); return error; } diff --git a/fs/afs/security.c b/fs/afs/security.c index 81dfedb7879f..5f58a9a17e69 100644 --- a/fs/afs/security.c +++ b/fs/afs/security.c @@ -126,7 +126,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key, bool changed = false; int i, j; - _enter("{%x:%u},%x,%x", + _enter("{%llx:%llu},%x,%x", vnode->fid.vid, vnode->fid.vnode, key_serial(key), caller_access); rcu_read_lock(); @@ -147,7 +147,8 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key, break; } - if (cb_break != afs_cb_break_sum(vnode, vnode->cb_interest)) { + if (afs_cb_is_broken(cb_break, vnode, + vnode->cb_interest)) { changed = true; break; } @@ -177,7 +178,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key, } } - if (cb_break != afs_cb_break_sum(vnode, vnode->cb_interest)) + if (afs_cb_is_broken(cb_break, vnode, vnode->cb_interest)) goto someone_else_changed_it; /* We need a ref on any permits list we want to copy as we'll have to @@ -256,7 +257,7 @@ found: spin_lock(&vnode->lock); zap = rcu_access_pointer(vnode->permit_cache); - if (cb_break == afs_cb_break_sum(vnode, vnode->cb_interest) && + if (!afs_cb_is_broken(cb_break, vnode, vnode->cb_interest) && zap == permits) rcu_assign_pointer(vnode->permit_cache, replacement); else @@ -289,7 +290,7 @@ int afs_check_permit(struct afs_vnode *vnode, struct key *key, bool valid = false; int i, ret; - _enter("{%x:%u},%x", + _enter("{%llx:%llu},%x", vnode->fid.vid, vnode->fid.vnode, key_serial(key)); /* check the permits to see if we've got one yet */ @@ -349,7 +350,7 @@ int afs_permission(struct inode *inode, int mask) if (mask & MAY_NOT_BLOCK) return -ECHILD; - _enter("{{%x:%u},%lx},%x,", + _enter("{{%llx:%llu},%lx},%x,", vnode->fid.vid, vnode->fid.vnode, vnode->flags, mask); key = afs_request_key(vnode->volume->cell); diff --git a/fs/afs/server.c b/fs/afs/server.c index 1d329e6981d5..642afa2e9783 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c @@ -13,6 +13,7 @@ #include <linux/slab.h> #include "afs_fs.h" #include "internal.h" +#include "protocol_yfs.h" static unsigned afs_server_gc_delay = 10; /* Server record timeout in seconds */ static unsigned afs_server_update_delay = 30; /* Time till VLDB recheck in secs */ @@ -230,6 +231,8 @@ static struct afs_server *afs_alloc_server(struct afs_net *net, rwlock_init(&server->fs_lock); INIT_HLIST_HEAD(&server->cb_volumes); rwlock_init(&server->cb_break_lock); + init_waitqueue_head(&server->probe_wq); + spin_lock_init(&server->probe_lock); afs_inc_servers_outstanding(net); _leave(" = %p", server); @@ -246,41 +249,23 @@ enomem: static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_cell *cell, struct key *key, const uuid_t *uuid) { - struct afs_addr_cursor ac; - struct afs_addr_list *alist; + struct afs_vl_cursor vc; + struct afs_addr_list *alist = NULL; int ret; - ret = afs_set_vl_cursor(&ac, cell); - if (ret < 0) - return ERR_PTR(ret); - - while (afs_iterate_addresses(&ac)) { - if (test_bit(ac.index, &ac.alist->yfs)) - alist = afs_yfsvl_get_endpoints(cell->net, &ac, key, uuid); - else - alist = afs_vl_get_addrs_u(cell->net, &ac, key, uuid); - switch (ac.error) { - case 0: - afs_end_cursor(&ac); - return alist; - case -ECONNABORTED: - ac.error = afs_abort_to_error(ac.abort_code); - goto 
error; - case -ENOMEM: - case -ENONET: - goto error; - case -ENETUNREACH: - case -EHOSTUNREACH: - case -ECONNREFUSED: - break; - default: - ac.error = -EIO; - goto error; + ret = -ERESTARTSYS; + if (afs_begin_vlserver_operation(&vc, cell, key)) { + while (afs_select_vlserver(&vc)) { + if (test_bit(AFS_VLSERVER_FL_IS_YFS, &vc.server->flags)) + alist = afs_yfsvl_get_endpoints(&vc, uuid); + else + alist = afs_vl_get_addrs_u(&vc, uuid); } + + ret = afs_end_vlserver_operation(&vc); } -error: - return ERR_PTR(afs_end_cursor(&ac)); + return ret < 0 ? ERR_PTR(ret) : alist; } /* @@ -382,9 +367,7 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server) struct afs_addr_list *alist = rcu_access_pointer(server->addresses); struct afs_addr_cursor ac = { .alist = alist, - .start = alist->index, - .index = 0, - .addr = &alist->addrs[alist->index], + .index = alist->preferred, .error = 0, }; _enter("%p", server); @@ -392,6 +375,9 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server) if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags)) afs_fs_give_up_all_callbacks(net, server, &ac, NULL); + wait_var_event(&server->probe_outstanding, + atomic_read(&server->probe_outstanding) == 0); + call_rcu(&server->rcu, afs_server_rcu); afs_dec_servers_outstanding(net); } @@ -525,99 +511,6 @@ void afs_purge_servers(struct afs_net *net) } /* - * Probe a fileserver to find its capabilities. - * - * TODO: Try service upgrade. - */ -static bool afs_do_probe_fileserver(struct afs_fs_cursor *fc) -{ - _enter(""); - - fc->ac.addr = NULL; - fc->ac.start = READ_ONCE(fc->ac.alist->index); - fc->ac.index = fc->ac.start; - fc->ac.error = 0; - fc->ac.begun = false; - - while (afs_iterate_addresses(&fc->ac)) { - afs_fs_get_capabilities(afs_v2net(fc->vnode), fc->cbi->server, - &fc->ac, fc->key); - switch (fc->ac.error) { - case 0: - afs_end_cursor(&fc->ac); - set_bit(AFS_SERVER_FL_PROBED, &fc->cbi->server->flags); - return true; - case -ECONNABORTED: - fc->ac.error = afs_abort_to_error(fc->ac.abort_code); - goto error; - case -ENOMEM: - case -ENONET: - goto error; - case -ENETUNREACH: - case -EHOSTUNREACH: - case -ECONNREFUSED: - case -ETIMEDOUT: - case -ETIME: - break; - default: - fc->ac.error = -EIO; - goto error; - } - } - -error: - afs_end_cursor(&fc->ac); - return false; -} - -/* - * If we haven't already, try probing the fileserver to get its capabilities. - * We try not to instigate parallel probes, but it's possible that the parallel - * probes will fail due to authentication failure when ours would succeed. - * - * TODO: Try sending an anonymous probe if an authenticated probe fails. 
- */ -bool afs_probe_fileserver(struct afs_fs_cursor *fc) -{ - bool success; - int ret, retries = 0; - - _enter(""); - -retry: - if (test_bit(AFS_SERVER_FL_PROBED, &fc->cbi->server->flags)) { - _leave(" = t"); - return true; - } - - if (!test_and_set_bit_lock(AFS_SERVER_FL_PROBING, &fc->cbi->server->flags)) { - success = afs_do_probe_fileserver(fc); - clear_bit_unlock(AFS_SERVER_FL_PROBING, &fc->cbi->server->flags); - wake_up_bit(&fc->cbi->server->flags, AFS_SERVER_FL_PROBING); - _leave(" = t"); - return success; - } - - _debug("wait"); - ret = wait_on_bit(&fc->cbi->server->flags, AFS_SERVER_FL_PROBING, - TASK_INTERRUPTIBLE); - if (ret == -ERESTARTSYS) { - fc->ac.error = ret; - _leave(" = f [%d]", ret); - return false; - } - - retries++; - if (retries == 4) { - fc->ac.error = -ESTALE; - _leave(" = f [stale]"); - return false; - } - _debug("retry"); - goto retry; -} - -/* * Get an update for a server's address list. */ static noinline bool afs_update_server_record(struct afs_fs_cursor *fc, struct afs_server *server) diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c index 8a5760aa5832..95d0761cdb34 100644 --- a/fs/afs/server_list.c +++ b/fs/afs/server_list.c @@ -118,11 +118,11 @@ bool afs_annotate_server_list(struct afs_server_list *new, return false; changed: - /* Maintain the same current server as before if possible. */ - cur = old->servers[old->index].server; + /* Maintain the same preferred server as before if possible. */ + cur = old->servers[old->preferred].server; for (j = 0; j < new->nr_servers; j++) { if (new->servers[j].server == cur) { - new->index = j; + new->preferred = j; break; } } diff --git a/fs/afs/super.c b/fs/afs/super.c index 4d3e274207fb..dcd07fe99871 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -406,10 +406,11 @@ static int afs_fill_super(struct super_block *sb, inode = afs_iget_pseudo_dir(sb, true); sb->s_flags |= SB_RDONLY; } else { - sprintf(sb->s_id, "%u", as->volume->vid); + sprintf(sb->s_id, "%llu", as->volume->vid); afs_activate_volume(as->volume); fid.vid = as->volume->vid; fid.vnode = 1; + fid.vnode_hi = 0; fid.unique = 1; inode = afs_iget(sb, params->key, &fid, NULL, NULL, NULL); } @@ -663,7 +664,7 @@ static void afs_destroy_inode(struct inode *inode) { struct afs_vnode *vnode = AFS_FS_I(inode); - _enter("%p{%x:%u}", inode, vnode->fid.vid, vnode->fid.vnode); + _enter("%p{%llx:%llu}", inode, vnode->fid.vid, vnode->fid.vnode); _debug("DESTROY INODE %p", inode); diff --git a/fs/afs/vl_list.c b/fs/afs/vl_list.c new file mode 100644 index 000000000000..b4f1a84519b9 --- /dev/null +++ b/fs/afs/vl_list.c @@ -0,0 +1,340 @@ +/* AFS vlserver list management. + * + * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include "internal.h" + +struct afs_vlserver *afs_alloc_vlserver(const char *name, size_t name_len, + unsigned short port) +{ + struct afs_vlserver *vlserver; + + vlserver = kzalloc(struct_size(vlserver, name, name_len + 1), + GFP_KERNEL); + if (vlserver) { + atomic_set(&vlserver->usage, 1); + rwlock_init(&vlserver->lock); + init_waitqueue_head(&vlserver->probe_wq); + spin_lock_init(&vlserver->probe_lock); + vlserver->name_len = name_len; + vlserver->port = port; + memcpy(vlserver->name, name, name_len); + } + return vlserver; +} + +static void afs_vlserver_rcu(struct rcu_head *rcu) +{ + struct afs_vlserver *vlserver = container_of(rcu, struct afs_vlserver, rcu); + + afs_put_addrlist(rcu_access_pointer(vlserver->addresses)); + kfree_rcu(vlserver, rcu); +} + +void afs_put_vlserver(struct afs_net *net, struct afs_vlserver *vlserver) +{ + if (vlserver) { + unsigned int u = atomic_dec_return(&vlserver->usage); + //_debug("VL PUT %p{%u}", vlserver, u); + + if (u == 0) + call_rcu(&vlserver->rcu, afs_vlserver_rcu); + } +} + +struct afs_vlserver_list *afs_alloc_vlserver_list(unsigned int nr_servers) +{ + struct afs_vlserver_list *vllist; + + vllist = kzalloc(struct_size(vllist, servers, nr_servers), GFP_KERNEL); + if (vllist) { + atomic_set(&vllist->usage, 1); + rwlock_init(&vllist->lock); + } + + return vllist; +} + +void afs_put_vlserverlist(struct afs_net *net, struct afs_vlserver_list *vllist) +{ + if (vllist) { + unsigned int u = atomic_dec_return(&vllist->usage); + + //_debug("VLLS PUT %p{%u}", vllist, u); + if (u == 0) { + int i; + + for (i = 0; i < vllist->nr_servers; i++) { + afs_put_vlserver(net, vllist->servers[i].server); + } + kfree_rcu(vllist, rcu); + } + } +} + +static u16 afs_extract_le16(const u8 **_b) +{ + u16 val; + + val = (u16)*(*_b)++ << 0; + val |= (u16)*(*_b)++ << 8; + return val; +} + +/* + * Build a VL server address list from a DNS queried server list. + */ +static struct afs_addr_list *afs_extract_vl_addrs(const u8 **_b, const u8 *end, + u8 nr_addrs, u16 port) +{ + struct afs_addr_list *alist; + const u8 *b = *_b; + int ret = -EINVAL; + + alist = afs_alloc_addrlist(nr_addrs, VL_SERVICE, port); + if (!alist) + return ERR_PTR(-ENOMEM); + if (nr_addrs == 0) + return alist; + + for (; nr_addrs > 0 && end - b >= nr_addrs; nr_addrs--) { + struct dns_server_list_v1_address hdr; + __be32 x[4]; + + hdr.address_type = *b++; + + switch (hdr.address_type) { + case DNS_ADDRESS_IS_IPV4: + if (end - b < 4) { + _leave(" = -EINVAL [short inet]"); + goto error; + } + memcpy(x, b, 4); + afs_merge_fs_addr4(alist, x[0], port); + b += 4; + break; + + case DNS_ADDRESS_IS_IPV6: + if (end - b < 16) { + _leave(" = -EINVAL [short inet6]"); + goto error; + } + memcpy(x, b, 16); + afs_merge_fs_addr6(alist, x, port); + b += 16; + break; + + default: + _leave(" = -EADDRNOTAVAIL [unknown af %u]", + hdr.address_type); + ret = -EADDRNOTAVAIL; + goto error; + } + } + + /* Start with IPv6 if available. */ + if (alist->nr_ipv4 < alist->nr_addrs) + alist->preferred = alist->nr_ipv4; + + *_b = b; + return alist; + +error: + *_b = b; + afs_put_addrlist(alist); + return ERR_PTR(ret); +} + +/* + * Build a VL server list from a DNS queried server list. 
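 *
 * Each server record in the DNS-sourced blob is consumed below as:
 *	__le16	name_len, priority, weight, port;
 *	u8	source, status, protocol, nr_addrs;
 *	char	name[name_len];
 * followed by nr_addrs address records, each a one-byte type plus 4 (IPv4)
 * or 16 (IPv6) bytes of address, which afs_extract_vl_addrs() above merges
 * into an afs_addr_list.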
+ */ +struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *cell, + const void *buffer, + size_t buffer_size) +{ + const struct dns_server_list_v1_header *hdr = buffer; + struct dns_server_list_v1_server bs; + struct afs_vlserver_list *vllist, *previous; + struct afs_addr_list *addrs; + struct afs_vlserver *server; + const u8 *b = buffer, *end = buffer + buffer_size; + int ret = -ENOMEM, nr_servers, i, j; + + _enter(""); + + /* Check that it's a server list, v1 */ + if (end - b < sizeof(*hdr) || + hdr->hdr.content != DNS_PAYLOAD_IS_SERVER_LIST || + hdr->hdr.version != 1) { + pr_notice("kAFS: Got DNS record [%u,%u] len %zu\n", + hdr->hdr.content, hdr->hdr.version, end - b); + ret = -EDESTADDRREQ; + goto dump; + } + + nr_servers = hdr->nr_servers; + + vllist = afs_alloc_vlserver_list(nr_servers); + if (!vllist) + return ERR_PTR(-ENOMEM); + + vllist->source = (hdr->source < NR__dns_record_source) ? + hdr->source : NR__dns_record_source; + vllist->status = (hdr->status < NR__dns_lookup_status) ? + hdr->status : NR__dns_lookup_status; + + read_lock(&cell->vl_servers_lock); + previous = afs_get_vlserverlist( + rcu_dereference_protected(cell->vl_servers, + lockdep_is_held(&cell->vl_servers_lock))); + read_unlock(&cell->vl_servers_lock); + + b += sizeof(*hdr); + while (end - b >= sizeof(bs)) { + bs.name_len = afs_extract_le16(&b); + bs.priority = afs_extract_le16(&b); + bs.weight = afs_extract_le16(&b); + bs.port = afs_extract_le16(&b); + bs.source = *b++; + bs.status = *b++; + bs.protocol = *b++; + bs.nr_addrs = *b++; + + _debug("extract %u %u %u %u %u %u %*.*s", + bs.name_len, bs.priority, bs.weight, + bs.port, bs.protocol, bs.nr_addrs, + bs.name_len, bs.name_len, b); + + if (end - b < bs.name_len) + break; + + ret = -EPROTONOSUPPORT; + if (bs.protocol == DNS_SERVER_PROTOCOL_UNSPECIFIED) { + bs.protocol = DNS_SERVER_PROTOCOL_UDP; + } else if (bs.protocol != DNS_SERVER_PROTOCOL_UDP) { + _leave(" = [proto %u]", bs.protocol); + goto error; + } + + if (bs.port == 0) + bs.port = AFS_VL_PORT; + if (bs.source > NR__dns_record_source) + bs.source = NR__dns_record_source; + if (bs.status > NR__dns_lookup_status) + bs.status = NR__dns_lookup_status; + + server = NULL; + if (previous) { + /* See if we can update an old server record */ + for (i = 0; i < previous->nr_servers; i++) { + struct afs_vlserver *p = previous->servers[i].server; + + if (p->name_len == bs.name_len && + p->port == bs.port && + strncasecmp(b, p->name, bs.name_len) == 0) { + server = afs_get_vlserver(p); + break; + } + } + } + + if (!server) { + ret = -ENOMEM; + server = afs_alloc_vlserver(b, bs.name_len, bs.port); + if (!server) + goto error; + } + + b += bs.name_len; + + /* Extract the addresses - note that we can't skip this as we + * have to advance the payload pointer. 
+ */ + addrs = afs_extract_vl_addrs(&b, end, bs.nr_addrs, bs.port); + if (IS_ERR(addrs)) { + ret = PTR_ERR(addrs); + goto error_2; + } + + if (vllist->nr_servers >= nr_servers) { + _debug("skip %u >= %u", vllist->nr_servers, nr_servers); + afs_put_addrlist(addrs); + afs_put_vlserver(cell->net, server); + continue; + } + + addrs->source = bs.source; + addrs->status = bs.status; + + if (addrs->nr_addrs == 0) { + afs_put_addrlist(addrs); + if (!rcu_access_pointer(server->addresses)) { + afs_put_vlserver(cell->net, server); + continue; + } + } else { + struct afs_addr_list *old = addrs; + + write_lock(&server->lock); + rcu_swap_protected(server->addresses, old, + lockdep_is_held(&server->lock)); + write_unlock(&server->lock); + afs_put_addrlist(old); + } + + + /* TODO: Might want to check for duplicates */ + + /* Insertion-sort by priority and weight */ + for (j = 0; j < vllist->nr_servers; j++) { + if (bs.priority < vllist->servers[j].priority) + break; /* Lower preferable */ + if (bs.priority == vllist->servers[j].priority && + bs.weight > vllist->servers[j].weight) + break; /* Higher preferable */ + } + + if (j < vllist->nr_servers) { + memmove(vllist->servers + j + 1, + vllist->servers + j, + (vllist->nr_servers - j) * sizeof(struct afs_vlserver_entry)); + } + + clear_bit(AFS_VLSERVER_FL_PROBED, &server->flags); + + vllist->servers[j].priority = bs.priority; + vllist->servers[j].weight = bs.weight; + vllist->servers[j].server = server; + vllist->nr_servers++; + } + + if (b != end) { + _debug("parse error %zd", b - end); + goto error; + } + + afs_put_vlserverlist(cell->net, previous); + _leave(" = ok [%u]", vllist->nr_servers); + return vllist; + +error_2: + afs_put_vlserver(cell->net, server); +error: + afs_put_vlserverlist(cell->net, vllist); + afs_put_vlserverlist(cell->net, previous); +dump: + if (ret != -ENOMEM) { + printk(KERN_DEBUG "DNS: at %zu\n", (const void *)b - buffer); + print_hex_dump_bytes("DNS: ", DUMP_PREFIX_NONE, buffer, buffer_size); + } + return ERR_PTR(ret); +} diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c new file mode 100644 index 000000000000..c0f616bd70cb --- /dev/null +++ b/fs/afs/vl_probe.c @@ -0,0 +1,273 @@ +/* AFS vlserver probing + * + * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#include <linux/sched.h> +#include <linux/slab.h> +#include "afs_fs.h" +#include "internal.h" +#include "protocol_yfs.h" + +static bool afs_vl_probe_done(struct afs_vlserver *server) +{ + if (!atomic_dec_and_test(&server->probe_outstanding)) + return false; + + wake_up_var(&server->probe_outstanding); + clear_bit_unlock(AFS_VLSERVER_FL_PROBING, &server->flags); + wake_up_bit(&server->flags, AFS_VLSERVER_FL_PROBING); + return true; +} + +/* + * Process the result of probing a vlserver. This is called after successful + * or failed delivery of an VL.GetCapabilities operation. 
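 *
 * On a response the address is flagged in alist->responded, the answering
 * service ID (YFS or plain VL) is noted and, if this address shows the
 * lowest RTT so far, it becomes alist->preferred; on failure the address is
 * flagged in alist->failed and the most significant error is kept in
 * server->probe for the rotation code to inspect.
 *
 * The RTT is scaled down by 64 to fit a 32-bit value; assuming the rxrpc
 * sample is in nanoseconds, e.g. 1.5 ms -> 1500000 / 64 = 23437 units of
 * 64 ns, and UINT_MAX such units is roughly 274 seconds, hence "over a
 * minute" in the comment below.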
+ */ +void afs_vlserver_probe_result(struct afs_call *call) +{ + struct afs_addr_list *alist = call->alist; + struct afs_vlserver *server = call->reply[0]; + unsigned int server_index = (long)call->reply[1]; + unsigned int index = call->addr_ix; + unsigned int rtt = UINT_MAX; + bool have_result = false; + u64 _rtt; + int ret = call->error; + + _enter("%s,%u,%u,%d,%d", server->name, server_index, index, ret, call->abort_code); + + spin_lock(&server->probe_lock); + + switch (ret) { + case 0: + server->probe.error = 0; + goto responded; + case -ECONNABORTED: + if (!server->probe.responded) { + server->probe.abort_code = call->abort_code; + server->probe.error = ret; + } + goto responded; + case -ENOMEM: + case -ENONET: + server->probe.local_failure = true; + afs_io_error(call, afs_io_error_vl_probe_fail); + goto out; + case -ECONNRESET: /* Responded, but call expired. */ + case -ENETUNREACH: + case -EHOSTUNREACH: + case -ECONNREFUSED: + case -ETIMEDOUT: + case -ETIME: + default: + clear_bit(index, &alist->responded); + set_bit(index, &alist->failed); + if (!server->probe.responded && + (server->probe.error == 0 || + server->probe.error == -ETIMEDOUT || + server->probe.error == -ETIME)) + server->probe.error = ret; + afs_io_error(call, afs_io_error_vl_probe_fail); + goto out; + } + +responded: + set_bit(index, &alist->responded); + clear_bit(index, &alist->failed); + + if (call->service_id == YFS_VL_SERVICE) { + server->probe.is_yfs = true; + set_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags); + alist->addrs[index].srx_service = call->service_id; + } else { + server->probe.not_yfs = true; + if (!server->probe.is_yfs) { + clear_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags); + alist->addrs[index].srx_service = call->service_id; + } + } + + /* Get the RTT and scale it to fit into a 32-bit value that represents + * over a minute of time so that we can access it with one instruction + * on a 32-bit system. + */ + _rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall); + _rtt /= 64; + rtt = (_rtt > UINT_MAX) ? UINT_MAX : _rtt; + if (rtt < server->probe.rtt) { + server->probe.rtt = rtt; + alist->preferred = index; + have_result = true; + } + + smp_wmb(); /* Set rtt before responded. */ + server->probe.responded = true; + set_bit(AFS_VLSERVER_FL_PROBED, &server->flags); +out: + spin_unlock(&server->probe_lock); + + _debug("probe [%u][%u] %pISpc rtt=%u ret=%d", + server_index, index, &alist->addrs[index].transport, + (unsigned int)rtt, ret); + + have_result |= afs_vl_probe_done(server); + if (have_result) { + server->probe.have_result = true; + wake_up_var(&server->probe.have_result); + wake_up_all(&server->probe_wq); + } +} + +/* + * Probe all of a vlserver's addresses to find out the best route and to + * query its capabilities. 
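 *
 * A VL.GetCapabilities probe is dispatched asynchronously to every address
 * in the server's current list; probe_outstanding counts the replies still
 * due, and afs_vlserver_probe_result() records each outcome and wakes any
 * waiter in afs_wait_for_vl_probes().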
+ */ +static int afs_do_probe_vlserver(struct afs_net *net, + struct afs_vlserver *server, + struct key *key, + unsigned int server_index) +{ + struct afs_addr_cursor ac = { + .index = 0, + }; + int ret; + + _enter("%s", server->name); + + read_lock(&server->lock); + ac.alist = rcu_dereference_protected(server->addresses, + lockdep_is_held(&server->lock)); + read_unlock(&server->lock); + + atomic_set(&server->probe_outstanding, ac.alist->nr_addrs); + memset(&server->probe, 0, sizeof(server->probe)); + server->probe.rtt = UINT_MAX; + + for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) { + ret = afs_vl_get_capabilities(net, &ac, key, server, + server_index, true); + if (ret != -EINPROGRESS) { + afs_vl_probe_done(server); + return ret; + } + } + + return 0; +} + +/* + * Send off probes to all unprobed servers. + */ +int afs_send_vl_probes(struct afs_net *net, struct key *key, + struct afs_vlserver_list *vllist) +{ + struct afs_vlserver *server; + int i, ret; + + for (i = 0; i < vllist->nr_servers; i++) { + server = vllist->servers[i].server; + if (test_bit(AFS_VLSERVER_FL_PROBED, &server->flags)) + continue; + + if (!test_and_set_bit_lock(AFS_VLSERVER_FL_PROBING, &server->flags)) { + ret = afs_do_probe_vlserver(net, server, key, i); + if (ret) + return ret; + } + } + + return 0; +} + +/* + * Wait for the first as-yet untried server to respond. + */ +int afs_wait_for_vl_probes(struct afs_vlserver_list *vllist, + unsigned long untried) +{ + struct wait_queue_entry *waits; + struct afs_vlserver *server; + unsigned int rtt = UINT_MAX; + bool have_responders = false; + int pref = -1, i; + + _enter("%u,%lx", vllist->nr_servers, untried); + + /* Only wait for servers that have a probe outstanding. */ + for (i = 0; i < vllist->nr_servers; i++) { + if (test_bit(i, &untried)) { + server = vllist->servers[i].server; + if (!test_bit(AFS_VLSERVER_FL_PROBING, &server->flags)) + __clear_bit(i, &untried); + if (server->probe.responded) + have_responders = true; + } + } + if (have_responders || !untried) + return 0; + + waits = kmalloc(array_size(vllist->nr_servers, sizeof(*waits)), GFP_KERNEL); + if (!waits) + return -ENOMEM; + + for (i = 0; i < vllist->nr_servers; i++) { + if (test_bit(i, &untried)) { + server = vllist->servers[i].server; + init_waitqueue_entry(&waits[i], current); + add_wait_queue(&server->probe_wq, &waits[i]); + } + } + + for (;;) { + bool still_probing = false; + + set_current_state(TASK_INTERRUPTIBLE); + for (i = 0; i < vllist->nr_servers; i++) { + if (test_bit(i, &untried)) { + server = vllist->servers[i].server; + if (server->probe.responded) + goto stop; + if (test_bit(AFS_VLSERVER_FL_PROBING, &server->flags)) + still_probing = true; + } + } + + if (!still_probing || unlikely(signal_pending(current))) + goto stop; + schedule(); + } + +stop: + set_current_state(TASK_RUNNING); + + for (i = 0; i < vllist->nr_servers; i++) { + if (test_bit(i, &untried)) { + server = vllist->servers[i].server; + if (server->probe.responded && + server->probe.rtt < rtt) { + pref = i; + rtt = server->probe.rtt; + } + + remove_wait_queue(&server->probe_wq, &waits[i]); + } + } + + kfree(waits); + + if (pref == -1 && signal_pending(current)) + return -ERESTARTSYS; + + if (pref >= 0) + vllist->preferred = pref; + + _leave(" = 0 [%u]", pref); + return 0; +} diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c new file mode 100644 index 000000000000..b64a284b99d2 --- /dev/null +++ b/fs/afs/vl_rotate.c @@ -0,0 +1,355 @@ +/* Handle vlserver selection and rotation. 
+ * + * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/sched/signal.h> +#include "internal.h" +#include "afs_vl.h" + +/* + * Begin an operation on a volume location server. + */ +bool afs_begin_vlserver_operation(struct afs_vl_cursor *vc, struct afs_cell *cell, + struct key *key) +{ + memset(vc, 0, sizeof(*vc)); + vc->cell = cell; + vc->key = key; + vc->error = -EDESTADDRREQ; + vc->ac.error = SHRT_MAX; + + if (signal_pending(current)) { + vc->error = -EINTR; + vc->flags |= AFS_VL_CURSOR_STOP; + return false; + } + + return true; +} + +/* + * Begin iteration through a server list, starting with the last used server if + * possible, or the last recorded good server if not. + */ +static bool afs_start_vl_iteration(struct afs_vl_cursor *vc) +{ + struct afs_cell *cell = vc->cell; + + if (wait_on_bit(&cell->flags, AFS_CELL_FL_NO_LOOKUP_YET, + TASK_INTERRUPTIBLE)) { + vc->error = -ERESTARTSYS; + return false; + } + + read_lock(&cell->vl_servers_lock); + vc->server_list = afs_get_vlserverlist( + rcu_dereference_protected(cell->vl_servers, + lockdep_is_held(&cell->vl_servers_lock))); + read_unlock(&cell->vl_servers_lock); + if (!vc->server_list || !vc->server_list->nr_servers) + return false; + + vc->untried = (1UL << vc->server_list->nr_servers) - 1; + vc->index = -1; + return true; +} + +/* + * Select the vlserver to use. May be called multiple times to rotate + * through the vlservers. + */ +bool afs_select_vlserver(struct afs_vl_cursor *vc) +{ + struct afs_addr_list *alist; + struct afs_vlserver *vlserver; + u32 rtt; + int error = vc->ac.error, abort_code, i; + + _enter("%lx[%d],%lx[%d],%d,%d", + vc->untried, vc->index, + vc->ac.tried, vc->ac.index, + error, vc->ac.abort_code); + + if (vc->flags & AFS_VL_CURSOR_STOP) { + _leave(" = f [stopped]"); + return false; + } + + vc->nr_iterations++; + + /* Evaluate the result of the previous operation, if there was one. */ + switch (error) { + case SHRT_MAX: + goto start; + + default: + case 0: + /* Success or local failure. Stop. */ + vc->error = error; + vc->flags |= AFS_VL_CURSOR_STOP; + _leave(" = f [okay/local %d]", vc->ac.error); + return false; + + case -ECONNABORTED: + /* The far side rejected the operation on some grounds. This + * might involve the server being busy or the volume having been moved. + */ + switch (vc->ac.abort_code) { + case AFSVL_IO: + case AFSVL_BADVOLOPER: + case AFSVL_NOMEM: + /* The server went weird. 
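			 * AFSVL_IO, AFSVL_BADVOLOPER and AFSVL_NOMEM are taken
			 * to mean the VL server itself is unwell, so -EREMOTEIO
			 * is noted below and the cursor rotates to the next
			 * server rather than failing the whole operation.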
*/ + vc->error = -EREMOTEIO; + //write_lock(&vc->cell->vl_servers_lock); + //vc->server_list->weird_mask |= 1 << vc->index; + //write_unlock(&vc->cell->vl_servers_lock); + goto next_server; + + default: + vc->error = afs_abort_to_error(vc->ac.abort_code); + goto failed; + } + + case -ENETUNREACH: + case -EHOSTUNREACH: + case -ECONNREFUSED: + case -ETIMEDOUT: + case -ETIME: + _debug("no conn %d", error); + vc->error = error; + goto iterate_address; + + case -ECONNRESET: + _debug("call reset"); + vc->error = error; + vc->flags |= AFS_VL_CURSOR_RETRY; + goto next_server; + } + +restart_from_beginning: + _debug("restart"); + afs_end_cursor(&vc->ac); + afs_put_vlserverlist(vc->cell->net, vc->server_list); + vc->server_list = NULL; + if (vc->flags & AFS_VL_CURSOR_RETRIED) + goto failed; + vc->flags |= AFS_VL_CURSOR_RETRIED; +start: + _debug("start"); + + if (!afs_start_vl_iteration(vc)) + goto failed; + + error = afs_send_vl_probes(vc->cell->net, vc->key, vc->server_list); + if (error < 0) + goto failed_set_error; + +pick_server: + _debug("pick [%lx]", vc->untried); + + error = afs_wait_for_vl_probes(vc->server_list, vc->untried); + if (error < 0) + goto failed_set_error; + + /* Pick the untried server with the lowest RTT. */ + vc->index = vc->server_list->preferred; + if (test_bit(vc->index, &vc->untried)) + goto selected_server; + + vc->index = -1; + rtt = U32_MAX; + for (i = 0; i < vc->server_list->nr_servers; i++) { + struct afs_vlserver *s = vc->server_list->servers[i].server; + + if (!test_bit(i, &vc->untried) || !s->probe.responded) + continue; + if (s->probe.rtt < rtt) { + vc->index = i; + rtt = s->probe.rtt; + } + } + + if (vc->index == -1) + goto no_more_servers; + +selected_server: + _debug("use %d", vc->index); + __clear_bit(vc->index, &vc->untried); + + /* We're starting on a different vlserver from the list. We need to + * check it, find its address list and probe its capabilities before we + * use it. + */ + ASSERTCMP(vc->ac.alist, ==, NULL); + vlserver = vc->server_list->servers[vc->index].server; + vc->server = vlserver; + + _debug("USING VLSERVER: %s", vlserver->name); + + read_lock(&vlserver->lock); + alist = rcu_dereference_protected(vlserver->addresses, + lockdep_is_held(&vlserver->lock)); + afs_get_addrlist(alist); + read_unlock(&vlserver->lock); + + memset(&vc->ac, 0, sizeof(vc->ac)); + + if (!vc->ac.alist) + vc->ac.alist = alist; + else + afs_put_addrlist(alist); + + vc->ac.index = -1; + +iterate_address: + ASSERT(vc->ac.alist); + /* Iterate over the current server's address list to try and find an + * address on which it will respond to us. + */ + if (!afs_iterate_addresses(&vc->ac)) + goto next_server; + + _debug("VL address %d/%d", vc->ac.index, vc->ac.alist->nr_addrs); + + _leave(" = t %pISpc", &vc->ac.alist->addrs[vc->ac.index].transport); + return true; + +next_server: + _debug("next"); + afs_end_cursor(&vc->ac); + goto pick_server; + +no_more_servers: + /* That's all the servers poked to no good effect. Try again if some + * of them were busy. 
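	 *
	 * Otherwise keep the most telling probe error seen across the set.
	 * In increasing order of precedence the retained error is: any other
	 * error, -ETIMEDOUT/-ETIME, -ENOMEM/-ENONET, -ENETUNREACH,
	 * -EHOSTUNREACH, -ECONNREFUSED, -ECONNRESET and finally
	 * -ECONNABORTED, whose recorded abort code is translated by
	 * afs_abort_to_error() once the scan is done.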
+ */ + if (vc->flags & AFS_VL_CURSOR_RETRY) + goto restart_from_beginning; + + abort_code = 0; + error = -EDESTADDRREQ; + for (i = 0; i < vc->server_list->nr_servers; i++) { + struct afs_vlserver *s = vc->server_list->servers[i].server; + int probe_error = READ_ONCE(s->probe.error); + + switch (probe_error) { + case 0: + continue; + default: + if (error == -ETIMEDOUT || + error == -ETIME) + continue; + case -ETIMEDOUT: + case -ETIME: + if (error == -ENOMEM || + error == -ENONET) + continue; + case -ENOMEM: + case -ENONET: + if (error == -ENETUNREACH) + continue; + case -ENETUNREACH: + if (error == -EHOSTUNREACH) + continue; + case -EHOSTUNREACH: + if (error == -ECONNREFUSED) + continue; + case -ECONNREFUSED: + if (error == -ECONNRESET) + continue; + case -ECONNRESET: /* Responded, but call expired. */ + if (error == -ECONNABORTED) + continue; + case -ECONNABORTED: + abort_code = s->probe.abort_code; + error = probe_error; + continue; + } + } + + if (error == -ECONNABORTED) + error = afs_abort_to_error(abort_code); + +failed_set_error: + vc->error = error; +failed: + vc->flags |= AFS_VL_CURSOR_STOP; + afs_end_cursor(&vc->ac); + _leave(" = f [failed %d]", vc->error); + return false; +} + +/* + * Dump cursor state in the case of the error being EDESTADDRREQ. + */ +static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc) +{ + static int count; + int i; + + if (!IS_ENABLED(CONFIG_AFS_DEBUG_CURSOR) || count > 3) + return; + count++; + + rcu_read_lock(); + pr_notice("EDESTADDR occurred\n"); + pr_notice("VC: ut=%lx ix=%u ni=%hu fl=%hx err=%hd\n", + vc->untried, vc->index, vc->nr_iterations, vc->flags, vc->error); + + if (vc->server_list) { + const struct afs_vlserver_list *sl = vc->server_list; + pr_notice("VC: SL nr=%u ix=%u\n", + sl->nr_servers, sl->index); + for (i = 0; i < sl->nr_servers; i++) { + const struct afs_vlserver *s = sl->servers[i].server; + pr_notice("VC: server %s+%hu fl=%lx E=%hd\n", + s->name, s->port, s->flags, s->probe.error); + if (s->addresses) { + const struct afs_addr_list *a = + rcu_dereference(s->addresses); + pr_notice("VC: - nr=%u/%u/%u pf=%u\n", + a->nr_ipv4, a->nr_addrs, a->max_addrs, + a->preferred); + pr_notice("VC: - pr=%lx R=%lx F=%lx\n", + a->probed, a->responded, a->failed); + if (a == vc->ac.alist) + pr_notice("VC: - current\n"); + } + } + } + + pr_notice("AC: t=%lx ax=%u ac=%d er=%d r=%u ni=%u\n", + vc->ac.tried, vc->ac.index, vc->ac.abort_code, vc->ac.error, + vc->ac.responded, vc->ac.nr_iterations); + rcu_read_unlock(); +} + +/* + * Tidy up a volume location server cursor and unlock the vnode. + */ +int afs_end_vlserver_operation(struct afs_vl_cursor *vc) +{ + struct afs_net *net = vc->cell->net; + + if (vc->error == -EDESTADDRREQ || + vc->error == -ENETUNREACH || + vc->error == -EHOSTUNREACH) + afs_vl_dump_edestaddrreq(vc); + + afs_end_cursor(&vc->ac); + afs_put_vlserverlist(net, vc->server_list); + + if (vc->error == -ECONNABORTED) + vc->error = afs_abort_to_error(vc->ac.abort_code); + + return vc->error; +} diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c index c3b740813fc7..c3d9e5a5f67e 100644 --- a/fs/afs/vlclient.c +++ b/fs/afs/vlclient.c @@ -128,14 +128,13 @@ static const struct afs_call_type afs_RXVLGetEntryByNameU = { * Dispatch a get volume entry by name or ID operation (uuid variant). If the * volname is a decimal number then it's a volume ID not a volume name. 
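 *
 * The stub is driven through the VL rotation cursor; a sketch modelled on
 * the afs_vl_lookup_vldb() call site in fs/afs/volume.c:
 *
 *	struct afs_vldb_entry *vldb = ERR_PTR(-EDESTADDRREQ);
 *	struct afs_vl_cursor vc;
 *	int ret;
 *
 *	if (!afs_begin_vlserver_operation(&vc, cell, key))
 *		return ERR_PTR(-ERESTARTSYS);
 *	while (afs_select_vlserver(&vc))
 *		vldb = afs_vl_get_entry_by_name_u(&vc, volname, volnamesz);
 *	ret = afs_end_vlserver_operation(&vc);
 *	return ret < 0 ? ERR_PTR(ret) : vldb;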
*/ -struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_net *net, - struct afs_addr_cursor *ac, - struct key *key, +struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_vl_cursor *vc, const char *volname, int volnamesz) { struct afs_vldb_entry *entry; struct afs_call *call; + struct afs_net *net = vc->cell->net; size_t reqsz, padsz; __be32 *bp; @@ -155,7 +154,7 @@ struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_net *net, return ERR_PTR(-ENOMEM); } - call->key = key; + call->key = vc->key; call->reply[0] = entry; call->ret_reply0 = true; @@ -168,7 +167,7 @@ struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_net *net, memset((void *)bp + volnamesz, 0, padsz); trace_afs_make_vl_call(call); - return (struct afs_vldb_entry *)afs_make_call(ac, call, GFP_KERNEL, false); + return (struct afs_vldb_entry *)afs_make_call(&vc->ac, call, GFP_KERNEL, false); } /* @@ -187,19 +186,18 @@ static int afs_deliver_vl_get_addrs_u(struct afs_call *call) u32 uniquifier, nentries, count; int i, ret; - _enter("{%u,%zu/%u}", call->unmarshall, call->offset, call->count); + _enter("{%u,%zu/%u}", + call->unmarshall, iov_iter_count(call->_iter), call->count); -again: switch (call->unmarshall) { case 0: - call->offset = 0; + afs_extract_to_buf(call, + sizeof(struct afs_uuid__xdr) + 3 * sizeof(__be32)); call->unmarshall++; /* Extract the returned uuid, uniquifier, nentries and blkaddrs size */ case 1: - ret = afs_extract_data(call, call->buffer, - sizeof(struct afs_uuid__xdr) + 3 * sizeof(__be32), - true); + ret = afs_extract_data(call, true); if (ret < 0) return ret; @@ -216,28 +214,28 @@ again: call->reply[0] = alist; call->count = count; call->count2 = nentries; - call->offset = 0; call->unmarshall++; + more_entries: + count = min(call->count, 4U); + afs_extract_to_buf(call, count * sizeof(__be32)); + /* Extract entries */ case 2: - count = min(call->count, 4U); - ret = afs_extract_data(call, call->buffer, - count * sizeof(__be32), - call->count > 4); + ret = afs_extract_data(call, call->count > 4); if (ret < 0) return ret; alist = call->reply[0]; bp = call->buffer; + count = min(call->count, 4U); for (i = 0; i < count; i++) if (alist->nr_addrs < call->count2) afs_merge_fs_addr4(alist, *bp++, AFS_FS_PORT); call->count -= count; if (call->count > 0) - goto again; - call->offset = 0; + goto more_entries; call->unmarshall++; break; } @@ -267,14 +265,13 @@ static const struct afs_call_type afs_RXVLGetAddrsU = { * Dispatch an operation to get the addresses for a server, where the server is * nominated by UUID. 
*/ -struct afs_addr_list *afs_vl_get_addrs_u(struct afs_net *net, - struct afs_addr_cursor *ac, - struct key *key, +struct afs_addr_list *afs_vl_get_addrs_u(struct afs_vl_cursor *vc, const uuid_t *uuid) { struct afs_ListAddrByAttributes__xdr *r; const struct afs_uuid *u = (const struct afs_uuid *)uuid; struct afs_call *call; + struct afs_net *net = vc->cell->net; __be32 *bp; int i; @@ -286,7 +283,7 @@ struct afs_addr_list *afs_vl_get_addrs_u(struct afs_net *net, if (!call) return ERR_PTR(-ENOMEM); - call->key = key; + call->key = vc->key; call->reply[0] = NULL; call->ret_reply0 = true; @@ -307,7 +304,7 @@ struct afs_addr_list *afs_vl_get_addrs_u(struct afs_net *net, r->uuid.node[i] = htonl(u->node[i]); trace_afs_make_vl_call(call); - return (struct afs_addr_list *)afs_make_call(ac, call, GFP_KERNEL, false); + return (struct afs_addr_list *)afs_make_call(&vc->ac, call, GFP_KERNEL, false); } /* @@ -318,54 +315,51 @@ static int afs_deliver_vl_get_capabilities(struct afs_call *call) u32 count; int ret; - _enter("{%u,%zu/%u}", call->unmarshall, call->offset, call->count); + _enter("{%u,%zu/%u}", + call->unmarshall, iov_iter_count(call->_iter), call->count); -again: switch (call->unmarshall) { case 0: - call->offset = 0; + afs_extract_to_tmp(call); call->unmarshall++; /* Extract the capabilities word count */ case 1: - ret = afs_extract_data(call, &call->tmp, - 1 * sizeof(__be32), - true); + ret = afs_extract_data(call, true); if (ret < 0) return ret; count = ntohl(call->tmp); - call->count = count; call->count2 = count; - call->offset = 0; + call->unmarshall++; + afs_extract_discard(call, count * sizeof(__be32)); /* Extract capabilities words */ case 2: - count = min(call->count, 16U); - ret = afs_extract_data(call, call->buffer, - count * sizeof(__be32), - call->count > 16); + ret = afs_extract_data(call, false); if (ret < 0) return ret; /* TODO: Examine capabilities */ - call->count -= count; - if (call->count > 0) - goto again; - call->offset = 0; call->unmarshall++; break; } - call->reply[0] = (void *)(unsigned long)call->service_id; - _leave(" = 0 [done]"); return 0; } +static void afs_destroy_vl_get_capabilities(struct afs_call *call) +{ + struct afs_vlserver *server = call->reply[0]; + + afs_put_vlserver(call->net, server); + afs_flat_call_destructor(call); +} + /* * VL.GetCapabilities operation type */ @@ -373,11 +367,12 @@ static const struct afs_call_type afs_RXVLGetCapabilities = { .name = "VL.GetCapabilities", .op = afs_VL_GetCapabilities, .deliver = afs_deliver_vl_get_capabilities, - .destructor = afs_flat_call_destructor, + .done = afs_vlserver_probe_result, + .destructor = afs_destroy_vl_get_capabilities, }; /* - * Probe a fileserver for the capabilities that it supports. This can + * Probe a volume server for the capabilities that it supports. This can * return up to 196 words. 
* * We use this to probe for service upgrade to determine what the server at the @@ -385,7 +380,10 @@ static const struct afs_call_type afs_RXVLGetCapabilities = { */ int afs_vl_get_capabilities(struct afs_net *net, struct afs_addr_cursor *ac, - struct key *key) + struct key *key, + struct afs_vlserver *server, + unsigned int server_index, + bool async) { struct afs_call *call; __be32 *bp; @@ -397,9 +395,10 @@ int afs_vl_get_capabilities(struct afs_net *net, return -ENOMEM; call->key = key; - call->upgrade = true; /* Let's see if this is a YFS server */ - call->reply[0] = (void *)VLGETCAPABILITIES; - call->ret_reply0 = true; + call->reply[0] = afs_get_vlserver(server); + call->reply[1] = (void *)(long)server_index; + call->upgrade = true; + call->want_reply_time = true; /* marshall the parameters */ bp = call->request; @@ -407,7 +406,7 @@ int afs_vl_get_capabilities(struct afs_net *net, /* Can't take a ref on server */ trace_afs_make_vl_call(call); - return afs_make_call(ac, call, GFP_KERNEL, false); + return afs_make_call(ac, call, GFP_KERNEL, async); } /* @@ -426,22 +425,19 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) u32 uniquifier, size; int ret; - _enter("{%u,%zu/%u,%u}", call->unmarshall, call->offset, call->count, call->count2); + _enter("{%u,%zu,%u}", + call->unmarshall, iov_iter_count(call->_iter), call->count2); -again: switch (call->unmarshall) { case 0: - call->offset = 0; + afs_extract_to_buf(call, sizeof(uuid_t) + 3 * sizeof(__be32)); call->unmarshall = 1; /* Extract the returned uuid, uniquifier, fsEndpoints count and * either the first fsEndpoint type or the volEndpoints * count if there are no fsEndpoints. */ case 1: - ret = afs_extract_data(call, call->buffer, - sizeof(uuid_t) + - 3 * sizeof(__be32), - true); + ret = afs_extract_data(call, true); if (ret < 0) return ret; @@ -451,22 +447,19 @@ again: call->count2 = ntohl(*bp); /* Type or next count */ if (call->count > YFS_MAXENDPOINTS) - return afs_protocol_error(call, -EBADMSG); + return afs_protocol_error(call, -EBADMSG, + afs_eproto_yvl_fsendpt_num); alist = afs_alloc_addrlist(call->count, FS_SERVICE, AFS_FS_PORT); if (!alist) return -ENOMEM; alist->version = uniquifier; call->reply[0] = alist; - call->offset = 0; if (call->count == 0) goto extract_volendpoints; - call->unmarshall = 2; - - /* Extract fsEndpoints[] entries */ - case 2: + next_fsendpoint: switch (call->count2) { case YFS_ENDPOINT_IPV4: size = sizeof(__be32) * (1 + 1 + 1); @@ -475,11 +468,17 @@ again: size = sizeof(__be32) * (1 + 4 + 1); break; default: - return afs_protocol_error(call, -EBADMSG); + return afs_protocol_error(call, -EBADMSG, + afs_eproto_yvl_fsendpt_type); } size += sizeof(__be32); - ret = afs_extract_data(call, call->buffer, size, true); + afs_extract_to_buf(call, size); + call->unmarshall = 2; + + /* Extract fsEndpoints[] entries */ + case 2: + ret = afs_extract_data(call, true); if (ret < 0) return ret; @@ -488,18 +487,21 @@ again: switch (call->count2) { case YFS_ENDPOINT_IPV4: if (ntohl(bp[0]) != sizeof(__be32) * 2) - return afs_protocol_error(call, -EBADMSG); + return afs_protocol_error(call, -EBADMSG, + afs_eproto_yvl_fsendpt4_len); afs_merge_fs_addr4(alist, bp[1], ntohl(bp[2])); bp += 3; break; case YFS_ENDPOINT_IPV6: if (ntohl(bp[0]) != sizeof(__be32) * 5) - return afs_protocol_error(call, -EBADMSG); + return afs_protocol_error(call, -EBADMSG, + afs_eproto_yvl_fsendpt6_len); afs_merge_fs_addr6(alist, bp + 1, ntohl(bp[5])); bp += 6; break; default: - return afs_protocol_error(call, -EBADMSG); + return 
afs_protocol_error(call, -EBADMSG, + afs_eproto_yvl_fsendpt_type); } /* Got either the type of the next entry or the count of @@ -507,10 +509,9 @@ again: */ call->count2 = ntohl(*bp++); - call->offset = 0; call->count--; if (call->count > 0) - goto again; + goto next_fsendpoint; extract_volendpoints: /* Extract the list of volEndpoints. */ @@ -518,8 +519,10 @@ again: if (!call->count) goto end; if (call->count > YFS_MAXENDPOINTS) - return afs_protocol_error(call, -EBADMSG); + return afs_protocol_error(call, -EBADMSG, + afs_eproto_yvl_vlendpt_type); + afs_extract_to_buf(call, 1 * sizeof(__be32)); call->unmarshall = 3; /* Extract the type of volEndpoints[0]. Normally we would @@ -527,17 +530,14 @@ again: * data of the current one, but this is the first... */ case 3: - ret = afs_extract_data(call, call->buffer, sizeof(__be32), true); + ret = afs_extract_data(call, true); if (ret < 0) return ret; bp = call->buffer; - call->count2 = ntohl(*bp++); - call->offset = 0; - call->unmarshall = 4; - /* Extract volEndpoints[] entries */ - case 4: + next_volendpoint: + call->count2 = ntohl(*bp++); switch (call->count2) { case YFS_ENDPOINT_IPV4: size = sizeof(__be32) * (1 + 1 + 1); @@ -546,12 +546,18 @@ again: size = sizeof(__be32) * (1 + 4 + 1); break; default: - return afs_protocol_error(call, -EBADMSG); + return afs_protocol_error(call, -EBADMSG, + afs_eproto_yvl_vlendpt_type); } if (call->count > 1) - size += sizeof(__be32); - ret = afs_extract_data(call, call->buffer, size, true); + size += sizeof(__be32); /* Get next type too */ + afs_extract_to_buf(call, size); + call->unmarshall = 4; + + /* Extract volEndpoints[] entries */ + case 4: + ret = afs_extract_data(call, true); if (ret < 0) return ret; @@ -559,34 +565,35 @@ again: switch (call->count2) { case YFS_ENDPOINT_IPV4: if (ntohl(bp[0]) != sizeof(__be32) * 2) - return afs_protocol_error(call, -EBADMSG); + return afs_protocol_error(call, -EBADMSG, + afs_eproto_yvl_vlendpt4_len); bp += 3; break; case YFS_ENDPOINT_IPV6: if (ntohl(bp[0]) != sizeof(__be32) * 5) - return afs_protocol_error(call, -EBADMSG); + return afs_protocol_error(call, -EBADMSG, + afs_eproto_yvl_vlendpt6_len); bp += 6; break; default: - return afs_protocol_error(call, -EBADMSG); + return afs_protocol_error(call, -EBADMSG, + afs_eproto_yvl_vlendpt_type); } /* Got either the type of the next entry or the count of * volEndpoints if no more fsEndpoints. */ - call->offset = 0; call->count--; - if (call->count > 0) { - call->count2 = ntohl(*bp++); - goto again; - } + if (call->count > 0) + goto next_volendpoint; end: + afs_extract_discard(call, 0); call->unmarshall = 5; /* Done */ case 5: - ret = afs_extract_data(call, call->buffer, 0, false); + ret = afs_extract_data(call, false); if (ret < 0) return ret; call->unmarshall = 6; @@ -596,11 +603,6 @@ again: } alist = call->reply[0]; - - /* Start with IPv6 if available. */ - if (alist->nr_ipv4 < alist->nr_addrs) - alist->index = alist->nr_ipv4; - _leave(" = 0 [done]"); return 0; } @@ -619,12 +621,11 @@ static const struct afs_call_type afs_YFSVLGetEndpoints = { * Dispatch an operation to get the addresses for a server, where the server is * nominated by UUID. 
*/ -struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_net *net, - struct afs_addr_cursor *ac, - struct key *key, +struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_vl_cursor *vc, const uuid_t *uuid) { struct afs_call *call; + struct afs_net *net = vc->cell->net; __be32 *bp; _enter(""); @@ -635,7 +636,7 @@ struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_net *net, if (!call) return ERR_PTR(-ENOMEM); - call->key = key; + call->key = vc->key; call->reply[0] = NULL; call->ret_reply0 = true; @@ -646,5 +647,5 @@ struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_net *net, memcpy(bp, uuid, sizeof(*uuid)); /* Type opr_uuid */ trace_afs_make_vl_call(call); - return (struct afs_addr_list *)afs_make_call(ac, call, GFP_KERNEL, false); + return (struct afs_addr_list *)afs_make_call(&vc->ac, call, GFP_KERNEL, false); } diff --git a/fs/afs/volume.c b/fs/afs/volume.c index 3037bd01f617..00975ed3640f 100644 --- a/fs/afs/volume.c +++ b/fs/afs/volume.c @@ -74,55 +74,19 @@ static struct afs_vldb_entry *afs_vl_lookup_vldb(struct afs_cell *cell, const char *volname, size_t volnamesz) { - struct afs_addr_cursor ac; - struct afs_vldb_entry *vldb; + struct afs_vldb_entry *vldb = ERR_PTR(-EDESTADDRREQ); + struct afs_vl_cursor vc; int ret; - ret = afs_set_vl_cursor(&ac, cell); - if (ret < 0) - return ERR_PTR(ret); - - while (afs_iterate_addresses(&ac)) { - if (!test_bit(ac.index, &ac.alist->probed)) { - ret = afs_vl_get_capabilities(cell->net, &ac, key); - switch (ret) { - case VL_SERVICE: - clear_bit(ac.index, &ac.alist->yfs); - set_bit(ac.index, &ac.alist->probed); - ac.addr->srx_service = ret; - break; - case YFS_VL_SERVICE: - set_bit(ac.index, &ac.alist->yfs); - set_bit(ac.index, &ac.alist->probed); - ac.addr->srx_service = ret; - break; - } - } - - vldb = afs_vl_get_entry_by_name_u(cell->net, &ac, key, - volname, volnamesz); - switch (ac.error) { - case 0: - afs_end_cursor(&ac); - return vldb; - case -ECONNABORTED: - ac.error = afs_abort_to_error(ac.abort_code); - goto error; - case -ENOMEM: - case -ENONET: - goto error; - case -ENETUNREACH: - case -EHOSTUNREACH: - case -ECONNREFUSED: - break; - default: - ac.error = -EIO; - goto error; - } + if (!afs_begin_vlserver_operation(&vc, cell, key)) + return ERR_PTR(-ERESTARTSYS); + + while (afs_select_vlserver(&vc)) { + vldb = afs_vl_get_entry_by_name_u(&vc, volname, volnamesz); } -error: - return ERR_PTR(afs_end_cursor(&ac)); + ret = afs_end_vlserver_operation(&vc); + return ret < 0 ? ERR_PTR(ret) : vldb; } /* @@ -270,7 +234,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key) /* We look up an ID by passing it as a decimal string in the * operation's name parameter. 
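	 * e.g. volume ID 536870918 is passed as the nine-character decimal
	 * string "536870918".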
*/ - idsz = sprintf(idbuf, "%u", volume->vid); + idsz = sprintf(idbuf, "%llu", volume->vid); vldb = afs_vl_lookup_vldb(volume->cell, key, idbuf, idsz); if (IS_ERR(vldb)) { diff --git a/fs/afs/write.c b/fs/afs/write.c index 19c04caf3c01..72efcfcf9f95 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -33,10 +33,21 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key, loff_t pos, unsigned int len, struct page *page) { struct afs_read *req; + size_t p; + void *data; int ret; _enter(",,%llu", (unsigned long long)pos); + if (pos >= vnode->vfs_inode.i_size) { + p = pos & ~PAGE_MASK; + ASSERTCMP(p + len, <=, PAGE_SIZE); + data = kmap(page); + memset(data + p, 0, len); + kunmap(page); + return 0; + } + req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *), GFP_KERNEL); if (!req) @@ -81,7 +92,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping, pgoff_t index = pos >> PAGE_SHIFT; int ret; - _enter("{%x:%u},{%lx},%u,%u", + _enter("{%llx:%llu},{%lx},%u,%u", vnode->fid.vid, vnode->fid.vnode, index, from, to); /* We want to store information about how much of a page is altered in @@ -181,7 +192,7 @@ int afs_write_end(struct file *file, struct address_space *mapping, loff_t i_size, maybe_i_size; int ret; - _enter("{%x:%u},{%lx}", + _enter("{%llx:%llu},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index); maybe_i_size = pos + copied; @@ -230,7 +241,7 @@ static void afs_kill_pages(struct address_space *mapping, struct pagevec pv; unsigned count, loop; - _enter("{%x:%u},%lx-%lx", + _enter("{%llx:%llu},%lx-%lx", vnode->fid.vid, vnode->fid.vnode, first, last); pagevec_init(&pv); @@ -272,7 +283,7 @@ static void afs_redirty_pages(struct writeback_control *wbc, struct pagevec pv; unsigned count, loop; - _enter("{%x:%u},%lx-%lx", + _enter("{%llx:%llu},%lx-%lx", vnode->fid.vid, vnode->fid.vnode, first, last); pagevec_init(&pv); @@ -314,7 +325,7 @@ static int afs_store_data(struct address_space *mapping, struct list_head *p; int ret = -ENOKEY, ret2; - _enter("%s{%x:%u.%u},%lx,%lx,%x,%x", + _enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x", vnode->volume->name, vnode->fid.vid, vnode->fid.vnode, @@ -533,6 +544,7 @@ no_more: case -ENOENT: case -ENOMEDIUM: case -ENXIO: + trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail); afs_kill_pages(mapping, first, last); mapping_set_error(mapping, ret); break; @@ -675,7 +687,7 @@ void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call) unsigned count, loop; pgoff_t first = call->first, last = call->last; - _enter("{%x:%u},{%lx-%lx}", + _enter("{%llx:%llu},{%lx-%lx}", vnode->fid.vid, vnode->fid.vnode, first, last); pagevec_init(&pv); @@ -714,7 +726,7 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from) ssize_t result; size_t count = iov_iter_count(from); - _enter("{%x.%u},{%zu},", + _enter("{%llx:%llu},{%zu},", vnode->fid.vid, vnode->fid.vnode, count); if (IS_SWAPFILE(&vnode->vfs_inode)) { @@ -742,7 +754,7 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync) struct inode *inode = file_inode(file); struct afs_vnode *vnode = AFS_FS_I(inode); - _enter("{%x:%u},{n=%pD},%d", + _enter("{%llx:%llu},{n=%pD},%d", vnode->fid.vid, vnode->fid.vnode, file, datasync); @@ -760,7 +772,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf) struct afs_vnode *vnode = AFS_FS_I(inode); unsigned long priv; - _enter("{{%x:%u}},{%lx}", + _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, vmf->page->index); sb_start_pagefault(inode->i_sb); diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c 
index cfcc674e64a5..a2cdf25573e2 100644 --- a/fs/afs/xattr.c +++ b/fs/afs/xattr.c @@ -72,7 +72,7 @@ static int afs_xattr_get_fid(const struct xattr_handler *handler, char text[8 + 1 + 8 + 1 + 8 + 1]; size_t len; - len = sprintf(text, "%x:%x:%x", + len = sprintf(text, "%llx:%llx:%x", vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); if (size == 0) return len; diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c new file mode 100644 index 000000000000..12658c1363ae --- /dev/null +++ b/fs/afs/yfsclient.c @@ -0,0 +1,2184 @@ +/* YFS File Server client stubs + * + * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/sched.h> +#include <linux/circ_buf.h> +#include <linux/iversion.h> +#include "internal.h" +#include "afs_fs.h" +#include "xdr_fs.h" +#include "protocol_yfs.h" + +static const struct afs_fid afs_zero_fid; + +static inline void afs_use_fs_server(struct afs_call *call, struct afs_cb_interest *cbi) +{ + call->cbi = afs_get_cb_interest(cbi); +} + +#define xdr_size(x) (sizeof(*x) / sizeof(__be32)) + +static void xdr_decode_YFSFid(const __be32 **_bp, struct afs_fid *fid) +{ + const struct yfs_xdr_YFSFid *x = (const void *)*_bp; + + fid->vid = xdr_to_u64(x->volume); + fid->vnode = xdr_to_u64(x->vnode.lo); + fid->vnode_hi = ntohl(x->vnode.hi); + fid->unique = ntohl(x->vnode.unique); + *_bp += xdr_size(x); +} + +static __be32 *xdr_encode_u32(__be32 *bp, u32 n) +{ + *bp++ = htonl(n); + return bp; +} + +static __be32 *xdr_encode_u64(__be32 *bp, u64 n) +{ + struct yfs_xdr_u64 *x = (void *)bp; + + *x = u64_to_xdr(n); + return bp + xdr_size(x); +} + +static __be32 *xdr_encode_YFSFid(__be32 *bp, struct afs_fid *fid) +{ + struct yfs_xdr_YFSFid *x = (void *)bp; + + x->volume = u64_to_xdr(fid->vid); + x->vnode.lo = u64_to_xdr(fid->vnode); + x->vnode.hi = htonl(fid->vnode_hi); + x->vnode.unique = htonl(fid->unique); + return bp + xdr_size(x); +} + +static size_t xdr_strlen(unsigned int len) +{ + return sizeof(__be32) + round_up(len, sizeof(__be32)); +} + +static __be32 *xdr_encode_string(__be32 *bp, const char *p, unsigned int len) +{ + bp = xdr_encode_u32(bp, len); + bp = memcpy(bp, p, len); + if (len & 3) { + unsigned int pad = 4 - (len & 3); + + memset((u8 *)bp + len, 0, pad); + len += pad; + } + + return bp + len / sizeof(__be32); +} + +static s64 linux_to_yfs_time(const struct timespec64 *t) +{ + /* Convert to 100ns intervals. 
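	 * e.g. { .tv_sec = 1, .tv_nsec = 500 } becomes
	 * 1 * 10000000 + 500 / 100 = 10000005 hundred-nanosecond units.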
*/ + return (u64)t->tv_sec * 10000000 + t->tv_nsec/100; +} + +static __be32 *xdr_encode_YFSStoreStatus_mode(__be32 *bp, mode_t mode) +{ + struct yfs_xdr_YFSStoreStatus *x = (void *)bp; + + x->mask = htonl(AFS_SET_MODE); + x->mode = htonl(mode & S_IALLUGO); + x->mtime_client = u64_to_xdr(0); + x->owner = u64_to_xdr(0); + x->group = u64_to_xdr(0); + return bp + xdr_size(x); +} + +static __be32 *xdr_encode_YFSStoreStatus_mtime(__be32 *bp, const struct timespec64 *t) +{ + struct yfs_xdr_YFSStoreStatus *x = (void *)bp; + s64 mtime = linux_to_yfs_time(t); + + x->mask = htonl(AFS_SET_MTIME); + x->mode = htonl(0); + x->mtime_client = u64_to_xdr(mtime); + x->owner = u64_to_xdr(0); + x->group = u64_to_xdr(0); + return bp + xdr_size(x); +} + +/* + * Convert a signed 100ns-resolution 64-bit time into a timespec. + */ +static struct timespec64 yfs_time_to_linux(s64 t) +{ + struct timespec64 ts; + u64 abs_t; + + /* + * Unfortunately can not use normal 64 bit division on 32 bit arch, but + * the alternative, do_div, does not work with negative numbers so have + * to special case them + */ + if (t < 0) { + abs_t = -t; + ts.tv_nsec = (time64_t)(do_div(abs_t, 10000000) * 100); + ts.tv_nsec = -ts.tv_nsec; + ts.tv_sec = -abs_t; + } else { + abs_t = t; + ts.tv_nsec = (time64_t)do_div(abs_t, 10000000) * 100; + ts.tv_sec = abs_t; + } + + return ts; +} + +static struct timespec64 xdr_to_time(const struct yfs_xdr_u64 xdr) +{ + s64 t = xdr_to_u64(xdr); + + return yfs_time_to_linux(t); +} + +static void yfs_check_req(struct afs_call *call, __be32 *bp) +{ + size_t len = (void *)bp - call->request; + + if (len > call->request_size) + pr_err("kAFS: %s: Request buffer overflow (%zu>%u)\n", + call->type->name, len, call->request_size); + else if (len < call->request_size) + pr_warning("kAFS: %s: Request buffer underflow (%zu<%u)\n", + call->type->name, len, call->request_size); +} + +/* + * Dump a bad file status record. 
+ */ +static void xdr_dump_bad(const __be32 *bp) +{ + __be32 x[4]; + int i; + + pr_notice("YFS XDR: Bad status record\n"); + for (i = 0; i < 5 * 4 * 4; i += 16) { + memcpy(x, bp, 16); + bp += 4; + pr_notice("%03x: %08x %08x %08x %08x\n", + i, ntohl(x[0]), ntohl(x[1]), ntohl(x[2]), ntohl(x[3])); + } + + memcpy(x, bp, 4); + pr_notice("0x50: %08x\n", ntohl(x[0])); +} + +/* + * Decode a YFSFetchStatus block + */ +static int xdr_decode_YFSFetchStatus(struct afs_call *call, + const __be32 **_bp, + struct afs_file_status *status, + struct afs_vnode *vnode, + const afs_dataversion_t *expected_version, + struct afs_read *read_req) +{ + const struct yfs_xdr_YFSFetchStatus *xdr = (const void *)*_bp; + u32 type; + u8 flags = 0; + + status->abort_code = ntohl(xdr->abort_code); + if (status->abort_code != 0) { + if (vnode && status->abort_code == VNOVNODE) { + set_bit(AFS_VNODE_DELETED, &vnode->flags); + status->nlink = 0; + __afs_break_callback(vnode); + } + return 0; + } + + type = ntohl(xdr->type); + switch (type) { + case AFS_FTYPE_FILE: + case AFS_FTYPE_DIR: + case AFS_FTYPE_SYMLINK: + if (type != status->type && + vnode && + !test_bit(AFS_VNODE_UNSET, &vnode->flags)) { + pr_warning("Vnode %llx:%llx:%x changed type %u to %u\n", + vnode->fid.vid, + vnode->fid.vnode, + vnode->fid.unique, + status->type, type); + goto bad; + } + status->type = type; + break; + default: + goto bad; + } + +#define EXTRACT_M4(FIELD) \ + do { \ + u32 x = ntohl(xdr->FIELD); \ + if (status->FIELD != x) { \ + flags |= AFS_VNODE_META_CHANGED; \ + status->FIELD = x; \ + } \ + } while (0) + +#define EXTRACT_M8(FIELD) \ + do { \ + u64 x = xdr_to_u64(xdr->FIELD); \ + if (status->FIELD != x) { \ + flags |= AFS_VNODE_META_CHANGED; \ + status->FIELD = x; \ + } \ + } while (0) + +#define EXTRACT_D8(FIELD) \ + do { \ + u64 x = xdr_to_u64(xdr->FIELD); \ + if (status->FIELD != x) { \ + flags |= AFS_VNODE_DATA_CHANGED; \ + status->FIELD = x; \ + } \ + } while (0) + + EXTRACT_M4(nlink); + EXTRACT_D8(size); + EXTRACT_D8(data_version); + EXTRACT_M8(author); + EXTRACT_M8(owner); + EXTRACT_M8(group); + EXTRACT_M4(mode); + EXTRACT_M4(caller_access); /* call ticket dependent */ + EXTRACT_M4(anon_access); + + status->mtime_client = xdr_to_time(xdr->mtime_client); + status->mtime_server = xdr_to_time(xdr->mtime_server); + status->lock_count = ntohl(xdr->lock_count); + + if (read_req) { + read_req->data_version = status->data_version; + read_req->file_size = status->size; + } + + *_bp += xdr_size(xdr); + + if (vnode) { + if (test_bit(AFS_VNODE_UNSET, &vnode->flags)) + flags |= AFS_VNODE_NOT_YET_SET; + afs_update_inode_from_status(vnode, status, expected_version, + flags); + } + + return 0; + +bad: + xdr_dump_bad(*_bp); + return afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status); +} + +/* + * Decode the file status. We need to lock the target vnode if we're going to + * update its status so that stat() sees the attributes update atomically. 
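 *
 * A reader of the status would then follow the usual seqlock pattern
 * (illustrative sketch only; the real stat path is the existing getattr
 * code):
 *
 *	unsigned int seq;
 *
 *	do {
 *		seq = read_seqbegin(&vnode->cb_lock);
 *		... copy the fields of interest from vnode->status ...
 *	} while (read_seqretry(&vnode->cb_lock, seq));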
+ */ +static int yfs_decode_status(struct afs_call *call, + const __be32 **_bp, + struct afs_file_status *status, + struct afs_vnode *vnode, + const afs_dataversion_t *expected_version, + struct afs_read *read_req) +{ + int ret; + + if (!vnode) + return xdr_decode_YFSFetchStatus(call, _bp, status, vnode, + expected_version, read_req); + + write_seqlock(&vnode->cb_lock); + ret = xdr_decode_YFSFetchStatus(call, _bp, status, vnode, + expected_version, read_req); + write_sequnlock(&vnode->cb_lock); + return ret; +} + +/* + * Decode a YFSCallBack block + */ +static void xdr_decode_YFSCallBack(struct afs_call *call, + struct afs_vnode *vnode, + const __be32 **_bp) +{ + struct yfs_xdr_YFSCallBack *xdr = (void *)*_bp; + struct afs_cb_interest *old, *cbi = call->cbi; + u64 cb_expiry; + + write_seqlock(&vnode->cb_lock); + + if (!afs_cb_is_broken(call->cb_break, vnode, cbi)) { + cb_expiry = xdr_to_u64(xdr->expiration_time); + do_div(cb_expiry, 10 * 1000 * 1000); + vnode->cb_version = ntohl(xdr->version); + vnode->cb_type = ntohl(xdr->type); + vnode->cb_expires_at = cb_expiry + ktime_get_real_seconds(); + old = vnode->cb_interest; + if (old != call->cbi) { + vnode->cb_interest = cbi; + cbi = old; + } + set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); + } + + write_sequnlock(&vnode->cb_lock); + call->cbi = cbi; + *_bp += xdr_size(xdr); +} + +static void xdr_decode_YFSCallBack_raw(const __be32 **_bp, + struct afs_callback *cb) +{ + struct yfs_xdr_YFSCallBack *x = (void *)*_bp; + u64 cb_expiry; + + cb_expiry = xdr_to_u64(x->expiration_time); + do_div(cb_expiry, 10 * 1000 * 1000); + cb->version = ntohl(x->version); + cb->type = ntohl(x->type); + cb->expires_at = cb_expiry + ktime_get_real_seconds(); + + *_bp += xdr_size(x); +} + +/* + * Decode a YFSVolSync block + */ +static void xdr_decode_YFSVolSync(const __be32 **_bp, + struct afs_volsync *volsync) +{ + struct yfs_xdr_YFSVolSync *x = (void *)*_bp; + u64 creation; + + if (volsync) { + creation = xdr_to_u64(x->vol_creation_date); + do_div(creation, 10 * 1000 * 1000); + volsync->creation = creation; + } + + *_bp += xdr_size(x); +} + +/* + * Encode the requested attributes into a YFSStoreStatus block + */ +static __be32 *xdr_encode_YFS_StoreStatus(__be32 *bp, struct iattr *attr) +{ + struct yfs_xdr_YFSStoreStatus *x = (void *)bp; + s64 mtime = 0, owner = 0, group = 0; + u32 mask = 0, mode = 0; + + mask = 0; + if (attr->ia_valid & ATTR_MTIME) { + mask |= AFS_SET_MTIME; + mtime = linux_to_yfs_time(&attr->ia_mtime); + } + + if (attr->ia_valid & ATTR_UID) { + mask |= AFS_SET_OWNER; + owner = from_kuid(&init_user_ns, attr->ia_uid); + } + + if (attr->ia_valid & ATTR_GID) { + mask |= AFS_SET_GROUP; + group = from_kgid(&init_user_ns, attr->ia_gid); + } + + if (attr->ia_valid & ATTR_MODE) { + mask |= AFS_SET_MODE; + mode = attr->ia_mode & S_IALLUGO; + } + + x->mask = htonl(mask); + x->mode = htonl(mode); + x->mtime_client = u64_to_xdr(mtime); + x->owner = u64_to_xdr(owner); + x->group = u64_to_xdr(group); + return bp + xdr_size(x); +} + +/* + * Decode a YFSFetchVolumeStatus block. 
+ */ +static void xdr_decode_YFSFetchVolumeStatus(const __be32 **_bp, + struct afs_volume_status *vs) +{ + const struct yfs_xdr_YFSFetchVolumeStatus *x = (const void *)*_bp; + u32 flags; + + vs->vid = xdr_to_u64(x->vid); + vs->parent_id = xdr_to_u64(x->parent_id); + flags = ntohl(x->flags); + vs->online = flags & yfs_FVSOnline; + vs->in_service = flags & yfs_FVSInservice; + vs->blessed = flags & yfs_FVSBlessed; + vs->needs_salvage = flags & yfs_FVSNeedsSalvage; + vs->type = ntohl(x->type); + vs->min_quota = 0; + vs->max_quota = xdr_to_u64(x->max_quota); + vs->blocks_in_use = xdr_to_u64(x->blocks_in_use); + vs->part_blocks_avail = xdr_to_u64(x->part_blocks_avail); + vs->part_max_blocks = xdr_to_u64(x->part_max_blocks); + vs->vol_copy_date = xdr_to_u64(x->vol_copy_date); + vs->vol_backup_date = xdr_to_u64(x->vol_backup_date); + *_bp += sizeof(*x) / sizeof(__be32); +} + +/* + * deliver reply data to an FS.FetchStatus + */ +static int yfs_deliver_fs_fetch_status_vnode(struct afs_call *call) +{ + struct afs_vnode *vnode = call->reply[0]; + const __be32 *bp; + int ret; + + ret = afs_transfer_reply(call); + if (ret < 0) + return ret; + + _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode); + + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; + ret = yfs_decode_status(call, &bp, &vnode->status, vnode, + &call->expected_version, NULL); + if (ret < 0) + return ret; + xdr_decode_YFSCallBack(call, vnode, &bp); + xdr_decode_YFSVolSync(&bp, call->reply[1]); + + _leave(" = 0 [done]"); + return 0; +} + +/* + * YFS.FetchStatus operation type + */ +static const struct afs_call_type yfs_RXYFSFetchStatus_vnode = { + .name = "YFS.FetchStatus(vnode)", + .op = yfs_FS_FetchStatus, + .deliver = yfs_deliver_fs_fetch_status_vnode, + .destructor = afs_flat_call_destructor, +}; + +/* + * Fetch the status information for a file. + */ +int yfs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsync, + bool new_inode) +{ + struct afs_vnode *vnode = fc->vnode; + struct afs_call *call; + struct afs_net *net = afs_v2net(vnode); + __be32 *bp; + + _enter(",%x,{%llx:%llu},,", + key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); + + call = afs_alloc_flat_call(net, &yfs_RXYFSFetchStatus_vnode, + sizeof(__be32) * 2 + + sizeof(struct yfs_xdr_YFSFid), + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSCallBack) + + sizeof(struct yfs_xdr_YFSVolSync)); + if (!call) { + fc->ac.error = -ENOMEM; + return -ENOMEM; + } + + call->key = fc->key; + call->reply[0] = vnode; + call->reply[1] = volsync; + call->expected_version = new_inode ? 1 : vnode->status.data_version; + + /* marshall the parameters */ + bp = call->request; + bp = xdr_encode_u32(bp, YFSFETCHSTATUS); + bp = xdr_encode_u32(bp, 0); /* RPC flags */ + bp = xdr_encode_YFSFid(bp, &vnode->fid); + yfs_check_req(call, bp); + + call->cb_break = fc->cb_break; + afs_use_fs_server(call, fc->cbi); + trace_afs_make_fs_call(call, &vnode->fid); + return afs_make_call(&fc->ac, call, GFP_NOFS, false); +} + +/* + * Deliver reply data to an YFS.FetchData64. 
+ */ +static int yfs_deliver_fs_fetch_data64(struct afs_call *call) +{ + struct afs_vnode *vnode = call->reply[0]; + struct afs_read *req = call->reply[2]; + const __be32 *bp; + unsigned int size; + int ret; + + _enter("{%u,%zu/%llu}", + call->unmarshall, iov_iter_count(&call->iter), req->actual_len); + + switch (call->unmarshall) { + case 0: + req->actual_len = 0; + req->index = 0; + req->offset = req->pos & (PAGE_SIZE - 1); + afs_extract_to_tmp64(call); + call->unmarshall++; + + /* extract the returned data length */ + case 1: + _debug("extract data length"); + ret = afs_extract_data(call, true); + if (ret < 0) + return ret; + + req->actual_len = be64_to_cpu(call->tmp64); + _debug("DATA length: %llu", req->actual_len); + req->remain = min(req->len, req->actual_len); + if (req->remain == 0) + goto no_more_data; + + call->unmarshall++; + + begin_page: + ASSERTCMP(req->index, <, req->nr_pages); + if (req->remain > PAGE_SIZE - req->offset) + size = PAGE_SIZE - req->offset; + else + size = req->remain; + call->bvec[0].bv_len = size; + call->bvec[0].bv_offset = req->offset; + call->bvec[0].bv_page = req->pages[req->index]; + iov_iter_bvec(&call->iter, READ, call->bvec, 1, size); + ASSERTCMP(size, <=, PAGE_SIZE); + + /* extract the returned data */ + case 2: + _debug("extract data %zu/%llu", + iov_iter_count(&call->iter), req->remain); + + ret = afs_extract_data(call, true); + if (ret < 0) + return ret; + req->remain -= call->bvec[0].bv_len; + req->offset += call->bvec[0].bv_len; + ASSERTCMP(req->offset, <=, PAGE_SIZE); + if (req->offset == PAGE_SIZE) { + req->offset = 0; + if (req->page_done) + req->page_done(call, req); + req->index++; + if (req->remain > 0) + goto begin_page; + } + + ASSERTCMP(req->remain, ==, 0); + if (req->actual_len <= req->len) + goto no_more_data; + + /* Discard any excess data the server gave us */ + iov_iter_discard(&call->iter, READ, req->actual_len - req->len); + call->unmarshall = 3; + case 3: + _debug("extract discard %zu/%llu", + iov_iter_count(&call->iter), req->actual_len - req->len); + + ret = afs_extract_data(call, true); + if (ret < 0) + return ret; + + no_more_data: + call->unmarshall = 4; + afs_extract_to_buf(call, + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSCallBack) + + sizeof(struct yfs_xdr_YFSVolSync)); + + /* extract the metadata */ + case 4: + ret = afs_extract_data(call, false); + if (ret < 0) + return ret; + + bp = call->buffer; + ret = yfs_decode_status(call, &bp, &vnode->status, vnode, + &vnode->status.data_version, req); + if (ret < 0) + return ret; + xdr_decode_YFSCallBack(call, vnode, &bp); + xdr_decode_YFSVolSync(&bp, call->reply[1]); + + call->unmarshall++; + + case 5: + break; + } + + for (; req->index < req->nr_pages; req->index++) { + if (req->offset < PAGE_SIZE) + zero_user_segment(req->pages[req->index], + req->offset, PAGE_SIZE); + if (req->page_done) + req->page_done(call, req); + req->offset = 0; + } + + _leave(" = 0 [done]"); + return 0; +} + +static void yfs_fetch_data_destructor(struct afs_call *call) +{ + struct afs_read *req = call->reply[2]; + + afs_put_read(req); + afs_flat_call_destructor(call); +} + +/* + * YFS.FetchData64 operation type + */ +static const struct afs_call_type yfs_RXYFSFetchData64 = { + .name = "YFS.FetchData64", + .op = yfs_FS_FetchData64, + .deliver = yfs_deliver_fs_fetch_data64, + .destructor = yfs_fetch_data_destructor, +}; + +/* + * Fetch data from a file. 
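/*
 * Illustrative user-space sketch (not part of the patch) of the resumable
 * switch-based unmarshalling pattern used by yfs_deliver_fs_fetch_data64()
 * above: each phase falls through to the next once its bytes are complete,
 * and the function returns early (here -1, standing in for "need more data")
 * so it can be re-entered later at the saved phase.  The struct, sizes and
 * helper below are assumptions for the example only.
 */
#include <stdio.h>

struct toy_call {
	int phase;		/* saved unmarshall phase */
	int have_len;		/* bytes of the length field received so far */
	int have_body;		/* bytes of the body received so far */
};

static int toy_deliver(struct toy_call *c, int new_bytes)
{
	switch (c->phase) {
	case 0:
		c->phase = 1;
		/* fall through */
	case 1:			/* extract the 4-byte length word */
		c->have_len += new_bytes;
		if (c->have_len < 4)
			return -1;	/* need more data; retry later */
		new_bytes = c->have_len - 4;
		c->phase = 2;
		/* fall through */
	case 2:			/* extract a 16-byte body */
		c->have_body += new_bytes;
		if (c->have_body < 16)
			return -1;
		c->phase = 3;
		/* fall through */
	case 3:
		break;
	}
	return 0;
}

int main(void)
{
	struct toy_call c = { 0, 0, 0 };

	printf("%d\n", toy_deliver(&c, 3));	/* -1: mid length field */
	printf("%d\n", toy_deliver(&c, 10));	/* -1: mid body */
	printf("%d\n", toy_deliver(&c, 10));	/* 0: record complete */
	return 0;
}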
+ */ +int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_read *req) +{ + struct afs_vnode *vnode = fc->vnode; + struct afs_call *call; + struct afs_net *net = afs_v2net(vnode); + __be32 *bp; + + _enter(",%x,{%llx:%llu},%llx,%llx", + key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode, + req->pos, req->len); + + call = afs_alloc_flat_call(net, &yfs_RXYFSFetchData64, + sizeof(__be32) * 2 + + sizeof(struct yfs_xdr_YFSFid) + + sizeof(struct yfs_xdr_u64) * 2, + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSCallBack) + + sizeof(struct yfs_xdr_YFSVolSync)); + if (!call) + return -ENOMEM; + + call->key = fc->key; + call->reply[0] = vnode; + call->reply[1] = NULL; /* volsync */ + call->reply[2] = req; + call->expected_version = vnode->status.data_version; + call->want_reply_time = true; + + /* marshall the parameters */ + bp = call->request; + bp = xdr_encode_u32(bp, YFSFETCHDATA64); + bp = xdr_encode_u32(bp, 0); /* RPC flags */ + bp = xdr_encode_YFSFid(bp, &vnode->fid); + bp = xdr_encode_u64(bp, req->pos); + bp = xdr_encode_u64(bp, req->len); + yfs_check_req(call, bp); + + refcount_inc(&req->usage); + call->cb_break = fc->cb_break; + afs_use_fs_server(call, fc->cbi); + trace_afs_make_fs_call(call, &vnode->fid); + return afs_make_call(&fc->ac, call, GFP_NOFS, false); +} + +/* + * Deliver reply data for YFS.CreateFile or YFS.MakeDir. + */ +static int yfs_deliver_fs_create_vnode(struct afs_call *call) +{ + struct afs_vnode *vnode = call->reply[0]; + const __be32 *bp; + int ret; + + _enter("{%u}", call->unmarshall); + + ret = afs_transfer_reply(call); + if (ret < 0) + return ret; + + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; + xdr_decode_YFSFid(&bp, call->reply[1]); + ret = yfs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL); + if (ret < 0) + return ret; + ret = yfs_decode_status(call, &bp, &vnode->status, vnode, + &call->expected_version, NULL); + if (ret < 0) + return ret; + xdr_decode_YFSCallBack_raw(&bp, call->reply[3]); + xdr_decode_YFSVolSync(&bp, NULL); + + _leave(" = 0 [done]"); + return 0; +} + +/* + * FS.CreateFile and FS.MakeDir operation type + */ +static const struct afs_call_type afs_RXFSCreateFile = { + .name = "YFS.CreateFile", + .op = yfs_FS_CreateFile, + .deliver = yfs_deliver_fs_create_vnode, + .destructor = afs_flat_call_destructor, +}; + +/* + * Create a file. 
+ */ +int yfs_fs_create_file(struct afs_fs_cursor *fc, + const char *name, + umode_t mode, + u64 current_data_version, + struct afs_fid *newfid, + struct afs_file_status *newstatus, + struct afs_callback *newcb) +{ + struct afs_vnode *vnode = fc->vnode; + struct afs_call *call; + struct afs_net *net = afs_v2net(vnode); + size_t namesz, reqsz, rplsz; + __be32 *bp; + + _enter(""); + + namesz = strlen(name); + reqsz = (sizeof(__be32) + + sizeof(__be32) + + sizeof(struct yfs_xdr_YFSFid) + + xdr_strlen(namesz) + + sizeof(struct yfs_xdr_YFSStoreStatus) + + sizeof(__be32)); + rplsz = (sizeof(struct yfs_xdr_YFSFid) + + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSCallBack) + + sizeof(struct yfs_xdr_YFSVolSync)); + + call = afs_alloc_flat_call(net, &afs_RXFSCreateFile, reqsz, rplsz); + if (!call) + return -ENOMEM; + + call->key = fc->key; + call->reply[0] = vnode; + call->reply[1] = newfid; + call->reply[2] = newstatus; + call->reply[3] = newcb; + call->expected_version = current_data_version + 1; + + /* marshall the parameters */ + bp = call->request; + bp = xdr_encode_u32(bp, YFSCREATEFILE); + bp = xdr_encode_u32(bp, 0); /* RPC flags */ + bp = xdr_encode_YFSFid(bp, &vnode->fid); + bp = xdr_encode_string(bp, name, namesz); + bp = xdr_encode_YFSStoreStatus_mode(bp, mode); + bp = xdr_encode_u32(bp, 0); /* ViceLockType */ + yfs_check_req(call, bp); + + afs_use_fs_server(call, fc->cbi); + trace_afs_make_fs_call(call, &vnode->fid); + return afs_make_call(&fc->ac, call, GFP_NOFS, false); +} + +static const struct afs_call_type yfs_RXFSMakeDir = { + .name = "YFS.MakeDir", + .op = yfs_FS_MakeDir, + .deliver = yfs_deliver_fs_create_vnode, + .destructor = afs_flat_call_destructor, +}; + +/* + * Make a directory. + */ +int yfs_fs_make_dir(struct afs_fs_cursor *fc, + const char *name, + umode_t mode, + u64 current_data_version, + struct afs_fid *newfid, + struct afs_file_status *newstatus, + struct afs_callback *newcb) +{ + struct afs_vnode *vnode = fc->vnode; + struct afs_call *call; + struct afs_net *net = afs_v2net(vnode); + size_t namesz, reqsz, rplsz; + __be32 *bp; + + _enter(""); + + namesz = strlen(name); + reqsz = (sizeof(__be32) + + sizeof(struct yfs_xdr_RPCFlags) + + sizeof(struct yfs_xdr_YFSFid) + + xdr_strlen(namesz) + + sizeof(struct yfs_xdr_YFSStoreStatus)); + rplsz = (sizeof(struct yfs_xdr_YFSFid) + + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSCallBack) + + sizeof(struct yfs_xdr_YFSVolSync)); + + call = afs_alloc_flat_call(net, &yfs_RXFSMakeDir, reqsz, rplsz); + if (!call) + return -ENOMEM; + + call->key = fc->key; + call->reply[0] = vnode; + call->reply[1] = newfid; + call->reply[2] = newstatus; + call->reply[3] = newcb; + call->expected_version = current_data_version + 1; + + /* marshall the parameters */ + bp = call->request; + bp = xdr_encode_u32(bp, YFSMAKEDIR); + bp = xdr_encode_u32(bp, 0); /* RPC flags */ + bp = xdr_encode_YFSFid(bp, &vnode->fid); + bp = xdr_encode_string(bp, name, namesz); + bp = xdr_encode_YFSStoreStatus_mode(bp, mode); + yfs_check_req(call, bp); + + afs_use_fs_server(call, fc->cbi); + trace_afs_make_fs_call(call, &vnode->fid); + return afs_make_call(&fc->ac, call, GFP_NOFS, false); +} + +/* + * Deliver reply data to a YFS.RemoveFile2 operation. 
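/*
 * Illustrative user-space sketch (not part of the patch): the marshalling
 * above reserves xdr_strlen(namesz) bytes per name and encodes each name
 * with xdr_encode_string(), presumably in the usual XDR form of a 32-bit
 * big-endian length followed by the bytes zero-padded to a 4-byte boundary.
 * The sketch below assumes that standard layout; its helper names are not
 * the kernel's.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

static size_t xdr_string_size(size_t len)
{
	return 4 + ((len + 3) & ~(size_t)3);	/* length word + padded bytes */
}

static unsigned char *encode_xdr_string(unsigned char *p,
					const char *s, size_t len)
{
	uint32_t be_len = htonl((uint32_t)len);

	memcpy(p, &be_len, 4);
	memcpy(p + 4, s, len);
	memset(p + 4 + len, 0, xdr_string_size(len) - 4 - len);
	return p + xdr_string_size(len);
}

int main(void)
{
	unsigned char buf[32];
	const char *name = "file1";		/* 5 bytes -> padded to 8 */

	encode_xdr_string(buf, name, strlen(name));
	printf("encoded size: %zu bytes\n", xdr_string_size(strlen(name)));
	return 0;
}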
+ */ +static int yfs_deliver_fs_remove_file2(struct afs_call *call) +{ + struct afs_vnode *dvnode = call->reply[0]; + struct afs_vnode *vnode = call->reply[1]; + struct afs_fid fid; + const __be32 *bp; + int ret; + + _enter("{%u}", call->unmarshall); + + ret = afs_transfer_reply(call); + if (ret < 0) + return ret; + + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; + ret = yfs_decode_status(call, &bp, &dvnode->status, dvnode, + &call->expected_version, NULL); + if (ret < 0) + return ret; + + xdr_decode_YFSFid(&bp, &fid); + ret = yfs_decode_status(call, &bp, &vnode->status, vnode, NULL, NULL); + if (ret < 0) + return ret; + /* Was deleted if vnode->status.abort_code == VNOVNODE. */ + + xdr_decode_YFSVolSync(&bp, NULL); + return 0; +} + +/* + * YFS.RemoveFile2 operation type. + */ +static const struct afs_call_type yfs_RXYFSRemoveFile2 = { + .name = "YFS.RemoveFile2", + .op = yfs_FS_RemoveFile2, + .deliver = yfs_deliver_fs_remove_file2, + .destructor = afs_flat_call_destructor, +}; + +/* + * Remove a file and retrieve new file status. + */ +int yfs_fs_remove_file2(struct afs_fs_cursor *fc, struct afs_vnode *vnode, + const char *name, u64 current_data_version) +{ + struct afs_vnode *dvnode = fc->vnode; + struct afs_call *call; + struct afs_net *net = afs_v2net(dvnode); + size_t namesz; + __be32 *bp; + + _enter(""); + + namesz = strlen(name); + + call = afs_alloc_flat_call(net, &yfs_RXYFSRemoveFile2, + sizeof(__be32) + + sizeof(struct yfs_xdr_RPCFlags) + + sizeof(struct yfs_xdr_YFSFid) + + xdr_strlen(namesz), + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSFid) + + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSVolSync)); + if (!call) + return -ENOMEM; + + call->key = fc->key; + call->reply[0] = dvnode; + call->reply[1] = vnode; + call->expected_version = current_data_version + 1; + + /* marshall the parameters */ + bp = call->request; + bp = xdr_encode_u32(bp, YFSREMOVEFILE2); + bp = xdr_encode_u32(bp, 0); /* RPC flags */ + bp = xdr_encode_YFSFid(bp, &dvnode->fid); + bp = xdr_encode_string(bp, name, namesz); + yfs_check_req(call, bp); + + afs_use_fs_server(call, fc->cbi); + trace_afs_make_fs_call(call, &dvnode->fid); + return afs_make_call(&fc->ac, call, GFP_NOFS, false); +} + +/* + * Deliver reply data to a YFS.RemoveFile or YFS.RemoveDir operation. + */ +static int yfs_deliver_fs_remove(struct afs_call *call) +{ + struct afs_vnode *dvnode = call->reply[0]; + const __be32 *bp; + int ret; + + _enter("{%u}", call->unmarshall); + + ret = afs_transfer_reply(call); + if (ret < 0) + return ret; + + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; + ret = yfs_decode_status(call, &bp, &dvnode->status, dvnode, + &call->expected_version, NULL); + if (ret < 0) + return ret; + + xdr_decode_YFSVolSync(&bp, NULL); + return 0; +} + +/* + * FS.RemoveDir and FS.RemoveFile operation types. 
+ */ +static const struct afs_call_type yfs_RXYFSRemoveFile = { + .name = "YFS.RemoveFile", + .op = yfs_FS_RemoveFile, + .deliver = yfs_deliver_fs_remove, + .destructor = afs_flat_call_destructor, +}; + +static const struct afs_call_type yfs_RXYFSRemoveDir = { + .name = "YFS.RemoveDir", + .op = yfs_FS_RemoveDir, + .deliver = yfs_deliver_fs_remove, + .destructor = afs_flat_call_destructor, +}; + +/* + * remove a file or directory + */ +int yfs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode, + const char *name, bool isdir, u64 current_data_version) +{ + struct afs_vnode *dvnode = fc->vnode; + struct afs_call *call; + struct afs_net *net = afs_v2net(dvnode); + size_t namesz; + __be32 *bp; + + _enter(""); + + namesz = strlen(name); + call = afs_alloc_flat_call( + net, isdir ? &yfs_RXYFSRemoveDir : &yfs_RXYFSRemoveFile, + sizeof(__be32) + + sizeof(struct yfs_xdr_RPCFlags) + + sizeof(struct yfs_xdr_YFSFid) + + xdr_strlen(namesz), + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSVolSync)); + if (!call) + return -ENOMEM; + + call->key = fc->key; + call->reply[0] = dvnode; + call->reply[1] = vnode; + call->expected_version = current_data_version + 1; + + /* marshall the parameters */ + bp = call->request; + bp = xdr_encode_u32(bp, isdir ? YFSREMOVEDIR : YFSREMOVEFILE); + bp = xdr_encode_u32(bp, 0); /* RPC flags */ + bp = xdr_encode_YFSFid(bp, &dvnode->fid); + bp = xdr_encode_string(bp, name, namesz); + yfs_check_req(call, bp); + + afs_use_fs_server(call, fc->cbi); + trace_afs_make_fs_call(call, &dvnode->fid); + return afs_make_call(&fc->ac, call, GFP_NOFS, false); +} + +/* + * Deliver reply data to a YFS.Link operation. + */ +static int yfs_deliver_fs_link(struct afs_call *call) +{ + struct afs_vnode *dvnode = call->reply[0], *vnode = call->reply[1]; + const __be32 *bp; + int ret; + + _enter("{%u}", call->unmarshall); + + ret = afs_transfer_reply(call); + if (ret < 0) + return ret; + + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; + ret = yfs_decode_status(call, &bp, &vnode->status, vnode, NULL, NULL); + if (ret < 0) + return ret; + ret = yfs_decode_status(call, &bp, &dvnode->status, dvnode, + &call->expected_version, NULL); + if (ret < 0) + return ret; + xdr_decode_YFSVolSync(&bp, NULL); + _leave(" = 0 [done]"); + return 0; +} + +/* + * YFS.Link operation type. + */ +static const struct afs_call_type yfs_RXYFSLink = { + .name = "YFS.Link", + .op = yfs_FS_Link, + .deliver = yfs_deliver_fs_link, + .destructor = afs_flat_call_destructor, +}; + +/* + * Make a hard link. 
+ */ +int yfs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode, + const char *name, u64 current_data_version) +{ + struct afs_vnode *dvnode = fc->vnode; + struct afs_call *call; + struct afs_net *net = afs_v2net(vnode); + size_t namesz; + __be32 *bp; + + _enter(""); + + namesz = strlen(name); + call = afs_alloc_flat_call(net, &yfs_RXYFSLink, + sizeof(__be32) + + sizeof(struct yfs_xdr_RPCFlags) + + sizeof(struct yfs_xdr_YFSFid) + + xdr_strlen(namesz) + + sizeof(struct yfs_xdr_YFSFid), + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSVolSync)); + if (!call) + return -ENOMEM; + + call->key = fc->key; + call->reply[0] = dvnode; + call->reply[1] = vnode; + call->expected_version = current_data_version + 1; + + /* marshall the parameters */ + bp = call->request; + bp = xdr_encode_u32(bp, YFSLINK); + bp = xdr_encode_u32(bp, 0); /* RPC flags */ + bp = xdr_encode_YFSFid(bp, &dvnode->fid); + bp = xdr_encode_string(bp, name, namesz); + bp = xdr_encode_YFSFid(bp, &vnode->fid); + yfs_check_req(call, bp); + + afs_use_fs_server(call, fc->cbi); + trace_afs_make_fs_call(call, &vnode->fid); + return afs_make_call(&fc->ac, call, GFP_NOFS, false); +} + +/* + * Deliver reply data to a YFS.Symlink operation. + */ +static int yfs_deliver_fs_symlink(struct afs_call *call) +{ + struct afs_vnode *vnode = call->reply[0]; + const __be32 *bp; + int ret; + + _enter("{%u}", call->unmarshall); + + ret = afs_transfer_reply(call); + if (ret < 0) + return ret; + + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; + xdr_decode_YFSFid(&bp, call->reply[1]); + ret = yfs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL); + if (ret < 0) + return ret; + ret = yfs_decode_status(call, &bp, &vnode->status, vnode, + &call->expected_version, NULL); + if (ret < 0) + return ret; + xdr_decode_YFSVolSync(&bp, NULL); + + _leave(" = 0 [done]"); + return 0; +} + +/* + * YFS.Symlink operation type + */ +static const struct afs_call_type yfs_RXYFSSymlink = { + .name = "YFS.Symlink", + .op = yfs_FS_Symlink, + .deliver = yfs_deliver_fs_symlink, + .destructor = afs_flat_call_destructor, +}; + +/* + * Create a symbolic link. 
+ */ +int yfs_fs_symlink(struct afs_fs_cursor *fc, + const char *name, + const char *contents, + u64 current_data_version, + struct afs_fid *newfid, + struct afs_file_status *newstatus) +{ + struct afs_vnode *dvnode = fc->vnode; + struct afs_call *call; + struct afs_net *net = afs_v2net(dvnode); + size_t namesz, contents_sz; + __be32 *bp; + + _enter(""); + + namesz = strlen(name); + contents_sz = strlen(contents); + call = afs_alloc_flat_call(net, &yfs_RXYFSSymlink, + sizeof(__be32) + + sizeof(struct yfs_xdr_RPCFlags) + + sizeof(struct yfs_xdr_YFSFid) + + xdr_strlen(namesz) + + xdr_strlen(contents_sz) + + sizeof(struct yfs_xdr_YFSStoreStatus), + sizeof(struct yfs_xdr_YFSFid) + + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSVolSync)); + if (!call) + return -ENOMEM; + + call->key = fc->key; + call->reply[0] = dvnode; + call->reply[1] = newfid; + call->reply[2] = newstatus; + call->expected_version = current_data_version + 1; + + /* marshall the parameters */ + bp = call->request; + bp = xdr_encode_u32(bp, YFSSYMLINK); + bp = xdr_encode_u32(bp, 0); /* RPC flags */ + bp = xdr_encode_YFSFid(bp, &dvnode->fid); + bp = xdr_encode_string(bp, name, namesz); + bp = xdr_encode_string(bp, contents, contents_sz); + bp = xdr_encode_YFSStoreStatus_mode(bp, S_IRWXUGO); + yfs_check_req(call, bp); + + afs_use_fs_server(call, fc->cbi); + trace_afs_make_fs_call(call, &dvnode->fid); + return afs_make_call(&fc->ac, call, GFP_NOFS, false); +} + +/* + * Deliver reply data to a YFS.Rename operation. + */ +static int yfs_deliver_fs_rename(struct afs_call *call) +{ + struct afs_vnode *orig_dvnode = call->reply[0]; + struct afs_vnode *new_dvnode = call->reply[1]; + const __be32 *bp; + int ret; + + _enter("{%u}", call->unmarshall); + + ret = afs_transfer_reply(call); + if (ret < 0) + return ret; + + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; + ret = yfs_decode_status(call, &bp, &orig_dvnode->status, orig_dvnode, + &call->expected_version, NULL); + if (ret < 0) + return ret; + if (new_dvnode != orig_dvnode) { + ret = yfs_decode_status(call, &bp, &new_dvnode->status, new_dvnode, + &call->expected_version_2, NULL); + if (ret < 0) + return ret; + } + + xdr_decode_YFSVolSync(&bp, NULL); + _leave(" = 0 [done]"); + return 0; +} + +/* + * YFS.Rename operation type + */ +static const struct afs_call_type yfs_RXYFSRename = { + .name = "FS.Rename", + .op = yfs_FS_Rename, + .deliver = yfs_deliver_fs_rename, + .destructor = afs_flat_call_destructor, +}; + +/* + * Rename a file or directory. 
+ */ +int yfs_fs_rename(struct afs_fs_cursor *fc, + const char *orig_name, + struct afs_vnode *new_dvnode, + const char *new_name, + u64 current_orig_data_version, + u64 current_new_data_version) +{ + struct afs_vnode *orig_dvnode = fc->vnode; + struct afs_call *call; + struct afs_net *net = afs_v2net(orig_dvnode); + size_t o_namesz, n_namesz; + __be32 *bp; + + _enter(""); + + o_namesz = strlen(orig_name); + n_namesz = strlen(new_name); + call = afs_alloc_flat_call(net, &yfs_RXYFSRename, + sizeof(__be32) + + sizeof(struct yfs_xdr_RPCFlags) + + sizeof(struct yfs_xdr_YFSFid) + + xdr_strlen(o_namesz) + + sizeof(struct yfs_xdr_YFSFid) + + xdr_strlen(n_namesz), + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSVolSync)); + if (!call) + return -ENOMEM; + + call->key = fc->key; + call->reply[0] = orig_dvnode; + call->reply[1] = new_dvnode; + call->expected_version = current_orig_data_version + 1; + call->expected_version_2 = current_new_data_version + 1; + + /* marshall the parameters */ + bp = call->request; + bp = xdr_encode_u32(bp, YFSRENAME); + bp = xdr_encode_u32(bp, 0); /* RPC flags */ + bp = xdr_encode_YFSFid(bp, &orig_dvnode->fid); + bp = xdr_encode_string(bp, orig_name, o_namesz); + bp = xdr_encode_YFSFid(bp, &new_dvnode->fid); + bp = xdr_encode_string(bp, new_name, n_namesz); + yfs_check_req(call, bp); + + afs_use_fs_server(call, fc->cbi); + trace_afs_make_fs_call(call, &orig_dvnode->fid); + return afs_make_call(&fc->ac, call, GFP_NOFS, false); +} + +/* + * Deliver reply data to a YFS.StoreData64 operation. + */ +static int yfs_deliver_fs_store_data(struct afs_call *call) +{ + struct afs_vnode *vnode = call->reply[0]; + const __be32 *bp; + int ret; + + _enter(""); + + ret = afs_transfer_reply(call); + if (ret < 0) + return ret; + + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; + ret = yfs_decode_status(call, &bp, &vnode->status, vnode, + &call->expected_version, NULL); + if (ret < 0) + return ret; + xdr_decode_YFSVolSync(&bp, NULL); + + afs_pages_written_back(vnode, call); + + _leave(" = 0 [done]"); + return 0; +} + +/* + * YFS.StoreData64 operation type. + */ +static const struct afs_call_type yfs_RXYFSStoreData64 = { + .name = "YFS.StoreData64", + .op = yfs_FS_StoreData64, + .deliver = yfs_deliver_fs_store_data, + .destructor = afs_flat_call_destructor, +}; + +/* + * Store a set of pages to a large file. 
+ */ +int yfs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping, + pgoff_t first, pgoff_t last, + unsigned offset, unsigned to) +{ + struct afs_vnode *vnode = fc->vnode; + struct afs_call *call; + struct afs_net *net = afs_v2net(vnode); + loff_t size, pos, i_size; + __be32 *bp; + + _enter(",%x,{%llx:%llu},,", + key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); + + size = (loff_t)to - (loff_t)offset; + if (first != last) + size += (loff_t)(last - first) << PAGE_SHIFT; + pos = (loff_t)first << PAGE_SHIFT; + pos += offset; + + i_size = i_size_read(&vnode->vfs_inode); + if (pos + size > i_size) + i_size = size + pos; + + _debug("size %llx, at %llx, i_size %llx", + (unsigned long long)size, (unsigned long long)pos, + (unsigned long long)i_size); + + call = afs_alloc_flat_call(net, &yfs_RXYFSStoreData64, + sizeof(__be32) + + sizeof(__be32) + + sizeof(struct yfs_xdr_YFSFid) + + sizeof(struct yfs_xdr_YFSStoreStatus) + + sizeof(struct yfs_xdr_u64) * 3, + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSVolSync)); + if (!call) + return -ENOMEM; + + call->key = fc->key; + call->mapping = mapping; + call->reply[0] = vnode; + call->first = first; + call->last = last; + call->first_offset = offset; + call->last_to = to; + call->send_pages = true; + call->expected_version = vnode->status.data_version + 1; + + /* marshall the parameters */ + bp = call->request; + bp = xdr_encode_u32(bp, YFSSTOREDATA64); + bp = xdr_encode_u32(bp, 0); /* RPC flags */ + bp = xdr_encode_YFSFid(bp, &vnode->fid); + bp = xdr_encode_YFSStoreStatus_mtime(bp, &vnode->vfs_inode.i_mtime); + bp = xdr_encode_u64(bp, pos); + bp = xdr_encode_u64(bp, size); + bp = xdr_encode_u64(bp, i_size); + yfs_check_req(call, bp); + + afs_use_fs_server(call, fc->cbi); + trace_afs_make_fs_call(call, &vnode->fid); + return afs_make_call(&fc->ac, call, GFP_NOFS, false); +} + +/* + * deliver reply data to an FS.StoreStatus + */ +static int yfs_deliver_fs_store_status(struct afs_call *call) +{ + struct afs_vnode *vnode = call->reply[0]; + const __be32 *bp; + int ret; + + _enter(""); + + ret = afs_transfer_reply(call); + if (ret < 0) + return ret; + + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; + ret = yfs_decode_status(call, &bp, &vnode->status, vnode, + &call->expected_version, NULL); + if (ret < 0) + return ret; + xdr_decode_YFSVolSync(&bp, NULL); + + _leave(" = 0 [done]"); + return 0; +} + +/* + * YFS.StoreStatus operation type + */ +static const struct afs_call_type yfs_RXYFSStoreStatus = { + .name = "YFS.StoreStatus", + .op = yfs_FS_StoreStatus, + .deliver = yfs_deliver_fs_store_status, + .destructor = afs_flat_call_destructor, +}; + +static const struct afs_call_type yfs_RXYFSStoreData64_as_Status = { + .name = "YFS.StoreData64", + .op = yfs_FS_StoreData64, + .deliver = yfs_deliver_fs_store_status, + .destructor = afs_flat_call_destructor, +}; + +/* + * Set the attributes on a file, using YFS.StoreData64 rather than + * YFS.StoreStatus so as to alter the file size also. 
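/*
 * Illustrative user-space sketch (not part of the patch) of the byte-range
 * arithmetic at the top of yfs_fs_store_data() above: a write covering pages
 * 'first' to 'last', starting at 'offset' within the first page and ending
 * at 'to' within the last page, becomes a (pos, size) pair, and the file
 * size sent to the server is grown if the write extends past the current
 * i_size.  The 4KiB page size and sample numbers are assumptions for the
 * example only.
 */
#include <stdio.h>
#include <stdint.h>

#define TOY_PAGE_SHIFT	12			/* assume 4KiB pages */

int main(void)
{
	uint64_t first = 2, last = 4;		/* pages 2..4 inclusive */
	unsigned int offset = 100, to = 512;	/* intra-page byte bounds */
	uint64_t i_size = 10000;		/* current file size */

	uint64_t size = (uint64_t)to - offset;
	if (first != last)
		size += (last - first) << TOY_PAGE_SHIFT;
	uint64_t pos = ((uint64_t)first << TOY_PAGE_SHIFT) + offset;
	if (pos + size > i_size)
		i_size = pos + size;

	/* size = 412 + 2*4096 = 8604, pos = 8192 + 100 = 8292, i_size = 16896 */
	printf("size %llu at %llu, new i_size %llu\n",
	       (unsigned long long)size, (unsigned long long)pos,
	       (unsigned long long)i_size);
	return 0;
}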
+ */ +static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr) +{ + struct afs_vnode *vnode = fc->vnode; + struct afs_call *call; + struct afs_net *net = afs_v2net(vnode); + __be32 *bp; + + _enter(",%x,{%llx:%llu},,", + key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); + + call = afs_alloc_flat_call(net, &yfs_RXYFSStoreData64_as_Status, + sizeof(__be32) * 2 + + sizeof(struct yfs_xdr_YFSFid) + + sizeof(struct yfs_xdr_YFSStoreStatus) + + sizeof(struct yfs_xdr_u64) * 3, + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSVolSync)); + if (!call) + return -ENOMEM; + + call->key = fc->key; + call->reply[0] = vnode; + call->expected_version = vnode->status.data_version + 1; + + /* marshall the parameters */ + bp = call->request; + bp = xdr_encode_u32(bp, YFSSTOREDATA64); + bp = xdr_encode_u32(bp, 0); /* RPC flags */ + bp = xdr_encode_YFSFid(bp, &vnode->fid); + bp = xdr_encode_YFS_StoreStatus(bp, attr); + bp = xdr_encode_u64(bp, 0); /* position of start of write */ + bp = xdr_encode_u64(bp, 0); /* size of write */ + bp = xdr_encode_u64(bp, attr->ia_size); /* new file length */ + yfs_check_req(call, bp); + + afs_use_fs_server(call, fc->cbi); + trace_afs_make_fs_call(call, &vnode->fid); + return afs_make_call(&fc->ac, call, GFP_NOFS, false); +} + +/* + * Set the attributes on a file, using YFS.StoreData64 if there's a change in + * file size, and YFS.StoreStatus otherwise. + */ +int yfs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr) +{ + struct afs_vnode *vnode = fc->vnode; + struct afs_call *call; + struct afs_net *net = afs_v2net(vnode); + __be32 *bp; + + if (attr->ia_valid & ATTR_SIZE) + return yfs_fs_setattr_size(fc, attr); + + _enter(",%x,{%llx:%llu},,", + key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); + + call = afs_alloc_flat_call(net, &yfs_RXYFSStoreStatus, + sizeof(__be32) * 2 + + sizeof(struct yfs_xdr_YFSFid) + + sizeof(struct yfs_xdr_YFSStoreStatus), + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSVolSync)); + if (!call) + return -ENOMEM; + + call->key = fc->key; + call->reply[0] = vnode; + call->expected_version = vnode->status.data_version; + + /* marshall the parameters */ + bp = call->request; + bp = xdr_encode_u32(bp, YFSSTORESTATUS); + bp = xdr_encode_u32(bp, 0); /* RPC flags */ + bp = xdr_encode_YFSFid(bp, &vnode->fid); + bp = xdr_encode_YFS_StoreStatus(bp, attr); + yfs_check_req(call, bp); + + afs_use_fs_server(call, fc->cbi); + trace_afs_make_fs_call(call, &vnode->fid); + return afs_make_call(&fc->ac, call, GFP_NOFS, false); +} + +/* + * Deliver reply data to a YFS.GetVolumeStatus operation. 
+ */ +static int yfs_deliver_fs_get_volume_status(struct afs_call *call) +{ + const __be32 *bp; + char *p; + u32 size; + int ret; + + _enter("{%u}", call->unmarshall); + + switch (call->unmarshall) { + case 0: + call->unmarshall++; + afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchVolumeStatus)); + + /* extract the returned status record */ + case 1: + _debug("extract status"); + ret = afs_extract_data(call, true); + if (ret < 0) + return ret; + + bp = call->buffer; + xdr_decode_YFSFetchVolumeStatus(&bp, call->reply[1]); + call->unmarshall++; + afs_extract_to_tmp(call); + + /* extract the volume name length */ + case 2: + ret = afs_extract_data(call, true); + if (ret < 0) + return ret; + + call->count = ntohl(call->tmp); + _debug("volname length: %u", call->count); + if (call->count >= AFSNAMEMAX) + return afs_protocol_error(call, -EBADMSG, + afs_eproto_volname_len); + size = (call->count + 3) & ~3; /* It's padded */ + afs_extract_begin(call, call->reply[2], size); + call->unmarshall++; + + /* extract the volume name */ + case 3: + _debug("extract volname"); + ret = afs_extract_data(call, true); + if (ret < 0) + return ret; + + p = call->reply[2]; + p[call->count] = 0; + _debug("volname '%s'", p); + afs_extract_to_tmp(call); + call->unmarshall++; + + /* extract the offline message length */ + case 4: + ret = afs_extract_data(call, true); + if (ret < 0) + return ret; + + call->count = ntohl(call->tmp); + _debug("offline msg length: %u", call->count); + if (call->count >= AFSNAMEMAX) + return afs_protocol_error(call, -EBADMSG, + afs_eproto_offline_msg_len); + size = (call->count + 3) & ~3; /* It's padded */ + afs_extract_begin(call, call->reply[2], size); + call->unmarshall++; + + /* extract the offline message */ + case 5: + _debug("extract offline"); + ret = afs_extract_data(call, true); + if (ret < 0) + return ret; + + p = call->reply[2]; + p[call->count] = 0; + _debug("offline '%s'", p); + + afs_extract_to_tmp(call); + call->unmarshall++; + + /* extract the message of the day length */ + case 6: + ret = afs_extract_data(call, true); + if (ret < 0) + return ret; + + call->count = ntohl(call->tmp); + _debug("motd length: %u", call->count); + if (call->count >= AFSNAMEMAX) + return afs_protocol_error(call, -EBADMSG, + afs_eproto_motd_len); + size = (call->count + 3) & ~3; /* It's padded */ + afs_extract_begin(call, call->reply[2], size); + call->unmarshall++; + + /* extract the message of the day */ + case 7: + _debug("extract motd"); + ret = afs_extract_data(call, false); + if (ret < 0) + return ret; + + p = call->reply[2]; + p[call->count] = 0; + _debug("motd '%s'", p); + + call->unmarshall++; + + case 8: + break; + } + + _leave(" = 0 [done]"); + return 0; +} + +/* + * Destroy a YFS.GetVolumeStatus call. 
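/*
 * Illustrative user-space sketch (not part of the patch): the GetVolumeStatus
 * delivery above reads each reply string as a 32-bit length, rejects lengths
 * at or over a fixed maximum, then consumes the bytes rounded up to a 4-byte
 * boundary ("It's padded") and NUL-terminates the result.  The maximum and
 * helper below are stand-ins, not the kernel's definitions.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

#define TOY_NAMEMAX	256			/* stand-in for AFSNAMEMAX */

/* Returns the number of wire bytes consumed, or -1 on a bad length. */
static int extract_padded_string(const unsigned char *wire, char *out)
{
	uint32_t count;

	memcpy(&count, wire, 4);
	count = ntohl(count);
	if (count >= TOY_NAMEMAX)
		return -1;			/* protocol error */

	size_t padded = (count + 3) & ~3u;	/* round up to 4 bytes */
	memcpy(out, wire + 4, count);
	out[count] = 0;
	return (int)(4 + padded);
}

int main(void)
{
	unsigned char wire[4 + 8] = { 0, 0, 0, 5, 'v', 'o', 'l', '.', '1' };
	char name[TOY_NAMEMAX];

	printf("consumed %d bytes, name '%s'\n",
	       extract_padded_string(wire, name), name);
	return 0;
}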
+ */ +static void yfs_get_volume_status_call_destructor(struct afs_call *call) +{ + kfree(call->reply[2]); + call->reply[2] = NULL; + afs_flat_call_destructor(call); +} + +/* + * YFS.GetVolumeStatus operation type + */ +static const struct afs_call_type yfs_RXYFSGetVolumeStatus = { + .name = "YFS.GetVolumeStatus", + .op = yfs_FS_GetVolumeStatus, + .deliver = yfs_deliver_fs_get_volume_status, + .destructor = yfs_get_volume_status_call_destructor, +}; + +/* + * fetch the status of a volume + */ +int yfs_fs_get_volume_status(struct afs_fs_cursor *fc, + struct afs_volume_status *vs) +{ + struct afs_vnode *vnode = fc->vnode; + struct afs_call *call; + struct afs_net *net = afs_v2net(vnode); + __be32 *bp; + void *tmpbuf; + + _enter(""); + + tmpbuf = kmalloc(AFSOPAQUEMAX, GFP_KERNEL); + if (!tmpbuf) + return -ENOMEM; + + call = afs_alloc_flat_call(net, &yfs_RXYFSGetVolumeStatus, + sizeof(__be32) * 2 + + sizeof(struct yfs_xdr_u64), + sizeof(struct yfs_xdr_YFSFetchVolumeStatus) + + sizeof(__be32)); + if (!call) { + kfree(tmpbuf); + return -ENOMEM; + } + + call->key = fc->key; + call->reply[0] = vnode; + call->reply[1] = vs; + call->reply[2] = tmpbuf; + + /* marshall the parameters */ + bp = call->request; + bp = xdr_encode_u32(bp, YFSGETVOLUMESTATUS); + bp = xdr_encode_u32(bp, 0); /* RPC flags */ + bp = xdr_encode_u64(bp, vnode->fid.vid); + yfs_check_req(call, bp); + + afs_use_fs_server(call, fc->cbi); + trace_afs_make_fs_call(call, &vnode->fid); + return afs_make_call(&fc->ac, call, GFP_NOFS, false); +} + +/* + * Deliver reply data to an YFS.SetLock, YFS.ExtendLock or YFS.ReleaseLock + */ +static int yfs_deliver_fs_xxxx_lock(struct afs_call *call) +{ + struct afs_vnode *vnode = call->reply[0]; + const __be32 *bp; + int ret; + + _enter("{%u}", call->unmarshall); + + ret = afs_transfer_reply(call); + if (ret < 0) + return ret; + + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; + ret = yfs_decode_status(call, &bp, &vnode->status, vnode, + &call->expected_version, NULL); + if (ret < 0) + return ret; + xdr_decode_YFSVolSync(&bp, NULL); + + _leave(" = 0 [done]"); + return 0; +} + +/* + * YFS.SetLock operation type + */ +static const struct afs_call_type yfs_RXYFSSetLock = { + .name = "YFS.SetLock", + .op = yfs_FS_SetLock, + .deliver = yfs_deliver_fs_xxxx_lock, + .destructor = afs_flat_call_destructor, +}; + +/* + * YFS.ExtendLock operation type + */ +static const struct afs_call_type yfs_RXYFSExtendLock = { + .name = "YFS.ExtendLock", + .op = yfs_FS_ExtendLock, + .deliver = yfs_deliver_fs_xxxx_lock, + .destructor = afs_flat_call_destructor, +}; + +/* + * YFS.ReleaseLock operation type + */ +static const struct afs_call_type yfs_RXYFSReleaseLock = { + .name = "YFS.ReleaseLock", + .op = yfs_FS_ReleaseLock, + .deliver = yfs_deliver_fs_xxxx_lock, + .destructor = afs_flat_call_destructor, +}; + +/* + * Set a lock on a file + */ +int yfs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type) +{ + struct afs_vnode *vnode = fc->vnode; + struct afs_call *call; + struct afs_net *net = afs_v2net(vnode); + __be32 *bp; + + _enter(""); + + call = afs_alloc_flat_call(net, &yfs_RXYFSSetLock, + sizeof(__be32) * 2 + + sizeof(struct yfs_xdr_YFSFid) + + sizeof(__be32), + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSVolSync)); + if (!call) + return -ENOMEM; + + call->key = fc->key; + call->reply[0] = vnode; + + /* marshall the parameters */ + bp = call->request; + bp = xdr_encode_u32(bp, YFSSETLOCK); + bp = xdr_encode_u32(bp, 0); /* RPC flags */ + bp = 
xdr_encode_YFSFid(bp, &vnode->fid); + bp = xdr_encode_u32(bp, type); + yfs_check_req(call, bp); + + afs_use_fs_server(call, fc->cbi); + trace_afs_make_fs_call(call, &vnode->fid); + return afs_make_call(&fc->ac, call, GFP_NOFS, false); +} + +/* + * extend a lock on a file + */ +int yfs_fs_extend_lock(struct afs_fs_cursor *fc) +{ + struct afs_vnode *vnode = fc->vnode; + struct afs_call *call; + struct afs_net *net = afs_v2net(vnode); + __be32 *bp; + + _enter(""); + + call = afs_alloc_flat_call(net, &yfs_RXYFSExtendLock, + sizeof(__be32) * 2 + + sizeof(struct yfs_xdr_YFSFid), + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSVolSync)); + if (!call) + return -ENOMEM; + + call->key = fc->key; + call->reply[0] = vnode; + + /* marshall the parameters */ + bp = call->request; + bp = xdr_encode_u32(bp, YFSEXTENDLOCK); + bp = xdr_encode_u32(bp, 0); /* RPC flags */ + bp = xdr_encode_YFSFid(bp, &vnode->fid); + yfs_check_req(call, bp); + + afs_use_fs_server(call, fc->cbi); + trace_afs_make_fs_call(call, &vnode->fid); + return afs_make_call(&fc->ac, call, GFP_NOFS, false); +} + +/* + * release a lock on a file + */ +int yfs_fs_release_lock(struct afs_fs_cursor *fc) +{ + struct afs_vnode *vnode = fc->vnode; + struct afs_call *call; + struct afs_net *net = afs_v2net(vnode); + __be32 *bp; + + _enter(""); + + call = afs_alloc_flat_call(net, &yfs_RXYFSReleaseLock, + sizeof(__be32) * 2 + + sizeof(struct yfs_xdr_YFSFid), + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSVolSync)); + if (!call) + return -ENOMEM; + + call->key = fc->key; + call->reply[0] = vnode; + + /* marshall the parameters */ + bp = call->request; + bp = xdr_encode_u32(bp, YFSRELEASELOCK); + bp = xdr_encode_u32(bp, 0); /* RPC flags */ + bp = xdr_encode_YFSFid(bp, &vnode->fid); + yfs_check_req(call, bp); + + afs_use_fs_server(call, fc->cbi); + trace_afs_make_fs_call(call, &vnode->fid); + return afs_make_call(&fc->ac, call, GFP_NOFS, false); +} + +/* + * Deliver reply data to an FS.FetchStatus with no vnode. + */ +static int yfs_deliver_fs_fetch_status(struct afs_call *call) +{ + struct afs_file_status *status = call->reply[1]; + struct afs_callback *callback = call->reply[2]; + struct afs_volsync *volsync = call->reply[3]; + struct afs_vnode *vnode = call->reply[0]; + const __be32 *bp; + int ret; + + ret = afs_transfer_reply(call); + if (ret < 0) + return ret; + + _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode); + + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; + ret = yfs_decode_status(call, &bp, status, vnode, + &call->expected_version, NULL); + if (ret < 0) + return ret; + xdr_decode_YFSCallBack_raw(&bp, callback); + xdr_decode_YFSVolSync(&bp, volsync); + + _leave(" = 0 [done]"); + return 0; +} + +/* + * YFS.FetchStatus operation type + */ +static const struct afs_call_type yfs_RXYFSFetchStatus = { + .name = "YFS.FetchStatus", + .op = yfs_FS_FetchStatus, + .deliver = yfs_deliver_fs_fetch_status, + .destructor = afs_flat_call_destructor, +}; + +/* + * Fetch the status information for a fid without needing a vnode handle. 
+ */ +int yfs_fs_fetch_status(struct afs_fs_cursor *fc, + struct afs_net *net, + struct afs_fid *fid, + struct afs_file_status *status, + struct afs_callback *callback, + struct afs_volsync *volsync) +{ + struct afs_call *call; + __be32 *bp; + + _enter(",%x,{%llx:%llu},,", + key_serial(fc->key), fid->vid, fid->vnode); + + call = afs_alloc_flat_call(net, &yfs_RXYFSFetchStatus, + sizeof(__be32) * 2 + + sizeof(struct yfs_xdr_YFSFid), + sizeof(struct yfs_xdr_YFSFetchStatus) + + sizeof(struct yfs_xdr_YFSCallBack) + + sizeof(struct yfs_xdr_YFSVolSync)); + if (!call) { + fc->ac.error = -ENOMEM; + return -ENOMEM; + } + + call->key = fc->key; + call->reply[0] = NULL; /* vnode for fid[0] */ + call->reply[1] = status; + call->reply[2] = callback; + call->reply[3] = volsync; + call->expected_version = 1; /* vnode->status.data_version */ + + /* marshall the parameters */ + bp = call->request; + bp = xdr_encode_u32(bp, YFSFETCHSTATUS); + bp = xdr_encode_u32(bp, 0); /* RPC flags */ + bp = xdr_encode_YFSFid(bp, fid); + yfs_check_req(call, bp); + + call->cb_break = fc->cb_break; + afs_use_fs_server(call, fc->cbi); + trace_afs_make_fs_call(call, fid); + return afs_make_call(&fc->ac, call, GFP_NOFS, false); +} + +/* + * Deliver reply data to an YFS.InlineBulkStatus call + */ +static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) +{ + struct afs_file_status *statuses; + struct afs_callback *callbacks; + struct afs_vnode *vnode = call->reply[0]; + const __be32 *bp; + u32 tmp; + int ret; + + _enter("{%u}", call->unmarshall); + + switch (call->unmarshall) { + case 0: + afs_extract_to_tmp(call); + call->unmarshall++; + + /* Extract the file status count and array in two steps */ + case 1: + _debug("extract status count"); + ret = afs_extract_data(call, true); + if (ret < 0) + return ret; + + tmp = ntohl(call->tmp); + _debug("status count: %u/%u", tmp, call->count2); + if (tmp != call->count2) + return afs_protocol_error(call, -EBADMSG, + afs_eproto_ibulkst_count); + + call->count = 0; + call->unmarshall++; + more_counts: + afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchStatus)); + + case 2: + _debug("extract status array %u", call->count); + ret = afs_extract_data(call, true); + if (ret < 0) + return ret; + + bp = call->buffer; + statuses = call->reply[1]; + ret = yfs_decode_status(call, &bp, &statuses[call->count], + call->count == 0 ? 
vnode : NULL, + NULL, NULL); + if (ret < 0) + return ret; + + call->count++; + if (call->count < call->count2) + goto more_counts; + + call->count = 0; + call->unmarshall++; + afs_extract_to_tmp(call); + + /* Extract the callback count and array in two steps */ + case 3: + _debug("extract CB count"); + ret = afs_extract_data(call, true); + if (ret < 0) + return ret; + + tmp = ntohl(call->tmp); + _debug("CB count: %u", tmp); + if (tmp != call->count2) + return afs_protocol_error(call, -EBADMSG, + afs_eproto_ibulkst_cb_count); + call->count = 0; + call->unmarshall++; + more_cbs: + afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSCallBack)); + + case 4: + _debug("extract CB array"); + ret = afs_extract_data(call, true); + if (ret < 0) + return ret; + + _debug("unmarshall CB array"); + bp = call->buffer; + callbacks = call->reply[2]; + xdr_decode_YFSCallBack_raw(&bp, &callbacks[call->count]); + statuses = call->reply[1]; + if (call->count == 0 && vnode && statuses[0].abort_code == 0) { + bp = call->buffer; + xdr_decode_YFSCallBack(call, vnode, &bp); + } + call->count++; + if (call->count < call->count2) + goto more_cbs; + + afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSVolSync)); + call->unmarshall++; + + case 5: + ret = afs_extract_data(call, false); + if (ret < 0) + return ret; + + bp = call->buffer; + xdr_decode_YFSVolSync(&bp, call->reply[3]); + + call->unmarshall++; + + case 6: + break; + } + + _leave(" = 0 [done]"); + return 0; +} + +/* + * FS.InlineBulkStatus operation type + */ +static const struct afs_call_type yfs_RXYFSInlineBulkStatus = { + .name = "YFS.InlineBulkStatus", + .op = yfs_FS_InlineBulkStatus, + .deliver = yfs_deliver_fs_inline_bulk_status, + .destructor = afs_flat_call_destructor, +}; + +/* + * Fetch the status information for up to 1024 files + */ +int yfs_fs_inline_bulk_status(struct afs_fs_cursor *fc, + struct afs_net *net, + struct afs_fid *fids, + struct afs_file_status *statuses, + struct afs_callback *callbacks, + unsigned int nr_fids, + struct afs_volsync *volsync) +{ + struct afs_call *call; + __be32 *bp; + int i; + + _enter(",%x,{%llx:%llu},%u", + key_serial(fc->key), fids[0].vid, fids[1].vnode, nr_fids); + + call = afs_alloc_flat_call(net, &yfs_RXYFSInlineBulkStatus, + sizeof(__be32) + + sizeof(__be32) + + sizeof(__be32) + + sizeof(struct yfs_xdr_YFSFid) * nr_fids, + sizeof(struct yfs_xdr_YFSFetchStatus)); + if (!call) { + fc->ac.error = -ENOMEM; + return -ENOMEM; + } + + call->key = fc->key; + call->reply[0] = NULL; /* vnode for fid[0] */ + call->reply[1] = statuses; + call->reply[2] = callbacks; + call->reply[3] = volsync; + call->count2 = nr_fids; + + /* marshall the parameters */ + bp = call->request; + bp = xdr_encode_u32(bp, YFSINLINEBULKSTATUS); + bp = xdr_encode_u32(bp, 0); /* RPCFlags */ + bp = xdr_encode_u32(bp, nr_fids); + for (i = 0; i < nr_fids; i++) + bp = xdr_encode_YFSFid(bp, &fids[i]); + yfs_check_req(call, bp); + + call->cb_break = fc->cb_break; + afs_use_fs_server(call, fc->cbi); + trace_afs_make_fs_call(call, &fids[0]); + return afs_make_call(&fc->ac, call, GFP_NOFS, false); +} diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c index 9a69392f1fb3..d81c148682e7 100644 --- a/fs/bfs/inode.c +++ b/fs/bfs/inode.c @@ -350,7 +350,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) s->s_magic = BFS_MAGIC; - if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) { + if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end) || + le32_to_cpu(bfs_sb->s_start) < BFS_BSIZE) { printf("Superblock is corrupted\n"); goto 
out1; } @@ -359,9 +360,11 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) sizeof(struct bfs_inode) + BFS_ROOT_INO - 1; imap_len = (info->si_lasti / 8) + 1; - info->si_imap = kzalloc(imap_len, GFP_KERNEL); - if (!info->si_imap) + info->si_imap = kzalloc(imap_len, GFP_KERNEL | __GFP_NOWARN); + if (!info->si_imap) { + printf("Cannot allocate %u bytes\n", imap_len); goto out1; + } for (i = 0; i < BFS_ROOT_INO; i++) set_bit(i, info->si_imap); diff --git a/fs/block_dev.c b/fs/block_dev.c index 38b8ce05cbc7..a80b4f0ee7c4 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -349,7 +349,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) dio->size = 0; dio->multi_bio = false; - dio->should_dirty = is_read && (iter->type == ITER_IOVEC); + dio->should_dirty = is_read && iter_is_iovec(iter); blk_start_plug(&plug); for (;;) { diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 68ca41dbbef3..80953528572d 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3201,9 +3201,6 @@ void btrfs_get_block_group_info(struct list_head *groups_list, struct btrfs_ioctl_space_info *space); void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info, struct btrfs_ioctl_balance_args *bargs); -int btrfs_dedupe_file_range(struct file *src_file, loff_t src_loff, - struct file *dst_file, loff_t dst_loff, - u64 olen); /* file.c */ int __init btrfs_auto_defrag_init(void); @@ -3233,8 +3230,9 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages, size_t num_pages, loff_t pos, size_t write_bytes, struct extent_state **cached); int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end); -int btrfs_clone_file_range(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, u64 len); +loff_t btrfs_remap_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t len, unsigned int remap_flags); /* tree-defrag.c */ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 97c7a086f7bd..a3c22e16509b 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -3298,8 +3298,7 @@ const struct file_operations btrfs_file_operations = { #ifdef CONFIG_COMPAT .compat_ioctl = btrfs_compat_ioctl, #endif - .clone_file_range = btrfs_clone_file_range, - .dedupe_file_range = btrfs_dedupe_file_range, + .remap_file_range = btrfs_remap_file_range, }; void __cold btrfs_auto_defrag_exit(void) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index a990a9045139..3ca6943827ef 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3629,26 +3629,6 @@ out_unlock: return ret; } -int btrfs_dedupe_file_range(struct file *src_file, loff_t src_loff, - struct file *dst_file, loff_t dst_loff, - u64 olen) -{ - struct inode *src = file_inode(src_file); - struct inode *dst = file_inode(dst_file); - u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize; - - if (WARN_ON_ONCE(bs < PAGE_SIZE)) { - /* - * Btrfs does not support blocksize < page_size. As a - * result, btrfs_cmp_data() won't correctly handle - * this situation without an update. 
- */ - return -EINVAL; - } - - return btrfs_extent_same(src, src_loff, olen, dst, dst_loff); -} - static int clone_finish_inode_update(struct btrfs_trans_handle *trans, struct inode *inode, u64 endoff, @@ -4350,10 +4330,34 @@ out_unlock: return ret; } -int btrfs_clone_file_range(struct file *src_file, loff_t off, - struct file *dst_file, loff_t destoff, u64 len) +loff_t btrfs_remap_file_range(struct file *src_file, loff_t off, + struct file *dst_file, loff_t destoff, loff_t len, + unsigned int remap_flags) { - return btrfs_clone_files(dst_file, src_file, off, len, destoff); + int ret; + + if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY)) + return -EINVAL; + + if (remap_flags & REMAP_FILE_DEDUP) { + struct inode *src = file_inode(src_file); + struct inode *dst = file_inode(dst_file); + u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize; + + if (WARN_ON_ONCE(bs < PAGE_SIZE)) { + /* + * Btrfs does not support blocksize < page_size. As a + * result, btrfs_cmp_data() won't correctly handle + * this situation without an update. + */ + return -EINVAL; + } + + ret = btrfs_extent_same(src, off, len, dst, destoff); + } else { + ret = btrfs_clone_files(dst_file, src_file, off, len, destoff); + } + return ret < 0 ? ret : len; } static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) diff --git a/fs/buffer.c b/fs/buffer.c index d60d61e8ed7d..1286c2b95498 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -3060,6 +3060,11 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, */ bio = bio_alloc(GFP_NOIO, 1); + if (wbc) { + wbc_init_bio(wbc, bio); + wbc_account_io(wbc, bh->b_page, bh->b_size); + } + bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio_set_dev(bio, bh->b_bdev); bio->bi_write_hint = write_hint; @@ -3079,11 +3084,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, op_flags |= REQ_PRIO; bio_set_op_attrs(bio, op, op_flags); - if (wbc) { - wbc_init_bio(wbc, bio); - wbc_account_io(wbc, bh->b_page, bh->b_size); - } - submit_bio(bio); return 0; } diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c index 027408d55aee..5f0103f40079 100644 --- a/fs/ceph/acl.c +++ b/fs/ceph/acl.c @@ -104,6 +104,11 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type) struct timespec64 old_ctime = inode->i_ctime; umode_t new_mode = inode->i_mode, old_mode = inode->i_mode; + if (ceph_snap(inode) != CEPH_NOSNAP) { + ret = -EROFS; + goto out; + } + switch (type) { case ACL_TYPE_ACCESS: name = XATTR_NAME_POSIX_ACL_ACCESS; @@ -138,11 +143,6 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type) goto out_free; } - if (ceph_snap(inode) != CEPH_NOSNAP) { - ret = -EROFS; - goto out_free; - } - if (new_mode != old_mode) { newattrs.ia_ctime = current_time(inode); newattrs.ia_mode = new_mode; @@ -206,10 +206,9 @@ int ceph_pre_init_acls(struct inode *dir, umode_t *mode, tmp_buf = kmalloc(max(val_size1, val_size2), GFP_KERNEL); if (!tmp_buf) goto out_err; - pagelist = kmalloc(sizeof(struct ceph_pagelist), GFP_KERNEL); + pagelist = ceph_pagelist_alloc(GFP_KERNEL); if (!pagelist) goto out_err; - ceph_pagelist_init(pagelist); err = ceph_pagelist_reserve(pagelist, PAGE_SIZE); if (err) diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 9c332a6f6667..8eade7a993c1 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -322,7 +322,7 @@ static int start_read(struct inode *inode, struct ceph_rw_context *rw_ctx, /* caller of readpages does not hold buffer and read caps * (fadvise, madvise and readahead cases) */ int want = 
CEPH_CAP_FILE_CACHE; - ret = ceph_try_get_caps(ci, CEPH_CAP_FILE_RD, want, &got); + ret = ceph_try_get_caps(ci, CEPH_CAP_FILE_RD, want, true, &got); if (ret < 0) { dout("start_read %p, error getting cap\n", inode); } else if (!(got & want)) { diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index dd7dfdd2ba13..f3496db4bb3e 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -519,9 +519,9 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc, * -> we take mdsc->cap_delay_lock */ static void __cap_delay_requeue(struct ceph_mds_client *mdsc, - struct ceph_inode_info *ci) + struct ceph_inode_info *ci, + bool set_timeout) { - __cap_set_timeouts(mdsc, ci); dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode, ci->i_ceph_flags, ci->i_hold_caps_max); if (!mdsc->stopping) { @@ -531,6 +531,8 @@ static void __cap_delay_requeue(struct ceph_mds_client *mdsc, goto no_change; list_del_init(&ci->i_cap_delay_list); } + if (set_timeout) + __cap_set_timeouts(mdsc, ci); list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list); no_change: spin_unlock(&mdsc->cap_delay_lock); @@ -720,7 +722,7 @@ void ceph_add_cap(struct inode *inode, dout(" issued %s, mds wanted %s, actual %s, queueing\n", ceph_cap_string(issued), ceph_cap_string(wanted), ceph_cap_string(actual_wanted)); - __cap_delay_requeue(mdsc, ci); + __cap_delay_requeue(mdsc, ci, true); } if (flags & CEPH_CAP_FLAG_AUTH) { @@ -1647,7 +1649,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask, if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) && (mask & CEPH_CAP_FILE_BUFFER)) dirty |= I_DIRTY_DATASYNC; - __cap_delay_requeue(mdsc, ci); + __cap_delay_requeue(mdsc, ci, true); return dirty; } @@ -2065,7 +2067,7 @@ ack: /* Reschedule delayed caps release if we delayed anything */ if (delayed) - __cap_delay_requeue(mdsc, ci); + __cap_delay_requeue(mdsc, ci, false); spin_unlock(&ci->i_ceph_lock); @@ -2125,7 +2127,7 @@ retry: if (delayed) { spin_lock(&ci->i_ceph_lock); - __cap_delay_requeue(mdsc, ci); + __cap_delay_requeue(mdsc, ci, true); spin_unlock(&ci->i_ceph_lock); } } else { @@ -2671,17 +2673,18 @@ static void check_max_size(struct inode *inode, loff_t endoff) ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); } -int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want, int *got) +int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want, + bool nonblock, int *got) { int ret, err = 0; BUG_ON(need & ~CEPH_CAP_FILE_RD); - BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)); + BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO|CEPH_CAP_FILE_SHARED)); ret = ceph_pool_perm_check(ci, need); if (ret < 0) return ret; - ret = try_get_cap_refs(ci, need, want, 0, true, got, &err); + ret = try_get_cap_refs(ci, need, want, 0, nonblock, got, &err); if (ret) { if (err == -EAGAIN) { ret = 0; diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 92ab20433682..27cad84dab23 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/ceph/ceph_debug.h> +#include <linux/ceph/striper.h> #include <linux/module.h> #include <linux/sched.h> @@ -557,90 +558,26 @@ enum { }; /* - * Read a range of bytes striped over one or more objects. Iterate over - * objects we stripe over. (That's not atomic, but good enough for now.) - * - * If we get a short result from the OSD, check against i_size; we need to - * only return a short read to the caller if we hit EOF. 
- */ -static int striped_read(struct inode *inode, - u64 pos, u64 len, - struct page **pages, int num_pages, - int page_align, int *checkeof) -{ - struct ceph_fs_client *fsc = ceph_inode_to_client(inode); - struct ceph_inode_info *ci = ceph_inode(inode); - u64 this_len; - loff_t i_size; - int page_idx; - int ret, read = 0; - bool hit_stripe, was_short; - - /* - * we may need to do multiple reads. not atomic, unfortunately. - */ -more: - this_len = len; - page_idx = (page_align + read) >> PAGE_SHIFT; - ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode), - &ci->i_layout, pos, &this_len, - ci->i_truncate_seq, ci->i_truncate_size, - pages + page_idx, num_pages - page_idx, - ((page_align + read) & ~PAGE_MASK)); - if (ret == -ENOENT) - ret = 0; - hit_stripe = this_len < len; - was_short = ret >= 0 && ret < this_len; - dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read, - ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : ""); - - i_size = i_size_read(inode); - if (ret >= 0) { - if (was_short && (pos + ret < i_size)) { - int zlen = min(this_len - ret, i_size - pos - ret); - int zoff = page_align + read + ret; - dout(" zero gap %llu to %llu\n", - pos + ret, pos + ret + zlen); - ceph_zero_page_vector_range(zoff, zlen, pages); - ret += zlen; - } - - read += ret; - pos += ret; - len -= ret; - - /* hit stripe and need continue*/ - if (len && hit_stripe && pos < i_size) - goto more; - } - - if (read > 0) { - ret = read; - /* did we bounce off eof? */ - if (pos + len > i_size) - *checkeof = CHECK_EOF; - } - - dout("striped_read returns %d\n", ret); - return ret; -} - -/* * Completely synchronous read and write methods. Direct from __user * buffer to osd, or directly to user pages (if O_DIRECT). * - * If the read spans object boundary, just do multiple reads. + * If the read spans object boundary, just do multiple reads. (That's not + * atomic, but good enough for now.) + * + * If we get a short result from the OSD, check against i_size; we need to + * only return a short read to the caller if we hit EOF. */ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to, - int *checkeof) + int *retry_op) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); - struct page **pages; - u64 off = iocb->ki_pos; - int num_pages; + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_fs_client *fsc = ceph_inode_to_client(inode); + struct ceph_osd_client *osdc = &fsc->client->osdc; ssize_t ret; - size_t len = iov_iter_count(to); + u64 off = iocb->ki_pos; + u64 len = iov_iter_count(to); dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len, (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); @@ -653,61 +590,118 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to, * but it will at least behave sensibly when they are * in sequence. 
*/ - ret = filemap_write_and_wait_range(inode->i_mapping, off, - off + len); + ret = filemap_write_and_wait_range(inode->i_mapping, off, off + len); if (ret < 0) return ret; - if (unlikely(to->type & ITER_PIPE)) { + ret = 0; + while ((len = iov_iter_count(to)) > 0) { + struct ceph_osd_request *req; + struct page **pages; + int num_pages; size_t page_off; - ret = iov_iter_get_pages_alloc(to, &pages, len, - &page_off); - if (ret <= 0) - return -ENOMEM; - num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE); + u64 i_size; + bool more; + + req = ceph_osdc_new_request(osdc, &ci->i_layout, + ci->i_vino, off, &len, 0, 1, + CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, + NULL, ci->i_truncate_seq, + ci->i_truncate_size, false); + if (IS_ERR(req)) { + ret = PTR_ERR(req); + break; + } + + more = len < iov_iter_count(to); - ret = striped_read(inode, off, ret, pages, num_pages, - page_off, checkeof); - if (ret > 0) { - iov_iter_advance(to, ret); - off += ret; + if (unlikely(iov_iter_is_pipe(to))) { + ret = iov_iter_get_pages_alloc(to, &pages, len, + &page_off); + if (ret <= 0) { + ceph_osdc_put_request(req); + ret = -ENOMEM; + break; + } + num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE); + if (ret < len) { + len = ret; + osd_req_op_extent_update(req, 0, len); + more = false; + } } else { - iov_iter_advance(to, 0); + num_pages = calc_pages_for(off, len); + page_off = off & ~PAGE_MASK; + pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); + if (IS_ERR(pages)) { + ceph_osdc_put_request(req); + ret = PTR_ERR(pages); + break; + } } - ceph_put_page_vector(pages, num_pages, false); - } else { - num_pages = calc_pages_for(off, len); - pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); - if (IS_ERR(pages)) - return PTR_ERR(pages); - - ret = striped_read(inode, off, len, pages, num_pages, - (off & ~PAGE_MASK), checkeof); - if (ret > 0) { - int l, k = 0; - size_t left = ret; - - while (left) { - size_t page_off = off & ~PAGE_MASK; - size_t copy = min_t(size_t, left, - PAGE_SIZE - page_off); - l = copy_page_to_iter(pages[k++], page_off, - copy, to); - off += l; - left -= l; - if (l < copy) + + osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off, + false, false); + ret = ceph_osdc_start_request(osdc, req, false); + if (!ret) + ret = ceph_osdc_wait_request(osdc, req); + ceph_osdc_put_request(req); + + i_size = i_size_read(inode); + dout("sync_read %llu~%llu got %zd i_size %llu%s\n", + off, len, ret, i_size, (more ? " MORE" : "")); + + if (ret == -ENOENT) + ret = 0; + if (ret >= 0 && ret < len && (off + ret < i_size)) { + int zlen = min(len - ret, i_size - off - ret); + int zoff = page_off + ret; + dout("sync_read zero gap %llu~%llu\n", + off + ret, off + ret + zlen); + ceph_zero_page_vector_range(zoff, zlen, pages); + ret += zlen; + } + + if (unlikely(iov_iter_is_pipe(to))) { + if (ret > 0) { + iov_iter_advance(to, ret); + off += ret; + } else { + iov_iter_advance(to, 0); + } + ceph_put_page_vector(pages, num_pages, false); + } else { + int idx = 0; + size_t left = ret > 0 ? 
ret : 0; + while (left > 0) { + size_t len, copied; + page_off = off & ~PAGE_MASK; + len = min_t(size_t, left, PAGE_SIZE - page_off); + copied = copy_page_to_iter(pages[idx++], + page_off, len, to); + off += copied; + left -= copied; + if (copied < len) { + ret = -EFAULT; break; + } } + ceph_release_page_vector(pages, num_pages); } - ceph_release_page_vector(pages, num_pages); + + if (ret <= 0 || off >= i_size || !more) + break; } if (off > iocb->ki_pos) { + if (ret >= 0 && + iov_iter_count(to) > 0 && off >= i_size_read(inode)) + *retry_op = CHECK_EOF; ret = off - iocb->ki_pos; iocb->ki_pos = off; } - dout("sync_read result %zd\n", ret); + dout("sync_read result %zd retry_op %d\n", ret, *retry_op); return ret; } @@ -821,7 +815,7 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req) aio_req->total_len = rc + zlen; } - iov_iter_bvec(&i, ITER_BVEC, osd_data->bvec_pos.bvecs, + iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs, osd_data->num_bvecs, osd_data->bvec_pos.iter.bi_size); iov_iter_advance(&i, rc); @@ -865,7 +859,7 @@ static void ceph_aio_retry_work(struct work_struct *work) } spin_unlock(&ci->i_ceph_lock); - req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2, + req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1, false, GFP_NOFS); if (!req) { ret = -ENOMEM; @@ -877,6 +871,11 @@ static void ceph_aio_retry_work(struct work_struct *work) ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc); ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid); + req->r_ops[0] = orig_req->r_ops[0]; + + req->r_mtime = aio_req->mtime; + req->r_data_offset = req->r_ops[0].extent.offset; + ret = ceph_osdc_alloc_messages(req, GFP_NOFS); if (ret) { ceph_osdc_put_request(req); @@ -884,11 +883,6 @@ static void ceph_aio_retry_work(struct work_struct *work) goto out; } - req->r_ops[0] = orig_req->r_ops[0]; - - req->r_mtime = aio_req->mtime; - req->r_data_offset = req->r_ops[0].extent.offset; - ceph_osdc_put_request(orig_req); req->r_callback = ceph_aio_complete_req; @@ -1044,8 +1038,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, int zlen = min_t(size_t, len - ret, size - pos - ret); - iov_iter_bvec(&i, ITER_BVEC, bvecs, num_pages, - len); + iov_iter_bvec(&i, READ, bvecs, num_pages, len); iov_iter_advance(&i, ret); iov_iter_zero(zlen, &i); ret += zlen; @@ -1735,7 +1728,6 @@ static long ceph_fallocate(struct file *file, int mode, struct ceph_file_info *fi = file->private_data; struct inode *inode = file_inode(file); struct ceph_inode_info *ci = ceph_inode(inode); - struct ceph_fs_client *fsc = ceph_inode_to_client(inode); struct ceph_cap_flush *prealloc_cf; int want, got = 0; int dirty; @@ -1743,10 +1735,7 @@ static long ceph_fallocate(struct file *file, int mode, loff_t endoff = 0; loff_t size; - if ((offset + length) > max(i_size_read(inode), fsc->max_file_size)) - return -EFBIG; - - if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) + if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) return -EOPNOTSUPP; if (!S_ISREG(inode->i_mode)) @@ -1763,18 +1752,6 @@ static long ceph_fallocate(struct file *file, int mode, goto unlock; } - if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE)) && - ceph_quota_is_max_bytes_exceeded(inode, offset + length)) { - ret = -EDQUOT; - goto unlock; - } - - if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_FULL) && - !(mode & FALLOC_FL_PUNCH_HOLE)) { - ret = -ENOSPC; - goto unlock; - } - if (ci->i_inline_version != CEPH_INLINE_NONE) { ret = ceph_uninline_data(file, NULL); if (ret < 0) @@ -1782,12 +1759,12 @@ 
static long ceph_fallocate(struct file *file, int mode, } size = i_size_read(inode); - if (!(mode & FALLOC_FL_KEEP_SIZE)) { - endoff = offset + length; - ret = inode_newsize_ok(inode, endoff); - if (ret) - goto unlock; - } + + /* Are we punching a hole beyond EOF? */ + if (offset >= size) + goto unlock; + if ((offset + length) > size) + length = size - offset; if (fi->fmode & CEPH_FILE_MODE_LAZY) want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; @@ -1798,16 +1775,8 @@ static long ceph_fallocate(struct file *file, int mode, if (ret < 0) goto unlock; - if (mode & FALLOC_FL_PUNCH_HOLE) { - if (offset < size) - ceph_zero_pagecache_range(inode, offset, length); - ret = ceph_zero_objects(inode, offset, length); - } else if (endoff > size) { - truncate_pagecache_range(inode, size, -1); - if (ceph_inode_set_size(inode, endoff)) - ceph_check_caps(ceph_inode(inode), - CHECK_CAPS_AUTHONLY, NULL); - } + ceph_zero_pagecache_range(inode, offset, length); + ret = ceph_zero_objects(inode, offset, length); if (!ret) { spin_lock(&ci->i_ceph_lock); @@ -1817,9 +1786,6 @@ static long ceph_fallocate(struct file *file, int mode, spin_unlock(&ci->i_ceph_lock); if (dirty) __mark_inode_dirty(inode, dirty); - if ((endoff > size) && - ceph_quota_is_max_bytes_approaching(inode, endoff)) - ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL); } ceph_put_cap_refs(ci, got); @@ -1829,6 +1795,300 @@ unlock: return ret; } +/* + * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for + * src_ci. Two attempts are made to obtain both caps, and an error is return if + * this fails; zero is returned on success. + */ +static int get_rd_wr_caps(struct ceph_inode_info *src_ci, + loff_t src_endoff, int *src_got, + struct ceph_inode_info *dst_ci, + loff_t dst_endoff, int *dst_got) +{ + int ret = 0; + bool retrying = false; + +retry_caps: + ret = ceph_get_caps(dst_ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER, + dst_endoff, dst_got, NULL); + if (ret < 0) + return ret; + + /* + * Since we're already holding the FILE_WR capability for the dst file, + * we would risk a deadlock by using ceph_get_caps. Thus, we'll do some + * retry dance instead to try to get both capabilities. + */ + ret = ceph_try_get_caps(src_ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED, + false, src_got); + if (ret <= 0) { + /* Start by dropping dst_ci caps and getting src_ci caps */ + ceph_put_cap_refs(dst_ci, *dst_got); + if (retrying) { + if (!ret) + /* ceph_try_get_caps masks EAGAIN */ + ret = -EAGAIN; + return ret; + } + ret = ceph_get_caps(src_ci, CEPH_CAP_FILE_RD, + CEPH_CAP_FILE_SHARED, src_endoff, + src_got, NULL); + if (ret < 0) + return ret; + /*... drop src_ci caps too, and retry */ + ceph_put_cap_refs(src_ci, *src_got); + retrying = true; + goto retry_caps; + } + return ret; +} + +static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got, + struct ceph_inode_info *dst_ci, int dst_got) +{ + ceph_put_cap_refs(src_ci, src_got); + ceph_put_cap_refs(dst_ci, dst_got); +} + +/* + * This function does several size-related checks, returning an error if: + * - source file is smaller than off+len + * - destination file size is not OK (inode_newsize_ok()) + * - max bytes quotas is exceeded + */ +static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode, + loff_t src_off, loff_t dst_off, size_t len) +{ + loff_t size, endoff; + + size = i_size_read(src_inode); + /* + * Don't copy beyond source file EOF. 
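/*
 * Editor's sketch (not part of the patch): get_rd_wr_caps() above avoids a
 * deadlock by never blocking for the second capability while holding the
 * first; it drops what it holds, waits for the missing one, drops that too,
 * and retries the pair once. The same discipline with plain pthread mutexes
 * standing in for the two inodes' capabilities (all names illustrative):
 */
#include <pthread.h>
#include <stdio.h>

static int lock_both(pthread_mutex_t *dst, pthread_mutex_t *src)
{
	int retrying = 0;

	for (;;) {
		pthread_mutex_lock(dst);	/* blocking, like FILE_WR on dst */
		if (pthread_mutex_trylock(src) == 0)
			return 0;		/* got both, like ceph_try_get_caps() */
		pthread_mutex_unlock(dst);	/* drop dst before blocking on src */
		if (retrying)
			return -1;		/* give up after one full retry */
		pthread_mutex_lock(src);	/* wait for src... */
		pthread_mutex_unlock(src);	/* ...then drop it and retry the pair */
		retrying = 1;
	}
}

int main(void)
{
	pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;

	if (lock_both(&a, &b) == 0) {
		puts("acquired both without risking a lock-order deadlock");
		pthread_mutex_unlock(&b);
		pthread_mutex_unlock(&a);
	}
	return 0;
}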
Instead of simply setting length + * to (size - src_off), just drop to VFS default implementation, as the + * local i_size may be stale due to other clients writing to the source + * inode. + */ + if (src_off + len > size) { + dout("Copy beyond EOF (%llu + %zu > %llu)\n", + src_off, len, size); + return -EOPNOTSUPP; + } + size = i_size_read(dst_inode); + + endoff = dst_off + len; + if (inode_newsize_ok(dst_inode, endoff)) + return -EOPNOTSUPP; + + if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff)) + return -EDQUOT; + + return 0; +} + +static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off, + struct file *dst_file, loff_t dst_off, + size_t len, unsigned int flags) +{ + struct inode *src_inode = file_inode(src_file); + struct inode *dst_inode = file_inode(dst_file); + struct ceph_inode_info *src_ci = ceph_inode(src_inode); + struct ceph_inode_info *dst_ci = ceph_inode(dst_inode); + struct ceph_cap_flush *prealloc_cf; + struct ceph_object_locator src_oloc, dst_oloc; + struct ceph_object_id src_oid, dst_oid; + loff_t endoff = 0, size; + ssize_t ret = -EIO; + u64 src_objnum, dst_objnum, src_objoff, dst_objoff; + u32 src_objlen, dst_objlen, object_size; + int src_got = 0, dst_got = 0, err, dirty; + bool do_final_copy = false; + + if (src_inode == dst_inode) + return -EINVAL; + if (ceph_snap(dst_inode) != CEPH_NOSNAP) + return -EROFS; + + /* + * Some of the checks below will return -EOPNOTSUPP, which will force a + * fallback to the default VFS copy_file_range implementation. This is + * desirable in several cases (for ex, the 'len' is smaller than the + * size of the objects, or in cases where that would be more + * efficient). + */ + + if (ceph_test_mount_opt(ceph_inode_to_client(src_inode), NOCOPYFROM)) + return -EOPNOTSUPP; + + if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) || + (src_ci->i_layout.stripe_count != dst_ci->i_layout.stripe_count) || + (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) + return -EOPNOTSUPP; + + if (len < src_ci->i_layout.object_size) + return -EOPNOTSUPP; /* no remote copy will be done */ + + prealloc_cf = ceph_alloc_cap_flush(); + if (!prealloc_cf) + return -ENOMEM; + + /* Start by sync'ing the source file */ + ret = file_write_and_wait_range(src_file, src_off, (src_off + len)); + if (ret < 0) + goto out; + + /* + * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other + * clients may have dirty data in their caches. And OSDs know nothing + * about caps, so they can't safely do the remote object copies. 
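/*
 * Editor's sketch (not part of the patch): the copy offload below only kicks
 * in for whole, object-aligned objects; anything else is spliced locally or
 * handed back to the VFS with -EOPNOTSUPP. A user-space model of the
 * file-offset -> object mapping for the simple default layout (assuming
 * stripe_count == 1 and stripe_unit == object size); real layouts go through
 * ceph_calc_file_object_mapping():
 */
#include <stdio.h>
#include <stdint.h>

struct obj_extent {
	uint64_t objnum;	/* which RADOS object of the file */
	uint64_t objoff;	/* offset inside that object */
	uint64_t objlen;	/* bytes left until the end of that object */
};

static struct obj_extent map_simple_layout(uint64_t off, uint64_t object_size)
{
	struct obj_extent e = {
		.objnum = off / object_size,
		.objoff = off % object_size,
		.objlen = object_size - off % object_size,
	};
	return e;
}

int main(void)
{
	/* 4 MiB objects: offset 6 MiB lands half-way into object #1, so a
	 * remote copy is only possible after the leading 2 MiB is spliced. */
	struct obj_extent e = map_simple_layout(6ull << 20, 4ull << 20);

	printf("objnum=%llu objoff=%llu objlen=%llu\n",
	       (unsigned long long)e.objnum, (unsigned long long)e.objoff,
	       (unsigned long long)e.objlen);
	return 0;
}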
+ */ + err = get_rd_wr_caps(src_ci, (src_off + len), &src_got, + dst_ci, (dst_off + len), &dst_got); + if (err < 0) { + dout("get_rd_wr_caps returned %d\n", err); + ret = -EOPNOTSUPP; + goto out; + } + + ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len); + if (ret < 0) + goto out_caps; + + size = i_size_read(dst_inode); + endoff = dst_off + len; + + /* Drop dst file cached pages */ + ret = invalidate_inode_pages2_range(dst_inode->i_mapping, + dst_off >> PAGE_SHIFT, + endoff >> PAGE_SHIFT); + if (ret < 0) { + dout("Failed to invalidate inode pages (%zd)\n", ret); + ret = 0; /* XXX */ + } + src_oloc.pool = src_ci->i_layout.pool_id; + src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns); + dst_oloc.pool = dst_ci->i_layout.pool_id; + dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns); + + ceph_calc_file_object_mapping(&src_ci->i_layout, src_off, + src_ci->i_layout.object_size, + &src_objnum, &src_objoff, &src_objlen); + ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off, + dst_ci->i_layout.object_size, + &dst_objnum, &dst_objoff, &dst_objlen); + /* object-level offsets need to the same */ + if (src_objoff != dst_objoff) { + ret = -EOPNOTSUPP; + goto out_caps; + } + + /* + * Do a manual copy if the object offset isn't object aligned. + * 'src_objlen' contains the bytes left until the end of the object, + * starting at the src_off + */ + if (src_objoff) { + /* + * we need to temporarily drop all caps as we'll be calling + * {read,write}_iter, which will get caps again. + */ + put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got); + ret = do_splice_direct(src_file, &src_off, dst_file, + &dst_off, src_objlen, flags); + if (ret < 0) { + dout("do_splice_direct returned %d\n", err); + goto out; + } + len -= ret; + err = get_rd_wr_caps(src_ci, (src_off + len), + &src_got, dst_ci, + (dst_off + len), &dst_got); + if (err < 0) + goto out; + err = is_file_size_ok(src_inode, dst_inode, + src_off, dst_off, len); + if (err < 0) + goto out_caps; + } + object_size = src_ci->i_layout.object_size; + while (len >= object_size) { + ceph_calc_file_object_mapping(&src_ci->i_layout, src_off, + object_size, &src_objnum, + &src_objoff, &src_objlen); + ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off, + object_size, &dst_objnum, + &dst_objoff, &dst_objlen); + ceph_oid_init(&src_oid); + ceph_oid_printf(&src_oid, "%llx.%08llx", + src_ci->i_vino.ino, src_objnum); + ceph_oid_init(&dst_oid); + ceph_oid_printf(&dst_oid, "%llx.%08llx", + dst_ci->i_vino.ino, dst_objnum); + /* Do an object remote copy */ + err = ceph_osdc_copy_from( + &ceph_inode_to_client(src_inode)->client->osdc, + src_ci->i_vino.snap, 0, + &src_oid, &src_oloc, + CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL | + CEPH_OSD_OP_FLAG_FADVISE_NOCACHE, + &dst_oid, &dst_oloc, + CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL | + CEPH_OSD_OP_FLAG_FADVISE_DONTNEED, 0); + if (err) { + dout("ceph_osdc_copy_from returned %d\n", err); + if (!ret) + ret = err; + goto out_caps; + } + len -= object_size; + src_off += object_size; + dst_off += object_size; + ret += object_size; + } + + if (len) + /* We still need one final local copy */ + do_final_copy = true; + + file_update_time(dst_file); + if (endoff > size) { + int caps_flags = 0; + + /* Let the MDS know about dst file size change */ + if (ceph_quota_is_max_bytes_approaching(dst_inode, endoff)) + caps_flags |= CHECK_CAPS_NODELAY; + if (ceph_inode_set_size(dst_inode, endoff)) + caps_flags |= CHECK_CAPS_AUTHONLY; + if (caps_flags) + ceph_check_caps(dst_ci, caps_flags, NULL); + } + /* Mark Fw 
dirty */ + spin_lock(&dst_ci->i_ceph_lock); + dst_ci->i_inline_version = CEPH_INLINE_NONE; + dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf); + spin_unlock(&dst_ci->i_ceph_lock); + if (dirty) + __mark_inode_dirty(dst_inode, dirty); + +out_caps: + put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got); + + if (do_final_copy) { + err = do_splice_direct(src_file, &src_off, dst_file, + &dst_off, len, flags); + if (err < 0) { + dout("do_splice_direct returned %d\n", err); + goto out; + } + len -= err; + ret += err; + } + +out: + ceph_free_cap_flush(prealloc_cf); + + return ret; +} + const struct file_operations ceph_file_fops = { .open = ceph_open, .release = ceph_release, @@ -1844,5 +2104,5 @@ const struct file_operations ceph_file_fops = { .unlocked_ioctl = ceph_ioctl, .compat_ioctl = ceph_ioctl, .fallocate = ceph_fallocate, + .copy_file_range = ceph_copy_file_range, }; - diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index ebc7bdaed2d0..79dd5e6ed755 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1132,8 +1132,12 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in) if (IS_ERR(realdn)) { pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n", PTR_ERR(realdn), dn, in, ceph_vinop(in)); - dput(dn); - dn = realdn; /* note realdn contains the error */ + dn = realdn; + /* + * Caller should release 'dn' in the case of error. + * If 'req->r_dentry' is passed to this function, + * caller should leave 'req->r_dentry' untouched. + */ goto out; } else if (realdn) { dout("dn %p (%d) spliced with %p (%d) " @@ -1196,7 +1200,9 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req) WARN_ON_ONCE(1); } - if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME) { + if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME && + test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) && + !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { struct qstr dname; struct dentry *dn, *parent; @@ -1677,7 +1683,6 @@ retry_lookup: if (IS_ERR(realdn)) { err = PTR_ERR(realdn); d_drop(dn); - dn = NULL; goto next_item; } dn = realdn; diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index bc43c822426a..67a9aeb2f4ec 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2071,7 +2071,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, if (req->r_old_dentry_drop) len += req->r_old_dentry->d_name.len; - msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false); + msg = ceph_msg_new2(CEPH_MSG_CLIENT_REQUEST, len, 1, GFP_NOFS, false); if (!msg) { msg = ERR_PTR(-ENOMEM); goto out_free2; @@ -2136,7 +2136,6 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, if (req->r_pagelist) { struct ceph_pagelist *pagelist = req->r_pagelist; - refcount_inc(&pagelist->refcnt); ceph_msg_data_add_pagelist(msg, pagelist); msg->hdr.data_len = cpu_to_le32(pagelist->length); } else { @@ -3126,12 +3125,11 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, pr_info("mds%d reconnect start\n", mds); - pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS); + pagelist = ceph_pagelist_alloc(GFP_NOFS); if (!pagelist) goto fail_nopagelist; - ceph_pagelist_init(pagelist); - reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false); + reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false); if (!reply) goto fail_nomsg; @@ -3241,6 +3239,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, mutex_unlock(&mdsc->mutex); up_read(&mdsc->snap_rwsem); + ceph_pagelist_release(pagelist); return; 
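/*
 * Editor's sketch (not part of the patch): the mds_client.c hunks above drop
 * the extra refcount_inc() before ceph_msg_data_add_pagelist() and make
 * send_mds_reconnect() release its own reference once the reply has been
 * queued, which suggests the message layer now takes its own reference on the
 * pagelist. A minimal user-space model of that ownership rule (names
 * illustrative):
 */
#include <stdio.h>
#include <stdlib.h>

struct pl {
	int refs;
};

static struct pl *pl_alloc(void)
{
	struct pl *p = malloc(sizeof(*p));

	if (p)
		p->refs = 1;		/* the caller's reference */
	return p;
}

static void pl_get(struct pl *p)
{
	p->refs++;
}

static void pl_put(struct pl *p)
{
	if (--p->refs == 0) {
		free(p);
		puts("freed");
	}
}

/* The consumer takes its own reference; callers never pin it on its behalf. */
static void msg_add_pagelist(struct pl *p)
{
	pl_get(p);
}

static void msg_destroy(struct pl *p)
{
	pl_put(p);
}

int main(void)
{
	struct pl *p = pl_alloc();

	msg_add_pagelist(p);	/* message's reference */
	pl_put(p);		/* caller drops only its own reference */
	msg_destroy(p);		/* prints "freed" when the message goes away */
	return 0;
}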
fail: diff --git a/fs/ceph/super.c b/fs/ceph/super.c index eab1359d0553..b5ecd6f50360 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -165,6 +165,8 @@ enum { Opt_noacl, Opt_quotadf, Opt_noquotadf, + Opt_copyfrom, + Opt_nocopyfrom, }; static match_table_t fsopt_tokens = { @@ -203,6 +205,8 @@ static match_table_t fsopt_tokens = { {Opt_noacl, "noacl"}, {Opt_quotadf, "quotadf"}, {Opt_noquotadf, "noquotadf"}, + {Opt_copyfrom, "copyfrom"}, + {Opt_nocopyfrom, "nocopyfrom"}, {-1, NULL} }; @@ -355,6 +359,12 @@ static int parse_fsopt_token(char *c, void *private) case Opt_noquotadf: fsopt->flags |= CEPH_MOUNT_OPT_NOQUOTADF; break; + case Opt_copyfrom: + fsopt->flags &= ~CEPH_MOUNT_OPT_NOCOPYFROM; + break; + case Opt_nocopyfrom: + fsopt->flags |= CEPH_MOUNT_OPT_NOCOPYFROM; + break; #ifdef CONFIG_CEPH_FS_POSIX_ACL case Opt_acl: fsopt->sb_flags |= SB_POSIXACL; @@ -553,6 +563,9 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root) seq_puts(m, ",noacl"); #endif + if (fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM) + seq_puts(m, ",nocopyfrom"); + if (fsopt->mds_namespace) seq_show_option(m, "mds_namespace", fsopt->mds_namespace); if (fsopt->wsize != CEPH_MAX_WRITE_SIZE) diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 582e28fd1b7b..c005a5400f2e 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -40,6 +40,7 @@ #define CEPH_MOUNT_OPT_NOPOOLPERM (1<<11) /* no pool permission check */ #define CEPH_MOUNT_OPT_MOUNTWAIT (1<<12) /* mount waits if no mds is up */ #define CEPH_MOUNT_OPT_NOQUOTADF (1<<13) /* no root dir quota in statfs */ +#define CEPH_MOUNT_OPT_NOCOPYFROM (1<<14) /* don't use RADOS 'copy-from' op */ #define CEPH_MOUNT_OPT_DEFAULT CEPH_MOUNT_OPT_DCACHE @@ -1008,7 +1009,7 @@ extern int ceph_encode_dentry_release(void **p, struct dentry *dn, extern int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, loff_t endoff, int *got, struct page **pinned_page); extern int ceph_try_get_caps(struct ceph_inode_info *ci, - int need, int want, int *got); + int need, int want, bool nonblock, int *got); /* for counting open files by mode */ extern void __ceph_get_fmode(struct ceph_inode_info *ci, int mode); diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 5cc8b94f8206..316f6ad10644 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -951,11 +951,10 @@ static int ceph_sync_setxattr(struct inode *inode, const char *name, if (size > 0) { /* copy value into pagelist */ - pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS); + pagelist = ceph_pagelist_alloc(GFP_NOFS); if (!pagelist) return -ENOMEM; - ceph_pagelist_init(pagelist); err = ceph_pagelist_append(pagelist, value, size); if (err) goto out; diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 3e812428ac8d..ba178b09de0b 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -145,6 +145,58 @@ cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface) seq_printf(m, "\t\tIPv6: %pI6\n", &ipv6->sin6_addr); } +static int cifs_debug_files_proc_show(struct seq_file *m, void *v) +{ + struct list_head *stmp, *tmp, *tmp1, *tmp2; + struct TCP_Server_Info *server; + struct cifs_ses *ses; + struct cifs_tcon *tcon; + struct cifsFileInfo *cfile; + + seq_puts(m, "# Version:1\n"); + seq_puts(m, "# Format:\n"); + seq_puts(m, "# <tree id> <persistent fid> <flags> <count> <pid> <uid>"); +#ifdef CONFIG_CIFS_DEBUG2 + seq_printf(m, " <filename> <mid>\n"); +#else + seq_printf(m, " <filename>\n"); +#endif /* CIFS_DEBUG2 */ + spin_lock(&cifs_tcp_ses_lock); + list_for_each(stmp, &cifs_tcp_ses_list) { + server = 
list_entry(stmp, struct TCP_Server_Info, + tcp_ses_list); + list_for_each(tmp, &server->smb_ses_list) { + ses = list_entry(tmp, struct cifs_ses, smb_ses_list); + list_for_each(tmp1, &ses->tcon_list) { + tcon = list_entry(tmp1, struct cifs_tcon, tcon_list); + spin_lock(&tcon->open_file_lock); + list_for_each(tmp2, &tcon->openFileList) { + cfile = list_entry(tmp2, struct cifsFileInfo, + tlist); + seq_printf(m, + "0x%x 0x%llx 0x%x %d %d %d %s", + tcon->tid, + cfile->fid.persistent_fid, + cfile->f_flags, + cfile->count, + cfile->pid, + from_kuid(&init_user_ns, cfile->uid), + cfile->dentry->d_name.name); +#ifdef CONFIG_CIFS_DEBUG2 + seq_printf(m, " 0x%llx\n", cfile->fid.mid); +#else + seq_printf(m, "\n"); +#endif /* CIFS_DEBUG2 */ + } + spin_unlock(&tcon->open_file_lock); + } + } + } + spin_unlock(&cifs_tcp_ses_lock); + seq_putc(m, '\n'); + return 0; +} + static int cifs_debug_data_proc_show(struct seq_file *m, void *v) { struct list_head *tmp1, *tmp2, *tmp3; @@ -565,6 +617,9 @@ cifs_proc_init(void) proc_create_single("DebugData", 0, proc_fs_cifs, cifs_debug_data_proc_show); + proc_create_single("open_files", 0400, proc_fs_cifs, + cifs_debug_files_proc_show); + proc_create("Stats", 0644, proc_fs_cifs, &cifs_stats_proc_fops); proc_create("cifsFYI", 0644, proc_fs_cifs, &cifsFYI_proc_fops); proc_create("traceSMB", 0644, proc_fs_cifs, &traceSMB_proc_fops); @@ -601,6 +656,7 @@ cifs_proc_clean(void) return; remove_proc_entry("DebugData", proc_fs_cifs); + remove_proc_entry("open_files", proc_fs_cifs); remove_proc_entry("cifsFYI", proc_fs_cifs); remove_proc_entry("traceSMB", proc_fs_cifs); remove_proc_entry("Stats", proc_fs_cifs); diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c index b611fc2e8984..7f01c6e60791 100644 --- a/fs/cifs/cifs_spnego.c +++ b/fs/cifs/cifs_spnego.c @@ -147,8 +147,10 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo) sprintf(dp, ";sec=krb5"); else if (server->sec_mskerberos) sprintf(dp, ";sec=mskrb5"); - else - goto out; + else { + cifs_dbg(VFS, "unknown or missing server auth type, use krb5\n"); + sprintf(dp, ";sec=krb5"); + } dp = description + strlen(description); sprintf(dp, ";uid=0x%x", diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 7de9603c54f1..865706edb307 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -992,17 +992,21 @@ const struct inode_operations cifs_symlink_inode_ops = { .listxattr = cifs_listxattr, }; -static int cifs_clone_file_range(struct file *src_file, loff_t off, - struct file *dst_file, loff_t destoff, u64 len) +static loff_t cifs_remap_file_range(struct file *src_file, loff_t off, + struct file *dst_file, loff_t destoff, loff_t len, + unsigned int remap_flags) { struct inode *src_inode = file_inode(src_file); struct inode *target_inode = file_inode(dst_file); struct cifsFileInfo *smb_file_src = src_file->private_data; - struct cifsFileInfo *smb_file_target = dst_file->private_data; - struct cifs_tcon *target_tcon = tlink_tcon(smb_file_target->tlink); + struct cifsFileInfo *smb_file_target; + struct cifs_tcon *target_tcon; unsigned int xid; int rc; + if (remap_flags & ~REMAP_FILE_ADVISORY) + return -EINVAL; + cifs_dbg(FYI, "clone range\n"); xid = get_xid(); @@ -1013,6 +1017,9 @@ static int cifs_clone_file_range(struct file *src_file, loff_t off, goto out; } + smb_file_target = dst_file->private_data; + target_tcon = tlink_tcon(smb_file_target->tlink); + /* * Note: cifs case is easier than btrfs since server responsible for * checks for proper open modes and file type and if it wants @@ -1042,7 +1049,7 @@ static int 
cifs_clone_file_range(struct file *src_file, loff_t off, unlock_two_nondirectories(src_inode, target_inode); out: free_xid(xid); - return rc; + return rc < 0 ? rc : len; } ssize_t cifs_file_copychunk_range(unsigned int xid, @@ -1151,7 +1158,7 @@ const struct file_operations cifs_file_ops = { .llseek = cifs_llseek, .unlocked_ioctl = cifs_ioctl, .copy_file_range = cifs_copy_file_range, - .clone_file_range = cifs_clone_file_range, + .remap_file_range = cifs_remap_file_range, .setlease = cifs_setlease, .fallocate = cifs_fallocate, }; @@ -1170,15 +1177,14 @@ const struct file_operations cifs_file_strict_ops = { .llseek = cifs_llseek, .unlocked_ioctl = cifs_ioctl, .copy_file_range = cifs_copy_file_range, - .clone_file_range = cifs_clone_file_range, + .remap_file_range = cifs_remap_file_range, .setlease = cifs_setlease, .fallocate = cifs_fallocate, }; const struct file_operations cifs_file_direct_ops = { - /* BB reevaluate whether they can be done with directio, no cache */ - .read_iter = cifs_user_readv, - .write_iter = cifs_user_writev, + .read_iter = cifs_direct_readv, + .write_iter = cifs_direct_writev, .open = cifs_open, .release = cifs_close, .lock = cifs_lock, @@ -1189,7 +1195,7 @@ const struct file_operations cifs_file_direct_ops = { .splice_write = iter_file_splice_write, .unlocked_ioctl = cifs_ioctl, .copy_file_range = cifs_copy_file_range, - .clone_file_range = cifs_clone_file_range, + .remap_file_range = cifs_remap_file_range, .llseek = cifs_llseek, .setlease = cifs_setlease, .fallocate = cifs_fallocate, @@ -1208,7 +1214,7 @@ const struct file_operations cifs_file_nobrl_ops = { .llseek = cifs_llseek, .unlocked_ioctl = cifs_ioctl, .copy_file_range = cifs_copy_file_range, - .clone_file_range = cifs_clone_file_range, + .remap_file_range = cifs_remap_file_range, .setlease = cifs_setlease, .fallocate = cifs_fallocate, }; @@ -1226,15 +1232,14 @@ const struct file_operations cifs_file_strict_nobrl_ops = { .llseek = cifs_llseek, .unlocked_ioctl = cifs_ioctl, .copy_file_range = cifs_copy_file_range, - .clone_file_range = cifs_clone_file_range, + .remap_file_range = cifs_remap_file_range, .setlease = cifs_setlease, .fallocate = cifs_fallocate, }; const struct file_operations cifs_file_direct_nobrl_ops = { - /* BB reevaluate whether they can be done with directio, no cache */ - .read_iter = cifs_user_readv, - .write_iter = cifs_user_writev, + .read_iter = cifs_direct_readv, + .write_iter = cifs_direct_writev, .open = cifs_open, .release = cifs_close, .fsync = cifs_fsync, @@ -1244,7 +1249,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = { .splice_write = iter_file_splice_write, .unlocked_ioctl = cifs_ioctl, .copy_file_range = cifs_copy_file_range, - .clone_file_range = cifs_clone_file_range, + .remap_file_range = cifs_remap_file_range, .llseek = cifs_llseek, .setlease = cifs_setlease, .fallocate = cifs_fallocate, @@ -1256,7 +1261,7 @@ const struct file_operations cifs_dir_ops = { .read = generic_read_dir, .unlocked_ioctl = cifs_ioctl, .copy_file_range = cifs_copy_file_range, - .clone_file_range = cifs_clone_file_range, + .remap_file_range = cifs_remap_file_range, .llseek = generic_file_llseek, .fsync = cifs_dir_fsync, }; diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 24e265a51874..4c3b5cfccc49 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -101,8 +101,10 @@ extern int cifs_open(struct inode *inode, struct file *file); extern int cifs_close(struct inode *inode, struct file *file); extern int cifs_closedir(struct inode *inode, struct file *file); extern ssize_t 
cifs_user_readv(struct kiocb *iocb, struct iov_iter *to); +extern ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to); extern ssize_t cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to); extern ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from); +extern ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from); extern ssize_t cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from); extern int cifs_lock(struct file *, int, struct file_lock *); extern int cifs_fsync(struct file *, loff_t, loff_t, int); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index ed1e0fcb69e3..38ab0fca49e1 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -1125,6 +1125,9 @@ struct cifs_fid { __u8 create_guid[16]; struct cifs_pending_open *pending_open; unsigned int epoch; +#ifdef CONFIG_CIFS_DEBUG2 + __u64 mid; +#endif /* CIFS_DEBUG2 */ bool purge_cache; }; @@ -1183,6 +1186,11 @@ struct cifs_aio_ctx { unsigned int len; unsigned int total_len; bool should_dirty; + /* + * Indicates if this aio_ctx is for direct_io, + * If yes, iter is a copy of the user passed iov_iter + */ + bool direct_io; }; struct cifs_readdata; diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h index 1ce733f3582f..79d842e7240c 100644 --- a/fs/cifs/cifspdu.h +++ b/fs/cifs/cifspdu.h @@ -1539,6 +1539,9 @@ struct reparse_symlink_data { char PathBuffer[0]; } __attribute__((packed)); +/* Flag above */ +#define SYMLINK_FLAG_RELATIVE 0x00000001 + /* For IO_REPARSE_TAG_NFS */ #define NFS_SPECFILE_LNK 0x00000000014B4E4C #define NFS_SPECFILE_CHR 0x0000000000524843 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index d82f0cc71755..6f24f129a751 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -589,7 +589,7 @@ cifs_read_from_socket(struct TCP_Server_Info *server, char *buf, { struct msghdr smb_msg; struct kvec iov = {.iov_base = buf, .iov_len = to_read}; - iov_iter_kvec(&smb_msg.msg_iter, READ | ITER_KVEC, &iov, 1, to_read); + iov_iter_kvec(&smb_msg.msg_iter, READ, &iov, 1, to_read); return cifs_readv_from_socket(server, &smb_msg); } @@ -601,7 +601,7 @@ cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page, struct msghdr smb_msg; struct bio_vec bv = { .bv_page = page, .bv_len = to_read, .bv_offset = page_offset}; - iov_iter_bvec(&smb_msg.msg_iter, READ | ITER_BVEC, &bv, 1, to_read); + iov_iter_bvec(&smb_msg.msg_iter, READ, &bv, 1, to_read); return cifs_readv_from_socket(server, &smb_msg); } diff --git a/fs/cifs/file.c b/fs/cifs/file.c index c620d4b5d5d4..74c33d5fafc8 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1005,7 +1005,7 @@ cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock) * Set the byte-range lock (mandatory style). Returns: * 1) 0, if we set the lock and don't need to request to the server; * 2) 1, if no locks prevent us but we need to request to the server; - * 3) -EACCESS, if there is a lock that prevents us and wait is false. + * 3) -EACCES, if there is a lock that prevents us and wait is false. */ static int cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock, @@ -2538,6 +2538,61 @@ wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from, } static int +cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list, + struct cifs_aio_ctx *ctx) +{ + int wait_retry = 0; + unsigned int wsize, credits; + int rc; + struct TCP_Server_Info *server = + tlink_tcon(wdata->cfile->tlink)->ses->server; + + /* + * Try to resend this wdata, waiting for credits up to 3 seconds. 
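/*
 * Editor's sketch (not part of the patch): the new resend helpers
 * cifs_resend_wdata()/cifs_resend_rdata() in this patch poll for enough send
 * credits, sleeping one second between attempts and giving up with -EBUSY
 * after three tries. The bare retry skeleton in user space; the credit
 * source is a made-up stand-in for server->ops->wait_mtu_credits():
 */
#include <stdio.h>
#include <unistd.h>
#include <errno.h>

static unsigned int currently_available_credits(void)
{
	static unsigned int grant;

	return grant += 16 * 1024;	/* pretend credits trickle back in */
}

static int wait_for_credits(unsigned int needed, unsigned int *granted)
{
	int tries = 0;

	do {
		*granted = currently_available_credits();
		if (*granted >= needed)
			return 0;
		sleep(1);		/* msleep(1000) in the driver */
	} while (++tries < 3);

	return -EBUSY;			/* still not enough: fail the resend */
}

int main(void)
{
	unsigned int got;
	int rc = wait_for_credits(48 * 1024, &got);

	printf("rc=%d granted=%u\n", rc, got);
	return 0;
}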
+ * Note: we are attempting to resend the whole wdata not in segments + */ + do { + rc = server->ops->wait_mtu_credits( + server, wdata->bytes, &wsize, &credits); + + if (rc) + break; + + if (wsize < wdata->bytes) { + add_credits_and_wake_if(server, credits, 0); + msleep(1000); + wait_retry++; + } + } while (wsize < wdata->bytes && wait_retry < 3); + + if (wsize < wdata->bytes) { + rc = -EBUSY; + goto out; + } + + rc = -EAGAIN; + while (rc == -EAGAIN) { + rc = 0; + if (wdata->cfile->invalidHandle) + rc = cifs_reopen_file(wdata->cfile, false); + if (!rc) + rc = server->ops->async_writev(wdata, + cifs_uncached_writedata_release); + } + + if (!rc) { + list_add_tail(&wdata->list, wdata_list); + return 0; + } + + add_credits_and_wake_if(server, wdata->credits, 0); +out: + kref_put(&wdata->refcount, cifs_uncached_writedata_release); + + return rc; +} + +static int cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from, struct cifsFileInfo *open_file, struct cifs_sb_info *cifs_sb, struct list_head *wdata_list, @@ -2551,6 +2606,8 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from, loff_t saved_offset = offset; pid_t pid; struct TCP_Server_Info *server; + struct page **pagevec; + size_t start; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) pid = open_file->pid; @@ -2567,38 +2624,79 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from, if (rc) break; - nr_pages = get_numpages(wsize, len, &cur_len); - wdata = cifs_writedata_alloc(nr_pages, + if (ctx->direct_io) { + ssize_t result; + + result = iov_iter_get_pages_alloc( + from, &pagevec, wsize, &start); + if (result < 0) { + cifs_dbg(VFS, + "direct_writev couldn't get user pages " + "(rc=%zd) iter type %d iov_offset %zd " + "count %zd\n", + result, from->type, + from->iov_offset, from->count); + dump_stack(); + break; + } + cur_len = (size_t)result; + iov_iter_advance(from, cur_len); + + nr_pages = + (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE; + + wdata = cifs_writedata_direct_alloc(pagevec, cifs_uncached_writev_complete); - if (!wdata) { - rc = -ENOMEM; - add_credits_and_wake_if(server, credits, 0); - break; - } + if (!wdata) { + rc = -ENOMEM; + add_credits_and_wake_if(server, credits, 0); + break; + } - rc = cifs_write_allocate_pages(wdata->pages, nr_pages); - if (rc) { - kfree(wdata); - add_credits_and_wake_if(server, credits, 0); - break; - } - num_pages = nr_pages; - rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages); - if (rc) { - for (i = 0; i < nr_pages; i++) - put_page(wdata->pages[i]); - kfree(wdata); - add_credits_and_wake_if(server, credits, 0); - break; - } + wdata->page_offset = start; + wdata->tailsz = + nr_pages > 1 ? + cur_len - (PAGE_SIZE - start) - + (nr_pages - 2) * PAGE_SIZE : + cur_len; + } else { + nr_pages = get_numpages(wsize, len, &cur_len); + wdata = cifs_writedata_alloc(nr_pages, + cifs_uncached_writev_complete); + if (!wdata) { + rc = -ENOMEM; + add_credits_and_wake_if(server, credits, 0); + break; + } - /* - * Bring nr_pages down to the number of pages we actually used, - * and free any pages that we didn't use. 
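/*
 * Editor's sketch (not part of the patch): the direct-I/O branch above pins
 * the user pages as-is, so the data may begin part-way into the first pinned
 * page. The page count and tail-page length then follow from 'start' (offset
 * into the first page) and the byte count; a user-space check of that
 * arithmetic:
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096u

static unsigned int npages_for(size_t len, size_t start)
{
	return (len + start + PAGE_SIZE - 1) / PAGE_SIZE;
}

static size_t tailsz_for(size_t len, size_t start)
{
	unsigned int n = npages_for(len, start);

	/* bytes in the last page: the whole buffer minus the first (partial)
	 * page and the (n - 2) full pages in between */
	return n > 1 ? len - (PAGE_SIZE - start) - (size_t)(n - 2) * PAGE_SIZE
		     : len;
}

int main(void)
{
	/* 10000 bytes starting 100 bytes into the first page:
	 * 3 pages, used as 3996 + 4096 + 1908 bytes. */
	printf("npages=%u tailsz=%zu\n",
	       npages_for(10000, 100), tailsz_for(10000, 100));
	return 0;
}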
- */ - for ( ; nr_pages > num_pages; nr_pages--) - put_page(wdata->pages[nr_pages - 1]); + rc = cifs_write_allocate_pages(wdata->pages, nr_pages); + if (rc) { + kfree(wdata); + add_credits_and_wake_if(server, credits, 0); + break; + } + + num_pages = nr_pages; + rc = wdata_fill_from_iovec( + wdata, from, &cur_len, &num_pages); + if (rc) { + for (i = 0; i < nr_pages; i++) + put_page(wdata->pages[i]); + kfree(wdata); + add_credits_and_wake_if(server, credits, 0); + break; + } + + /* + * Bring nr_pages down to the number of pages we + * actually used, and free any pages that we didn't use. + */ + for ( ; nr_pages > num_pages; nr_pages--) + put_page(wdata->pages[nr_pages - 1]); + + wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE); + } wdata->sync_mode = WB_SYNC_ALL; wdata->nr_pages = nr_pages; @@ -2607,7 +2705,6 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from, wdata->pid = pid; wdata->bytes = cur_len; wdata->pagesz = PAGE_SIZE; - wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE); wdata->credits = credits; wdata->ctx = ctx; kref_get(&ctx->refcount); @@ -2682,13 +2779,18 @@ restart_loop: INIT_LIST_HEAD(&tmp_list); list_del_init(&wdata->list); - iov_iter_advance(&tmp_from, + if (ctx->direct_io) + rc = cifs_resend_wdata( + wdata, &tmp_list, ctx); + else { + iov_iter_advance(&tmp_from, wdata->offset - ctx->pos); - rc = cifs_write_from_iter(wdata->offset, + rc = cifs_write_from_iter(wdata->offset, wdata->bytes, &tmp_from, ctx->cfile, cifs_sb, &tmp_list, ctx); + } list_splice(&tmp_list, &ctx->list); @@ -2701,8 +2803,9 @@ restart_loop: kref_put(&wdata->refcount, cifs_uncached_writedata_release); } - for (i = 0; i < ctx->npages; i++) - put_page(ctx->bv[i].bv_page); + if (!ctx->direct_io) + for (i = 0; i < ctx->npages; i++) + put_page(ctx->bv[i].bv_page); cifs_stats_bytes_written(tcon, ctx->total_len); set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags); @@ -2717,7 +2820,8 @@ restart_loop: complete(&ctx->done); } -ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from) +static ssize_t __cifs_writev( + struct kiocb *iocb, struct iov_iter *from, bool direct) { struct file *file = iocb->ki_filp; ssize_t total_written = 0; @@ -2726,13 +2830,18 @@ ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from) struct cifs_sb_info *cifs_sb; struct cifs_aio_ctx *ctx; struct iov_iter saved_from = *from; + size_t len = iov_iter_count(from); int rc; /* - * BB - optimize the way when signing is disabled. We can drop this - * extra memory-to-memory copying and use iovec buffers for constructing - * write request. + * iov_iter_get_pages_alloc doesn't work with ITER_KVEC. + * In this case, fall back to non-direct write function. 
+ * this could be improved by getting pages directly in ITER_KVEC */ + if (direct && from->type & ITER_KVEC) { + cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n"); + direct = false; + } rc = generic_write_checks(iocb, from); if (rc <= 0) @@ -2756,10 +2865,16 @@ ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from) ctx->pos = iocb->ki_pos; - rc = setup_aio_ctx_iter(ctx, from, WRITE); - if (rc) { - kref_put(&ctx->refcount, cifs_aio_ctx_release); - return rc; + if (direct) { + ctx->direct_io = true; + ctx->iter = *from; + ctx->len = len; + } else { + rc = setup_aio_ctx_iter(ctx, from, WRITE); + if (rc) { + kref_put(&ctx->refcount, cifs_aio_ctx_release); + return rc; + } } /* grab a lock here due to read response handlers can access ctx */ @@ -2809,6 +2924,16 @@ ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from) return total_written; } +ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from) +{ + return __cifs_writev(iocb, from, true); +} + +ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from) +{ + return __cifs_writev(iocb, from, false); +} + static ssize_t cifs_writev(struct kiocb *iocb, struct iov_iter *from) { @@ -2979,7 +3104,6 @@ cifs_uncached_readdata_release(struct kref *refcount) kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release); for (i = 0; i < rdata->nr_pages; i++) { put_page(rdata->pages[i]); - rdata->pages[i] = NULL; } cifs_readdata_release(refcount); } @@ -3004,7 +3128,7 @@ cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter) size_t copy = min_t(size_t, remaining, PAGE_SIZE); size_t written; - if (unlikely(iter->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(iter))) { void *addr = kmap_atomic(page); written = copy_to_iter(addr, copy, iter); @@ -3106,6 +3230,67 @@ cifs_uncached_copy_into_pages(struct TCP_Server_Info *server, return uncached_fill_pages(server, rdata, iter, iter->count); } +static int cifs_resend_rdata(struct cifs_readdata *rdata, + struct list_head *rdata_list, + struct cifs_aio_ctx *ctx) +{ + int wait_retry = 0; + unsigned int rsize, credits; + int rc; + struct TCP_Server_Info *server = + tlink_tcon(rdata->cfile->tlink)->ses->server; + + /* + * Try to resend this rdata, waiting for credits up to 3 seconds. + * Note: we are attempting to resend the whole rdata not in segments + */ + do { + rc = server->ops->wait_mtu_credits(server, rdata->bytes, + &rsize, &credits); + + if (rc) + break; + + if (rsize < rdata->bytes) { + add_credits_and_wake_if(server, credits, 0); + msleep(1000); + wait_retry++; + } + } while (rsize < rdata->bytes && wait_retry < 3); + + /* + * If we can't find enough credits to send this rdata + * release the rdata and return failure, this will pass + * whatever I/O amount we have finished to VFS. 
+ */ + if (rsize < rdata->bytes) { + rc = -EBUSY; + goto out; + } + + rc = -EAGAIN; + while (rc == -EAGAIN) { + rc = 0; + if (rdata->cfile->invalidHandle) + rc = cifs_reopen_file(rdata->cfile, true); + if (!rc) + rc = server->ops->async_readv(rdata); + } + + if (!rc) { + /* Add to aio pending list */ + list_add_tail(&rdata->list, rdata_list); + return 0; + } + + add_credits_and_wake_if(server, rdata->credits, 0); +out: + kref_put(&rdata->refcount, + cifs_uncached_readdata_release); + + return rc; +} + static int cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file, struct cifs_sb_info *cifs_sb, struct list_head *rdata_list, @@ -3117,6 +3302,9 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file, int rc; pid_t pid; struct TCP_Server_Info *server; + struct page **pagevec; + size_t start; + struct iov_iter direct_iov = ctx->iter; server = tlink_tcon(open_file->tlink)->ses->server; @@ -3125,6 +3313,9 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file, else pid = current->tgid; + if (ctx->direct_io) + iov_iter_advance(&direct_iov, offset - ctx->pos); + do { rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize, &rsize, &credits); @@ -3132,20 +3323,59 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file, break; cur_len = min_t(const size_t, len, rsize); - npages = DIV_ROUND_UP(cur_len, PAGE_SIZE); - /* allocate a readdata struct */ - rdata = cifs_readdata_alloc(npages, + if (ctx->direct_io) { + ssize_t result; + + result = iov_iter_get_pages_alloc( + &direct_iov, &pagevec, + cur_len, &start); + if (result < 0) { + cifs_dbg(VFS, + "couldn't get user pages (cur_len=%zd)" + " iter type %d" + " iov_offset %zd count %zd\n", + result, direct_iov.type, + direct_iov.iov_offset, + direct_iov.count); + dump_stack(); + break; + } + cur_len = (size_t)result; + iov_iter_advance(&direct_iov, cur_len); + + rdata = cifs_readdata_direct_alloc( + pagevec, cifs_uncached_readv_complete); + if (!rdata) { + add_credits_and_wake_if(server, credits, 0); + rc = -ENOMEM; + break; + } + + npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE; + rdata->page_offset = start; + rdata->tailsz = npages > 1 ? 
+ cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE : + cur_len; + + } else { + + npages = DIV_ROUND_UP(cur_len, PAGE_SIZE); + /* allocate a readdata struct */ + rdata = cifs_readdata_alloc(npages, cifs_uncached_readv_complete); - if (!rdata) { - add_credits_and_wake_if(server, credits, 0); - rc = -ENOMEM; - break; - } + if (!rdata) { + add_credits_and_wake_if(server, credits, 0); + rc = -ENOMEM; + break; + } - rc = cifs_read_allocate_pages(rdata, npages); - if (rc) - goto error; + rc = cifs_read_allocate_pages(rdata, npages); + if (rc) + goto error; + + rdata->tailsz = PAGE_SIZE; + } rdata->cfile = cifsFileInfo_get(open_file); rdata->nr_pages = npages; @@ -3153,7 +3383,6 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file, rdata->bytes = cur_len; rdata->pid = pid; rdata->pagesz = PAGE_SIZE; - rdata->tailsz = PAGE_SIZE; rdata->read_into_pages = cifs_uncached_read_into_pages; rdata->copy_into_pages = cifs_uncached_copy_into_pages; rdata->credits = credits; @@ -3167,9 +3396,11 @@ error: if (rc) { add_credits_and_wake_if(server, rdata->credits, 0); kref_put(&rdata->refcount, - cifs_uncached_readdata_release); - if (rc == -EAGAIN) + cifs_uncached_readdata_release); + if (rc == -EAGAIN) { + iov_iter_revert(&direct_iov, cur_len); continue; + } break; } @@ -3225,45 +3456,62 @@ again: * reading. */ if (got_bytes && got_bytes < rdata->bytes) { - rc = cifs_readdata_to_iov(rdata, to); + rc = 0; + if (!ctx->direct_io) + rc = cifs_readdata_to_iov(rdata, to); if (rc) { kref_put(&rdata->refcount, - cifs_uncached_readdata_release); + cifs_uncached_readdata_release); continue; } } - rc = cifs_send_async_read( + if (ctx->direct_io) { + /* + * Re-use rdata as this is a + * direct I/O + */ + rc = cifs_resend_rdata( + rdata, + &tmp_list, ctx); + } else { + rc = cifs_send_async_read( rdata->offset + got_bytes, rdata->bytes - got_bytes, rdata->cfile, cifs_sb, &tmp_list, ctx); + kref_put(&rdata->refcount, + cifs_uncached_readdata_release); + } + list_splice(&tmp_list, &ctx->list); - kref_put(&rdata->refcount, - cifs_uncached_readdata_release); goto again; } else if (rdata->result) rc = rdata->result; - else + else if (!ctx->direct_io) rc = cifs_readdata_to_iov(rdata, to); /* if there was a short read -- discard anything left */ if (rdata->got_bytes && rdata->got_bytes < rdata->bytes) rc = -ENODATA; + + ctx->total_len += rdata->got_bytes; } list_del_init(&rdata->list); kref_put(&rdata->refcount, cifs_uncached_readdata_release); } - for (i = 0; i < ctx->npages; i++) { - if (ctx->should_dirty) - set_page_dirty(ctx->bv[i].bv_page); - put_page(ctx->bv[i].bv_page); - } + if (!ctx->direct_io) { + for (i = 0; i < ctx->npages; i++) { + if (ctx->should_dirty) + set_page_dirty(ctx->bv[i].bv_page); + put_page(ctx->bv[i].bv_page); + } - ctx->total_len = ctx->len - iov_iter_count(to); + ctx->total_len = ctx->len - iov_iter_count(to); + } cifs_stats_bytes_read(tcon, ctx->total_len); @@ -3281,18 +3529,28 @@ again: complete(&ctx->done); } -ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to) +static ssize_t __cifs_readv( + struct kiocb *iocb, struct iov_iter *to, bool direct) { - struct file *file = iocb->ki_filp; - ssize_t rc; size_t len; - ssize_t total_read = 0; - loff_t offset = iocb->ki_pos; + struct file *file = iocb->ki_filp; struct cifs_sb_info *cifs_sb; - struct cifs_tcon *tcon; struct cifsFileInfo *cfile; + struct cifs_tcon *tcon; + ssize_t rc, total_read = 0; + loff_t offset = iocb->ki_pos; struct cifs_aio_ctx *ctx; + /* + * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC, 
+ * fall back to data copy read path + * this could be improved by getting pages directly in ITER_KVEC + */ + if (direct && to->type & ITER_KVEC) { + cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n"); + direct = false; + } + len = iov_iter_count(to); if (!len) return 0; @@ -3316,17 +3574,23 @@ ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to) if (!is_sync_kiocb(iocb)) ctx->iocb = iocb; - if (to->type == ITER_IOVEC) + if (iter_is_iovec(to)) ctx->should_dirty = true; - rc = setup_aio_ctx_iter(ctx, to, READ); - if (rc) { - kref_put(&ctx->refcount, cifs_aio_ctx_release); - return rc; + if (direct) { + ctx->pos = offset; + ctx->direct_io = true; + ctx->iter = *to; + ctx->len = len; + } else { + rc = setup_aio_ctx_iter(ctx, to, READ); + if (rc) { + kref_put(&ctx->refcount, cifs_aio_ctx_release); + return rc; + } + len = ctx->len; } - len = ctx->len; - /* grab a lock here due to read response handlers can access ctx */ mutex_lock(&ctx->aio_mutex); @@ -3368,6 +3632,16 @@ ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to) return rc; } +ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to) +{ + return __cifs_readv(iocb, to, true); +} + +ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to) +{ + return __cifs_readv(iocb, to, false); +} + ssize_t cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to) { diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 1023d78673fb..a81a9df997c1 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1320,8 +1320,8 @@ cifs_drop_nlink(struct inode *inode) /* * If d_inode(dentry) is null (usually meaning the cached dentry * is a negative dentry) then we would attempt a standard SMB delete, but - * if that fails we can not attempt the fall back mechanisms on EACCESS - * but will return the EACCESS to the caller. Note that the VFS does not call + * if that fails we can not attempt the fall back mechanisms on EACCES + * but will return the EACCES to the caller. Note that the VFS does not call * unlink on negative dentries currently. */ int cifs_unlink(struct inode *dir, struct dentry *dentry) diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index fc43d5d25d1d..8a41f4eba726 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -788,7 +788,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw) struct page **pages = NULL; struct bio_vec *bv = NULL; - if (iter->type & ITER_KVEC) { + if (iov_iter_is_kvec(iter)) { memcpy(&ctx->iter, iter, sizeof(struct iov_iter)); ctx->len = count; iov_iter_advance(iter, count); @@ -859,7 +859,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw) ctx->bv = bv; ctx->len = saved_len - count; ctx->npages = npages; - iov_iter_bvec(&ctx->iter, ITER_BVEC | rw, ctx->bv, npages, ctx->len); + iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len); return 0; } diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index f85fc5aa2710..225fec1cfa67 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -747,6 +747,7 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size, int rc = 0; unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0; char *name, *value; + size_t buf_size = dst_size; size_t name_len, value_len, user_name_len; while (src_size > 0) { @@ -782,9 +783,10 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size, /* 'user.' 
plus a terminating null */ user_name_len = 5 + 1 + name_len; - rc += user_name_len; - - if (dst_size >= user_name_len) { + if (buf_size == 0) { + /* skip copy - calc size only */ + rc += user_name_len; + } else if (dst_size >= user_name_len) { dst_size -= user_name_len; memcpy(dst, "user.", 5); dst += 5; @@ -792,8 +794,7 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size, dst += name_len; *dst = 0; ++dst; - } else if (dst_size == 0) { - /* skip copy - calc size only */ + rc += user_name_len; } else { /* stop before overrun buffer */ rc = -ERANGE; @@ -1078,6 +1079,9 @@ smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock) cfile->fid.persistent_fid = fid->persistent_fid; cfile->fid.volatile_fid = fid->volatile_fid; +#ifdef CONFIG_CIFS_DEBUG2 + cfile->fid.mid = fid->mid; +#endif /* CIFS_DEBUG2 */ server->ops->set_oplock_level(cinode, oplock, fid->epoch, &fid->purge_cache); cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode); @@ -3152,13 +3156,13 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, return 0; } - iov_iter_bvec(&iter, WRITE | ITER_BVEC, bvec, npages, data_len); + iov_iter_bvec(&iter, WRITE, bvec, npages, data_len); } else if (buf_len >= data_offset + data_len) { /* read response payload is in buf */ WARN_ONCE(npages > 0, "read data can be either in buf or in pages"); iov.iov_base = buf + data_offset; iov.iov_len = data_len; - iov_iter_kvec(&iter, WRITE | ITER_KVEC, &iov, 1, data_len); + iov_iter_kvec(&iter, WRITE, &iov, 1, data_len); } else { /* read response payload cannot be in both buf and pages */ WARN_ONCE(1, "buf can not contain only a part of read data"); diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 7d7b016fe8bb..27f86537a5d1 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -1512,7 +1512,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov); cifs_small_buf_release(req); rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base; - + trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc); if (rc != 0) { if (tcon) { cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE); @@ -1559,6 +1559,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, if (tcon->ses->server->ops->validate_negotiate) rc = tcon->ses->server->ops->validate_negotiate(xid, tcon); tcon_exit: + free_rsp_buf(resp_buftype, rsp); kfree(unc_path); return rc; @@ -2308,6 +2309,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, atomic_inc(&tcon->num_remote_opens); oparms->fid->persistent_fid = rsp->PersistentFileId; oparms->fid->volatile_fid = rsp->VolatileFileId; +#ifdef CONFIG_CIFS_DEBUG2 + oparms->fid->mid = le64_to_cpu(rsp->sync_hdr.MessageId); +#endif /* CIFS_DEBUG2 */ if (buf) { memcpy(buf, &rsp->CreationTime, 32); diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index f753f424d7f1..5671d5ee7f58 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -842,6 +842,41 @@ struct fsctl_get_integrity_information_rsp { /* Integrity flags for above */ #define FSCTL_INTEGRITY_FLAG_CHECKSUM_ENFORCEMENT_OFF 0x00000001 +/* Reparse structures - see MS-FSCC 2.1.2 */ + +/* struct fsctl_reparse_info_req is empty, only response structs (see below) */ + +struct reparse_data_buffer { + __le32 ReparseTag; + __le16 ReparseDataLength; + __u16 Reserved; + __u8 DataBuffer[0]; /* Variable Length */ +} __packed; + +struct reparse_guid_data_buffer { + __le32 ReparseTag; + __le16 ReparseDataLength; + __u16 Reserved; + __u8 
ReparseGuid[16]; + __u8 DataBuffer[0]; /* Variable Length */ +} __packed; + +struct reparse_mount_point_data_buffer { + __le32 ReparseTag; + __le16 ReparseDataLength; + __u16 Reserved; + __le16 SubstituteNameOffset; + __le16 SubstituteNameLength; + __le16 PrintNameOffset; + __le16 PrintNameLength; + __u8 PathBuffer[0]; /* Variable Length */ +} __packed; + +/* See MS-FSCC 2.1.2.4 and cifspdu.h for struct reparse_symlink_data */ + +/* See MS-FSCC 2.1.2.6 and cifspdu.h for struct reparse_posix_data */ + + /* See MS-DFSC 2.2.2 */ struct fsctl_get_dfs_referral_req { __le16 MaxReferralLevel; diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c index 5e282368cc4a..e94a8d1d08a3 100644 --- a/fs/cifs/smbdirect.c +++ b/fs/cifs/smbdirect.c @@ -2054,14 +2054,22 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg) info->smbd_recv_pending++; - switch (msg->msg_iter.type) { - case READ | ITER_KVEC: + if (iov_iter_rw(&msg->msg_iter) == WRITE) { + /* It's a bug in upper layer to get there */ + cifs_dbg(VFS, "CIFS: invalid msg iter dir %u\n", + iov_iter_rw(&msg->msg_iter)); + rc = -EINVAL; + goto out; + } + + switch (iov_iter_type(&msg->msg_iter)) { + case ITER_KVEC: buf = msg->msg_iter.kvec->iov_base; to_read = msg->msg_iter.kvec->iov_len; rc = smbd_recv_buf(info, buf, to_read); break; - case READ | ITER_BVEC: + case ITER_BVEC: page = msg->msg_iter.bvec->bv_page; page_offset = msg->msg_iter.bvec->bv_offset; to_read = msg->msg_iter.bvec->bv_len; @@ -2071,10 +2079,11 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg) default: /* It's a bug in upper layer to get there */ cifs_dbg(VFS, "CIFS: invalid msg type %d\n", - msg->msg_iter.type); + iov_iter_type(&msg->msg_iter)); rc = -EINVAL; } +out: info->smbd_recv_pending--; wake_up(&info->wait_smbd_recv_pending); diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h index cce8414fe7ec..fb049809555f 100644 --- a/fs/cifs/trace.h +++ b/fs/cifs/trace.h @@ -374,6 +374,48 @@ DEFINE_SMB3_ENTER_EXIT_EVENT(enter); DEFINE_SMB3_ENTER_EXIT_EVENT(exit_done); /* + * For SMB2/SMB3 tree connect + */ + +DECLARE_EVENT_CLASS(smb3_tcon_class, + TP_PROTO(unsigned int xid, + __u32 tid, + __u64 sesid, + const char *unc_name, + int rc), + TP_ARGS(xid, tid, sesid, unc_name, rc), + TP_STRUCT__entry( + __field(unsigned int, xid) + __field(__u32, tid) + __field(__u64, sesid) + __field(const char *, unc_name) + __field(int, rc) + ), + TP_fast_assign( + __entry->xid = xid; + __entry->tid = tid; + __entry->sesid = sesid; + __entry->unc_name = unc_name; + __entry->rc = rc; + ), + TP_printk("xid=%u sid=0x%llx tid=0x%x unc_name=%s rc=%d", + __entry->xid, __entry->sesid, __entry->tid, + __entry->unc_name, __entry->rc) +) + +#define DEFINE_SMB3_TCON_EVENT(name) \ +DEFINE_EVENT(smb3_tcon_class, smb3_##name, \ + TP_PROTO(unsigned int xid, \ + __u32 tid, \ + __u64 sesid, \ + const char *unc_name, \ + int rc), \ + TP_ARGS(xid, tid, sesid, unc_name, rc)) + +DEFINE_SMB3_TCON_EVENT(tcon); + + +/* * For smb2/smb3 open call */ DECLARE_EVENT_CLASS(smb3_open_err_class, diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index f8112433f0c8..83ff0c25710d 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -316,8 +316,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, .iov_base = &rfc1002_marker, .iov_len = 4 }; - iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, &hiov, - 1, 4); + iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4); rc = smb_send_kvec(server, &smb_msg, &sent); if (rc < 0) goto uncork; @@ -338,8 +337,7 @@ __smb_send_rqst(struct 
TCP_Server_Info *server, int num_rqst, size += iov[i].iov_len; } - iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, - iov, n_vec, size); + iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size); rc = smb_send_kvec(server, &smb_msg, &sent); if (rc < 0) @@ -355,7 +353,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, rqst_page_get_length(&rqst[j], i, &bvec.bv_len, &bvec.bv_offset); - iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC, + iov_iter_bvec(&smb_msg.msg_iter, WRITE, &bvec, 1, bvec.bv_len); rc = smb_send_kvec(server, &smb_msg, &sent); if (rc < 0) diff --git a/fs/direct-io.c b/fs/direct-io.c index 093fb54cd316..722d17c88edb 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -1313,7 +1313,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, spin_lock_init(&dio->bio_lock); dio->refcount = 1; - dio->should_dirty = (iter->type == ITER_IOVEC); + dio->should_dirty = iter_is_iovec(iter) && iov_iter_rw(iter) == READ; sdio.iter = iter; sdio.final_block_in_request = end >> blkbits; diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index a5e4a221435c..76976d6e50f9 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -674,7 +674,7 @@ static int receive_from_sock(struct connection *con) nvec = 2; } len = iov[0].iov_len + iov[1].iov_len; - iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, iov, nvec, len); + iov_iter_kvec(&msg.msg_iter, READ, iov, nvec, len); r = ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT | MSG_NOSIGNAL); if (ret <= 0) diff --git a/fs/exofs/super.c b/fs/exofs/super.c index 41cf2fbee50d..906839a4da8f 100644 --- a/fs/exofs/super.c +++ b/fs/exofs/super.c @@ -101,6 +101,7 @@ static int parse_options(char *options, struct exofs_mountopt *opts) token = match_token(p, tokens, args); switch (token) { case Opt_name: + kfree(opts->dev_name); opts->dev_name = match_strdup(&args[0]); if (unlikely(!opts->dev_name)) { EXOFS_ERR("Error allocating dev_name"); @@ -117,7 +118,7 @@ static int parse_options(char *options, struct exofs_mountopt *opts) EXOFS_MIN_PID); return -EINVAL; } - s_pid = 1; + s_pid = true; break; case Opt_to: if (match_int(&args[0], &option)) @@ -866,8 +867,10 @@ static struct dentry *exofs_mount(struct file_system_type *type, int ret; ret = parse_options(data, &opts); - if (ret) + if (ret) { + kfree(opts.dev_name); return ERR_PTR(ret); + } if (!opts.dev_name) opts.dev_name = dev_name; diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 12f90d48ba61..3f89d0ab08fc 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -45,15 +45,6 @@ #include <linux/compiler.h> -/* Until this gets included into linux/compiler-gcc.h */ -#ifndef __nonstring -#if defined(GCC_VERSION) && (GCC_VERSION >= 80000) -#define __nonstring __attribute__((nonstring)) -#else -#define __nonstring -#endif -#endif - /* * The fourth extended filesystem constants/structures */ diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 2addcb8730e1..014f6a698cb7 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -1216,7 +1216,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino) bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb); bitmap_bh = ext4_read_inode_bitmap(sb, block_group); if (IS_ERR(bitmap_bh)) - return (struct inode *) bitmap_bh; + return ERR_CAST(bitmap_bh); /* Having the inode bit set should be a 100% indicator that this * is a valid orphan (no e2fsck run on fs). 
Orphans also include diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 67a38532032a..17adcb16a9c8 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1556,7 +1556,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); if (IS_ERR(bh)) - return (struct dentry *) bh; + return ERR_CAST(bh); inode = NULL; if (bh) { __u32 ino = le32_to_cpu(de->inode); @@ -1600,7 +1600,7 @@ struct dentry *ext4_get_parent(struct dentry *child) bh = ext4_find_entry(d_inode(child), &dotdot, &de, NULL); if (IS_ERR(bh)) - return (struct dentry *) bh; + return ERR_CAST(bh); if (!bh) return ERR_PTR(-ENOENT); ino = le32_to_cpu(de->inode); diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 2aa62d58d8dd..db7590178dfc 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -374,13 +374,13 @@ static int io_submit_init_bio(struct ext4_io_submit *io, bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES); if (!bio) return -ENOMEM; + wbc_init_bio(io->io_wbc, bio); bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio_set_dev(bio, bh->b_bdev); bio->bi_end_io = ext4_end_bio; bio->bi_private = ext4_get_io_end(io->io_end); io->io_bio = bio; io->io_next_block = bh->b_blocknr; - wbc_init_bio(io->io_wbc, bio); return 0; } diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile index 60da84a86dab..f7b807bc1027 100644 --- a/fs/fuse/Makefile +++ b/fs/fuse/Makefile @@ -5,4 +5,4 @@ obj-$(CONFIG_FUSE_FS) += fuse.o obj-$(CONFIG_CUSE) += cuse.o -fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o +fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o diff --git a/fs/fuse/control.c b/fs/fuse/control.c index 0b694655d988..989df5accaee 100644 --- a/fs/fuse/control.c +++ b/fs/fuse/control.c @@ -107,7 +107,7 @@ static ssize_t fuse_conn_max_background_read(struct file *file, if (!fc) return 0; - val = fc->max_background; + val = READ_ONCE(fc->max_background); fuse_conn_put(fc); return fuse_conn_limit_read(file, buf, len, ppos, val); @@ -125,7 +125,12 @@ static ssize_t fuse_conn_max_background_write(struct file *file, if (ret > 0) { struct fuse_conn *fc = fuse_ctl_file_conn_get(file); if (fc) { + spin_lock(&fc->bg_lock); fc->max_background = val; + fc->blocked = fc->num_background >= fc->max_background; + if (!fc->blocked) + wake_up(&fc->blocked_waitq); + spin_unlock(&fc->bg_lock); fuse_conn_put(fc); } } @@ -144,7 +149,7 @@ static ssize_t fuse_conn_congestion_threshold_read(struct file *file, if (!fc) return 0; - val = fc->congestion_threshold; + val = READ_ONCE(fc->congestion_threshold); fuse_conn_put(fc); return fuse_conn_limit_read(file, buf, len, ppos, val); @@ -155,18 +160,31 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file, size_t count, loff_t *ppos) { unsigned uninitialized_var(val); + struct fuse_conn *fc; ssize_t ret; ret = fuse_conn_limit_write(file, buf, count, ppos, &val, max_user_congthresh); - if (ret > 0) { - struct fuse_conn *fc = fuse_ctl_file_conn_get(file); - if (fc) { - fc->congestion_threshold = val; - fuse_conn_put(fc); + if (ret <= 0) + goto out; + fc = fuse_ctl_file_conn_get(file); + if (!fc) + goto out; + + spin_lock(&fc->bg_lock); + fc->congestion_threshold = val; + if (fc->sb) { + if (fc->num_background < fc->congestion_threshold) { + clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC); + clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC); + } else { + set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC); + set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC); } } - + spin_unlock(&fc->bg_lock); + 
fuse_conn_put(fc); +out: return ret; } diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 11ea2c4a38ab..ae813e609932 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -25,6 +25,10 @@ MODULE_ALIAS_MISCDEV(FUSE_MINOR); MODULE_ALIAS("devname:fuse"); +/* Ordinary requests have even IDs, while interrupts IDs are odd */ +#define FUSE_INT_REQ_BIT (1ULL << 0) +#define FUSE_REQ_ID_STEP (1ULL << 1) + static struct kmem_cache *fuse_req_cachep; static struct fuse_dev *fuse_get_dev(struct file *file) @@ -40,9 +44,6 @@ static void fuse_request_init(struct fuse_req *req, struct page **pages, struct fuse_page_desc *page_descs, unsigned npages) { - memset(req, 0, sizeof(*req)); - memset(pages, 0, sizeof(*pages) * npages); - memset(page_descs, 0, sizeof(*page_descs) * npages); INIT_LIST_HEAD(&req->list); INIT_LIST_HEAD(&req->intr_entry); init_waitqueue_head(&req->waitq); @@ -53,30 +54,36 @@ static void fuse_request_init(struct fuse_req *req, struct page **pages, __set_bit(FR_PENDING, &req->flags); } +static struct page **fuse_req_pages_alloc(unsigned int npages, gfp_t flags, + struct fuse_page_desc **desc) +{ + struct page **pages; + + pages = kzalloc(npages * (sizeof(struct page *) + + sizeof(struct fuse_page_desc)), flags); + *desc = (void *) pages + npages * sizeof(struct page *); + + return pages; +} + static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags) { - struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags); + struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags); if (req) { - struct page **pages; - struct fuse_page_desc *page_descs; - - if (npages <= FUSE_REQ_INLINE_PAGES) { + struct page **pages = NULL; + struct fuse_page_desc *page_descs = NULL; + + WARN_ON(npages > FUSE_MAX_MAX_PAGES); + if (npages > FUSE_REQ_INLINE_PAGES) { + pages = fuse_req_pages_alloc(npages, flags, + &page_descs); + if (!pages) { + kmem_cache_free(fuse_req_cachep, req); + return NULL; + } + } else if (npages) { pages = req->inline_pages; page_descs = req->inline_page_descs; - } else { - pages = kmalloc_array(npages, sizeof(struct page *), - flags); - page_descs = - kmalloc_array(npages, - sizeof(struct fuse_page_desc), - flags); - } - - if (!pages || !page_descs) { - kfree(pages); - kfree(page_descs); - kmem_cache_free(fuse_req_cachep, req); - return NULL; } fuse_request_init(req, pages, page_descs, npages); @@ -95,12 +102,41 @@ struct fuse_req *fuse_request_alloc_nofs(unsigned npages) return __fuse_request_alloc(npages, GFP_NOFS); } -void fuse_request_free(struct fuse_req *req) +static void fuse_req_pages_free(struct fuse_req *req) { - if (req->pages != req->inline_pages) { + if (req->pages != req->inline_pages) kfree(req->pages); - kfree(req->page_descs); - } +} + +bool fuse_req_realloc_pages(struct fuse_conn *fc, struct fuse_req *req, + gfp_t flags) +{ + struct page **pages; + struct fuse_page_desc *page_descs; + unsigned int npages = min_t(unsigned int, + max_t(unsigned int, req->max_pages * 2, + FUSE_DEFAULT_MAX_PAGES_PER_REQ), + fc->max_pages); + WARN_ON(npages <= req->max_pages); + + pages = fuse_req_pages_alloc(npages, flags, &page_descs); + if (!pages) + return false; + + memcpy(pages, req->pages, sizeof(struct page *) * req->max_pages); + memcpy(page_descs, req->page_descs, + sizeof(struct fuse_page_desc) * req->max_pages); + fuse_req_pages_free(req); + req->pages = pages; + req->page_descs = page_descs; + req->max_pages = npages; + + return true; +} + +void fuse_request_free(struct fuse_req *req) +{ + fuse_req_pages_free(req); kmem_cache_free(fuse_req_cachep, req); } @@ 
-235,8 +271,10 @@ static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req) struct file *file = req->stolen_file; struct fuse_file *ff = file->private_data; + WARN_ON(req->max_pages); spin_lock(&fc->lock); - fuse_request_init(req, req->pages, req->page_descs, req->max_pages); + memset(req, 0, sizeof(*req)); + fuse_request_init(req, NULL, NULL, 0); BUG_ON(ff->reserved_req); ff->reserved_req = req; wake_up_all(&fc->reserved_req_waitq); @@ -287,10 +325,10 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) * We get here in the unlikely case that a background * request was allocated but not sent */ - spin_lock(&fc->lock); + spin_lock(&fc->bg_lock); if (!fc->blocked) wake_up(&fc->blocked_waitq); - spin_unlock(&fc->lock); + spin_unlock(&fc->bg_lock); } if (test_bit(FR_WAITING, &req->flags)) { @@ -319,7 +357,13 @@ static unsigned len_args(unsigned numargs, struct fuse_arg *args) static u64 fuse_get_unique(struct fuse_iqueue *fiq) { - return ++fiq->reqctr; + fiq->reqctr += FUSE_REQ_ID_STEP; + return fiq->reqctr; +} + +static unsigned int fuse_req_hash(u64 unique) +{ + return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS); } static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req) @@ -353,12 +397,13 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, static void flush_bg_queue(struct fuse_conn *fc) { + struct fuse_iqueue *fiq = &fc->iq; + while (fc->active_background < fc->max_background && !list_empty(&fc->bg_queue)) { struct fuse_req *req; - struct fuse_iqueue *fiq = &fc->iq; - req = list_entry(fc->bg_queue.next, struct fuse_req, list); + req = list_first_entry(&fc->bg_queue, struct fuse_req, list); list_del(&req->list); fc->active_background++; spin_lock(&fiq->waitq.lock); @@ -389,14 +434,21 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) WARN_ON(test_bit(FR_PENDING, &req->flags)); WARN_ON(test_bit(FR_SENT, &req->flags)); if (test_bit(FR_BACKGROUND, &req->flags)) { - spin_lock(&fc->lock); + spin_lock(&fc->bg_lock); clear_bit(FR_BACKGROUND, &req->flags); - if (fc->num_background == fc->max_background) + if (fc->num_background == fc->max_background) { fc->blocked = 0; - - /* Wake up next waiter, if any */ - if (!fc->blocked && waitqueue_active(&fc->blocked_waitq)) wake_up(&fc->blocked_waitq); + } else if (!fc->blocked) { + /* + * Wake up next waiter, if any. It's okay to use + * waitqueue_active(), as we've already synced up + * fc->blocked with waiters with the wake_up() call + * above. 
+ */ + if (waitqueue_active(&fc->blocked_waitq)) + wake_up(&fc->blocked_waitq); + } if (fc->num_background == fc->congestion_threshold && fc->sb) { clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC); @@ -405,7 +457,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) fc->num_background--; fc->active_background--; flush_bg_queue(fc); - spin_unlock(&fc->lock); + spin_unlock(&fc->bg_lock); } wake_up(&req->waitq); if (req->end) @@ -573,40 +625,38 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args) return ret; } -/* - * Called under fc->lock - * - * fc->connected must have been checked previously - */ -void fuse_request_send_background_locked(struct fuse_conn *fc, - struct fuse_req *req) +bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req) { - BUG_ON(!test_bit(FR_BACKGROUND, &req->flags)); + bool queued = false; + + WARN_ON(!test_bit(FR_BACKGROUND, &req->flags)); if (!test_bit(FR_WAITING, &req->flags)) { __set_bit(FR_WAITING, &req->flags); atomic_inc(&fc->num_waiting); } __set_bit(FR_ISREPLY, &req->flags); - fc->num_background++; - if (fc->num_background == fc->max_background) - fc->blocked = 1; - if (fc->num_background == fc->congestion_threshold && fc->sb) { - set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC); - set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC); + spin_lock(&fc->bg_lock); + if (likely(fc->connected)) { + fc->num_background++; + if (fc->num_background == fc->max_background) + fc->blocked = 1; + if (fc->num_background == fc->congestion_threshold && fc->sb) { + set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC); + set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC); + } + list_add_tail(&req->list, &fc->bg_queue); + flush_bg_queue(fc); + queued = true; } - list_add_tail(&req->list, &fc->bg_queue); - flush_bg_queue(fc); + spin_unlock(&fc->bg_lock); + + return queued; } void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req) { - BUG_ON(!req->end); - spin_lock(&fc->lock); - if (fc->connected) { - fuse_request_send_background_locked(fc, req); - spin_unlock(&fc->lock); - } else { - spin_unlock(&fc->lock); + WARN_ON(!req->end); + if (!fuse_request_queue_background(fc, req)) { req->out.h.error = -ENOTCONN; req->end(fc, req); fuse_put_request(fc, req); @@ -1084,12 +1134,11 @@ __releases(fiq->waitq.lock) int err; list_del_init(&req->intr_entry); - req->intr_unique = fuse_get_unique(fiq); memset(&ih, 0, sizeof(ih)); memset(&arg, 0, sizeof(arg)); ih.len = reqsize; ih.opcode = FUSE_INTERRUPT; - ih.unique = req->intr_unique; + ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT); arg.unique = req->in.h.unique; spin_unlock(&fiq->waitq.lock); @@ -1238,6 +1287,7 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file, struct fuse_req *req; struct fuse_in *in; unsigned reqsize; + unsigned int hash; restart: spin_lock(&fiq->waitq.lock); @@ -1310,13 +1360,16 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file, err = reqsize; goto out_end; } - list_move_tail(&req->list, &fpq->processing); - spin_unlock(&fpq->lock); + hash = fuse_req_hash(req->in.h.unique); + list_move_tail(&req->list, &fpq->processing[hash]); + __fuse_get_request(req); set_bit(FR_SENT, &req->flags); + spin_unlock(&fpq->lock); /* matches barrier in request_wait_answer() */ smp_mb__after_atomic(); if (test_bit(FR_INTERRUPTED, &req->flags)) queue_interrupt(fiq, req); + fuse_put_request(fc, req); return reqsize; @@ -1663,7 +1716,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, unsigned int num; 
unsigned int offset; size_t total_len = 0; - int num_pages; + unsigned int num_pages; offset = outarg->offset & ~PAGE_MASK; file_size = i_size_read(inode); @@ -1675,7 +1728,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, num = file_size - outarg->offset; num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; - num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ); + num_pages = min(num_pages, fc->max_pages); req = fuse_get_req(fc, num_pages); if (IS_ERR(req)) @@ -1792,10 +1845,11 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, /* Look up request on processing list by unique ID */ static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique) { + unsigned int hash = fuse_req_hash(unique); struct fuse_req *req; - list_for_each_entry(req, &fpq->processing, list) { - if (req->in.h.unique == unique || req->intr_unique == unique) + list_for_each_entry(req, &fpq->processing[hash], list) { + if (req->in.h.unique == unique) return req; } return NULL; @@ -1869,22 +1923,26 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud, if (!fpq->connected) goto err_unlock_pq; - req = request_find(fpq, oh.unique); + req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT); if (!req) goto err_unlock_pq; - /* Is it an interrupt reply? */ - if (req->intr_unique == oh.unique) { + /* Is it an interrupt reply ID? */ + if (oh.unique & FUSE_INT_REQ_BIT) { + __fuse_get_request(req); spin_unlock(&fpq->lock); err = -EINVAL; - if (nbytes != sizeof(struct fuse_out_header)) + if (nbytes != sizeof(struct fuse_out_header)) { + fuse_put_request(fc, req); goto err_finish; + } if (oh.error == -ENOSYS) fc->no_interrupt = 1; else if (oh.error == -EAGAIN) queue_interrupt(&fc->iq, req); + fuse_put_request(fc, req); fuse_copy_finish(cs); return nbytes; @@ -2102,9 +2160,13 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort) struct fuse_dev *fud; struct fuse_req *req, *next; LIST_HEAD(to_end); + unsigned int i; + /* Background queuing checks fc->connected under bg_lock */ + spin_lock(&fc->bg_lock); fc->connected = 0; - fc->blocked = 0; + spin_unlock(&fc->bg_lock); + fc->aborted = is_abort; fuse_set_initialized(fc); list_for_each_entry(fud, &fc->devices, entry) { @@ -2123,11 +2185,16 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort) } spin_unlock(&req->waitq.lock); } - list_splice_tail_init(&fpq->processing, &to_end); + for (i = 0; i < FUSE_PQ_HASH_SIZE; i++) + list_splice_tail_init(&fpq->processing[i], + &to_end); spin_unlock(&fpq->lock); } + spin_lock(&fc->bg_lock); + fc->blocked = 0; fc->max_background = UINT_MAX; flush_bg_queue(fc); + spin_unlock(&fc->bg_lock); spin_lock(&fiq->waitq.lock); fiq->connected = 0; @@ -2163,10 +2230,12 @@ int fuse_dev_release(struct inode *inode, struct file *file) struct fuse_conn *fc = fud->fc; struct fuse_pqueue *fpq = &fud->pq; LIST_HEAD(to_end); + unsigned int i; spin_lock(&fpq->lock); WARN_ON(!list_empty(&fpq->io)); - list_splice_init(&fpq->processing, &to_end); + for (i = 0; i < FUSE_PQ_HASH_SIZE; i++) + list_splice_init(&fpq->processing[i], &to_end); spin_unlock(&fpq->lock); end_requests(fc, &to_end); diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 0979609d6eba..47395b0c3b35 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -14,24 +14,9 @@ #include <linux/namei.h> #include <linux/slab.h> #include <linux/xattr.h> +#include <linux/iversion.h> #include <linux/posix_acl.h> -static bool fuse_use_readdirplus(struct inode *dir, struct dir_context *ctx) -{ - struct fuse_conn *fc = get_fuse_conn(dir); - struct fuse_inode 
*fi = get_fuse_inode(dir); - - if (!fc->do_readdirplus) - return false; - if (!fc->readdirplus_auto) - return true; - if (test_and_clear_bit(FUSE_I_ADVISE_RDPLUS, &fi->state)) - return true; - if (ctx->pos == 0) - return true; - return false; -} - static void fuse_advise_use_readdirplus(struct inode *dir) { struct fuse_inode *fi = get_fuse_inode(dir); @@ -80,8 +65,7 @@ static u64 time_to_jiffies(u64 sec, u32 nsec) * Set dentry and possibly attribute timeouts from the lookup/mk* * replies */ -static void fuse_change_entry_timeout(struct dentry *entry, - struct fuse_entry_out *o) +void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o) { fuse_dentry_settime(entry, time_to_jiffies(o->entry_valid, o->entry_valid_nsec)); @@ -92,18 +76,29 @@ static u64 attr_timeout(struct fuse_attr_out *o) return time_to_jiffies(o->attr_valid, o->attr_valid_nsec); } -static u64 entry_attr_timeout(struct fuse_entry_out *o) +u64 entry_attr_timeout(struct fuse_entry_out *o) { return time_to_jiffies(o->attr_valid, o->attr_valid_nsec); } +static void fuse_invalidate_attr_mask(struct inode *inode, u32 mask) +{ + set_mask_bits(&get_fuse_inode(inode)->inval_mask, 0, mask); +} + /* * Mark the attributes as stale, so that at the next call to * ->getattr() they will be fetched from userspace */ void fuse_invalidate_attr(struct inode *inode) { - get_fuse_inode(inode)->i_time = 0; + fuse_invalidate_attr_mask(inode, STATX_BASIC_STATS); +} + +static void fuse_dir_changed(struct inode *dir) +{ + fuse_invalidate_attr(dir); + inode_maybe_inc_iversion(dir, false); } /** @@ -113,7 +108,7 @@ void fuse_invalidate_attr(struct inode *inode) void fuse_invalidate_atime(struct inode *inode) { if (!IS_RDONLY(inode)) - fuse_invalidate_attr(inode); + fuse_invalidate_attr_mask(inode, STATX_ATIME); } /* @@ -262,11 +257,6 @@ invalid: goto out; } -static int invalid_nodeid(u64 nodeid) -{ - return !nodeid || nodeid == FUSE_ROOT_ID; -} - static int fuse_dentry_init(struct dentry *dentry) { dentry->d_fsdata = kzalloc(sizeof(union fuse_dentry), GFP_KERNEL); @@ -469,7 +459,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, kfree(forget); d_instantiate(entry, inode); fuse_change_entry_timeout(entry, &outentry); - fuse_invalidate_attr(dir); + fuse_dir_changed(dir); err = finish_open(file, entry, generic_file_open); if (err) { fuse_sync_release(ff, flags); @@ -583,7 +573,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args, } else { fuse_change_entry_timeout(entry, &outarg); } - fuse_invalidate_attr(dir); + fuse_dir_changed(dir); return 0; out_put_forget_req: @@ -693,7 +683,7 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry) drop_nlink(inode); spin_unlock(&fc->lock); fuse_invalidate_attr(inode); - fuse_invalidate_attr(dir); + fuse_dir_changed(dir); fuse_invalidate_entry_cache(entry); fuse_update_ctime(inode); } else if (err == -EINTR) @@ -715,7 +705,7 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry) err = fuse_simple_request(fc, &args); if (!err) { clear_nlink(d_inode(entry)); - fuse_invalidate_attr(dir); + fuse_dir_changed(dir); fuse_invalidate_entry_cache(entry); } else if (err == -EINTR) fuse_invalidate_entry(entry); @@ -754,9 +744,9 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent, fuse_update_ctime(d_inode(newent)); } - fuse_invalidate_attr(olddir); + fuse_dir_changed(olddir); if (olddir != newdir) - fuse_invalidate_attr(newdir); + fuse_dir_changed(newdir); /* newent will end up negative */ if (!(flags & 
RENAME_EXCHANGE) && d_really_is_positive(newent)) { @@ -932,7 +922,8 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat, } static int fuse_update_get_attr(struct inode *inode, struct file *file, - struct kstat *stat, unsigned int flags) + struct kstat *stat, u32 request_mask, + unsigned int flags) { struct fuse_inode *fi = get_fuse_inode(inode); int err = 0; @@ -942,6 +933,8 @@ static int fuse_update_get_attr(struct inode *inode, struct file *file, sync = true; else if (flags & AT_STATX_DONT_SYNC) sync = false; + else if (request_mask & READ_ONCE(fi->inval_mask)) + sync = true; else sync = time_before64(fi->i_time, get_jiffies_64()); @@ -959,7 +952,9 @@ static int fuse_update_get_attr(struct inode *inode, struct file *file, int fuse_update_attributes(struct inode *inode, struct file *file) { - return fuse_update_get_attr(inode, file, NULL, 0); + /* Do *not* need to get atime for internal purposes */ + return fuse_update_get_attr(inode, file, NULL, + STATX_BASIC_STATS & ~STATX_ATIME, 0); } int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid, @@ -989,7 +984,7 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid, if (!entry) goto unlock; - fuse_invalidate_attr(parent); + fuse_dir_changed(parent); fuse_invalidate_entry(entry); if (child_nodeid != 0 && d_really_is_positive(entry)) { @@ -1165,271 +1160,78 @@ static int fuse_permission(struct inode *inode, int mask) return err; } -static int parse_dirfile(char *buf, size_t nbytes, struct file *file, - struct dir_context *ctx) -{ - while (nbytes >= FUSE_NAME_OFFSET) { - struct fuse_dirent *dirent = (struct fuse_dirent *) buf; - size_t reclen = FUSE_DIRENT_SIZE(dirent); - if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX) - return -EIO; - if (reclen > nbytes) - break; - if (memchr(dirent->name, '/', dirent->namelen) != NULL) - return -EIO; - - if (!dir_emit(ctx, dirent->name, dirent->namelen, - dirent->ino, dirent->type)) - break; - - buf += reclen; - nbytes -= reclen; - ctx->pos = dirent->off; - } - - return 0; -} - -static int fuse_direntplus_link(struct file *file, - struct fuse_direntplus *direntplus, - u64 attr_version) -{ - struct fuse_entry_out *o = &direntplus->entry_out; - struct fuse_dirent *dirent = &direntplus->dirent; - struct dentry *parent = file->f_path.dentry; - struct qstr name = QSTR_INIT(dirent->name, dirent->namelen); - struct dentry *dentry; - struct dentry *alias; - struct inode *dir = d_inode(parent); - struct fuse_conn *fc; - struct inode *inode; - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); - - if (!o->nodeid) { - /* - * Unlike in the case of fuse_lookup, zero nodeid does not mean - * ENOENT. Instead, it only means the userspace filesystem did - * not want to return attributes/handle for this entry. - * - * So do nothing. - */ - return 0; - } - - if (name.name[0] == '.') { - /* - * We could potentially refresh the attributes of the directory - * and its parent? - */ - if (name.len == 1) - return 0; - if (name.name[1] == '.' 
&& name.len == 2) - return 0; - } - - if (invalid_nodeid(o->nodeid)) - return -EIO; - if (!fuse_valid_type(o->attr.mode)) - return -EIO; - - fc = get_fuse_conn(dir); - - name.hash = full_name_hash(parent, name.name, name.len); - dentry = d_lookup(parent, &name); - if (!dentry) { -retry: - dentry = d_alloc_parallel(parent, &name, &wq); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - } - if (!d_in_lookup(dentry)) { - struct fuse_inode *fi; - inode = d_inode(dentry); - if (!inode || - get_node_id(inode) != o->nodeid || - ((o->attr.mode ^ inode->i_mode) & S_IFMT)) { - d_invalidate(dentry); - dput(dentry); - goto retry; - } - if (is_bad_inode(inode)) { - dput(dentry); - return -EIO; - } - - fi = get_fuse_inode(inode); - spin_lock(&fc->lock); - fi->nlookup++; - spin_unlock(&fc->lock); - - forget_all_cached_acls(inode); - fuse_change_attributes(inode, &o->attr, - entry_attr_timeout(o), - attr_version); - /* - * The other branch comes via fuse_iget() - * which bumps nlookup inside - */ - } else { - inode = fuse_iget(dir->i_sb, o->nodeid, o->generation, - &o->attr, entry_attr_timeout(o), - attr_version); - if (!inode) - inode = ERR_PTR(-ENOMEM); - - alias = d_splice_alias(inode, dentry); - d_lookup_done(dentry); - if (alias) { - dput(dentry); - dentry = alias; - } - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - } - if (fc->readdirplus_auto) - set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state); - fuse_change_entry_timeout(dentry, o); - - dput(dentry); - return 0; -} - -static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file, - struct dir_context *ctx, u64 attr_version) +static int fuse_readlink_page(struct inode *inode, struct page *page) { - struct fuse_direntplus *direntplus; - struct fuse_dirent *dirent; - size_t reclen; - int over = 0; - int ret; - - while (nbytes >= FUSE_NAME_OFFSET_DIRENTPLUS) { - direntplus = (struct fuse_direntplus *) buf; - dirent = &direntplus->dirent; - reclen = FUSE_DIRENTPLUS_SIZE(direntplus); - - if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX) - return -EIO; - if (reclen > nbytes) - break; - if (memchr(dirent->name, '/', dirent->namelen) != NULL) - return -EIO; - - if (!over) { - /* We fill entries into dstbuf only as much as - it can hold. But we still continue iterating - over remaining entries to link them. If not, - we need to send a FORGET for each of those - which we did not link. 
- */ - over = !dir_emit(ctx, dirent->name, dirent->namelen, - dirent->ino, dirent->type); - if (!over) - ctx->pos = dirent->off; - } - - buf += reclen; - nbytes -= reclen; - - ret = fuse_direntplus_link(file, direntplus, attr_version); - if (ret) - fuse_force_forget(file, direntplus->entry_out.nodeid); - } - - return 0; -} - -static int fuse_readdir(struct file *file, struct dir_context *ctx) -{ - int plus, err; - size_t nbytes; - struct page *page; - struct inode *inode = file_inode(file); struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_req *req; - u64 attr_version = 0; - bool locked; - - if (is_bad_inode(inode)) - return -EIO; + int err; req = fuse_get_req(fc, 1); if (IS_ERR(req)) return PTR_ERR(req); - page = alloc_page(GFP_KERNEL); - if (!page) { - fuse_put_request(fc, req); - return -ENOMEM; - } - - plus = fuse_use_readdirplus(inode, ctx); + req->out.page_zeroing = 1; req->out.argpages = 1; req->num_pages = 1; req->pages[0] = page; - req->page_descs[0].length = PAGE_SIZE; - if (plus) { - attr_version = fuse_get_attr_version(fc); - fuse_read_fill(req, file, ctx->pos, PAGE_SIZE, - FUSE_READDIRPLUS); - } else { - fuse_read_fill(req, file, ctx->pos, PAGE_SIZE, - FUSE_READDIR); - } - locked = fuse_lock_inode(inode); + req->page_descs[0].length = PAGE_SIZE - 1; + req->in.h.opcode = FUSE_READLINK; + req->in.h.nodeid = get_node_id(inode); + req->out.argvar = 1; + req->out.numargs = 1; + req->out.args[0].size = PAGE_SIZE - 1; fuse_request_send(fc, req); - fuse_unlock_inode(inode, locked); - nbytes = req->out.args[0].size; err = req->out.h.error; - fuse_put_request(fc, req); + if (!err) { - if (plus) { - err = parse_dirplusfile(page_address(page), nbytes, - file, ctx, - attr_version); - } else { - err = parse_dirfile(page_address(page), nbytes, file, - ctx); - } + char *link = page_address(page); + size_t len = req->out.args[0].size; + + BUG_ON(len >= PAGE_SIZE); + link[len] = '\0'; } - __free_page(page); + fuse_put_request(fc, req); fuse_invalidate_atime(inode); + return err; } -static const char *fuse_get_link(struct dentry *dentry, - struct inode *inode, - struct delayed_call *done) +static const char *fuse_get_link(struct dentry *dentry, struct inode *inode, + struct delayed_call *callback) { struct fuse_conn *fc = get_fuse_conn(inode); - FUSE_ARGS(args); - char *link; - ssize_t ret; + struct page *page; + int err; + + err = -EIO; + if (is_bad_inode(inode)) + goto out_err; + if (fc->cache_symlinks) + return page_get_link(dentry, inode, callback); + + err = -ECHILD; if (!dentry) - return ERR_PTR(-ECHILD); + goto out_err; - link = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (!link) - return ERR_PTR(-ENOMEM); + page = alloc_page(GFP_KERNEL); + err = -ENOMEM; + if (!page) + goto out_err; - args.in.h.opcode = FUSE_READLINK; - args.in.h.nodeid = get_node_id(inode); - args.out.argvar = 1; - args.out.numargs = 1; - args.out.args[0].size = PAGE_SIZE - 1; - args.out.args[0].value = link; - ret = fuse_simple_request(fc, &args); - if (ret < 0) { - kfree(link); - link = ERR_PTR(ret); - } else { - link[ret] = '\0'; - set_delayed_call(done, kfree_link, link); + err = fuse_readlink_page(inode, page); + if (err) { + __free_page(page); + goto out_err; } - fuse_invalidate_atime(inode); - return link; + + set_delayed_call(callback, page_put_link, page); + + return page_address(page); + +out_err: + return ERR_PTR(err); } static int fuse_dir_open(struct inode *inode, struct file *file) @@ -1662,8 +1464,11 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr, file = NULL; } - if (attr->ia_valid & 
ATTR_SIZE) + if (attr->ia_valid & ATTR_SIZE) { + if (WARN_ON(!S_ISREG(inode->i_mode))) + return -EIO; is_truncate = true; + } if (is_truncate) { fuse_set_nowrite(inode); @@ -1811,7 +1616,7 @@ static int fuse_getattr(const struct path *path, struct kstat *stat, if (!fuse_allow_current_process(fc)) return -EACCES; - return fuse_update_get_attr(inode, NULL, stat, flags); + return fuse_update_get_attr(inode, NULL, stat, request_mask, flags); } static const struct inode_operations fuse_dir_inode_operations = { @@ -1867,11 +1672,37 @@ void fuse_init_common(struct inode *inode) void fuse_init_dir(struct inode *inode) { + struct fuse_inode *fi = get_fuse_inode(inode); + inode->i_op = &fuse_dir_inode_operations; inode->i_fop = &fuse_dir_operations; + + spin_lock_init(&fi->rdc.lock); + fi->rdc.cached = false; + fi->rdc.size = 0; + fi->rdc.pos = 0; + fi->rdc.version = 0; } +static int fuse_symlink_readpage(struct file *null, struct page *page) +{ + int err = fuse_readlink_page(page->mapping->host, page); + + if (!err) + SetPageUptodate(page); + + unlock_page(page); + + return err; +} + +static const struct address_space_operations fuse_symlink_aops = { + .readpage = fuse_symlink_readpage, +}; + void fuse_init_symlink(struct inode *inode) { inode->i_op = &fuse_symlink_inode_operations; + inode->i_data.a_ops = &fuse_symlink_aops; + inode_nohighmem(inode); } diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 32d0b883e74f..cc2121b37bf5 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -59,6 +59,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc) } INIT_LIST_HEAD(&ff->write_entry); + mutex_init(&ff->readdir.lock); refcount_set(&ff->count, 1); RB_CLEAR_NODE(&ff->polled_node); init_waitqueue_head(&ff->poll_wait); @@ -73,6 +74,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc) void fuse_file_free(struct fuse_file *ff) { fuse_request_free(ff->reserved_req); + mutex_destroy(&ff->readdir.lock); kfree(ff); } @@ -848,11 +850,11 @@ static int fuse_readpages_fill(void *_data, struct page *page) fuse_wait_on_page_writeback(inode, page->index); if (req->num_pages && - (req->num_pages == FUSE_MAX_PAGES_PER_REQ || + (req->num_pages == fc->max_pages || (req->num_pages + 1) * PAGE_SIZE > fc->max_read || req->pages[req->num_pages - 1]->index + 1 != page->index)) { - int nr_alloc = min_t(unsigned, data->nr_pages, - FUSE_MAX_PAGES_PER_REQ); + unsigned int nr_alloc = min_t(unsigned int, data->nr_pages, + fc->max_pages); fuse_send_readpages(req, data->file); if (fc->async_read) req = fuse_get_req_for_background(fc, nr_alloc); @@ -887,7 +889,7 @@ static int fuse_readpages(struct file *file, struct address_space *mapping, struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_fill_data data; int err; - int nr_alloc = min_t(unsigned, nr_pages, FUSE_MAX_PAGES_PER_REQ); + unsigned int nr_alloc = min_t(unsigned int, nr_pages, fc->max_pages); err = -EIO; if (is_bad_inode(inode)) @@ -1102,12 +1104,13 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, return count > 0 ? 
count : err; } -static inline unsigned fuse_wr_pages(loff_t pos, size_t len) +static inline unsigned int fuse_wr_pages(loff_t pos, size_t len, + unsigned int max_pages) { - return min_t(unsigned, + return min_t(unsigned int, ((pos + len - 1) >> PAGE_SHIFT) - (pos >> PAGE_SHIFT) + 1, - FUSE_MAX_PAGES_PER_REQ); + max_pages); } static ssize_t fuse_perform_write(struct kiocb *iocb, @@ -1129,7 +1132,8 @@ static ssize_t fuse_perform_write(struct kiocb *iocb, do { struct fuse_req *req; ssize_t count; - unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii)); + unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii), + fc->max_pages); req = fuse_get_req(fc, nr_pages); if (IS_ERR(req)) { @@ -1271,7 +1275,7 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii, ssize_t ret = 0; /* Special case for kernel I/O: can copy directly into the buffer */ - if (ii->type & ITER_KVEC) { + if (iov_iter_is_kvec(ii)) { unsigned long user_addr = fuse_get_user_addr(ii); size_t frag_size = fuse_get_frag_size(ii, *nbytesp); @@ -1319,11 +1323,6 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii, return ret < 0 ? ret : 0; } -static inline int fuse_iter_npages(const struct iov_iter *ii_p) -{ - return iov_iter_npages(ii_p, FUSE_MAX_PAGES_PER_REQ); -} - ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, loff_t *ppos, int flags) { @@ -1343,9 +1342,10 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, int err = 0; if (io->async) - req = fuse_get_req_for_background(fc, fuse_iter_npages(iter)); + req = fuse_get_req_for_background(fc, iov_iter_npages(iter, + fc->max_pages)); else - req = fuse_get_req(fc, fuse_iter_npages(iter)); + req = fuse_get_req(fc, iov_iter_npages(iter, fc->max_pages)); if (IS_ERR(req)) return PTR_ERR(req); @@ -1390,9 +1390,10 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, fuse_put_request(fc, req); if (io->async) req = fuse_get_req_for_background(fc, - fuse_iter_npages(iter)); + iov_iter_npages(iter, fc->max_pages)); else - req = fuse_get_req(fc, fuse_iter_npages(iter)); + req = fuse_get_req(fc, iov_iter_npages(iter, + fc->max_pages)); if (IS_ERR(req)) break; } @@ -1418,7 +1419,7 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io, res = fuse_direct_io(io, iter, ppos, 0); - fuse_invalidate_attr(inode); + fuse_invalidate_atime(inode); return res; } @@ -1487,6 +1488,7 @@ __acquires(fc->lock) struct fuse_inode *fi = get_fuse_inode(req->inode); struct fuse_write_in *inarg = &req->misc.write.in; __u64 data_size = req->num_pages * PAGE_SIZE; + bool queued; if (!fc->connected) goto out_free; @@ -1502,7 +1504,8 @@ __acquires(fc->lock) req->in.args[1].size = inarg->size; fi->writectr++; - fuse_request_send_background_locked(fc, req); + queued = fuse_request_queue_background(fc, req); + WARN_ON(!queued); return; out_free: @@ -1819,12 +1822,18 @@ static int fuse_writepages_fill(struct page *page, is_writeback = fuse_page_is_writeback(inode, page->index); if (req && req->num_pages && - (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ || + (is_writeback || req->num_pages == fc->max_pages || (req->num_pages + 1) * PAGE_SIZE > fc->max_write || data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) { fuse_writepages_send(data); data->req = NULL; + } else if (req && req->num_pages == req->max_pages) { + if (!fuse_req_realloc_pages(fc, req, GFP_NOFS)) { + fuse_writepages_send(data); + req = data->req = NULL; + } } + err = -ENOMEM; tmp_page = alloc_page(GFP_NOFS | 
__GFP_HIGHMEM); if (!tmp_page) @@ -1847,7 +1856,7 @@ static int fuse_writepages_fill(struct page *page, struct fuse_inode *fi = get_fuse_inode(inode); err = -ENOMEM; - req = fuse_request_alloc_nofs(FUSE_MAX_PAGES_PER_REQ); + req = fuse_request_alloc_nofs(FUSE_REQ_INLINE_PAGES); if (!req) { __free_page(tmp_page); goto out_unlock; @@ -1904,6 +1913,7 @@ static int fuse_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct inode *inode = mapping->host; + struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_fill_wb_data data; int err; @@ -1916,7 +1926,7 @@ static int fuse_writepages(struct address_space *mapping, data.ff = NULL; err = -ENOMEM; - data.orig_pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, + data.orig_pages = kcalloc(fc->max_pages, sizeof(struct page *), GFP_NOFS); if (!data.orig_pages) @@ -2387,10 +2397,11 @@ static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src, } /* Make sure iov_length() won't overflow */ -static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +static int fuse_verify_ioctl_iov(struct fuse_conn *fc, struct iovec *iov, + size_t count) { size_t n; - u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + u32 max = fc->max_pages << PAGE_SHIFT; for (n = 0; n < count; n++, iov++) { if (iov->iov_len > (size_t) max) @@ -2514,7 +2525,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE); err = -ENOMEM; - pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL); + pages = kcalloc(fc->max_pages, sizeof(pages[0]), GFP_KERNEL); iov_page = (struct iovec *) __get_free_page(GFP_KERNEL); if (!pages || !iov_page) goto out; @@ -2553,7 +2564,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, /* make sure there are enough buffer pages and init request with them */ err = -ENOMEM; - if (max_pages > FUSE_MAX_PAGES_PER_REQ) + if (max_pages > fc->max_pages) goto out; while (num_pages < max_pages) { pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); @@ -2640,11 +2651,11 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = iov_page; out_iov = in_iov + in_iovs; - err = fuse_verify_ioctl_iov(in_iov, in_iovs); + err = fuse_verify_ioctl_iov(fc, in_iov, in_iovs); if (err) goto out; - err = fuse_verify_ioctl_iov(out_iov, out_iovs); + err = fuse_verify_ioctl_iov(fc, out_iov, out_iovs); if (err) goto out; @@ -2835,9 +2846,9 @@ static void fuse_do_truncate(struct file *file) fuse_do_setattr(file_dentry(file), &attr, file); } -static inline loff_t fuse_round_up(loff_t off) +static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off) { - return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT); + return round_up(off, fc->max_pages << PAGE_SHIFT); } static ssize_t @@ -2866,7 +2877,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) { if (offset >= i_size) return 0; - iov_iter_truncate(iter, fuse_round_up(i_size - offset)); + iov_iter_truncate(iter, fuse_round_up(ff->fc, i_size - offset)); count = iov_iter_count(iter); } @@ -3011,6 +3022,82 @@ out: return err; } +static ssize_t fuse_copy_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + size_t len, unsigned int flags) +{ + struct fuse_file *ff_in = file_in->private_data; + struct fuse_file *ff_out = file_out->private_data; + struct inode *inode_out = file_inode(file_out); + struct fuse_inode 
*fi_out = get_fuse_inode(inode_out); + struct fuse_conn *fc = ff_in->fc; + FUSE_ARGS(args); + struct fuse_copy_file_range_in inarg = { + .fh_in = ff_in->fh, + .off_in = pos_in, + .nodeid_out = ff_out->nodeid, + .fh_out = ff_out->fh, + .off_out = pos_out, + .len = len, + .flags = flags + }; + struct fuse_write_out outarg; + ssize_t err; + /* mark unstable when write-back is not used, and file_out gets + * extended */ + bool is_unstable = (!fc->writeback_cache) && + ((pos_out + len) > inode_out->i_size); + + if (fc->no_copy_file_range) + return -EOPNOTSUPP; + + inode_lock(inode_out); + + if (fc->writeback_cache) { + err = filemap_write_and_wait_range(inode_out->i_mapping, + pos_out, pos_out + len); + if (err) + goto out; + + fuse_sync_writes(inode_out); + } + + if (is_unstable) + set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state); + + args.in.h.opcode = FUSE_COPY_FILE_RANGE; + args.in.h.nodeid = ff_in->nodeid; + args.in.numargs = 1; + args.in.args[0].size = sizeof(inarg); + args.in.args[0].value = &inarg; + args.out.numargs = 1; + args.out.args[0].size = sizeof(outarg); + args.out.args[0].value = &outarg; + err = fuse_simple_request(fc, &args); + if (err == -ENOSYS) { + fc->no_copy_file_range = 1; + err = -EOPNOTSUPP; + } + if (err) + goto out; + + if (fc->writeback_cache) { + fuse_write_update_size(inode_out, pos_out + outarg.size); + file_update_time(file_out); + } + + fuse_invalidate_attr(inode_out); + + err = outarg.size; +out: + if (is_unstable) + clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state); + + inode_unlock(inode_out); + + return err; +} + static const struct file_operations fuse_file_operations = { .llseek = fuse_file_llseek, .read_iter = fuse_file_read_iter, @@ -3027,6 +3114,7 @@ static const struct file_operations fuse_file_operations = { .compat_ioctl = fuse_file_compat_ioctl, .poll = fuse_file_poll, .fallocate = fuse_file_fallocate, + .copy_file_range = fuse_copy_file_range, }; static const struct file_operations fuse_direct_io_file_operations = { @@ -3062,6 +3150,14 @@ static const struct address_space_operations fuse_file_aops = { void fuse_init_file_inode(struct inode *inode) { + struct fuse_inode *fi = get_fuse_inode(inode); + inode->i_fop = &fuse_file_operations; inode->i_data.a_ops = &fuse_file_aops; + + INIT_LIST_HEAD(&fi->write_files); + INIT_LIST_HEAD(&fi->queued_writes); + fi->writectr = 0; + init_waitqueue_head(&fi->page_waitq); + INIT_LIST_HEAD(&fi->writepages); } diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index f78e9614bb5f..e9f712e81c7d 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -28,8 +28,11 @@ #include <linux/refcount.h> #include <linux/user_namespace.h> -/** Max number of pages that can be used in a single read request */ -#define FUSE_MAX_PAGES_PER_REQ 32 +/** Default max number of pages that can be used in a single read request */ +#define FUSE_DEFAULT_MAX_PAGES_PER_REQ 32 + +/** Maximum of max_pages received in init_out */ +#define FUSE_MAX_MAX_PAGES 256 /** Bias for fi->writectr, meaning new writepages must not be sent */ #define FUSE_NOWRITE INT_MIN @@ -77,6 +80,9 @@ struct fuse_inode { /** Time in jiffies until the file attributes are valid */ u64 i_time; + /* Which attributes are invalid */ + u32 inval_mask; + /** The sticky bit in inode->i_mode may have been removed, so preserve the original mode */ umode_t orig_i_mode; @@ -87,21 +93,51 @@ struct fuse_inode { /** Version of last attribute change */ u64 attr_version; - /** Files usable in writepage. 
Protected by fc->lock */ - struct list_head write_files; + union { + /* Write related fields (regular file only) */ + struct { + /* Files usable in writepage. Protected by fc->lock */ + struct list_head write_files; + + /* Writepages pending on truncate or fsync */ + struct list_head queued_writes; - /** Writepages pending on truncate or fsync */ - struct list_head queued_writes; + /* Number of sent writes, a negative bias + * (FUSE_NOWRITE) means more writes are blocked */ + int writectr; + + /* Waitq for writepage completion */ + wait_queue_head_t page_waitq; + + /* List of writepage requestst (pending or sent) */ + struct list_head writepages; + }; + + /* readdir cache (directory only) */ + struct { + /* true if fully cached */ + bool cached; - /** Number of sent writes, a negative bias (FUSE_NOWRITE) - * means more writes are blocked */ - int writectr; + /* size of cache */ + loff_t size; - /** Waitq for writepage completion */ - wait_queue_head_t page_waitq; + /* position at end of cache (position of next entry) */ + loff_t pos; - /** List of writepage requestst (pending or sent) */ - struct list_head writepages; + /* version of the cache */ + u64 version; + + /* modification time of directory when cache was + * started */ + struct timespec64 mtime; + + /* iversion of directory when cache was started */ + u64 iversion; + + /* protects above fields */ + spinlock_t lock; + } rdc; + }; /** Miscellaneous bits describing inode state */ unsigned long state; @@ -148,6 +184,25 @@ struct fuse_file { /** Entry on inode's write_files list */ struct list_head write_entry; + /* Readdir related */ + struct { + /* + * Protects below fields against (crazy) parallel readdir on + * same open file. Uncontended in the normal case. + */ + struct mutex lock; + + /* Dir stream position */ + loff_t pos; + + /* Offset in cache */ + loff_t cache_off; + + /* Version of cache we are reading */ + u64 version; + + } readdir; + /** RB node to be linked on fuse_conn->polled_files */ struct rb_node polled_node; @@ -311,9 +366,6 @@ struct fuse_req { /** refcount */ refcount_t count; - /** Unique ID for the interrupt request */ - u64 intr_unique; - /* Request flags, updated with test/set/clear_bit() */ unsigned long flags; @@ -411,6 +463,9 @@ struct fuse_iqueue { struct fasync_struct *fasync; }; +#define FUSE_PQ_HASH_BITS 8 +#define FUSE_PQ_HASH_SIZE (1 << FUSE_PQ_HASH_BITS) + struct fuse_pqueue { /** Connection established */ unsigned connected; @@ -418,8 +473,8 @@ struct fuse_pqueue { /** Lock protecting accessess to members of this structure */ spinlock_t lock; - /** The list of requests being processed */ - struct list_head processing; + /** Hash table of requests being processed */ + struct list_head *processing; /** The list of requests under I/O */ struct list_head io; @@ -476,6 +531,9 @@ struct fuse_conn { /** Maximum write size */ unsigned max_write; + /** Maxmum number of pages that can be used in a single request */ + unsigned int max_pages; + /** Input queue */ struct fuse_iqueue iq; @@ -500,6 +558,10 @@ struct fuse_conn { /** The list of background requests set aside for later queuing */ struct list_head bg_queue; + /** Protects: max_background, congestion_threshold, num_background, + * active_background, bg_queue, blocked */ + spinlock_t bg_lock; + /** Flag indicating that INIT reply has been received. 
Allocating * any fuse request will be suspended until the flag is set */ int initialized; @@ -551,6 +613,9 @@ struct fuse_conn { /** handle fs handles killing suid/sgid/cap on write/chown/trunc */ unsigned handle_killpriv:1; + /** cache READLINK responses in page cache */ + unsigned cache_symlinks:1; + /* * The following bitfields are only for optimization purposes * and hence races in setting them will not cause malfunction @@ -637,6 +702,9 @@ struct fuse_conn { /** Allow other than the mounter user to access the filesystem ? */ unsigned allow_other:1; + /** Does the filesystem support copy_file_range? */ + unsigned no_copy_file_range:1; + /** The number of requests waiting for completion */ atomic_t num_waiting; @@ -697,6 +765,11 @@ static inline u64 get_node_id(struct inode *inode) return get_fuse_inode(inode)->nodeid; } +static inline int invalid_nodeid(u64 nodeid) +{ + return !nodeid || nodeid == FUSE_ROOT_ID; +} + /** Device operations */ extern const struct file_operations fuse_dev_operations; @@ -812,6 +885,10 @@ struct fuse_req *fuse_request_alloc(unsigned npages); struct fuse_req *fuse_request_alloc_nofs(unsigned npages); +bool fuse_req_realloc_pages(struct fuse_conn *fc, struct fuse_req *req, + gfp_t flags); + + /** * Free a request */ @@ -856,9 +933,7 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args); * Send a request in the background */ void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req); - -void fuse_request_send_background_locked(struct fuse_conn *fc, - struct fuse_req *req); +bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req); /* Abort all requests */ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort); @@ -873,6 +948,9 @@ void fuse_invalidate_entry_cache(struct dentry *entry); void fuse_invalidate_atime(struct inode *inode); +u64 entry_attr_timeout(struct fuse_entry_out *o); +void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o); + /** * Acquire reference to fuse_conn */ @@ -992,4 +1070,8 @@ struct posix_acl; struct posix_acl *fuse_get_acl(struct inode *inode, int type); int fuse_set_acl(struct inode *inode, struct posix_acl *acl, int type); + +/* readdir.c */ +int fuse_readdir(struct file *file, struct dir_context *ctx); + #endif /* _FS_FUSE_I_H */ diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index db9e60b7eb69..0b94b23b02d4 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -90,16 +90,12 @@ static struct inode *fuse_alloc_inode(struct super_block *sb) fi = get_fuse_inode(inode); fi->i_time = 0; + fi->inval_mask = 0; fi->nodeid = 0; fi->nlookup = 0; fi->attr_version = 0; - fi->writectr = 0; fi->orig_ino = 0; fi->state = 0; - INIT_LIST_HEAD(&fi->write_files); - INIT_LIST_HEAD(&fi->queued_writes); - INIT_LIST_HEAD(&fi->writepages); - init_waitqueue_head(&fi->page_waitq); mutex_init(&fi->mutex); fi->forget = fuse_alloc_forget(); if (!fi->forget) { @@ -119,8 +115,10 @@ static void fuse_i_callback(struct rcu_head *head) static void fuse_destroy_inode(struct inode *inode) { struct fuse_inode *fi = get_fuse_inode(inode); - BUG_ON(!list_empty(&fi->write_files)); - BUG_ON(!list_empty(&fi->queued_writes)); + if (S_ISREG(inode->i_mode)) { + WARN_ON(!list_empty(&fi->write_files)); + WARN_ON(!list_empty(&fi->queued_writes)); + } mutex_destroy(&fi->mutex); kfree(fi->forget); call_rcu(&inode->i_rcu, fuse_i_callback); @@ -167,6 +165,7 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, fi->attr_version = ++fc->attr_version; fi->i_time = 
attr_valid; + WRITE_ONCE(fi->inval_mask, 0); inode->i_ino = fuse_squash_ino(attr->ino); inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); @@ -594,9 +593,11 @@ static void fuse_iqueue_init(struct fuse_iqueue *fiq) static void fuse_pqueue_init(struct fuse_pqueue *fpq) { - memset(fpq, 0, sizeof(struct fuse_pqueue)); + unsigned int i; + spin_lock_init(&fpq->lock); - INIT_LIST_HEAD(&fpq->processing); + for (i = 0; i < FUSE_PQ_HASH_SIZE; i++) + INIT_LIST_HEAD(&fpq->processing[i]); INIT_LIST_HEAD(&fpq->io); fpq->connected = 1; } @@ -605,6 +606,7 @@ void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns) { memset(fc, 0, sizeof(*fc)); spin_lock_init(&fc->lock); + spin_lock_init(&fc->bg_lock); init_rwsem(&fc->killsb); refcount_set(&fc->count, 1); atomic_set(&fc->dev_count, 1); @@ -852,6 +854,7 @@ static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg) sanitize_global_limit(&max_user_bgreq); sanitize_global_limit(&max_user_congthresh); + spin_lock(&fc->bg_lock); if (arg->max_background) { fc->max_background = arg->max_background; @@ -865,6 +868,7 @@ static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg) fc->congestion_threshold > max_user_congthresh) fc->congestion_threshold = max_user_congthresh; } + spin_unlock(&fc->bg_lock); } static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) @@ -924,8 +928,15 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) fc->posix_acl = 1; fc->sb->s_xattr = fuse_acl_xattr_handlers; } + if (arg->flags & FUSE_CACHE_SYMLINKS) + fc->cache_symlinks = 1; if (arg->flags & FUSE_ABORT_ERROR) fc->abort_err = 1; + if (arg->flags & FUSE_MAX_PAGES) { + fc->max_pages = + min_t(unsigned int, FUSE_MAX_MAX_PAGES, + max_t(unsigned int, arg->max_pages, 1)); + } } else { ra_pages = fc->max_read / PAGE_SIZE; fc->no_lock = 1; @@ -957,7 +968,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req) FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO | FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT | FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL | - FUSE_ABORT_ERROR; + FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS; req->in.h.opcode = FUSE_INIT; req->in.numargs = 1; req->in.args[0].size = sizeof(*arg); @@ -1022,17 +1033,26 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb) struct fuse_dev *fuse_dev_alloc(struct fuse_conn *fc) { struct fuse_dev *fud; + struct list_head *pq; fud = kzalloc(sizeof(struct fuse_dev), GFP_KERNEL); - if (fud) { - fud->fc = fuse_conn_get(fc); - fuse_pqueue_init(&fud->pq); + if (!fud) + return NULL; - spin_lock(&fc->lock); - list_add_tail(&fud->entry, &fc->devices); - spin_unlock(&fc->lock); + pq = kcalloc(FUSE_PQ_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL); + if (!pq) { + kfree(fud); + return NULL; } + fud->pq.processing = pq; + fud->fc = fuse_conn_get(fc); + fuse_pqueue_init(&fud->pq); + + spin_lock(&fc->lock); + list_add_tail(&fud->entry, &fc->devices); + spin_unlock(&fc->lock); + return fud; } EXPORT_SYMBOL_GPL(fuse_dev_alloc); @@ -1141,6 +1161,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) fc->user_id = d.user_id; fc->group_id = d.group_id; fc->max_read = max_t(unsigned, 4096, d.max_read); + fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ; /* Used by get_root_inode() */ sb->s_fs_info = fc; diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c new file mode 100644 index 000000000000..ab18b78f4755 --- /dev/null +++ b/fs/fuse/readdir.c 
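
The new fs/fuse/readdir.c added below moves the readdir parsing out of dir.c and, when the server sets FOPEN_CACHE_DIR, keeps a copy of the directory stream in the directory inode's page cache (fi->rdc tracks the cache size, position and version). fuse_add_dirent_to_cache() appends each record at the current cache size, and a record that would straddle a page boundary is pushed to the start of the next page, presumably so each cached page can be parsed on its own. A minimal user-space reduction of that placement arithmetic is sketched here; the PAGE_SIZE value is assumed for illustration.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL	/* assumed value; the kernel macro is arch-defined */

/*
 * Placement rule used by fuse_add_dirent_to_cache(): a record lands at the
 * current end of the cache unless it would cross a page boundary, in which
 * case it starts on the next page.  Returns the new cache size.
 */
static size_t cache_place(size_t size, size_t reclen,
			  size_t *index, size_t *offset)
{
	*offset = size & (PAGE_SIZE - 1);
	*index = size / PAGE_SIZE;
	if (*offset + reclen > PAGE_SIZE) {
		(*index)++;
		*offset = 0;
	}
	return *index * PAGE_SIZE + *offset + reclen;
}

int main(void)
{
	size_t index, offset, size = 4000;

	size = cache_place(size, 200, &index, &offset);
	printf("record 1: page %zu offset %zu, cache size %zu\n",
	       index, offset, size);
	size = cache_place(size, 200, &index, &offset);
	printf("record 2: page %zu offset %zu, cache size %zu\n",
	       index, offset, size);
	return 0;
}

Once userspace signals the end of the stream, fuse_readdir_cache_end() rounds the final size up to a whole page and truncates any cache pages beyond it.
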
@@ -0,0 +1,569 @@ +/* + FUSE: Filesystem in Userspace + Copyright (C) 2001-2018 Miklos Szeredi <miklos@szeredi.hu> + + This program can be distributed under the terms of the GNU GPL. + See the file COPYING. +*/ + + +#include "fuse_i.h" +#include <linux/iversion.h> +#include <linux/posix_acl.h> +#include <linux/pagemap.h> +#include <linux/highmem.h> + +static bool fuse_use_readdirplus(struct inode *dir, struct dir_context *ctx) +{ + struct fuse_conn *fc = get_fuse_conn(dir); + struct fuse_inode *fi = get_fuse_inode(dir); + + if (!fc->do_readdirplus) + return false; + if (!fc->readdirplus_auto) + return true; + if (test_and_clear_bit(FUSE_I_ADVISE_RDPLUS, &fi->state)) + return true; + if (ctx->pos == 0) + return true; + return false; +} + +static void fuse_add_dirent_to_cache(struct file *file, + struct fuse_dirent *dirent, loff_t pos) +{ + struct fuse_inode *fi = get_fuse_inode(file_inode(file)); + size_t reclen = FUSE_DIRENT_SIZE(dirent); + pgoff_t index; + struct page *page; + loff_t size; + u64 version; + unsigned int offset; + void *addr; + + spin_lock(&fi->rdc.lock); + /* + * Is cache already completed? Or this entry does not go at the end of + * cache? + */ + if (fi->rdc.cached || pos != fi->rdc.pos) { + spin_unlock(&fi->rdc.lock); + return; + } + version = fi->rdc.version; + size = fi->rdc.size; + offset = size & ~PAGE_MASK; + index = size >> PAGE_SHIFT; + /* Dirent doesn't fit in current page? Jump to next page. */ + if (offset + reclen > PAGE_SIZE) { + index++; + offset = 0; + } + spin_unlock(&fi->rdc.lock); + + if (offset) { + page = find_lock_page(file->f_mapping, index); + } else { + page = find_or_create_page(file->f_mapping, index, + mapping_gfp_mask(file->f_mapping)); + } + if (!page) + return; + + spin_lock(&fi->rdc.lock); + /* Raced with another readdir */ + if (fi->rdc.version != version || fi->rdc.size != size || + WARN_ON(fi->rdc.pos != pos)) + goto unlock; + + addr = kmap_atomic(page); + if (!offset) + clear_page(addr); + memcpy(addr + offset, dirent, reclen); + kunmap_atomic(addr); + fi->rdc.size = (index << PAGE_SHIFT) + offset + reclen; + fi->rdc.pos = dirent->off; +unlock: + spin_unlock(&fi->rdc.lock); + unlock_page(page); + put_page(page); +} + +static void fuse_readdir_cache_end(struct file *file, loff_t pos) +{ + struct fuse_inode *fi = get_fuse_inode(file_inode(file)); + loff_t end; + + spin_lock(&fi->rdc.lock); + /* does cache end position match current position? 
*/ + if (fi->rdc.pos != pos) { + spin_unlock(&fi->rdc.lock); + return; + } + + fi->rdc.cached = true; + end = ALIGN(fi->rdc.size, PAGE_SIZE); + spin_unlock(&fi->rdc.lock); + + /* truncate unused tail of cache */ + truncate_inode_pages(file->f_mapping, end); +} + +static bool fuse_emit(struct file *file, struct dir_context *ctx, + struct fuse_dirent *dirent) +{ + struct fuse_file *ff = file->private_data; + + if (ff->open_flags & FOPEN_CACHE_DIR) + fuse_add_dirent_to_cache(file, dirent, ctx->pos); + + return dir_emit(ctx, dirent->name, dirent->namelen, dirent->ino, + dirent->type); +} + +static int parse_dirfile(char *buf, size_t nbytes, struct file *file, + struct dir_context *ctx) +{ + while (nbytes >= FUSE_NAME_OFFSET) { + struct fuse_dirent *dirent = (struct fuse_dirent *) buf; + size_t reclen = FUSE_DIRENT_SIZE(dirent); + if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX) + return -EIO; + if (reclen > nbytes) + break; + if (memchr(dirent->name, '/', dirent->namelen) != NULL) + return -EIO; + + if (!fuse_emit(file, ctx, dirent)) + break; + + buf += reclen; + nbytes -= reclen; + ctx->pos = dirent->off; + } + + return 0; +} + +static int fuse_direntplus_link(struct file *file, + struct fuse_direntplus *direntplus, + u64 attr_version) +{ + struct fuse_entry_out *o = &direntplus->entry_out; + struct fuse_dirent *dirent = &direntplus->dirent; + struct dentry *parent = file->f_path.dentry; + struct qstr name = QSTR_INIT(dirent->name, dirent->namelen); + struct dentry *dentry; + struct dentry *alias; + struct inode *dir = d_inode(parent); + struct fuse_conn *fc; + struct inode *inode; + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + + if (!o->nodeid) { + /* + * Unlike in the case of fuse_lookup, zero nodeid does not mean + * ENOENT. Instead, it only means the userspace filesystem did + * not want to return attributes/handle for this entry. + * + * So do nothing. + */ + return 0; + } + + if (name.name[0] == '.') { + /* + * We could potentially refresh the attributes of the directory + * and its parent? + */ + if (name.len == 1) + return 0; + if (name.name[1] == '.' 
&& name.len == 2) + return 0; + } + + if (invalid_nodeid(o->nodeid)) + return -EIO; + if (!fuse_valid_type(o->attr.mode)) + return -EIO; + + fc = get_fuse_conn(dir); + + name.hash = full_name_hash(parent, name.name, name.len); + dentry = d_lookup(parent, &name); + if (!dentry) { +retry: + dentry = d_alloc_parallel(parent, &name, &wq); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + } + if (!d_in_lookup(dentry)) { + struct fuse_inode *fi; + inode = d_inode(dentry); + if (!inode || + get_node_id(inode) != o->nodeid || + ((o->attr.mode ^ inode->i_mode) & S_IFMT)) { + d_invalidate(dentry); + dput(dentry); + goto retry; + } + if (is_bad_inode(inode)) { + dput(dentry); + return -EIO; + } + + fi = get_fuse_inode(inode); + spin_lock(&fc->lock); + fi->nlookup++; + spin_unlock(&fc->lock); + + forget_all_cached_acls(inode); + fuse_change_attributes(inode, &o->attr, + entry_attr_timeout(o), + attr_version); + /* + * The other branch comes via fuse_iget() + * which bumps nlookup inside + */ + } else { + inode = fuse_iget(dir->i_sb, o->nodeid, o->generation, + &o->attr, entry_attr_timeout(o), + attr_version); + if (!inode) + inode = ERR_PTR(-ENOMEM); + + alias = d_splice_alias(inode, dentry); + d_lookup_done(dentry); + if (alias) { + dput(dentry); + dentry = alias; + } + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + } + if (fc->readdirplus_auto) + set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state); + fuse_change_entry_timeout(dentry, o); + + dput(dentry); + return 0; +} + +static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file, + struct dir_context *ctx, u64 attr_version) +{ + struct fuse_direntplus *direntplus; + struct fuse_dirent *dirent; + size_t reclen; + int over = 0; + int ret; + + while (nbytes >= FUSE_NAME_OFFSET_DIRENTPLUS) { + direntplus = (struct fuse_direntplus *) buf; + dirent = &direntplus->dirent; + reclen = FUSE_DIRENTPLUS_SIZE(direntplus); + + if (!dirent->namelen || dirent->namelen > FUSE_NAME_MAX) + return -EIO; + if (reclen > nbytes) + break; + if (memchr(dirent->name, '/', dirent->namelen) != NULL) + return -EIO; + + if (!over) { + /* We fill entries into dstbuf only as much as + it can hold. But we still continue iterating + over remaining entries to link them. If not, + we need to send a FORGET for each of those + which we did not link. 
+ */ + over = !fuse_emit(file, ctx, dirent); + if (!over) + ctx->pos = dirent->off; + } + + buf += reclen; + nbytes -= reclen; + + ret = fuse_direntplus_link(file, direntplus, attr_version); + if (ret) + fuse_force_forget(file, direntplus->entry_out.nodeid); + } + + return 0; +} + +static int fuse_readdir_uncached(struct file *file, struct dir_context *ctx) +{ + int plus, err; + size_t nbytes; + struct page *page; + struct inode *inode = file_inode(file); + struct fuse_conn *fc = get_fuse_conn(inode); + struct fuse_req *req; + u64 attr_version = 0; + bool locked; + + req = fuse_get_req(fc, 1); + if (IS_ERR(req)) + return PTR_ERR(req); + + page = alloc_page(GFP_KERNEL); + if (!page) { + fuse_put_request(fc, req); + return -ENOMEM; + } + + plus = fuse_use_readdirplus(inode, ctx); + req->out.argpages = 1; + req->num_pages = 1; + req->pages[0] = page; + req->page_descs[0].length = PAGE_SIZE; + if (plus) { + attr_version = fuse_get_attr_version(fc); + fuse_read_fill(req, file, ctx->pos, PAGE_SIZE, + FUSE_READDIRPLUS); + } else { + fuse_read_fill(req, file, ctx->pos, PAGE_SIZE, + FUSE_READDIR); + } + locked = fuse_lock_inode(inode); + fuse_request_send(fc, req); + fuse_unlock_inode(inode, locked); + nbytes = req->out.args[0].size; + err = req->out.h.error; + fuse_put_request(fc, req); + if (!err) { + if (!nbytes) { + struct fuse_file *ff = file->private_data; + + if (ff->open_flags & FOPEN_CACHE_DIR) + fuse_readdir_cache_end(file, ctx->pos); + } else if (plus) { + err = parse_dirplusfile(page_address(page), nbytes, + file, ctx, attr_version); + } else { + err = parse_dirfile(page_address(page), nbytes, file, + ctx); + } + } + + __free_page(page); + fuse_invalidate_atime(inode); + return err; +} + +enum fuse_parse_result { + FOUND_ERR = -1, + FOUND_NONE = 0, + FOUND_SOME, + FOUND_ALL, +}; + +static enum fuse_parse_result fuse_parse_cache(struct fuse_file *ff, + void *addr, unsigned int size, + struct dir_context *ctx) +{ + unsigned int offset = ff->readdir.cache_off & ~PAGE_MASK; + enum fuse_parse_result res = FOUND_NONE; + + WARN_ON(offset >= size); + + for (;;) { + struct fuse_dirent *dirent = addr + offset; + unsigned int nbytes = size - offset; + size_t reclen = FUSE_DIRENT_SIZE(dirent); + + if (nbytes < FUSE_NAME_OFFSET || !dirent->namelen) + break; + + if (WARN_ON(dirent->namelen > FUSE_NAME_MAX)) + return FOUND_ERR; + if (WARN_ON(reclen > nbytes)) + return FOUND_ERR; + if (WARN_ON(memchr(dirent->name, '/', dirent->namelen) != NULL)) + return FOUND_ERR; + + if (ff->readdir.pos == ctx->pos) { + res = FOUND_SOME; + if (!dir_emit(ctx, dirent->name, dirent->namelen, + dirent->ino, dirent->type)) + return FOUND_ALL; + ctx->pos = dirent->off; + } + ff->readdir.pos = dirent->off; + ff->readdir.cache_off += reclen; + + offset += reclen; + } + + return res; +} + +static void fuse_rdc_reset(struct inode *inode) +{ + struct fuse_inode *fi = get_fuse_inode(inode); + + fi->rdc.cached = false; + fi->rdc.version++; + fi->rdc.size = 0; + fi->rdc.pos = 0; +} + +#define UNCACHED 1 + +static int fuse_readdir_cached(struct file *file, struct dir_context *ctx) +{ + struct fuse_file *ff = file->private_data; + struct inode *inode = file_inode(file); + struct fuse_conn *fc = get_fuse_conn(inode); + struct fuse_inode *fi = get_fuse_inode(inode); + enum fuse_parse_result res; + pgoff_t index; + unsigned int size; + struct page *page; + void *addr; + + /* Seeked? 
If so, reset the cache stream */ + if (ff->readdir.pos != ctx->pos) { + ff->readdir.pos = 0; + ff->readdir.cache_off = 0; + } + + /* + * We're just about to start reading into the cache or reading the + * cache; both cases require an up-to-date mtime value. + */ + if (!ctx->pos && fc->auto_inval_data) { + int err = fuse_update_attributes(inode, file); + + if (err) + return err; + } + +retry: + spin_lock(&fi->rdc.lock); +retry_locked: + if (!fi->rdc.cached) { + /* Starting cache? Set cache mtime. */ + if (!ctx->pos && !fi->rdc.size) { + fi->rdc.mtime = inode->i_mtime; + fi->rdc.iversion = inode_query_iversion(inode); + } + spin_unlock(&fi->rdc.lock); + return UNCACHED; + } + /* + * When at the beginning of the directory (i.e. just after opendir(3) or + * rewinddir(3)), then need to check whether directory contents have + * changed, and reset the cache if so. + */ + if (!ctx->pos) { + if (inode_peek_iversion(inode) != fi->rdc.iversion || + !timespec64_equal(&fi->rdc.mtime, &inode->i_mtime)) { + fuse_rdc_reset(inode); + goto retry_locked; + } + } + + /* + * If cache version changed since the last getdents() call, then reset + * the cache stream. + */ + if (ff->readdir.version != fi->rdc.version) { + ff->readdir.pos = 0; + ff->readdir.cache_off = 0; + } + /* + * If at the beginning of the cache, than reset version to + * current. + */ + if (ff->readdir.pos == 0) + ff->readdir.version = fi->rdc.version; + + WARN_ON(fi->rdc.size < ff->readdir.cache_off); + + index = ff->readdir.cache_off >> PAGE_SHIFT; + + if (index == (fi->rdc.size >> PAGE_SHIFT)) + size = fi->rdc.size & ~PAGE_MASK; + else + size = PAGE_SIZE; + spin_unlock(&fi->rdc.lock); + + /* EOF? */ + if ((ff->readdir.cache_off & ~PAGE_MASK) == size) + return 0; + + page = find_get_page_flags(file->f_mapping, index, + FGP_ACCESSED | FGP_LOCK); + spin_lock(&fi->rdc.lock); + if (!page) { + /* + * Uh-oh: page gone missing, cache is useless + */ + if (fi->rdc.version == ff->readdir.version) + fuse_rdc_reset(inode); + goto retry_locked; + } + + /* Make sure it's still the same version after getting the page. */ + if (ff->readdir.version != fi->rdc.version) { + spin_unlock(&fi->rdc.lock); + unlock_page(page); + put_page(page); + goto retry; + } + spin_unlock(&fi->rdc.lock); + + /* + * Contents of the page are now protected against changing by holding + * the page lock. + */ + addr = kmap(page); + res = fuse_parse_cache(ff, addr, size, ctx); + kunmap(page); + unlock_page(page); + put_page(page); + + if (res == FOUND_ERR) + return -EIO; + + if (res == FOUND_ALL) + return 0; + + if (size == PAGE_SIZE) { + /* We hit end of page: skip to next page. */ + ff->readdir.cache_off = ALIGN(ff->readdir.cache_off, PAGE_SIZE); + goto retry; + } + + /* + * End of cache reached. If found position, then we are done, otherwise + * need to fall back to uncached, since the position we were looking for + * wasn't in the cache. + */ + return res == FOUND_SOME ? 
0 : UNCACHED; +} + +int fuse_readdir(struct file *file, struct dir_context *ctx) +{ + struct fuse_file *ff = file->private_data; + struct inode *inode = file_inode(file); + int err; + + if (is_bad_inode(inode)) + return -EIO; + + mutex_lock(&ff->readdir.lock); + + err = UNCACHED; + if (ff->open_flags & FOPEN_CACHE_DIR) + err = fuse_readdir_cached(file, ctx); + if (err == UNCACHED) + err = fuse_readdir_uncached(file, ctx); + + mutex_unlock(&ff->readdir.lock); + + return err; +} diff --git a/fs/ioctl.c b/fs/ioctl.c index 2005529af560..d64f622cac8b 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -223,6 +223,7 @@ static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd, u64 off, u64 olen, u64 destoff) { struct fd src_file = fdget(srcfd); + loff_t cloned; int ret; if (!src_file.file) @@ -230,7 +231,14 @@ static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd, ret = -EXDEV; if (src_file.file->f_path.mnt != dst_file->f_path.mnt) goto fdput; - ret = vfs_clone_file_range(src_file.file, off, dst_file, destoff, olen); + cloned = vfs_clone_file_range(src_file.file, off, dst_file, destoff, + olen, 0); + if (cloned < 0) + ret = cloned; + else if (olen && cloned != olen) + ret = -EINVAL; + else + ret = 0; fdput: fdput(src_file); return ret; @@ -669,6 +677,9 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, return ioctl_fiemap(filp, arg); case FIGETBSZ: + /* anon_bdev filesystems may not have a block size */ + if (!inode->i_sb->s_blocksize) + return -EINVAL; return put_user(inode->i_sb->s_blocksize, argp); case FICLONE: diff --git a/fs/iomap.c b/fs/iomap.c index 90c2febc93ac..64ce240217a1 100644 --- a/fs/iomap.c +++ b/fs/iomap.c @@ -30,7 +30,6 @@ #include <linux/task_io_accounting_ops.h> #include <linux/dax.h> #include <linux/sched/signal.h> -#include <linux/swap.h> #include "internal.h" @@ -1795,7 +1794,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, if (pos >= dio->i_size) goto out_free_dio; - if (iter->type == ITER_IOVEC) + if (iter_is_iovec(iter) && iov_iter_rw(iter) == READ) dio->flags |= IOMAP_DIO_DIRTY; } else { flags |= IOMAP_WRITE; diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 4288a6ecaf75..46d691ba04bc 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -180,8 +180,9 @@ static long nfs42_fallocate(struct file *filep, int mode, loff_t offset, loff_t return nfs42_proc_allocate(filep, offset, len); } -static int nfs42_clone_file_range(struct file *src_file, loff_t src_off, - struct file *dst_file, loff_t dst_off, u64 count) +static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off, + struct file *dst_file, loff_t dst_off, loff_t count, + unsigned int remap_flags) { struct inode *dst_inode = file_inode(dst_file); struct nfs_server *server = NFS_SERVER(dst_inode); @@ -190,6 +191,9 @@ static int nfs42_clone_file_range(struct file *src_file, loff_t src_off, bool same_inode = false; int ret; + if (remap_flags & ~REMAP_FILE_ADVISORY) + return -EINVAL; + /* check alignment w.r.t. clone_blksize */ ret = -EINVAL; if (bs) { @@ -240,7 +244,7 @@ out_unlock: inode_unlock(src_inode); } out: - return ret; + return ret < 0 ? 
ret : count; } #endif /* CONFIG_NFS_V4_2 */ @@ -262,7 +266,7 @@ const struct file_operations nfs4_file_operations = { .copy_file_range = nfs4_copy_file_range, .llseek = nfs4_file_llseek, .fallocate = nfs42_fallocate, - .clone_file_range = nfs42_clone_file_range, + .remap_file_range = nfs42_remap_file_range, #else .llseek = nfs_file_llseek, #endif diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index db84b4adbc49..867457d6dfbe 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3788,7 +3788,7 @@ static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, } /* - * -EACCESS could mean that the user doesn't have correct permissions + * -EACCES could mean that the user doesn't have correct permissions * to access the mount. It could also mean that we tried to mount * with a gss auth flavor, but rpc.gssd isn't running. Either way, * existing mount programs don't handle -EACCES very well so it should diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 2751976704e9..eb67098117b4 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -541,8 +541,12 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp, __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst, u64 dst_pos, u64 count) { - return nfserrno(vfs_clone_file_range(src, src_pos, dst, dst_pos, - count)); + loff_t cloned; + + cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0); + if (count && cloned != count) + cloned = -EINVAL; + return nfserrno(cloned < 0 ? cloned : 0); } ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst, @@ -923,7 +927,7 @@ __be32 nfsd_readv(struct svc_rqst *rqstp, struct svc_fh *fhp, int host_err; trace_nfsd_read_vector(rqstp, fhp, offset, *count); - iov_iter_kvec(&iter, READ | ITER_KVEC, vec, vlen, *count); + iov_iter_kvec(&iter, READ, vec, vlen, *count); host_err = vfs_iter_read(file, &iter, &offset, 0); return nfsd_finish_read(rqstp, fhp, file, offset, count, host_err); } @@ -999,7 +1003,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, if (stable && !use_wgather) flags |= RWF_SYNC; - iov_iter_kvec(&iter, WRITE | ITER_KVEC, vec, vlen, *cnt); + iov_iter_kvec(&iter, WRITE, vec, vlen, *cnt); host_err = vfs_iter_write(file, &iter, &pos, flags); if (host_err < 0) goto out_nfserr; diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c index 4690cd75d8d7..3986c7a1f6a8 100644 --- a/fs/ntfs/namei.c +++ b/fs/ntfs/namei.c @@ -312,7 +312,7 @@ static struct dentry *ntfs_get_parent(struct dentry *child_dent) /* Get the mft record of the inode belonging to the child dentry. */ mrec = map_mft_record(ni); if (IS_ERR(mrec)) - return (struct dentry *)mrec; + return ERR_CAST(mrec); /* Find the first file name attribute in the mft record. */ ctx = ntfs_attr_get_search_ctx(ni, mrec); if (unlikely(!ctx)) { diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index 1d098c3c00e0..4ebbd57cbf84 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c @@ -99,25 +99,34 @@ out: return ret; } +/* Caller must provide a bhs[] with all NULL or non-NULL entries, so it + * will be easier to handle read failure. 
+ */ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block, unsigned int nr, struct buffer_head *bhs[]) { int status = 0; unsigned int i; struct buffer_head *bh; + int new_bh = 0; trace_ocfs2_read_blocks_sync((unsigned long long)block, nr); if (!nr) goto bail; + /* Don't put buffer head and re-assign it to NULL if it is allocated + * outside since the caller can't be aware of this alternation! + */ + new_bh = (bhs[0] == NULL); + for (i = 0 ; i < nr ; i++) { if (bhs[i] == NULL) { bhs[i] = sb_getblk(osb->sb, block++); if (bhs[i] == NULL) { status = -ENOMEM; mlog_errno(status); - goto bail; + break; } } bh = bhs[i]; @@ -158,9 +167,26 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block, submit_bh(REQ_OP_READ, 0, bh); } +read_failure: for (i = nr; i > 0; i--) { bh = bhs[i - 1]; + if (unlikely(status)) { + if (new_bh && bh) { + /* If middle bh fails, let previous bh + * finish its read and then put it to + * aovoid bh leak + */ + if (!buffer_jbd(bh)) + wait_on_buffer(bh); + put_bh(bh); + bhs[i - 1] = NULL; + } else if (bh && buffer_uptodate(bh)) { + clear_buffer_uptodate(bh); + } + continue; + } + /* No need to wait on the buffer if it's managed by JBD. */ if (!buffer_jbd(bh)) wait_on_buffer(bh); @@ -170,8 +196,7 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block, * so we can safely record this and loop back * to cleanup the other buffers. */ status = -EIO; - put_bh(bh); - bhs[i - 1] = NULL; + goto read_failure; } } @@ -179,6 +204,9 @@ bail: return status; } +/* Caller must provide a bhs[] with all NULL or non-NULL entries, so it + * will be easier to handle read failure. + */ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, struct buffer_head *bhs[], int flags, int (*validate)(struct super_block *sb, @@ -188,6 +216,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, int i, ignore_cache = 0; struct buffer_head *bh; struct super_block *sb = ocfs2_metadata_cache_get_super(ci); + int new_bh = 0; trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags); @@ -213,6 +242,11 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, goto bail; } + /* Don't put buffer head and re-assign it to NULL if it is allocated + * outside since the caller can't be aware of this alternation! + */ + new_bh = (bhs[0] == NULL); + ocfs2_metadata_cache_io_lock(ci); for (i = 0 ; i < nr ; i++) { if (bhs[i] == NULL) { @@ -221,7 +255,8 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, ocfs2_metadata_cache_io_unlock(ci); status = -ENOMEM; mlog_errno(status); - goto bail; + /* Don't forget to put previous bh! */ + break; } } bh = bhs[i]; @@ -316,16 +351,27 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, } } - status = 0; - +read_failure: for (i = (nr - 1); i >= 0; i--) { bh = bhs[i]; if (!(flags & OCFS2_BH_READAHEAD)) { - if (status) { - /* Clear the rest of the buffers on error */ - put_bh(bh); - bhs[i] = NULL; + if (unlikely(status)) { + /* Clear the buffers on error including those + * ever succeeded in reading + */ + if (new_bh && bh) { + /* If middle bh fails, let previous bh + * finish its read and then put it to + * aovoid bh leak + */ + if (!buffer_jbd(bh)) + wait_on_buffer(bh); + put_bh(bh); + bhs[i] = NULL; + } else if (bh && buffer_uptodate(bh)) { + clear_buffer_uptodate(bh); + } continue; } /* We know this can't have changed as we hold the @@ -343,9 +389,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, * uptodate. 
*/ status = -EIO; clear_buffer_needs_validate(bh); - put_bh(bh); - bhs[i] = NULL; - continue; + goto read_failure; } if (buffer_needs_validate(bh)) { @@ -355,11 +399,8 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, BUG_ON(buffer_jbd(bh)); clear_buffer_needs_validate(bh); status = validate(sb, bh); - if (status) { - put_bh(bh); - bhs[i] = NULL; - continue; - } + if (status) + goto read_failure; } } diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 7d9eea7d4a87..e9f236af1927 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -916,7 +916,7 @@ static int o2net_recv_tcp_msg(struct socket *sock, void *data, size_t len) { struct kvec vec = { .iov_len = len, .iov_base = data, }; struct msghdr msg = { .msg_flags = MSG_DONTWAIT, }; - iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, len); + iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, len); return sock_recvmsg(sock, &msg, MSG_DONTWAIT); } diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index b048d4fa3959..c121abbdfc7d 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -1897,8 +1897,7 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode, /* On error, skip the f_pos to the next block. */ ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1; - brelse(bh); - continue; + break; } if (le64_to_cpu(de->inode)) { unsigned char d_type = DT_UNKNOWN; diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 933aac5da193..7c835824247e 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -2123,10 +2123,10 @@ static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb, /* LVB only has room for 64 bits of time here so we pack it for * now. */ -static u64 ocfs2_pack_timespec(struct timespec *spec) +static u64 ocfs2_pack_timespec(struct timespec64 *spec) { u64 res; - u64 sec = spec->tv_sec; + u64 sec = clamp_t(time64_t, spec->tv_sec, 0, 0x3ffffffffull); u32 nsec = spec->tv_nsec; res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK); @@ -2142,7 +2142,6 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode) struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres; struct ocfs2_meta_lvb *lvb; - struct timespec ts; lvb = ocfs2_dlm_lvb(&lockres->l_lksb); @@ -2163,15 +2162,12 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode) lvb->lvb_igid = cpu_to_be32(i_gid_read(inode)); lvb->lvb_imode = cpu_to_be16(inode->i_mode); lvb->lvb_inlink = cpu_to_be16(inode->i_nlink); - ts = timespec64_to_timespec(inode->i_atime); lvb->lvb_iatime_packed = - cpu_to_be64(ocfs2_pack_timespec(&ts)); - ts = timespec64_to_timespec(inode->i_ctime); + cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime)); lvb->lvb_ictime_packed = - cpu_to_be64(ocfs2_pack_timespec(&ts)); - ts = timespec64_to_timespec(inode->i_mtime); + cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime)); lvb->lvb_imtime_packed = - cpu_to_be64(ocfs2_pack_timespec(&ts)); + cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime)); lvb->lvb_iattr = cpu_to_be32(oi->ip_attr); lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features); lvb->lvb_igeneration = cpu_to_be32(inode->i_generation); @@ -2180,7 +2176,7 @@ out: mlog_meta_lvb(0, lockres); } -static void ocfs2_unpack_timespec(struct timespec *spec, +static void ocfs2_unpack_timespec(struct timespec64 *spec, u64 packed_time) { spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT; @@ -2189,7 +2185,6 @@ static void ocfs2_unpack_timespec(struct timespec *spec, static void ocfs2_refresh_inode_from_lvb(struct inode *inode) { - struct timespec ts; struct ocfs2_inode_info *oi = 
OCFS2_I(inode); struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres; struct ocfs2_meta_lvb *lvb; @@ -2217,15 +2212,12 @@ static void ocfs2_refresh_inode_from_lvb(struct inode *inode) i_gid_write(inode, be32_to_cpu(lvb->lvb_igid)); inode->i_mode = be16_to_cpu(lvb->lvb_imode); set_nlink(inode, be16_to_cpu(lvb->lvb_inlink)); - ocfs2_unpack_timespec(&ts, + ocfs2_unpack_timespec(&inode->i_atime, be64_to_cpu(lvb->lvb_iatime_packed)); - inode->i_atime = timespec_to_timespec64(ts); - ocfs2_unpack_timespec(&ts, + ocfs2_unpack_timespec(&inode->i_mtime, be64_to_cpu(lvb->lvb_imtime_packed)); - inode->i_mtime = timespec_to_timespec64(ts); - ocfs2_unpack_timespec(&ts, + ocfs2_unpack_timespec(&inode->i_ctime, be64_to_cpu(lvb->lvb_ictime_packed)); - inode->i_ctime = timespec_to_timespec64(ts); spin_unlock(&oi->ip_lock); } @@ -3603,7 +3595,7 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb, * we can recover correctly from node failure. Otherwise, we may get * invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set. */ - if (!ocfs2_is_o2cb_active() && + if (ocfs2_userspace_stack(osb) && lockres->l_ops->flags & LOCK_TYPE_USES_LVB) lvb = 1; diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 9fa35cb6f6e0..d640c5f8a85d 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -2343,7 +2343,7 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb, written = __generic_file_write_iter(iocb, from); /* buffered aio wouldn't have proper lock coverage today */ - BUG_ON(written == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT)); + BUG_ON(written == -EIOCBQUEUED && !direct_io); /* * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io @@ -2463,7 +2463,7 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb, trace_generic_file_read_iter_ret(ret); /* buffered aio wouldn't have proper lock coverage today */ - BUG_ON(ret == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT)); + BUG_ON(ret == -EIOCBQUEUED && !direct_io); /* see ocfs2_file_write_iter */ if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) { @@ -2527,24 +2527,79 @@ out: return offset; } -static int ocfs2_file_clone_range(struct file *file_in, - loff_t pos_in, - struct file *file_out, - loff_t pos_out, - u64 len) +static loff_t ocfs2_remap_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t len, unsigned int remap_flags) { - return ocfs2_reflink_remap_range(file_in, pos_in, file_out, pos_out, - len, false); -} + struct inode *inode_in = file_inode(file_in); + struct inode *inode_out = file_inode(file_out); + struct ocfs2_super *osb = OCFS2_SB(inode_in->i_sb); + struct buffer_head *in_bh = NULL, *out_bh = NULL; + bool same_inode = (inode_in == inode_out); + loff_t remapped = 0; + ssize_t ret; -static int ocfs2_file_dedupe_range(struct file *file_in, - loff_t pos_in, - struct file *file_out, - loff_t pos_out, - u64 len) -{ - return ocfs2_reflink_remap_range(file_in, pos_in, file_out, pos_out, - len, true); + if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY)) + return -EINVAL; + if (!ocfs2_refcount_tree(osb)) + return -EOPNOTSUPP; + if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) + return -EROFS; + + /* Lock both files against IO */ + ret = ocfs2_reflink_inodes_lock(inode_in, &in_bh, inode_out, &out_bh); + if (ret) + return ret; + + /* Check file eligibility and prepare for block sharing. 
*/ + ret = -EINVAL; + if ((OCFS2_I(inode_in)->ip_flags & OCFS2_INODE_SYSTEM_FILE) || + (OCFS2_I(inode_out)->ip_flags & OCFS2_INODE_SYSTEM_FILE)) + goto out_unlock; + + ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out, + &len, remap_flags); + if (ret < 0 || len == 0) + goto out_unlock; + + /* Lock out changes to the allocation maps and remap. */ + down_write(&OCFS2_I(inode_in)->ip_alloc_sem); + if (!same_inode) + down_write_nested(&OCFS2_I(inode_out)->ip_alloc_sem, + SINGLE_DEPTH_NESTING); + + /* Zap any page cache for the destination file's range. */ + truncate_inode_pages_range(&inode_out->i_data, + round_down(pos_out, PAGE_SIZE), + round_up(pos_out + len, PAGE_SIZE) - 1); + + remapped = ocfs2_reflink_remap_blocks(inode_in, in_bh, pos_in, + inode_out, out_bh, pos_out, len); + up_write(&OCFS2_I(inode_in)->ip_alloc_sem); + if (!same_inode) + up_write(&OCFS2_I(inode_out)->ip_alloc_sem); + if (remapped < 0) { + ret = remapped; + mlog_errno(ret); + goto out_unlock; + } + + /* + * Empty the extent map so that we may get the right extent + * record from the disk. + */ + ocfs2_extent_map_trunc(inode_in, 0); + ocfs2_extent_map_trunc(inode_out, 0); + + ret = ocfs2_reflink_update_dest(inode_out, out_bh, pos_out + len); + if (ret) { + mlog_errno(ret); + goto out_unlock; + } + +out_unlock: + ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh); + return remapped > 0 ? remapped : ret; } const struct inode_operations ocfs2_file_iops = { @@ -2586,8 +2641,7 @@ const struct file_operations ocfs2_fops = { .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, .fallocate = ocfs2_fallocate, - .clone_file_range = ocfs2_file_clone_range, - .dedupe_file_range = ocfs2_file_dedupe_range, + .remap_file_range = ocfs2_remap_file_range, }; const struct file_operations ocfs2_dops = { @@ -2633,8 +2687,7 @@ const struct file_operations ocfs2_fops_no_plocks = { .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, .fallocate = ocfs2_fallocate, - .clone_file_range = ocfs2_file_clone_range, - .dedupe_file_range = ocfs2_file_dedupe_range, + .remap_file_range = ocfs2_remap_file_range, }; const struct file_operations ocfs2_dops_no_plocks = { diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index bd3475694e83..b63c97f4318e 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -1378,15 +1378,23 @@ static int __ocfs2_recovery_thread(void *arg) int rm_quota_used = 0, i; struct ocfs2_quota_recovery *qrec; + /* Whether the quota supported. */ + int quota_enabled = OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb, + OCFS2_FEATURE_RO_COMPAT_USRQUOTA) + || OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb, + OCFS2_FEATURE_RO_COMPAT_GRPQUOTA); + status = ocfs2_wait_on_mount(osb); if (status < 0) { goto bail; } - rm_quota = kcalloc(osb->max_slots, sizeof(int), GFP_NOFS); - if (!rm_quota) { - status = -ENOMEM; - goto bail; + if (quota_enabled) { + rm_quota = kcalloc(osb->max_slots, sizeof(int), GFP_NOFS); + if (!rm_quota) { + status = -ENOMEM; + goto bail; + } } restart: status = ocfs2_super_lock(osb, 1); @@ -1422,9 +1430,14 @@ restart: * then quota usage would be out of sync until some node takes * the slot. So we remember which nodes need quota recovery * and when everything else is done, we recover quotas. 
*/ - for (i = 0; i < rm_quota_used && rm_quota[i] != slot_num; i++); - if (i == rm_quota_used) - rm_quota[rm_quota_used++] = slot_num; + if (quota_enabled) { + for (i = 0; i < rm_quota_used + && rm_quota[i] != slot_num; i++) + ; + + if (i == rm_quota_used) + rm_quota[rm_quota_used++] = slot_num; + } status = ocfs2_recover_node(osb, node_num, slot_num); skip_recovery: @@ -1452,16 +1465,19 @@ skip_recovery: /* Now it is right time to recover quotas... We have to do this under * superblock lock so that no one can start using the slot (and crash) * before we recover it */ - for (i = 0; i < rm_quota_used; i++) { - qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]); - if (IS_ERR(qrec)) { - status = PTR_ERR(qrec); - mlog_errno(status); - continue; + if (quota_enabled) { + for (i = 0; i < rm_quota_used; i++) { + qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]); + if (IS_ERR(qrec)) { + status = PTR_ERR(qrec); + mlog_errno(status); + continue; + } + ocfs2_queue_recovery_completion(osb->journal, + rm_quota[i], + NULL, NULL, qrec, + ORPHAN_NEED_TRUNCATE); } - ocfs2_queue_recovery_completion(osb->journal, rm_quota[i], - NULL, NULL, qrec, - ORPHAN_NEED_TRUNCATE); } ocfs2_super_unlock(osb, 1); @@ -1483,7 +1499,8 @@ bail: mutex_unlock(&osb->recovery_lock); - kfree(rm_quota); + if (quota_enabled) + kfree(rm_quota); /* no one is callint kthread_stop() for us so the kthread() api * requires that we call do_exit(). And it isn't exported, but diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c index 7eb3b0a6347e..3f1685d7d43b 100644 --- a/fs/ocfs2/move_extents.c +++ b/fs/ocfs2/move_extents.c @@ -25,6 +25,7 @@ #include "ocfs2_ioctl.h" #include "alloc.h" +#include "localalloc.h" #include "aops.h" #include "dlmglue.h" #include "extent_map.h" @@ -233,6 +234,7 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context, struct ocfs2_refcount_tree *ref_tree = NULL; u32 new_phys_cpos, new_len; u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); + int need_free = 0; if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) { BUG_ON(!ocfs2_is_refcount_inode(inode)); @@ -308,6 +310,7 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context, if (!partial) { context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE; ret = -ENOSPC; + need_free = 1; goto out_commit; } } @@ -332,6 +335,20 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context, mlog_errno(ret); out_commit: + if (need_free && context->data_ac) { + struct ocfs2_alloc_context *data_ac = context->data_ac; + + if (context->data_ac->ac_which == OCFS2_AC_USE_LOCAL) + ocfs2_free_local_alloc_bits(osb, handle, data_ac, + new_phys_cpos, new_len); + else + ocfs2_free_clusters(handle, + data_ac->ac_inode, + data_ac->ac_bh, + ocfs2_clusters_to_blocks(osb->sb, new_phys_cpos), + new_len); + } + ocfs2_commit_trans(osb, handle); out_unlock_mutex: diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 1114ef02e780..a35259eebc56 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -4466,9 +4466,9 @@ out: } /* Update destination inode size, if necessary. */ -static int ocfs2_reflink_update_dest(struct inode *dest, - struct buffer_head *d_bh, - loff_t newlen) +int ocfs2_reflink_update_dest(struct inode *dest, + struct buffer_head *d_bh, + loff_t newlen) { handle_t *handle; int ret; @@ -4505,14 +4505,14 @@ out_commit: } /* Remap the range pos_in:len in s_inode to pos_out:len in t_inode. 
*/ -static int ocfs2_reflink_remap_extent(struct inode *s_inode, - struct buffer_head *s_bh, - loff_t pos_in, - struct inode *t_inode, - struct buffer_head *t_bh, - loff_t pos_out, - loff_t len, - struct ocfs2_cached_dealloc_ctxt *dealloc) +static loff_t ocfs2_reflink_remap_extent(struct inode *s_inode, + struct buffer_head *s_bh, + loff_t pos_in, + struct inode *t_inode, + struct buffer_head *t_bh, + loff_t pos_out, + loff_t len, + struct ocfs2_cached_dealloc_ctxt *dealloc) { struct ocfs2_extent_tree s_et; struct ocfs2_extent_tree t_et; @@ -4520,8 +4520,9 @@ static int ocfs2_reflink_remap_extent(struct inode *s_inode, struct buffer_head *ref_root_bh = NULL; struct ocfs2_refcount_tree *ref_tree; struct ocfs2_super *osb; + loff_t remapped_bytes = 0; loff_t pstart, plen; - u32 p_cluster, num_clusters, slast, spos, tpos; + u32 p_cluster, num_clusters, slast, spos, tpos, remapped_clus = 0; unsigned int ext_flags; int ret = 0; @@ -4603,30 +4604,34 @@ static int ocfs2_reflink_remap_extent(struct inode *s_inode, next_loop: spos += num_clusters; tpos += num_clusters; + remapped_clus += num_clusters; } -out: - return ret; + goto out; out_unlock_refcount: ocfs2_unlock_refcount_tree(osb, ref_tree, 1); brelse(ref_root_bh); - return ret; +out: + remapped_bytes = ocfs2_clusters_to_bytes(t_inode->i_sb, remapped_clus); + remapped_bytes = min_t(loff_t, len, remapped_bytes); + + return remapped_bytes > 0 ? remapped_bytes : ret; } /* Set up refcount tree and remap s_inode to t_inode. */ -static int ocfs2_reflink_remap_blocks(struct inode *s_inode, - struct buffer_head *s_bh, - loff_t pos_in, - struct inode *t_inode, - struct buffer_head *t_bh, - loff_t pos_out, - loff_t len) +loff_t ocfs2_reflink_remap_blocks(struct inode *s_inode, + struct buffer_head *s_bh, + loff_t pos_in, + struct inode *t_inode, + struct buffer_head *t_bh, + loff_t pos_out, + loff_t len) { struct ocfs2_cached_dealloc_ctxt dealloc; struct ocfs2_super *osb; struct ocfs2_dinode *dis; struct ocfs2_dinode *dit; - int ret; + loff_t ret; osb = OCFS2_SB(s_inode->i_sb); dis = (struct ocfs2_dinode *)s_bh->b_data; @@ -4698,7 +4703,7 @@ static int ocfs2_reflink_remap_blocks(struct inode *s_inode, /* Actually remap extents now. */ ret = ocfs2_reflink_remap_extent(s_inode, s_bh, pos_in, t_inode, t_bh, pos_out, len, &dealloc); - if (ret) { + if (ret < 0) { mlog_errno(ret); goto out; } @@ -4713,10 +4718,10 @@ out: } /* Lock an inode and grab a bh pointing to the inode. */ -static int ocfs2_reflink_inodes_lock(struct inode *s_inode, - struct buffer_head **bh1, - struct inode *t_inode, - struct buffer_head **bh2) +int ocfs2_reflink_inodes_lock(struct inode *s_inode, + struct buffer_head **bh1, + struct inode *t_inode, + struct buffer_head **bh2) { struct inode *inode1; struct inode *inode2; @@ -4801,10 +4806,10 @@ out_i1: } /* Unlock both inodes and release buffers. */ -static void ocfs2_reflink_inodes_unlock(struct inode *s_inode, - struct buffer_head *s_bh, - struct inode *t_inode, - struct buffer_head *t_bh) +void ocfs2_reflink_inodes_unlock(struct inode *s_inode, + struct buffer_head *s_bh, + struct inode *t_inode, + struct buffer_head *t_bh) { ocfs2_inode_unlock(s_inode, 1); ocfs2_rw_unlock(s_inode, 1); @@ -4816,82 +4821,3 @@ static void ocfs2_reflink_inodes_unlock(struct inode *s_inode, } unlock_two_nondirectories(s_inode, t_inode); } - -/* Link a range of blocks from one file to another. 
*/ -int ocfs2_reflink_remap_range(struct file *file_in, - loff_t pos_in, - struct file *file_out, - loff_t pos_out, - u64 len, - bool is_dedupe) -{ - struct inode *inode_in = file_inode(file_in); - struct inode *inode_out = file_inode(file_out); - struct ocfs2_super *osb = OCFS2_SB(inode_in->i_sb); - struct buffer_head *in_bh = NULL, *out_bh = NULL; - bool same_inode = (inode_in == inode_out); - ssize_t ret; - - if (!ocfs2_refcount_tree(osb)) - return -EOPNOTSUPP; - if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) - return -EROFS; - - /* Lock both files against IO */ - ret = ocfs2_reflink_inodes_lock(inode_in, &in_bh, inode_out, &out_bh); - if (ret) - return ret; - - /* Check file eligibility and prepare for block sharing. */ - ret = -EINVAL; - if ((OCFS2_I(inode_in)->ip_flags & OCFS2_INODE_SYSTEM_FILE) || - (OCFS2_I(inode_out)->ip_flags & OCFS2_INODE_SYSTEM_FILE)) - goto out_unlock; - - ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out, - &len, is_dedupe); - if (ret <= 0) - goto out_unlock; - - /* Lock out changes to the allocation maps and remap. */ - down_write(&OCFS2_I(inode_in)->ip_alloc_sem); - if (!same_inode) - down_write_nested(&OCFS2_I(inode_out)->ip_alloc_sem, - SINGLE_DEPTH_NESTING); - - ret = ocfs2_reflink_remap_blocks(inode_in, in_bh, pos_in, inode_out, - out_bh, pos_out, len); - - /* Zap any page cache for the destination file's range. */ - if (!ret) - truncate_inode_pages_range(&inode_out->i_data, pos_out, - PAGE_ALIGN(pos_out + len) - 1); - - up_write(&OCFS2_I(inode_in)->ip_alloc_sem); - if (!same_inode) - up_write(&OCFS2_I(inode_out)->ip_alloc_sem); - if (ret) { - mlog_errno(ret); - goto out_unlock; - } - - /* - * Empty the extent map so that we may get the right extent - * record from the disk. - */ - ocfs2_extent_map_trunc(inode_in, 0); - ocfs2_extent_map_trunc(inode_out, 0); - - ret = ocfs2_reflink_update_dest(inode_out, out_bh, pos_out + len); - if (ret) { - mlog_errno(ret); - goto out_unlock; - } - - ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh); - return 0; - -out_unlock: - ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh); - return ret; -} diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h index 4af55bf4b35b..e9e862be4a1e 100644 --- a/fs/ocfs2/refcounttree.h +++ b/fs/ocfs2/refcounttree.h @@ -115,11 +115,23 @@ int ocfs2_reflink_ioctl(struct inode *inode, const char __user *oldname, const char __user *newname, bool preserve); -int ocfs2_reflink_remap_range(struct file *file_in, - loff_t pos_in, - struct file *file_out, - loff_t pos_out, - u64 len, - bool is_dedupe); +loff_t ocfs2_reflink_remap_blocks(struct inode *s_inode, + struct buffer_head *s_bh, + loff_t pos_in, + struct inode *t_inode, + struct buffer_head *t_bh, + loff_t pos_out, + loff_t len); +int ocfs2_reflink_inodes_lock(struct inode *s_inode, + struct buffer_head **bh1, + struct inode *t_inode, + struct buffer_head **bh2); +void ocfs2_reflink_inodes_unlock(struct inode *s_inode, + struct buffer_head *s_bh, + struct inode *t_inode, + struct buffer_head *t_bh); +int ocfs2_reflink_update_dest(struct inode *dest, + struct buffer_head *d_bh, + loff_t newlen); #endif /* OCFS2_REFCOUNTTREE_H */ diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c index d6c350ba25b9..c4b029c43464 100644 --- a/fs/ocfs2/stackglue.c +++ b/fs/ocfs2/stackglue.c @@ -48,12 +48,6 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl"; */ static struct ocfs2_stack_plugin *active_stack; -inline int ocfs2_is_o2cb_active(void) -{ - 
return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB); -} -EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active); - static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name) { struct ocfs2_stack_plugin *p; diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h index e3036e1790e8..f2dce10fae54 100644 --- a/fs/ocfs2/stackglue.h +++ b/fs/ocfs2/stackglue.h @@ -298,9 +298,6 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin); void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin); -/* In ocfs2_downconvert_lock(), we need to know which stack we are using */ -int ocfs2_is_o2cb_active(void); - extern struct kset *ocfs2_kset; #endif /* STACKGLUE_H */ diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index 5e65d818937b..fe53381b26b1 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -25,7 +25,7 @@ static int read_one_page(struct page *page) struct iov_iter to; struct bio_vec bv = {.bv_page = page, .bv_len = PAGE_SIZE}; - iov_iter_bvec(&to, ITER_BVEC | READ, &bv, 1, PAGE_SIZE); + iov_iter_bvec(&to, READ, &bv, 1, PAGE_SIZE); gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_readpage called with page %p\n", diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index 1cc797a08a5b..9e62dcf06fc4 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -125,6 +125,7 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len) struct file *new_file; loff_t old_pos = 0; loff_t new_pos = 0; + loff_t cloned; int error = 0; if (len == 0) @@ -141,11 +142,10 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len) } /* Try to use clone_file_range to clone up within the same fs */ - error = do_clone_file_range(old_file, 0, new_file, 0, len); - if (!error) + cloned = do_clone_file_range(old_file, 0, new_file, 0, len, 0); + if (cloned == len) goto out; /* Couldn't clone, so now we try to copy the data */ - error = 0; /* FIXME: copy up sparse files efficiently */ while (len) { @@ -395,7 +395,6 @@ struct ovl_copy_up_ctx { struct dentry *destdir; struct qstr destname; struct dentry *workdir; - bool tmpfile; bool origin; bool indexed; bool metacopy; @@ -440,63 +439,6 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c) return err; } -static int ovl_install_temp(struct ovl_copy_up_ctx *c, struct dentry *temp, - struct dentry **newdentry) -{ - int err; - struct dentry *upper; - struct inode *udir = d_inode(c->destdir); - - upper = lookup_one_len(c->destname.name, c->destdir, c->destname.len); - if (IS_ERR(upper)) - return PTR_ERR(upper); - - if (c->tmpfile) - err = ovl_do_link(temp, udir, upper); - else - err = ovl_do_rename(d_inode(c->workdir), temp, udir, upper, 0); - - if (!err) - *newdentry = dget(c->tmpfile ? 
upper : temp); - dput(upper); - - return err; -} - -static struct dentry *ovl_get_tmpfile(struct ovl_copy_up_ctx *c) -{ - int err; - struct dentry *temp; - const struct cred *old_creds = NULL; - struct cred *new_creds = NULL; - struct ovl_cattr cattr = { - /* Can't properly set mode on creation because of the umask */ - .mode = c->stat.mode & S_IFMT, - .rdev = c->stat.rdev, - .link = c->link - }; - - err = security_inode_copy_up(c->dentry, &new_creds); - temp = ERR_PTR(err); - if (err < 0) - goto out; - - if (new_creds) - old_creds = override_creds(new_creds); - - if (c->tmpfile) - temp = ovl_do_tmpfile(c->workdir, c->stat.mode); - else - temp = ovl_create_temp(c->workdir, &cattr); -out: - if (new_creds) { - revert_creds(old_creds); - put_cred(new_creds); - } - - return temp; -} - static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp) { int err; @@ -548,51 +490,148 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp) return err; } -static int ovl_copy_up_locked(struct ovl_copy_up_ctx *c) +struct ovl_cu_creds { + const struct cred *old; + struct cred *new; +}; + +static int ovl_prep_cu_creds(struct dentry *dentry, struct ovl_cu_creds *cc) +{ + int err; + + cc->old = cc->new = NULL; + err = security_inode_copy_up(dentry, &cc->new); + if (err < 0) + return err; + + if (cc->new) + cc->old = override_creds(cc->new); + + return 0; +} + +static void ovl_revert_cu_creds(struct ovl_cu_creds *cc) +{ + if (cc->new) { + revert_creds(cc->old); + put_cred(cc->new); + } +} + +/* + * Copyup using workdir to prepare temp file. Used when copying up directories, + * special files or when upper fs doesn't support O_TMPFILE. + */ +static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c) { - struct inode *udir = c->destdir->d_inode; struct inode *inode; - struct dentry *newdentry = NULL; - struct dentry *temp; + struct inode *udir = d_inode(c->destdir), *wdir = d_inode(c->workdir); + struct dentry *temp, *upper; + struct ovl_cu_creds cc; int err; + struct ovl_cattr cattr = { + /* Can't properly set mode on creation because of the umask */ + .mode = c->stat.mode & S_IFMT, + .rdev = c->stat.rdev, + .link = c->link + }; + + err = ovl_lock_rename_workdir(c->workdir, c->destdir); + if (err) + return err; + + err = ovl_prep_cu_creds(c->dentry, &cc); + if (err) + goto unlock; - temp = ovl_get_tmpfile(c); + temp = ovl_create_temp(c->workdir, &cattr); + ovl_revert_cu_creds(&cc); + + err = PTR_ERR(temp); if (IS_ERR(temp)) - return PTR_ERR(temp); + goto unlock; err = ovl_copy_up_inode(c, temp); if (err) - goto out; + goto cleanup; if (S_ISDIR(c->stat.mode) && c->indexed) { err = ovl_create_index(c->dentry, c->lowerpath.dentry, temp); if (err) - goto out; + goto cleanup; } - if (c->tmpfile) { - inode_lock_nested(udir, I_MUTEX_PARENT); - err = ovl_install_temp(c, temp, &newdentry); - inode_unlock(udir); - } else { - err = ovl_install_temp(c, temp, &newdentry); - } + upper = lookup_one_len(c->destname.name, c->destdir, c->destname.len); + err = PTR_ERR(upper); + if (IS_ERR(upper)) + goto cleanup; + + err = ovl_do_rename(wdir, temp, udir, upper, 0); + dput(upper); if (err) - goto out; + goto cleanup; if (!c->metacopy) ovl_set_upperdata(d_inode(c->dentry)); inode = d_inode(c->dentry); - ovl_inode_update(inode, newdentry); + ovl_inode_update(inode, temp); if (S_ISDIR(inode->i_mode)) ovl_set_flag(OVL_WHITEOUTS, inode); +unlock: + unlock_rename(c->workdir, c->destdir); -out: - if (err && !c->tmpfile) - ovl_cleanup(d_inode(c->workdir), temp); - dput(temp); return err; +cleanup: + 
ovl_cleanup(wdir, temp); + dput(temp); + goto unlock; +} + +/* Copyup using O_TMPFILE which does not require cross dir locking */ +static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c) +{ + struct inode *udir = d_inode(c->destdir); + struct dentry *temp, *upper; + struct ovl_cu_creds cc; + int err; + + err = ovl_prep_cu_creds(c->dentry, &cc); + if (err) + return err; + + temp = ovl_do_tmpfile(c->workdir, c->stat.mode); + ovl_revert_cu_creds(&cc); + + if (IS_ERR(temp)) + return PTR_ERR(temp); + + err = ovl_copy_up_inode(c, temp); + if (err) + goto out_dput; + + inode_lock_nested(udir, I_MUTEX_PARENT); + + upper = lookup_one_len(c->destname.name, c->destdir, c->destname.len); + err = PTR_ERR(upper); + if (!IS_ERR(upper)) { + err = ovl_do_link(temp, udir, upper); + dput(upper); + } + inode_unlock(udir); + + if (err) + goto out_dput; + + if (!c->metacopy) + ovl_set_upperdata(d_inode(c->dentry)); + ovl_inode_update(d_inode(c->dentry), temp); + + return 0; + +out_dput: + dput(temp); + return err; } /* @@ -646,18 +685,10 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c) } /* Should we copyup with O_TMPFILE or with workdir? */ - if (S_ISREG(c->stat.mode) && ofs->tmpfile) { - c->tmpfile = true; - err = ovl_copy_up_locked(c); - } else { - err = ovl_lock_rename_workdir(c->workdir, c->destdir); - if (!err) { - err = ovl_copy_up_locked(c); - unlock_rename(c->workdir, c->destdir); - } - } - - + if (S_ISREG(c->stat.mode) && ofs->tmpfile) + err = ovl_copy_up_tmpfile(c); + else + err = ovl_copy_up_workdir(c); if (err) goto out; diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 276914ae3c60..c6289147c787 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -414,13 +414,12 @@ static int ovl_set_upper_acl(struct dentry *upperdentry, const char *name, if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !acl) return 0; - size = posix_acl_to_xattr(NULL, acl, NULL, 0); + size = posix_acl_xattr_size(acl->a_count); buffer = kmalloc(size, GFP_KERNEL); if (!buffer) return -ENOMEM; - size = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); - err = size; + err = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); if (err < 0) goto out_free; @@ -463,6 +462,10 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode, if (IS_ERR(upper)) goto out_unlock; + err = -ESTALE; + if (d_is_negative(upper) || !IS_WHITEOUT(d_inode(upper))) + goto out_dput; + newdentry = ovl_create_temp(workdir, cattr); err = PTR_ERR(newdentry); if (IS_ERR(newdentry)) @@ -652,7 +655,6 @@ static int ovl_link(struct dentry *old, struct inode *newdir, struct dentry *new) { int err; - bool locked = false; struct inode *inode; err = ovl_want_write(old); @@ -663,13 +665,17 @@ static int ovl_link(struct dentry *old, struct inode *newdir, if (err) goto out_drop_write; + err = ovl_copy_up(new->d_parent); + if (err) + goto out_drop_write; + if (ovl_is_metacopy_dentry(old)) { err = ovl_set_redirect(old, false); if (err) goto out_drop_write; } - err = ovl_nlink_start(old, &locked); + err = ovl_nlink_start(old); if (err) goto out_drop_write; @@ -682,7 +688,7 @@ static int ovl_link(struct dentry *old, struct inode *newdir, if (err) iput(inode); - ovl_nlink_end(old, locked); + ovl_nlink_end(old); out_drop_write: ovl_drop_write(old); out: @@ -807,7 +813,6 @@ static bool ovl_pure_upper(struct dentry *dentry) static int ovl_do_remove(struct dentry *dentry, bool is_dir) { int err; - bool locked = false; const struct cred *old_cred; struct dentry *upperdentry; bool lower_positive = ovl_lower_positive(dentry); @@ -828,7 +833,7 
@@ static int ovl_do_remove(struct dentry *dentry, bool is_dir) if (err) goto out_drop_write; - err = ovl_nlink_start(dentry, &locked); + err = ovl_nlink_start(dentry); if (err) goto out_drop_write; @@ -844,7 +849,7 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir) else drop_nlink(dentry->d_inode); } - ovl_nlink_end(dentry, locked); + ovl_nlink_end(dentry); /* * Copy ctime @@ -1008,7 +1013,6 @@ static int ovl_rename(struct inode *olddir, struct dentry *old, unsigned int flags) { int err; - bool locked = false; struct dentry *old_upperdir; struct dentry *new_upperdir; struct dentry *olddentry; @@ -1017,6 +1021,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old, bool old_opaque; bool new_opaque; bool cleanup_whiteout = false; + bool update_nlink = false; bool overwrite = !(flags & RENAME_EXCHANGE); bool is_dir = d_is_dir(old); bool new_is_dir = d_is_dir(new); @@ -1074,10 +1079,12 @@ static int ovl_rename(struct inode *olddir, struct dentry *old, err = ovl_copy_up(new); if (err) goto out_drop_write; - } else { - err = ovl_nlink_start(new, &locked); + } else if (d_inode(new)) { + err = ovl_nlink_start(new); if (err) goto out_drop_write; + + update_nlink = true; } old_cred = ovl_override_creds(old->d_sb); @@ -1206,7 +1213,8 @@ out_unlock: unlock_rename(new_upperdir, old_upperdir); out_revert_creds: revert_creds(old_cred); - ovl_nlink_end(new, locked); + if (update_nlink) + ovl_nlink_end(new); out_drop_write: ovl_drop_write(old); out: diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c index 986313da0c88..84dd957efa24 100644 --- a/fs/overlayfs/file.c +++ b/fs/overlayfs/file.c @@ -434,14 +434,14 @@ enum ovl_copyop { OVL_DEDUPE, }; -static ssize_t ovl_copyfile(struct file *file_in, loff_t pos_in, +static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, - u64 len, unsigned int flags, enum ovl_copyop op) + loff_t len, unsigned int flags, enum ovl_copyop op) { struct inode *inode_out = file_inode(file_out); struct fd real_in, real_out; const struct cred *old_cred; - ssize_t ret; + loff_t ret; ret = ovl_real_fdget(file_out, &real_out); if (ret) @@ -462,12 +462,13 @@ static ssize_t ovl_copyfile(struct file *file_in, loff_t pos_in, case OVL_CLONE: ret = vfs_clone_file_range(real_in.file, pos_in, - real_out.file, pos_out, len); + real_out.file, pos_out, len, flags); break; case OVL_DEDUPE: ret = vfs_dedupe_file_range_one(real_in.file, pos_in, - real_out.file, pos_out, len); + real_out.file, pos_out, len, + flags); break; } revert_creds(old_cred); @@ -489,26 +490,31 @@ static ssize_t ovl_copy_file_range(struct file *file_in, loff_t pos_in, OVL_COPY); } -static int ovl_clone_file_range(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, u64 len) +static loff_t ovl_remap_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t len, unsigned int remap_flags) { - return ovl_copyfile(file_in, pos_in, file_out, pos_out, len, 0, - OVL_CLONE); -} + enum ovl_copyop op; + + if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY)) + return -EINVAL; + + if (remap_flags & REMAP_FILE_DEDUP) + op = OVL_DEDUPE; + else + op = OVL_CLONE; -static int ovl_dedupe_file_range(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, u64 len) -{ /* * Don't copy up because of a dedupe request, this wouldn't make sense * most of the time (data would be duplicated instead of deduplicated). 
*/ - if (!ovl_inode_upper(file_inode(file_in)) || - !ovl_inode_upper(file_inode(file_out))) + if (op == OVL_DEDUPE && + (!ovl_inode_upper(file_inode(file_in)) || + !ovl_inode_upper(file_inode(file_out)))) return -EPERM; - return ovl_copyfile(file_in, pos_in, file_out, pos_out, len, 0, - OVL_DEDUPE); + return ovl_copyfile(file_in, pos_in, file_out, pos_out, len, + remap_flags, op); } const struct file_operations ovl_file_operations = { @@ -525,6 +531,5 @@ const struct file_operations ovl_file_operations = { .compat_ioctl = ovl_compat_ioctl, .copy_file_range = ovl_copy_file_range, - .clone_file_range = ovl_clone_file_range, - .dedupe_file_range = ovl_dedupe_file_range, + .remap_file_range = ovl_remap_file_range, }; diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 3b7ed5d2279c..6bcc9dedc342 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -286,13 +286,22 @@ int ovl_permission(struct inode *inode, int mask) if (err) return err; - old_cred = ovl_override_creds(inode->i_sb); - if (!upperinode && - !special_file(realinode->i_mode) && mask & MAY_WRITE) { + /* No need to do any access on underlying for special files */ + if (special_file(realinode->i_mode)) + return 0; + + /* No need to access underlying for execute */ + mask &= ~MAY_EXEC; + if ((mask & (MAY_READ | MAY_WRITE)) == 0) + return 0; + + /* Lower files get copied up, so turn write access into read */ + if (!upperinode && mask & MAY_WRITE) { mask &= ~(MAY_WRITE | MAY_APPEND); - /* Make sure mounter can read file for copy up later */ mask |= MAY_READ; } + + old_cred = ovl_override_creds(inode->i_sb); err = inode_permission(realinode, mask); revert_creds(old_cred); diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index 9c0ca6a7becf..efd372312ef1 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ -422,8 +422,10 @@ int ovl_verify_set_fh(struct dentry *dentry, const char *name, fh = ovl_encode_real_fh(real, is_upper); err = PTR_ERR(fh); - if (IS_ERR(fh)) + if (IS_ERR(fh)) { + fh = NULL; goto fail; + } err = ovl_verify_fh(dentry, name, fh); if (set && err == -ENODATA) diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index a3c0d9584312..5e45cb3630a0 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -271,8 +271,8 @@ bool ovl_test_flag(unsigned long flag, struct inode *inode); bool ovl_inuse_trylock(struct dentry *dentry); void ovl_inuse_unlock(struct dentry *dentry); bool ovl_need_index(struct dentry *dentry); -int ovl_nlink_start(struct dentry *dentry, bool *locked); -void ovl_nlink_end(struct dentry *dentry, bool locked); +int ovl_nlink_start(struct dentry *dentry); +void ovl_nlink_end(struct dentry *dentry); int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir); int ovl_check_metacopy_xattr(struct dentry *dentry); bool ovl_is_metacopy_dentry(struct dentry *dentry); @@ -290,6 +290,16 @@ static inline unsigned int ovl_xino_bits(struct super_block *sb) return ofs->xino_bits; } +static inline int ovl_inode_lock(struct inode *inode) +{ + return mutex_lock_interruptible(&OVL_I(inode)->lock); +} + +static inline void ovl_inode_unlock(struct inode *inode) +{ + mutex_unlock(&OVL_I(inode)->lock); +} + /* namei.c */ int ovl_check_fh_len(struct ovl_fh *fh, int fh_len); diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 30adc9d408a0..0116735cc321 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -472,6 +472,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config) { char *p; int err; + bool metacopy_opt = false, 
redirect_opt = false; config->redirect_mode = kstrdup(ovl_redirect_mode_def(), GFP_KERNEL); if (!config->redirect_mode) @@ -516,6 +517,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config) config->redirect_mode = match_strdup(&args[0]); if (!config->redirect_mode) return -ENOMEM; + redirect_opt = true; break; case OPT_INDEX_ON: @@ -548,6 +550,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config) case OPT_METACOPY_ON: config->metacopy = true; + metacopy_opt = true; break; case OPT_METACOPY_OFF: @@ -572,13 +575,32 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config) if (err) return err; - /* metacopy feature with upper requires redirect_dir=on */ - if (config->upperdir && config->metacopy && !config->redirect_dir) { - pr_warn("overlayfs: metadata only copy up requires \"redirect_dir=on\", falling back to metacopy=off.\n"); - config->metacopy = false; - } else if (config->metacopy && !config->redirect_follow) { - pr_warn("overlayfs: metadata only copy up requires \"redirect_dir=follow\" on non-upper mount, falling back to metacopy=off.\n"); - config->metacopy = false; + /* + * This is to make the logic below simpler. It doesn't make any other + * difference, since config->redirect_dir is only used for upper. + */ + if (!config->upperdir && config->redirect_follow) + config->redirect_dir = true; + + /* Resolve metacopy -> redirect_dir dependency */ + if (config->metacopy && !config->redirect_dir) { + if (metacopy_opt && redirect_opt) { + pr_err("overlayfs: conflicting options: metacopy=on,redirect_dir=%s\n", + config->redirect_mode); + return -EINVAL; + } + if (redirect_opt) { + /* + * There was an explicit redirect_dir=... that resulted + * in this conflict. + */ + pr_info("overlayfs: disabling metacopy due to redirect_dir=%s\n", + config->redirect_mode); + config->metacopy = false; + } else { + /* Automatically enable redirect otherwise. */ + config->redirect_follow = config->redirect_dir = true; + } } return 0; @@ -1175,9 +1197,29 @@ out: return err; } +static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid) +{ + unsigned int i; + + if (!ofs->config.nfs_export && !(ofs->config.index && ofs->upper_mnt)) + return true; + + for (i = 0; i < ofs->numlowerfs; i++) { + /* + * We use uuid to associate an overlay lower file handle with a + * lower layer, so we can accept lower fs with null uuid as long + * as all lower layers with null uuid are on the same fs. + */ + if (uuid_equal(&ofs->lower_fs[i].sb->s_uuid, uuid)) + return false; + } + return true; +} + /* Get a unique fsid for the layer */ -static int ovl_get_fsid(struct ovl_fs *ofs, struct super_block *sb) +static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path) { + struct super_block *sb = path->mnt->mnt_sb; unsigned int i; dev_t dev; int err; @@ -1191,6 +1233,14 @@ static int ovl_get_fsid(struct ovl_fs *ofs, struct super_block *sb) return i + 1; } + if (!ovl_lower_uuid_ok(ofs, &sb->s_uuid)) { + ofs->config.index = false; + ofs->config.nfs_export = false; + pr_warn("overlayfs: %s uuid detected in lower fs '%pd2', falling back to index=off,nfs_export=off.\n", + uuid_is_null(&sb->s_uuid) ? 
"null" : "conflicting", + path->dentry); + } + err = get_anon_bdev(&dev); if (err) { pr_err("overlayfs: failed to get anonymous bdev for lowerpath\n"); @@ -1225,7 +1275,7 @@ static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack, struct vfsmount *mnt; int fsid; - err = fsid = ovl_get_fsid(ofs, stack[i].mnt->mnt_sb); + err = fsid = ovl_get_fsid(ofs, &stack[i]); if (err < 0) goto out; diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c index ace4fe4c39a9..7c01327b1852 100644 --- a/fs/overlayfs/util.c +++ b/fs/overlayfs/util.c @@ -65,8 +65,7 @@ struct super_block *ovl_same_sb(struct super_block *sb) */ int ovl_can_decode_fh(struct super_block *sb) { - if (!sb->s_export_op || !sb->s_export_op->fh_to_dentry || - uuid_is_null(&sb->s_uuid)) + if (!sb->s_export_op || !sb->s_export_op->fh_to_dentry) return 0; return sb->s_export_op->encode_fh ? -1 : FILEID_INO32_GEN; @@ -522,13 +521,13 @@ bool ovl_already_copied_up(struct dentry *dentry, int flags) int ovl_copy_up_start(struct dentry *dentry, int flags) { - struct ovl_inode *oi = OVL_I(d_inode(dentry)); + struct inode *inode = d_inode(dentry); int err; - err = mutex_lock_interruptible(&oi->lock); + err = ovl_inode_lock(inode); if (!err && ovl_already_copied_up_locked(dentry, flags)) { err = 1; /* Already copied up */ - mutex_unlock(&oi->lock); + ovl_inode_unlock(inode); } return err; @@ -536,7 +535,7 @@ int ovl_copy_up_start(struct dentry *dentry, int flags) void ovl_copy_up_end(struct dentry *dentry) { - mutex_unlock(&OVL_I(d_inode(dentry))->lock); + ovl_inode_unlock(d_inode(dentry)); } bool ovl_check_origin_xattr(struct dentry *dentry) @@ -739,14 +738,14 @@ fail: * Operations that change overlay inode and upper inode nlink need to be * synchronized with copy up for persistent nlink accounting. 
*/ -int ovl_nlink_start(struct dentry *dentry, bool *locked) +int ovl_nlink_start(struct dentry *dentry) { - struct ovl_inode *oi = OVL_I(d_inode(dentry)); + struct inode *inode = d_inode(dentry); const struct cred *old_cred; int err; - if (!d_inode(dentry)) - return 0; + if (WARN_ON(!inode)) + return -ENOENT; /* * With inodes index is enabled, we store the union overlay nlink @@ -768,11 +767,11 @@ int ovl_nlink_start(struct dentry *dentry, bool *locked) return err; } - err = mutex_lock_interruptible(&oi->lock); + err = ovl_inode_lock(inode); if (err) return err; - if (d_is_dir(dentry) || !ovl_test_flag(OVL_INDEX, d_inode(dentry))) + if (d_is_dir(dentry) || !ovl_test_flag(OVL_INDEX, inode)) goto out; old_cred = ovl_override_creds(dentry->d_sb); @@ -787,27 +786,24 @@ int ovl_nlink_start(struct dentry *dentry, bool *locked) out: if (err) - mutex_unlock(&oi->lock); - else - *locked = true; + ovl_inode_unlock(inode); return err; } -void ovl_nlink_end(struct dentry *dentry, bool locked) +void ovl_nlink_end(struct dentry *dentry) { - if (locked) { - if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) && - d_inode(dentry)->i_nlink == 0) { - const struct cred *old_cred; + struct inode *inode = d_inode(dentry); - old_cred = ovl_override_creds(dentry->d_sb); - ovl_cleanup_index(dentry); - revert_creds(old_cred); - } + if (ovl_test_flag(OVL_INDEX, inode) && inode->i_nlink == 0) { + const struct cred *old_cred; - mutex_unlock(&OVL_I(d_inode(dentry))->lock); + old_cred = ovl_override_creds(dentry->d_sb); + ovl_cleanup_index(dentry); + revert_creds(old_cred); } + + ovl_inode_unlock(inode); } int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir) diff --git a/fs/proc/base.c b/fs/proc/base.c index 7e9f07bf260d..ce3465479447 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2905,6 +2905,21 @@ static int proc_pid_patch_state(struct seq_file *m, struct pid_namespace *ns, } #endif /* CONFIG_LIVEPATCH */ +#ifdef CONFIG_STACKLEAK_METRICS +static int proc_stack_depth(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task) +{ + unsigned long prev_depth = THREAD_SIZE - + (task->prev_lowest_stack & (THREAD_SIZE - 1)); + unsigned long depth = THREAD_SIZE - + (task->lowest_stack & (THREAD_SIZE - 1)); + + seq_printf(m, "previous stack depth: %lu\nstack depth: %lu\n", + prev_depth, depth); + return 0; +} +#endif /* CONFIG_STACKLEAK_METRICS */ + /* * Thread groups */ @@ -3006,6 +3021,9 @@ static const struct pid_entry tgid_base_stuff[] = { #ifdef CONFIG_LIVEPATCH ONE("patch_state", S_IRUSR, proc_pid_patch_state), #endif +#ifdef CONFIG_STACKLEAK_METRICS + ONE("stack_depth", S_IRUGO, proc_stack_depth), +#endif }; static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx) diff --git a/fs/read_write.c b/fs/read_write.c index 603794b207eb..bfcb4ced5664 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -1407,7 +1407,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, goto fput_in; if (!(out.file->f_mode & FMODE_WRITE)) goto fput_out; - retval = -EINVAL; in_inode = file_inode(in.file); out_inode = file_inode(out.file); out_pos = out.file->f_pos; @@ -1588,11 +1587,15 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in, * Try cloning first, this is supported by more file systems, and * more efficient if both clone and copy are supported (e.g. NFS). 
*/ - if (file_in->f_op->clone_file_range) { - ret = file_in->f_op->clone_file_range(file_in, pos_in, - file_out, pos_out, len); - if (ret == 0) { - ret = len; + if (file_in->f_op->remap_file_range) { + loff_t cloned; + + cloned = file_in->f_op->remap_file_range(file_in, pos_in, + file_out, pos_out, + min_t(loff_t, MAX_RW_COUNT, len), + REMAP_FILE_CAN_SHORTEN); + if (cloned > 0) { + ret = cloned; goto done; } } @@ -1686,11 +1689,12 @@ out2: return ret; } -static int clone_verify_area(struct file *file, loff_t pos, u64 len, bool write) +static int remap_verify_area(struct file *file, loff_t pos, loff_t len, + bool write) { struct inode *inode = file_inode(file); - if (unlikely(pos < 0)) + if (unlikely(pos < 0 || len < 0)) return -EINVAL; if (unlikely((loff_t) (pos + len) < 0)) @@ -1708,22 +1712,150 @@ static int clone_verify_area(struct file *file, loff_t pos, u64 len, bool write) return security_file_permission(file, write ? MAY_WRITE : MAY_READ); } +/* + * Ensure that we don't remap a partial EOF block in the middle of something + * else. Assume that the offsets have already been checked for block + * alignment. + * + * For deduplication we always scale down to the previous block because we + * can't meaningfully compare post-EOF contents. + * + * For clone we only link a partial EOF block above the destination file's EOF. + * + * Shorten the request if possible. + */ +static int generic_remap_check_len(struct inode *inode_in, + struct inode *inode_out, + loff_t pos_out, + loff_t *len, + unsigned int remap_flags) +{ + u64 blkmask = i_blocksize(inode_in) - 1; + loff_t new_len = *len; + + if ((*len & blkmask) == 0) + return 0; + + if ((remap_flags & REMAP_FILE_DEDUP) || + pos_out + *len < i_size_read(inode_out)) + new_len &= ~blkmask; + + if (new_len == *len) + return 0; + + if (remap_flags & REMAP_FILE_CAN_SHORTEN) { + *len = new_len; + return 0; + } + + return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL; +} + +/* + * Read a page's worth of file data into the page cache. Return the page + * locked. + */ +static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset) +{ + struct page *page; + + page = read_mapping_page(inode->i_mapping, offset >> PAGE_SHIFT, NULL); + if (IS_ERR(page)) + return page; + if (!PageUptodate(page)) { + put_page(page); + return ERR_PTR(-EIO); + } + lock_page(page); + return page; +} + +/* + * Compare extents of two files to see if they are the same. + * Caller must have locked both inodes to prevent write races. 
+ */ +static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff, + struct inode *dest, loff_t destoff, + loff_t len, bool *is_same) +{ + loff_t src_poff; + loff_t dest_poff; + void *src_addr; + void *dest_addr; + struct page *src_page; + struct page *dest_page; + loff_t cmp_len; + bool same; + int error; + + error = -EINVAL; + same = true; + while (len) { + src_poff = srcoff & (PAGE_SIZE - 1); + dest_poff = destoff & (PAGE_SIZE - 1); + cmp_len = min(PAGE_SIZE - src_poff, + PAGE_SIZE - dest_poff); + cmp_len = min(cmp_len, len); + if (cmp_len <= 0) + goto out_error; + + src_page = vfs_dedupe_get_page(src, srcoff); + if (IS_ERR(src_page)) { + error = PTR_ERR(src_page); + goto out_error; + } + dest_page = vfs_dedupe_get_page(dest, destoff); + if (IS_ERR(dest_page)) { + error = PTR_ERR(dest_page); + unlock_page(src_page); + put_page(src_page); + goto out_error; + } + src_addr = kmap_atomic(src_page); + dest_addr = kmap_atomic(dest_page); + + flush_dcache_page(src_page); + flush_dcache_page(dest_page); + + if (memcmp(src_addr + src_poff, dest_addr + dest_poff, cmp_len)) + same = false; + + kunmap_atomic(dest_addr); + kunmap_atomic(src_addr); + unlock_page(dest_page); + unlock_page(src_page); + put_page(dest_page); + put_page(src_page); + + if (!same) + break; + + srcoff += cmp_len; + destoff += cmp_len; + len -= cmp_len; + } + + *is_same = same; + return 0; + +out_error: + return error; +} /* * Check that the two inodes are eligible for cloning, the ranges make * sense, and then flush all dirty data. Caller must ensure that the * inodes have been locked against any other modifications. * - * Returns: 0 for "nothing to clone", 1 for "something to clone", or - * the usual negative error code. + * If there's an error, then the usual negative error code is returned. + * Otherwise returns 0 with *len set to the request length. */ -int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in, - struct inode *inode_out, loff_t pos_out, - u64 *len, bool is_dedupe) +int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t *len, unsigned int remap_flags) { - loff_t bs = inode_out->i_sb->s_blocksize; - loff_t blen; - loff_t isize; + struct inode *inode_in = file_inode(file_in); + struct inode *inode_out = file_inode(file_out); bool same_inode = (inode_in == inode_out); int ret; @@ -1740,50 +1872,24 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in, if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode)) return -EINVAL; - /* Are we going all the way to the end? */ - isize = i_size_read(inode_in); - if (isize == 0) - return 0; - /* Zero length dedupe exits immediately; reflink goes to EOF. */ if (*len == 0) { - if (is_dedupe || pos_in == isize) + loff_t isize = i_size_read(inode_in); + + if ((remap_flags & REMAP_FILE_DEDUP) || pos_in == isize) return 0; if (pos_in > isize) return -EINVAL; *len = isize - pos_in; + if (*len == 0) + return 0; } - /* Ensure offsets don't wrap and the input is inside i_size */ - if (pos_in + *len < pos_in || pos_out + *len < pos_out || - pos_in + *len > isize) - return -EINVAL; - - /* Don't allow dedupe past EOF in the dest file */ - if (is_dedupe) { - loff_t disize; - - disize = i_size_read(inode_out); - if (pos_out >= disize || pos_out + *len > disize) - return -EINVAL; - } - - /* If we're linking to EOF, continue to the block boundary. 
*/ - if (pos_in + *len == isize) - blen = ALIGN(isize, bs) - pos_in; - else - blen = *len; - - /* Only reflink if we're aligned to block boundaries */ - if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_in + blen, bs) || - !IS_ALIGNED(pos_out, bs) || !IS_ALIGNED(pos_out + blen, bs)) - return -EINVAL; - - /* Don't allow overlapped reflink within the same file */ - if (same_inode) { - if (pos_out + blen > pos_in && pos_out < pos_in + blen) - return -EINVAL; - } + /* Check that we don't violate system file offset limits. */ + ret = generic_remap_checks(file_in, pos_in, file_out, pos_out, len, + remap_flags); + if (ret) + return ret; /* Wait for the completion of any pending IOs on both files */ inode_dio_wait(inode_in); @@ -1803,7 +1909,7 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in, /* * Check that the extents are the same. */ - if (is_dedupe) { + if (remap_flags & REMAP_FILE_DEDUP) { bool is_same = false; ret = vfs_dedupe_file_range_compare(inode_in, pos_in, @@ -1814,16 +1920,43 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in, return -EBADE; } - return 1; + ret = generic_remap_check_len(inode_in, inode_out, pos_out, len, + remap_flags); + if (ret) + return ret; + + /* If can't alter the file contents, we're done. */ + if (!(remap_flags & REMAP_FILE_DEDUP)) { + /* Update the timestamps, since we can alter file contents. */ + if (!(file_out->f_mode & FMODE_NOCMTIME)) { + ret = file_update_time(file_out); + if (ret) + return ret; + } + + /* + * Clear the security bits if the process is not being run by + * root. This keeps people from modifying setuid and setgid + * binaries. + */ + ret = file_remove_privs(file_out); + if (ret) + return ret; + } + + return 0; } -EXPORT_SYMBOL(vfs_clone_file_prep_inodes); +EXPORT_SYMBOL(generic_remap_file_range_prep); -int do_clone_file_range(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, u64 len) +loff_t do_clone_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t len, unsigned int remap_flags) { struct inode *inode_in = file_inode(file_in); struct inode *inode_out = file_inode(file_out); - int ret; + loff_t ret; + + WARN_ON_ONCE(remap_flags); if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode)) return -EISDIR; @@ -1843,155 +1976,76 @@ int do_clone_file_range(struct file *file_in, loff_t pos_in, (file_out->f_flags & O_APPEND)) return -EBADF; - if (!file_in->f_op->clone_file_range) + if (!file_in->f_op->remap_file_range) return -EOPNOTSUPP; - ret = clone_verify_area(file_in, pos_in, len, false); + ret = remap_verify_area(file_in, pos_in, len, false); if (ret) return ret; - ret = clone_verify_area(file_out, pos_out, len, true); + ret = remap_verify_area(file_out, pos_out, len, true); if (ret) return ret; - if (pos_in + len > i_size_read(inode_in)) - return -EINVAL; - - ret = file_in->f_op->clone_file_range(file_in, pos_in, - file_out, pos_out, len); - if (!ret) { - fsnotify_access(file_in); - fsnotify_modify(file_out); - } + ret = file_in->f_op->remap_file_range(file_in, pos_in, + file_out, pos_out, len, remap_flags); + if (ret < 0) + return ret; + fsnotify_access(file_in); + fsnotify_modify(file_out); return ret; } EXPORT_SYMBOL(do_clone_file_range); -int vfs_clone_file_range(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, u64 len) +loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t len, unsigned int remap_flags) { - int ret; + loff_t 
ret; file_start_write(file_out); - ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len); + ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len, + remap_flags); file_end_write(file_out); return ret; } EXPORT_SYMBOL(vfs_clone_file_range); -/* - * Read a page's worth of file data into the page cache. Return the page - * locked. - */ -static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset) +/* Check whether we are allowed to dedupe the destination file */ +static bool allow_file_dedupe(struct file *file) { - struct address_space *mapping; - struct page *page; - pgoff_t n; - - n = offset >> PAGE_SHIFT; - mapping = inode->i_mapping; - page = read_mapping_page(mapping, n, NULL); - if (IS_ERR(page)) - return page; - if (!PageUptodate(page)) { - put_page(page); - return ERR_PTR(-EIO); - } - lock_page(page); - return page; + if (capable(CAP_SYS_ADMIN)) + return true; + if (file->f_mode & FMODE_WRITE) + return true; + if (uid_eq(current_fsuid(), file_inode(file)->i_uid)) + return true; + if (!inode_permission(file_inode(file), MAY_WRITE)) + return true; + return false; } -/* - * Compare extents of two files to see if they are the same. - * Caller must have locked both inodes to prevent write races. - */ -int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff, - struct inode *dest, loff_t destoff, - loff_t len, bool *is_same) +loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos, + struct file *dst_file, loff_t dst_pos, + loff_t len, unsigned int remap_flags) { - loff_t src_poff; - loff_t dest_poff; - void *src_addr; - void *dest_addr; - struct page *src_page; - struct page *dest_page; - loff_t cmp_len; - bool same; - int error; - - error = -EINVAL; - same = true; - while (len) { - src_poff = srcoff & (PAGE_SIZE - 1); - dest_poff = destoff & (PAGE_SIZE - 1); - cmp_len = min(PAGE_SIZE - src_poff, - PAGE_SIZE - dest_poff); - cmp_len = min(cmp_len, len); - if (cmp_len <= 0) - goto out_error; - - src_page = vfs_dedupe_get_page(src, srcoff); - if (IS_ERR(src_page)) { - error = PTR_ERR(src_page); - goto out_error; - } - dest_page = vfs_dedupe_get_page(dest, destoff); - if (IS_ERR(dest_page)) { - error = PTR_ERR(dest_page); - unlock_page(src_page); - put_page(src_page); - goto out_error; - } - src_addr = kmap_atomic(src_page); - dest_addr = kmap_atomic(dest_page); + loff_t ret; - flush_dcache_page(src_page); - flush_dcache_page(dest_page); - - if (memcmp(src_addr + src_poff, dest_addr + dest_poff, cmp_len)) - same = false; - - kunmap_atomic(dest_addr); - kunmap_atomic(src_addr); - unlock_page(dest_page); - unlock_page(src_page); - put_page(dest_page); - put_page(src_page); - - if (!same) - break; - - srcoff += cmp_len; - destoff += cmp_len; - len -= cmp_len; - } - - *is_same = same; - return 0; - -out_error: - return error; -} -EXPORT_SYMBOL(vfs_dedupe_file_range_compare); - -int vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos, - struct file *dst_file, loff_t dst_pos, u64 len) -{ - s64 ret; + WARN_ON_ONCE(remap_flags & ~(REMAP_FILE_DEDUP | + REMAP_FILE_CAN_SHORTEN)); ret = mnt_want_write_file(dst_file); if (ret) return ret; - ret = clone_verify_area(dst_file, dst_pos, len, true); + ret = remap_verify_area(dst_file, dst_pos, len, true); if (ret < 0) goto out_drop_write; - ret = -EINVAL; - if (!(capable(CAP_SYS_ADMIN) || (dst_file->f_mode & FMODE_WRITE))) + ret = -EPERM; + if (!allow_file_dedupe(dst_file)) goto out_drop_write; ret = -EXDEV; @@ -2003,11 +2057,16 @@ int vfs_dedupe_file_range_one(struct file *src_file, 
loff_t src_pos, goto out_drop_write; ret = -EINVAL; - if (!dst_file->f_op->dedupe_file_range) + if (!dst_file->f_op->remap_file_range) goto out_drop_write; - ret = dst_file->f_op->dedupe_file_range(src_file, src_pos, - dst_file, dst_pos, len); + if (len == 0) { + ret = 0; + goto out_drop_write; + } + + ret = dst_file->f_op->remap_file_range(src_file, src_pos, dst_file, + dst_pos, len, remap_flags | REMAP_FILE_DEDUP); out_drop_write: mnt_drop_write_file(dst_file); @@ -2024,7 +2083,7 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same) int i; int ret; u16 count = same->dest_count; - int deduped; + loff_t deduped; if (!(file->f_mode & FMODE_READ)) return -EINVAL; @@ -2043,7 +2102,7 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same) if (!S_ISREG(src->i_mode)) goto out; - ret = clone_verify_area(file, off, len, false); + ret = remap_verify_area(file, off, len, false); if (ret < 0) goto out; ret = 0; @@ -2075,7 +2134,8 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same) } deduped = vfs_dedupe_file_range_one(file, off, dst_file, - info->dest_offset, len); + info->dest_offset, len, + REMAP_FILE_CAN_SHORTEN); if (deduped == -EBADE) info->status = FILE_DEDUPE_RANGE_DIFFERS; else if (deduped < 0) diff --git a/fs/splice.c b/fs/splice.c index b3daa971f597..3553f1956508 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -301,7 +301,7 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos, struct kiocb kiocb; int idx, ret; - iov_iter_pipe(&to, ITER_PIPE | READ, pipe, len); + iov_iter_pipe(&to, READ, pipe, len); idx = to.idx; init_sync_kiocb(&kiocb, in); kiocb.ki_pos = *ppos; @@ -386,7 +386,7 @@ static ssize_t default_file_splice_read(struct file *in, loff_t *ppos, */ offset = *ppos & ~PAGE_MASK; - iov_iter_pipe(&to, ITER_PIPE | READ, pipe, len + offset); + iov_iter_pipe(&to, READ, pipe, len + offset); res = iov_iter_get_pages_alloc(&to, &pages, len + offset, &base); if (res <= 0) @@ -745,8 +745,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out, left -= this_len; } - iov_iter_bvec(&from, ITER_BVEC | WRITE, array, n, - sd.total_len - left); + iov_iter_bvec(&from, WRITE, array, n, sd.total_len - left); ret = vfs_iter_write(out, &from, &sd.pos, 0); if (ret <= 0) break; diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig index bbc78549be4c..529856fbccd0 100644 --- a/fs/ubifs/Kconfig +++ b/fs/ubifs/Kconfig @@ -7,6 +7,7 @@ config UBIFS_FS select CRYPTO if UBIFS_FS_ZLIB select CRYPTO_LZO if UBIFS_FS_LZO select CRYPTO_DEFLATE if UBIFS_FS_ZLIB + select CRYPTO_HASH_INFO depends on MTD_UBI help UBIFS is a file system for flash devices which works on top of UBI. @@ -85,3 +86,13 @@ config UBIFS_FS_SECURITY the extended attribute support in advance. If you are not using a security module, say N. + +config UBIFS_FS_AUTHENTICATION + bool "UBIFS authentication support" + select CRYPTO_HMAC + help + Enable authentication support for UBIFS. This feature offers protection + against offline changes for both data and metadata of the filesystem. + If you say yes here you should also select a hashing algorithm such as + sha256, these are not selected automatically since there are many + different options. 
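The HMAC key behind this protection is not configured through Kconfig: at mount time the kernel looks it up with request_key(&key_type_logon, c->auth_key_name, NULL) (see ubifs_init_authentication() later in this patch), so the key must already be present in one of the mounting process's keyrings. The userspace sketch below is not part of the patch set; it shows one plausible way to install such a "logon" key with libkeyutils. The key description "ubifs:mykey", the zeroed key material and the choice of the session keyring are illustrative assumptions.

#include <keyutils.h>
#include <stdio.h>

int main(void)
{
	/* Raw HMAC key material; a real setup would use random secret bytes. */
	static const unsigned char secret[32] = { 0 };
	key_serial_t kid;

	/*
	 * "logon" keys need a "<service>:" prefix in their description and
	 * cannot be read back from userspace once they are installed.
	 */
	kid = add_key("logon", "ubifs:mykey", secret, sizeof(secret),
		      KEY_SPEC_SESSION_KEYRING);
	if (kid < 0) {
		perror("add_key");
		return 1;
	}

	printf("installed logon key, serial %d\n", (int)kid);
	return 0;
}

Link with -lkeyutils. How the key description and the hash algorithm are passed at mount time is handled elsewhere in the series and is not reproduced in this excerpt.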
diff --git a/fs/ubifs/Makefile b/fs/ubifs/Makefile index 6197d7e539e4..5f838319c8d5 100644 --- a/fs/ubifs/Makefile +++ b/fs/ubifs/Makefile @@ -8,3 +8,4 @@ ubifs-y += recovery.o ioctl.o lpt_commit.o tnc_misc.o debug.o ubifs-y += misc.o ubifs-$(CONFIG_UBIFS_FS_ENCRYPTION) += crypto.o ubifs-$(CONFIG_UBIFS_FS_XATTR) += xattr.o +ubifs-$(CONFIG_UBIFS_FS_AUTHENTICATION) += auth.o diff --git a/fs/ubifs/auth.c b/fs/ubifs/auth.c new file mode 100644 index 000000000000..124e965a28b3 --- /dev/null +++ b/fs/ubifs/auth.c @@ -0,0 +1,502 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file is part of UBIFS. + * + * Copyright (C) 2018 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de> + */ + +/* + * This file implements various helper functions for UBIFS authentication support. + */ + +#include <linux/crypto.h> +#include <crypto/hash.h> +#include <crypto/sha.h> +#include <crypto/algapi.h> +#include <keys/user-type.h> + +#include "ubifs.h" + +/** + * __ubifs_node_calc_hash - calculate the hash of a UBIFS node + * @c: UBIFS file-system description object + * @node: the node to calculate a hash for + * @hash: the returned hash + * + * Returns 0 for success or a negative error code otherwise. + */ +int __ubifs_node_calc_hash(const struct ubifs_info *c, const void *node, + u8 *hash) +{ + const struct ubifs_ch *ch = node; + SHASH_DESC_ON_STACK(shash, c->hash_tfm); + int err; + + shash->tfm = c->hash_tfm; + shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + err = crypto_shash_digest(shash, node, le32_to_cpu(ch->len), hash); + if (err < 0) + return err; + return 0; +} + +/** + * ubifs_hash_calc_hmac - calculate a HMAC from a hash + * @c: UBIFS file-system description object + * @hash: the hash to calculate a HMAC for + * @hmac: the returned HMAC + * + * Returns 0 for success or a negative error code otherwise. + */ +static int ubifs_hash_calc_hmac(const struct ubifs_info *c, const u8 *hash, + u8 *hmac) +{ + SHASH_DESC_ON_STACK(shash, c->hmac_tfm); + int err; + + shash->tfm = c->hmac_tfm; + shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + err = crypto_shash_digest(shash, hash, c->hash_len, hmac); + if (err < 0) + return err; + return 0; +} + +/** + * ubifs_prepare_auth_node - Prepare an authentication node + * @c: UBIFS file-system description object + * @node: the authentication node to prepare + * @inhash: input hash of previous nodes + * + * This function prepares an authentication node for writing onto flash. + * It creates a HMAC from the given input hash and writes it to the node. + * + * Returns 0 for success or a negative error code otherwise.
+ */ +int ubifs_prepare_auth_node(struct ubifs_info *c, void *node, + struct shash_desc *inhash) +{ + SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm); + struct ubifs_auth_node *auth = node; + u8 *hash; + int err; + + hash = kmalloc(crypto_shash_descsize(c->hash_tfm), GFP_NOFS); + if (!hash) + return -ENOMEM; + + hash_desc->tfm = c->hash_tfm; + hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + ubifs_shash_copy_state(c, inhash, hash_desc); + + err = crypto_shash_final(hash_desc, hash); + if (err) + goto out; + + err = ubifs_hash_calc_hmac(c, hash, auth->hmac); + if (err) + goto out; + + auth->ch.node_type = UBIFS_AUTH_NODE; + ubifs_prepare_node(c, auth, ubifs_auth_node_sz(c), 0); + + err = 0; +out: + kfree(hash); + + return err; +} + +static struct shash_desc *ubifs_get_desc(const struct ubifs_info *c, + struct crypto_shash *tfm) +{ + struct shash_desc *desc; + int err; + + if (!ubifs_authenticated(c)) + return NULL; + + desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL); + if (!desc) + return ERR_PTR(-ENOMEM); + + desc->tfm = tfm; + desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + err = crypto_shash_init(desc); + if (err) { + kfree(desc); + return ERR_PTR(err); + } + + return desc; +} + +/** + * __ubifs_hash_get_desc - get a descriptor suitable for hashing a node + * @c: UBIFS file-system description object + * + * This function returns a descriptor suitable for hashing a node. Free after use + * with kfree. + */ +struct shash_desc *__ubifs_hash_get_desc(const struct ubifs_info *c) +{ + return ubifs_get_desc(c, c->hash_tfm); +} + +/** + * __ubifs_shash_final - finalize shash + * @c: UBIFS file-system description object + * @desc: the descriptor + * @out: the output hash + * + * Simple wrapper around crypto_shash_final(), safe to be called with + * disabled authentication. + */ +int __ubifs_shash_final(const struct ubifs_info *c, struct shash_desc *desc, + u8 *out) +{ + if (ubifs_authenticated(c)) + return crypto_shash_final(desc, out); + + return 0; +} + +/** + * ubifs_bad_hash - Report hash mismatches + * @c: UBIFS file-system description object + * @node: the node + * @hash: the expected hash + * @lnum: the LEB @node was read from + * @offs: offset in LEB @node was read from + * + * This function reports a hash mismatch when a node has a different hash than + * expected. + */ +void ubifs_bad_hash(const struct ubifs_info *c, const void *node, const u8 *hash, + int lnum, int offs) +{ + int len = min(c->hash_len, 20); + int cropped = len != c->hash_len; + const char *cont = cropped ? "..." : ""; + + u8 calc[UBIFS_HASH_ARR_SZ]; + + __ubifs_node_calc_hash(c, node, calc); + + ubifs_err(c, "hash mismatch on node at LEB %d:%d", lnum, offs); + ubifs_err(c, "hash expected: %*ph%s", len, hash, cont); + ubifs_err(c, "hash calculated: %*ph%s", len, calc, cont); +} + +/** + * __ubifs_node_check_hash - check the hash of a node against given hash + * @c: UBIFS file-system description object + * @node: the node + * @expected: the expected hash + * + * This function calculates a hash over a node and compares it to the given hash. + * Returns 0 if both hashes are equal or authentication is disabled, otherwise a + * negative error code is returned. 
+ */ +int __ubifs_node_check_hash(const struct ubifs_info *c, const void *node, + const u8 *expected) +{ + u8 calc[UBIFS_HASH_ARR_SZ]; + int err; + + err = __ubifs_node_calc_hash(c, node, calc); + if (err) + return err; + + if (ubifs_check_hash(c, expected, calc)) + return -EPERM; + + return 0; +} + +/** + * ubifs_init_authentication - initialize UBIFS authentication support + * @c: UBIFS file-system description object + * + * This function returns 0 for success or a negative error code otherwise. + */ +int ubifs_init_authentication(struct ubifs_info *c) +{ + struct key *keyring_key; + const struct user_key_payload *ukp; + int err; + char hmac_name[CRYPTO_MAX_ALG_NAME]; + + if (!c->auth_hash_name) { + ubifs_err(c, "authentication hash name needed with authentication"); + return -EINVAL; + } + + c->auth_hash_algo = match_string(hash_algo_name, HASH_ALGO__LAST, + c->auth_hash_name); + if ((int)c->auth_hash_algo < 0) { + ubifs_err(c, "Unknown hash algo %s specified", + c->auth_hash_name); + return -EINVAL; + } + + snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", + c->auth_hash_name); + + keyring_key = request_key(&key_type_logon, c->auth_key_name, NULL); + + if (IS_ERR(keyring_key)) { + ubifs_err(c, "Failed to request key: %ld", + PTR_ERR(keyring_key)); + return PTR_ERR(keyring_key); + } + + down_read(&keyring_key->sem); + + if (keyring_key->type != &key_type_logon) { + ubifs_err(c, "key type must be logon"); + err = -ENOKEY; + goto out; + } + + ukp = user_key_payload_locked(keyring_key); + if (!ukp) { + /* key was revoked before we acquired its semaphore */ + err = -EKEYREVOKED; + goto out; + } + + c->hash_tfm = crypto_alloc_shash(c->auth_hash_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(c->hash_tfm)) { + err = PTR_ERR(c->hash_tfm); + ubifs_err(c, "Can not allocate %s: %d", + c->auth_hash_name, err); + goto out; + } + + c->hash_len = crypto_shash_digestsize(c->hash_tfm); + if (c->hash_len > UBIFS_HASH_ARR_SZ) { + ubifs_err(c, "hash %s is bigger than maximum allowed hash size (%d > %d)", + c->auth_hash_name, c->hash_len, UBIFS_HASH_ARR_SZ); + err = -EINVAL; + goto out_free_hash; + } + + c->hmac_tfm = crypto_alloc_shash(hmac_name, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(c->hmac_tfm)) { + err = PTR_ERR(c->hmac_tfm); + ubifs_err(c, "Can not allocate %s: %d", hmac_name, err); + goto out_free_hash; + } + + c->hmac_desc_len = crypto_shash_digestsize(c->hmac_tfm); + if (c->hmac_desc_len > UBIFS_HMAC_ARR_SZ) { + ubifs_err(c, "hmac %s is bigger than maximum allowed hmac size (%d > %d)", + hmac_name, c->hmac_desc_len, UBIFS_HMAC_ARR_SZ); + err = -EINVAL; + goto out_free_hash; + } + + err = crypto_shash_setkey(c->hmac_tfm, ukp->data, ukp->datalen); + if (err) + goto out_free_hmac; + + c->authenticated = true; + + c->log_hash = ubifs_hash_get_desc(c); + if (IS_ERR(c->log_hash)) + goto out_free_hmac; + + err = 0; + +out_free_hmac: + if (err) + crypto_free_shash(c->hmac_tfm); +out_free_hash: + if (err) + crypto_free_shash(c->hash_tfm); +out: + up_read(&keyring_key->sem); + key_put(keyring_key); + + return err; +} + +/** + * __ubifs_exit_authentication - release resource + * @c: UBIFS file-system description object + * + * This function releases the authentication related resources. 
+ */ +void __ubifs_exit_authentication(struct ubifs_info *c) +{ + if (!ubifs_authenticated(c)) + return; + + crypto_free_shash(c->hmac_tfm); + crypto_free_shash(c->hash_tfm); + kfree(c->log_hash); +} + +/** + * ubifs_node_calc_hmac - calculate the HMAC of a UBIFS node + * @c: UBIFS file-system description object + * @node: the node to insert a HMAC into. + * @len: the length of the node + * @ofs_hmac: the offset in the node where the HMAC is inserted + * @hmac: returned HMAC + * + * This function calculates a HMAC of a UBIFS node. The HMAC is expected to be + * embedded into the node, so this area is not covered by the HMAC. Also not + * covered is the UBIFS_NODE_MAGIC and the CRC of the node. + */ +static int ubifs_node_calc_hmac(const struct ubifs_info *c, const void *node, + int len, int ofs_hmac, void *hmac) +{ + SHASH_DESC_ON_STACK(shash, c->hmac_tfm); + int hmac_len = c->hmac_desc_len; + int err; + + ubifs_assert(c, ofs_hmac > 8); + ubifs_assert(c, ofs_hmac + hmac_len < len); + + shash->tfm = c->hmac_tfm; + shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + err = crypto_shash_init(shash); + if (err) + return err; + + /* behind common node header CRC up to HMAC begin */ + err = crypto_shash_update(shash, node + 8, ofs_hmac - 8); + if (err < 0) + return err; + + /* behind HMAC, if any */ + if (len - ofs_hmac - hmac_len > 0) { + err = crypto_shash_update(shash, node + ofs_hmac + hmac_len, + len - ofs_hmac - hmac_len); + if (err < 0) + return err; + } + + return crypto_shash_final(shash, hmac); +} + +/** + * __ubifs_node_insert_hmac - insert a HMAC into a UBIFS node + * @c: UBIFS file-system description object + * @node: the node to insert a HMAC into. + * @len: the length of the node + * @ofs_hmac: the offset in the node where the HMAC is inserted + * + * This function inserts a HMAC at offset @ofs_hmac into the node given in + * @node. + * + * This function returns 0 for success or a negative error code otherwise. + */ +int __ubifs_node_insert_hmac(const struct ubifs_info *c, void *node, int len, + int ofs_hmac) +{ + return ubifs_node_calc_hmac(c, node, len, ofs_hmac, node + ofs_hmac); +} + +/** + * __ubifs_node_verify_hmac - verify the HMAC of UBIFS node + * @c: UBIFS file-system description object + * @node: the node to insert a HMAC into. + * @len: the length of the node + * @ofs_hmac: the offset in the node where the HMAC is inserted + * + * This function verifies the HMAC at offset @ofs_hmac of the node given in + * @node. Returns 0 if successful or a negative error code otherwise. 
+ */ +int __ubifs_node_verify_hmac(const struct ubifs_info *c, const void *node, + int len, int ofs_hmac) +{ + int hmac_len = c->hmac_desc_len; + u8 *hmac; + int err; + + hmac = kmalloc(hmac_len, GFP_NOFS); + if (!hmac) + return -ENOMEM; + + err = ubifs_node_calc_hmac(c, node, len, ofs_hmac, hmac); + if (err) + return err; + + err = crypto_memneq(hmac, node + ofs_hmac, hmac_len); + + kfree(hmac); + + if (!err) + return 0; + + return -EPERM; +} + +int __ubifs_shash_copy_state(const struct ubifs_info *c, struct shash_desc *src, + struct shash_desc *target) +{ + u8 *state; + int err; + + state = kmalloc(crypto_shash_descsize(src->tfm), GFP_NOFS); + if (!state) + return -ENOMEM; + + err = crypto_shash_export(src, state); + if (err) + goto out; + + err = crypto_shash_import(target, state); + +out: + kfree(state); + + return err; +} + +/** + * ubifs_hmac_wkm - Create a HMAC of the well known message + * @c: UBIFS file-system description object + * @hmac: The HMAC of the well known message + * + * This function creates a HMAC of a well known message. This is used + * to check if the provided key is suitable to authenticate a UBIFS + * image. This is only a convenience to the user to provide a better + * error message when the wrong key is provided. + * + * This function returns 0 for success or a negative error code otherwise. + */ +int ubifs_hmac_wkm(struct ubifs_info *c, u8 *hmac) +{ + SHASH_DESC_ON_STACK(shash, c->hmac_tfm); + int err; + const char well_known_message[] = "UBIFS"; + + if (!ubifs_authenticated(c)) + return 0; + + shash->tfm = c->hmac_tfm; + shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + err = crypto_shash_init(shash); + if (err) + return err; + + err = crypto_shash_update(shash, well_known_message, + sizeof(well_known_message) - 1); + if (err < 0) + return err; + + err = crypto_shash_final(shash, hmac); + if (err) + return err; + return 0; +} diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 564e330d05b1..c49ff50fdceb 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -165,6 +165,8 @@ const char *dbg_ntype(int type) return "commit start node"; case UBIFS_ORPH_NODE: return "orphan node"; + case UBIFS_AUTH_NODE: + return "auth node"; default: return "unknown node"; } @@ -542,6 +544,10 @@ void ubifs_dump_node(const struct ubifs_info *c, const void *node) (unsigned long long)le64_to_cpu(orph->inos[i])); break; } + case UBIFS_AUTH_NODE: + { + break; + } default: pr_err("node type %d was not recognized\n", (int)ch->node_type); diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c index d2680e0b4a36..bf75fdc76fc3 100644 --- a/fs/ubifs/gc.c +++ b/fs/ubifs/gc.c @@ -254,7 +254,8 @@ static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb, snod->type == UBIFS_DATA_NODE || snod->type == UBIFS_DENT_NODE || snod->type == UBIFS_XENT_NODE || - snod->type == UBIFS_TRUN_NODE); + snod->type == UBIFS_TRUN_NODE || + snod->type == UBIFS_AUTH_NODE); if (snod->type != UBIFS_INO_NODE && snod->type != UBIFS_DATA_NODE && @@ -364,12 +365,13 @@ static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb) /* Write nodes to their new location. 
Use the first-fit strategy */ while (1) { - int avail; + int avail, moved = 0; struct ubifs_scan_node *snod, *tmp; /* Move data nodes */ list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) { - avail = c->leb_size - wbuf->offs - wbuf->used; + avail = c->leb_size - wbuf->offs - wbuf->used - + ubifs_auth_node_sz(c); if (snod->len > avail) /* * Do not skip data nodes in order to optimize @@ -377,14 +379,21 @@ static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb) */ break; + err = ubifs_shash_update(c, c->jheads[GCHD].log_hash, + snod->node, snod->len); + if (err) + goto out; + err = move_node(c, sleb, snod, wbuf); if (err) goto out; + moved = 1; } /* Move non-data nodes */ list_for_each_entry_safe(snod, tmp, &nondata, list) { - avail = c->leb_size - wbuf->offs - wbuf->used; + avail = c->leb_size - wbuf->offs - wbuf->used - + ubifs_auth_node_sz(c); if (avail < min) break; @@ -402,9 +411,41 @@ static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb) continue; } + err = ubifs_shash_update(c, c->jheads[GCHD].log_hash, + snod->node, snod->len); + if (err) + goto out; + err = move_node(c, sleb, snod, wbuf); if (err) goto out; + moved = 1; + } + + if (ubifs_authenticated(c) && moved) { + struct ubifs_auth_node *auth; + + auth = kmalloc(ubifs_auth_node_sz(c), GFP_NOFS); + if (!auth) { + err = -ENOMEM; + goto out; + } + + err = ubifs_prepare_auth_node(c, auth, + c->jheads[GCHD].log_hash); + if (err) { + kfree(auth); + goto out; + } + + err = ubifs_wbuf_write_nolock(wbuf, auth, + ubifs_auth_node_sz(c)); + if (err) { + kfree(auth); + goto out; + } + + ubifs_add_dirt(c, wbuf->lnum, ubifs_auth_node_sz(c)); } if (list_empty(&sleb->nodes) && list_empty(&nondata)) diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 099bec94b820..d124117efd42 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -365,20 +365,8 @@ static unsigned long long next_sqnum(struct ubifs_info *c) return sqnum; } -/** - * ubifs_prepare_node - prepare node to be written to flash. - * @c: UBIFS file-system description object - * @node: the node to pad - * @len: node length - * @pad: if the buffer has to be padded - * - * This function prepares node at @node to be written to the media - it - * calculates node CRC, fills the common header, and adds proper padding up to - * the next minimum I/O unit if @pad is not zero. - */ -void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad) +void ubifs_init_node(struct ubifs_info *c, void *node, int len, int pad) { - uint32_t crc; struct ubifs_ch *ch = node; unsigned long long sqnum = next_sqnum(c); @@ -389,8 +377,6 @@ void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad) ch->group_type = UBIFS_NO_NODE_GROUP; ch->sqnum = cpu_to_le64(sqnum); ch->padding[0] = ch->padding[1] = 0; - crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8); - ch->crc = cpu_to_le32(crc); if (pad) { len = ALIGN(len, 8); @@ -399,6 +385,68 @@ void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad) } } +void ubifs_crc_node(struct ubifs_info *c, void *node, int len) +{ + struct ubifs_ch *ch = node; + uint32_t crc; + + crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8); + ch->crc = cpu_to_le32(crc); +} + +/** + * ubifs_prepare_node_hmac - prepare node to be written to flash. 
+ * @c: UBIFS file-system description object + * @node: the node to pad + * @len: node length + * @hmac_offs: offset of the HMAC in the node + * @pad: if the buffer has to be padded + * + * This function prepares node at @node to be written to the media - it + * calculates node CRC, fills the common header, and adds proper padding up to + * the next minimum I/O unit if @pad is not zero. if @hmac_offs is positive then + * a HMAC is inserted into the node at the given offset. + * + * This function returns 0 for success or a negative error code otherwise. + */ +int ubifs_prepare_node_hmac(struct ubifs_info *c, void *node, int len, + int hmac_offs, int pad) +{ + int err; + + ubifs_init_node(c, node, len, pad); + + if (hmac_offs > 0) { + err = ubifs_node_insert_hmac(c, node, len, hmac_offs); + if (err) + return err; + } + + ubifs_crc_node(c, node, len); + + return 0; +} + +/** + * ubifs_prepare_node - prepare node to be written to flash. + * @c: UBIFS file-system description object + * @node: the node to pad + * @len: node length + * @pad: if the buffer has to be padded + * + * This function prepares node at @node to be written to the media - it + * calculates node CRC, fills the common header, and adds proper padding up to + * the next minimum I/O unit if @pad is not zero. + */ +void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad) +{ + /* + * Deliberately ignore return value since this function can only fail + * when a hmac offset is given. + */ + ubifs_prepare_node_hmac(c, node, len, 0, pad); +} + /** * ubifs_prep_grp_node - prepare node of a group to be written to flash. * @c: UBIFS file-system description object @@ -849,12 +897,13 @@ out: } /** - * ubifs_write_node - write node to the media. + * ubifs_write_node_hmac - write node to the media. * @c: UBIFS file-system description object * @buf: the node to write * @len: node length * @lnum: logical eraseblock number * @offs: offset within the logical eraseblock + * @hmac_offs: offset of the HMAC within the node * * This function automatically fills node magic number, assigns sequence * number, and calculates node CRC checksum. The length of the @buf buffer has @@ -862,8 +911,8 @@ out: * appends padding node and padding bytes if needed. Returns zero in case of * success and a negative error code in case of failure. */ -int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum, - int offs) +int ubifs_write_node_hmac(struct ubifs_info *c, void *buf, int len, int lnum, + int offs, int hmac_offs) { int err, buf_len = ALIGN(len, c->min_io_size); @@ -878,7 +927,10 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum, if (c->ro_error) return -EROFS; - ubifs_prepare_node(c, buf, len, 1); + err = ubifs_prepare_node_hmac(c, buf, len, hmac_offs, 1); + if (err) + return err; + err = ubifs_leb_write(c, lnum, buf, offs, buf_len); if (err) ubifs_dump_node(c, buf); @@ -887,6 +939,26 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum, } /** + * ubifs_write_node - write node to the media. + * @c: UBIFS file-system description object + * @buf: the node to write + * @len: node length + * @lnum: logical eraseblock number + * @offs: offset within the logical eraseblock + * + * This function automatically fills node magic number, assigns sequence + * number, and calculates node CRC checksum. The length of the @buf buffer has + * to be aligned to the minimal I/O unit size. This function automatically + * appends padding node and padding bytes if needed. 
Returns zero in case of + * success and a negative error code in case of failure. + */ +int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum, + int offs) +{ + return ubifs_write_node_hmac(c, buf, len, lnum, offs, -1); +} + +/** * ubifs_read_node_wbuf - read node from the media or write-buffer. * @wbuf: wbuf to check for un-written data * @buf: buffer to read to diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index 802565a17733..729dc76c83df 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c @@ -90,6 +90,12 @@ static inline void zero_trun_node_unused(struct ubifs_trun_node *trun) memset(trun->padding, 0, 12); } +static void ubifs_add_auth_dirt(struct ubifs_info *c, int lnum) +{ + if (ubifs_authenticated(c)) + ubifs_add_dirt(c, lnum, ubifs_auth_node_sz(c)); +} + /** * reserve_space - reserve space in the journal. * @c: UBIFS file-system description object @@ -228,34 +234,33 @@ out_return: return err; } -/** - * write_node - write node to a journal head. - * @c: UBIFS file-system description object - * @jhead: journal head - * @node: node to write - * @len: node length - * @lnum: LEB number written is returned here - * @offs: offset written is returned here - * - * This function writes a node to reserved space of journal head @jhead. - * Returns zero in case of success and a negative error code in case of - * failure. - */ -static int write_node(struct ubifs_info *c, int jhead, void *node, int len, - int *lnum, int *offs) +static int ubifs_hash_nodes(struct ubifs_info *c, void *node, + int len, struct shash_desc *hash) { - struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf; + int auth_node_size = ubifs_auth_node_sz(c); + int err; - ubifs_assert(c, jhead != GCHD); + while (1) { + const struct ubifs_ch *ch = node; + int nodelen = le32_to_cpu(ch->len); - *lnum = c->jheads[jhead].wbuf.lnum; - *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used; + ubifs_assert(c, len >= auth_node_size); - dbg_jnl("jhead %s, LEB %d:%d, len %d", - dbg_jhead(jhead), *lnum, *offs, len); - ubifs_prepare_node(c, node, len, 0); + if (len == auth_node_size) + break; + + ubifs_assert(c, len > nodelen); + ubifs_assert(c, ch->magic == cpu_to_le32(UBIFS_NODE_MAGIC)); - return ubifs_wbuf_write_nolock(wbuf, node, len); + err = ubifs_shash_update(c, hash, (void *)node, nodelen); + if (err) + return err; + + node += ALIGN(nodelen, 8); + len -= ALIGN(nodelen, 8); + } + + return ubifs_prepare_auth_node(c, node, hash); } /** @@ -268,9 +273,9 @@ static int write_node(struct ubifs_info *c, int jhead, void *node, int len, * @offs: offset written is returned here * @sync: non-zero if the write-buffer has to by synchronized * - * This function is the same as 'write_node()' but it does not assume the - * buffer it is writing is a node, so it does not prepare it (which means - * initializing common header and calculating CRC). + * This function writes data to the reserved space of journal head @jhead. + * Returns zero in case of success and a negative error code in case of + * failure. 
*/ static int write_head(struct ubifs_info *c, int jhead, void *buf, int len, int *lnum, int *offs, int sync) @@ -285,6 +290,12 @@ static int write_head(struct ubifs_info *c, int jhead, void *buf, int len, dbg_jnl("jhead %s, LEB %d:%d, len %d", dbg_jhead(jhead), *lnum, *offs, len); + if (ubifs_authenticated(c)) { + err = ubifs_hash_nodes(c, buf, len, c->jheads[jhead].log_hash); + if (err) + return err; + } + err = ubifs_wbuf_write_nolock(wbuf, buf, len); if (err) return err; @@ -548,6 +559,9 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, struct ubifs_dent_node *dent; struct ubifs_ino_node *ino; union ubifs_key dent_key, ino_key; + u8 hash_dent[UBIFS_HASH_ARR_SZ]; + u8 hash_ino[UBIFS_HASH_ARR_SZ]; + u8 hash_ino_host[UBIFS_HASH_ARR_SZ]; ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex)); @@ -570,7 +584,10 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ; /* Make sure to also account for extended attributes */ - len += host_ui->data_len; + if (ubifs_authenticated(c)) + len += ALIGN(host_ui->data_len, 8) + ubifs_auth_node_sz(c); + else + len += host_ui->data_len; dent = kzalloc(len, GFP_NOFS); if (!dent) @@ -602,11 +619,21 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, zero_dent_node_unused(dent); ubifs_prep_grp_node(c, dent, dlen, 0); + err = ubifs_node_calc_hash(c, dent, hash_dent); + if (err) + goto out_release; ino = (void *)dent + aligned_dlen; pack_inode(c, ino, inode, 0); + err = ubifs_node_calc_hash(c, ino, hash_ino); + if (err) + goto out_release; + ino = (void *)ino + aligned_ilen; pack_inode(c, ino, dir, 1); + err = ubifs_node_calc_hash(c, ino, hash_ino_host); + if (err) + goto out_release; if (last_reference) { err = ubifs_add_orphan(c, inode->i_ino); @@ -628,6 +655,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, } release_head(c, BASEHD); kfree(dent); + ubifs_add_auth_dirt(c, lnum); if (deletion) { if (nm->hash) @@ -638,7 +666,8 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, goto out_ro; err = ubifs_add_dirt(c, lnum, dlen); } else - err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen, nm); + err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen, + hash_dent, nm); if (err) goto out_ro; @@ -650,14 +679,14 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, */ ino_key_init(c, &ino_key, inode->i_ino); ino_offs = dent_offs + aligned_dlen; - err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen); + err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen, hash_ino); if (err) goto out_ro; ino_key_init(c, &ino_key, dir->i_ino); ino_offs += aligned_ilen; err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, - UBIFS_INO_NODE_SZ + host_ui->data_len); + UBIFS_INO_NODE_SZ + host_ui->data_len, hash_ino_host); if (err) goto out_ro; @@ -706,10 +735,12 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode, const union ubifs_key *key, const void *buf, int len) { struct ubifs_data_node *data; - int err, lnum, offs, compr_type, out_len, compr_len; + int err, lnum, offs, compr_type, out_len, compr_len, auth_len; int dlen = COMPRESSED_DATA_NODE_BUF_SZ, allocated = 1; + int write_len; struct ubifs_inode *ui = ubifs_inode(inode); bool encrypted = ubifs_crypt_is_encrypted(inode); + u8 hash[UBIFS_HASH_ARR_SZ]; dbg_jnlk(key, "ino %lu, blk %u, len %d, key ", (unsigned long)key_inum(c, key), key_block(c, key), len); @@ -718,7 +749,9 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const 
struct inode *inode, if (encrypted) dlen += UBIFS_CIPHER_BLOCK_SIZE; - data = kmalloc(dlen, GFP_NOFS | __GFP_NOWARN); + auth_len = ubifs_auth_node_sz(c); + + data = kmalloc(dlen + auth_len, GFP_NOFS | __GFP_NOWARN); if (!data) { /* * Fall-back to the write reserve buffer. Note, we might be @@ -757,20 +790,33 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode, } dlen = UBIFS_DATA_NODE_SZ + out_len; + if (ubifs_authenticated(c)) + write_len = ALIGN(dlen, 8) + auth_len; + else + write_len = dlen; + data->compr_type = cpu_to_le16(compr_type); /* Make reservation before allocating sequence numbers */ - err = make_reservation(c, DATAHD, dlen); + err = make_reservation(c, DATAHD, write_len); if (err) goto out_free; - err = write_node(c, DATAHD, data, dlen, &lnum, &offs); + ubifs_prepare_node(c, data, dlen, 0); + err = write_head(c, DATAHD, data, write_len, &lnum, &offs, 0); + if (err) + goto out_release; + + err = ubifs_node_calc_hash(c, data, hash); if (err) goto out_release; + ubifs_wbuf_add_ino_nolock(&c->jheads[DATAHD].wbuf, key_inum(c, key)); release_head(c, DATAHD); - err = ubifs_tnc_add(c, key, lnum, offs, dlen); + ubifs_add_auth_dirt(c, lnum); + + err = ubifs_tnc_add(c, key, lnum, offs, dlen, hash); if (err) goto out_ro; @@ -808,7 +854,9 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode) int err, lnum, offs; struct ubifs_ino_node *ino; struct ubifs_inode *ui = ubifs_inode(inode); - int sync = 0, len = UBIFS_INO_NODE_SZ, last_reference = !inode->i_nlink; + int sync = 0, write_len, ilen = UBIFS_INO_NODE_SZ; + int last_reference = !inode->i_nlink; + u8 hash[UBIFS_HASH_ARR_SZ]; dbg_jnl("ino %lu, nlink %u", inode->i_ino, inode->i_nlink); @@ -817,20 +865,30 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode) * need to synchronize the write-buffer either. 
*/ if (!last_reference) { - len += ui->data_len; + ilen += ui->data_len; sync = IS_SYNC(inode); } - ino = kmalloc(len, GFP_NOFS); + + if (ubifs_authenticated(c)) + write_len = ALIGN(ilen, 8) + ubifs_auth_node_sz(c); + else + write_len = ilen; + + ino = kmalloc(write_len, GFP_NOFS); if (!ino) return -ENOMEM; /* Make reservation before allocating sequence numbers */ - err = make_reservation(c, BASEHD, len); + err = make_reservation(c, BASEHD, write_len); if (err) goto out_free; pack_inode(c, ino, inode, 1); - err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync); + err = ubifs_node_calc_hash(c, ino, hash); + if (err) + goto out_release; + + err = write_head(c, BASEHD, ino, write_len, &lnum, &offs, sync); if (err) goto out_release; if (!sync) @@ -838,17 +896,19 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode) inode->i_ino); release_head(c, BASEHD); + ubifs_add_auth_dirt(c, lnum); + if (last_reference) { err = ubifs_tnc_remove_ino(c, inode->i_ino); if (err) goto out_ro; ubifs_delete_orphan(c, inode->i_ino); - err = ubifs_add_dirt(c, lnum, len); + err = ubifs_add_dirt(c, lnum, ilen); } else { union ubifs_key key; ino_key_init(c, &key, inode->i_ino); - err = ubifs_tnc_add(c, &key, lnum, offs, len); + err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash); } if (err) goto out_ro; @@ -958,6 +1018,10 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir, int aligned_dlen1, aligned_dlen2; int twoparents = (fst_dir != snd_dir); void *p; + u8 hash_dent1[UBIFS_HASH_ARR_SZ]; + u8 hash_dent2[UBIFS_HASH_ARR_SZ]; + u8 hash_p1[UBIFS_HASH_ARR_SZ]; + u8 hash_p2[UBIFS_HASH_ARR_SZ]; ubifs_assert(c, ubifs_inode(fst_dir)->data_len == 0); ubifs_assert(c, ubifs_inode(snd_dir)->data_len == 0); @@ -973,6 +1037,8 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir, if (twoparents) len += plen; + len += ubifs_auth_node_sz(c); + dent1 = kzalloc(len, GFP_NOFS); if (!dent1) return -ENOMEM; @@ -993,6 +1059,9 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir, set_dent_cookie(c, dent1); zero_dent_node_unused(dent1); ubifs_prep_grp_node(c, dent1, dlen1, 0); + err = ubifs_node_calc_hash(c, dent1, hash_dent1); + if (err) + goto out_release; /* Make new dent for 2nd entry */ dent2 = (void *)dent1 + aligned_dlen1; @@ -1006,14 +1075,26 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir, set_dent_cookie(c, dent2); zero_dent_node_unused(dent2); ubifs_prep_grp_node(c, dent2, dlen2, 0); + err = ubifs_node_calc_hash(c, dent2, hash_dent2); + if (err) + goto out_release; p = (void *)dent2 + aligned_dlen2; - if (!twoparents) + if (!twoparents) { pack_inode(c, p, fst_dir, 1); - else { + err = ubifs_node_calc_hash(c, p, hash_p1); + if (err) + goto out_release; + } else { pack_inode(c, p, fst_dir, 0); + err = ubifs_node_calc_hash(c, p, hash_p1); + if (err) + goto out_release; p += ALIGN(plen, 8); pack_inode(c, p, snd_dir, 1); + err = ubifs_node_calc_hash(c, p, hash_p2); + if (err) + goto out_release; } err = write_head(c, BASEHD, dent1, len, &lnum, &offs, sync); @@ -1027,28 +1108,30 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir, } release_head(c, BASEHD); + ubifs_add_auth_dirt(c, lnum); + dent_key_init(c, &key, snd_dir->i_ino, snd_nm); - err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, snd_nm); + err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, hash_dent1, snd_nm); if (err) goto out_ro; offs += aligned_dlen1; dent_key_init(c, &key, fst_dir->i_ino, fst_nm); - err = ubifs_tnc_add_nm(c, 
&key, lnum, offs, dlen2, fst_nm); + err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, fst_nm); if (err) goto out_ro; offs += aligned_dlen2; ino_key_init(c, &key, fst_dir->i_ino); - err = ubifs_tnc_add(c, &key, lnum, offs, plen); + err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_p1); if (err) goto out_ro; if (twoparents) { offs += ALIGN(plen, 8); ino_key_init(c, &key, snd_dir->i_ino); - err = ubifs_tnc_add(c, &key, lnum, offs, plen); + err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_p2); if (err) goto out_ro; } @@ -1101,6 +1184,11 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, int last_reference = !!(new_inode && new_inode->i_nlink == 0); int move = (old_dir != new_dir); struct ubifs_inode *uninitialized_var(new_ui); + u8 hash_old_dir[UBIFS_HASH_ARR_SZ]; + u8 hash_new_dir[UBIFS_HASH_ARR_SZ]; + u8 hash_new_inode[UBIFS_HASH_ARR_SZ]; + u8 hash_dent1[UBIFS_HASH_ARR_SZ]; + u8 hash_dent2[UBIFS_HASH_ARR_SZ]; ubifs_assert(c, ubifs_inode(old_dir)->data_len == 0); ubifs_assert(c, ubifs_inode(new_dir)->data_len == 0); @@ -1123,6 +1211,9 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8); if (move) len += plen; + + len += ubifs_auth_node_sz(c); + dent = kzalloc(len, GFP_NOFS); if (!dent) return -ENOMEM; @@ -1143,6 +1234,9 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, set_dent_cookie(c, dent); zero_dent_node_unused(dent); ubifs_prep_grp_node(c, dent, dlen1, 0); + err = ubifs_node_calc_hash(c, dent, hash_dent1); + if (err) + goto out_release; dent2 = (void *)dent + aligned_dlen1; dent2->ch.node_type = UBIFS_DENT_NODE; @@ -1162,19 +1256,36 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, set_dent_cookie(c, dent2); zero_dent_node_unused(dent2); ubifs_prep_grp_node(c, dent2, dlen2, 0); + err = ubifs_node_calc_hash(c, dent2, hash_dent2); + if (err) + goto out_release; p = (void *)dent2 + aligned_dlen2; if (new_inode) { pack_inode(c, p, new_inode, 0); + err = ubifs_node_calc_hash(c, p, hash_new_inode); + if (err) + goto out_release; + p += ALIGN(ilen, 8); } - if (!move) + if (!move) { pack_inode(c, p, old_dir, 1); - else { + err = ubifs_node_calc_hash(c, p, hash_old_dir); + if (err) + goto out_release; + } else { pack_inode(c, p, old_dir, 0); + err = ubifs_node_calc_hash(c, p, hash_old_dir); + if (err) + goto out_release; + p += ALIGN(plen, 8); pack_inode(c, p, new_dir, 1); + err = ubifs_node_calc_hash(c, p, hash_new_dir); + if (err) + goto out_release; } if (last_reference) { @@ -1200,15 +1311,17 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, } release_head(c, BASEHD); + ubifs_add_auth_dirt(c, lnum); + dent_key_init(c, &key, new_dir->i_ino, new_nm); - err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, new_nm); + err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, hash_dent1, new_nm); if (err) goto out_ro; offs += aligned_dlen1; if (whiteout) { dent_key_init(c, &key, old_dir->i_ino, old_nm); - err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, old_nm); + err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, old_nm); if (err) goto out_ro; @@ -1227,21 +1340,21 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, offs += aligned_dlen2; if (new_inode) { ino_key_init(c, &key, new_inode->i_ino); - err = ubifs_tnc_add(c, &key, lnum, offs, ilen); + err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash_new_inode); if (err) goto out_ro; offs += ALIGN(ilen, 8); } 
ino_key_init(c, &key, old_dir->i_ino); - err = ubifs_tnc_add(c, &key, lnum, offs, plen); + err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_old_dir); if (err) goto out_ro; if (move) { offs += ALIGN(plen, 8); ino_key_init(c, &key, new_dir->i_ino); - err = ubifs_tnc_add(c, &key, lnum, offs, plen); + err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_new_dir); if (err) goto out_ro; } @@ -1360,6 +1473,8 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode, struct ubifs_inode *ui = ubifs_inode(inode); ino_t inum = inode->i_ino; unsigned int blk; + u8 hash_ino[UBIFS_HASH_ARR_SZ]; + u8 hash_dn[UBIFS_HASH_ARR_SZ]; dbg_jnl("ino %lu, size %lld -> %lld", (unsigned long)inum, old_size, new_size); @@ -1369,6 +1484,9 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode, sz = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ + UBIFS_MAX_DATA_NODE_SZ * WORST_COMPR_FACTOR; + + sz += ubifs_auth_node_sz(c); + ino = kmalloc(sz, GFP_NOFS); if (!ino) return -ENOMEM; @@ -1414,16 +1532,28 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode, /* Must make reservation before allocating sequence numbers */ len = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ; - if (dlen) + + if (ubifs_authenticated(c)) + len += ALIGN(dlen, 8) + ubifs_auth_node_sz(c); + else len += dlen; + err = make_reservation(c, BASEHD, len); if (err) goto out_free; pack_inode(c, ino, inode, 0); + err = ubifs_node_calc_hash(c, ino, hash_ino); + if (err) + goto out_release; + ubifs_prep_grp_node(c, trun, UBIFS_TRUN_NODE_SZ, dlen ? 0 : 1); - if (dlen) + if (dlen) { ubifs_prep_grp_node(c, dn, dlen, 1); + err = ubifs_node_calc_hash(c, dn, hash_dn); + if (err) + goto out_release; + } err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync); if (err) @@ -1432,15 +1562,17 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode, ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, inum); release_head(c, BASEHD); + ubifs_add_auth_dirt(c, lnum); + if (dlen) { sz = offs + UBIFS_INO_NODE_SZ + UBIFS_TRUN_NODE_SZ; - err = ubifs_tnc_add(c, &key, lnum, sz, dlen); + err = ubifs_tnc_add(c, &key, lnum, sz, dlen, hash_dn); if (err) goto out_ro; } ino_key_init(c, &key, inum); - err = ubifs_tnc_add(c, &key, lnum, offs, UBIFS_INO_NODE_SZ); + err = ubifs_tnc_add(c, &key, lnum, offs, UBIFS_INO_NODE_SZ, hash_ino); if (err) goto out_ro; @@ -1495,12 +1627,13 @@ int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host, const struct inode *inode, const struct fscrypt_name *nm) { - int err, xlen, hlen, len, lnum, xent_offs, aligned_xlen; + int err, xlen, hlen, len, lnum, xent_offs, aligned_xlen, write_len; struct ubifs_dent_node *xent; struct ubifs_ino_node *ino; union ubifs_key xent_key, key1, key2; int sync = IS_DIRSYNC(host); struct ubifs_inode *host_ui = ubifs_inode(host); + u8 hash[UBIFS_HASH_ARR_SZ]; ubifs_assert(c, inode->i_nlink == 0); ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex)); @@ -1514,12 +1647,14 @@ int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host, hlen = host_ui->data_len + UBIFS_INO_NODE_SZ; len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8); - xent = kzalloc(len, GFP_NOFS); + write_len = len + ubifs_auth_node_sz(c); + + xent = kzalloc(write_len, GFP_NOFS); if (!xent) return -ENOMEM; /* Make reservation before allocating sequence numbers */ - err = make_reservation(c, BASEHD, len); + err = make_reservation(c, BASEHD, write_len); if (err) { kfree(xent); return err; @@ -1540,11 +1675,16 @@ int ubifs_jnl_delete_xattr(struct ubifs_info *c, const 
struct inode *host, pack_inode(c, ino, inode, 0); ino = (void *)ino + UBIFS_INO_NODE_SZ; pack_inode(c, ino, host, 1); + err = ubifs_node_calc_hash(c, ino, hash); + if (err) + goto out_release; - err = write_head(c, BASEHD, xent, len, &lnum, &xent_offs, sync); + err = write_head(c, BASEHD, xent, write_len, &lnum, &xent_offs, sync); if (!sync && !err) ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, host->i_ino); release_head(c, BASEHD); + + ubifs_add_auth_dirt(c, lnum); kfree(xent); if (err) goto out_ro; @@ -1572,7 +1712,7 @@ int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host, /* And update TNC with the new host inode position */ ino_key_init(c, &key1, host->i_ino); - err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen); + err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen, hash); if (err) goto out_ro; @@ -1583,6 +1723,9 @@ int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host, mark_inode_clean(c, host_ui); return 0; +out_release: + kfree(xent); + release_head(c, BASEHD); out_ro: ubifs_ro_mode(c, err); finish_reservation(c); @@ -1610,6 +1753,8 @@ int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode, struct ubifs_ino_node *ino; union ubifs_key key; int sync = IS_DIRSYNC(host); + u8 hash_host[UBIFS_HASH_ARR_SZ]; + u8 hash[UBIFS_HASH_ARR_SZ]; dbg_jnl("ino %lu, ino %lu", host->i_ino, inode->i_ino); ubifs_assert(c, host->i_nlink > 0); @@ -1621,6 +1766,8 @@ int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode, aligned_len1 = ALIGN(len1, 8); aligned_len = aligned_len1 + ALIGN(len2, 8); + aligned_len += ubifs_auth_node_sz(c); + ino = kzalloc(aligned_len, GFP_NOFS); if (!ino) return -ENOMEM; @@ -1631,7 +1778,13 @@ int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode, goto out_free; pack_inode(c, ino, host, 0); + err = ubifs_node_calc_hash(c, ino, hash_host); + if (err) + goto out_release; pack_inode(c, (void *)ino + aligned_len1, inode, 1); + err = ubifs_node_calc_hash(c, (void *)ino + aligned_len1, hash); + if (err) + goto out_release; err = write_head(c, BASEHD, ino, aligned_len, &lnum, &offs, 0); if (!sync && !err) { @@ -1644,13 +1797,15 @@ int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode, if (err) goto out_ro; + ubifs_add_auth_dirt(c, lnum); + ino_key_init(c, &key, host->i_ino); - err = ubifs_tnc_add(c, &key, lnum, offs, len1); + err = ubifs_tnc_add(c, &key, lnum, offs, len1, hash_host); if (err) goto out_ro; ino_key_init(c, &key, inode->i_ino); - err = ubifs_tnc_add(c, &key, lnum, offs + aligned_len1, len2); + err = ubifs_tnc_add(c, &key, lnum, offs + aligned_len1, len2, hash); if (err) goto out_ro; @@ -1662,6 +1817,8 @@ int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode, kfree(ino); return 0; +out_release: + release_head(c, BASEHD); out_ro: ubifs_ro_mode(c, err); finish_reservation(c); diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c index 86b0828f5499..15fd854149bb 100644 --- a/fs/ubifs/log.c +++ b/fs/ubifs/log.c @@ -236,6 +236,7 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs) bud->lnum = lnum; bud->start = offs; bud->jhead = jhead; + bud->log_hash = NULL; ref->ch.node_type = UBIFS_REF_NODE; ref->lnum = cpu_to_le32(bud->lnum); @@ -275,6 +276,14 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs) if (err) goto out_unlock; + err = ubifs_shash_update(c, c->log_hash, ref, UBIFS_REF_NODE_SZ); + if (err) + goto out_unlock; + + err = 
ubifs_shash_copy_state(c, c->log_hash, c->jheads[jhead].log_hash); + if (err) + goto out_unlock; + c->lhead_offs += c->ref_node_alsz; ubifs_add_bud(c, bud); @@ -377,6 +386,14 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum) cs->cmt_no = cpu_to_le64(c->cmt_no); ubifs_prepare_node(c, cs, UBIFS_CS_NODE_SZ, 0); + err = ubifs_shash_init(c, c->log_hash); + if (err) + goto out; + + err = ubifs_shash_update(c, c->log_hash, cs, UBIFS_CS_NODE_SZ); + if (err < 0) + goto out; + /* * Note, we do not lock 'c->log_mutex' because this is the commit start * phase and we are exclusively using the log. And we do not lock @@ -402,6 +419,12 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum) ubifs_prepare_node(c, ref, UBIFS_REF_NODE_SZ, 0); len += UBIFS_REF_NODE_SZ; + + err = ubifs_shash_update(c, c->log_hash, ref, + UBIFS_REF_NODE_SZ); + if (err) + goto out; + ubifs_shash_copy_state(c, c->log_hash, c->jheads[i].log_hash); } ubifs_pad(c, buf + len, ALIGN(len, c->min_io_size) - len); @@ -516,6 +539,7 @@ int ubifs_log_post_commit(struct ubifs_info *c, int old_ltail_lnum) if (err) return err; list_del(&bud->list); + kfree(bud->log_hash); kfree(bud); } mutex_lock(&c->log_mutex); diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index 31393370e334..d1d5e96350dd 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c @@ -604,11 +604,12 @@ static int calc_pnode_num_from_parent(const struct ubifs_info *c, * @lpt_first: LEB number of first LPT LEB * @lpt_lebs: number of LEBs for LPT is passed and returned here * @big_lpt: use big LPT model is passed and returned here + * @hash: hash of the LPT is returned here * * This function returns %0 on success and a negative error code on failure. */ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, - int *lpt_lebs, int *big_lpt) + int *lpt_lebs, int *big_lpt, u8 *hash) { int lnum, err = 0, node_sz, iopos, i, j, cnt, len, alen, row; int blnum, boffs, bsz, bcnt; @@ -617,6 +618,7 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, void *buf = NULL, *p; struct ubifs_lpt_lprops *ltab = NULL; int *lsave = NULL; + struct shash_desc *desc; err = calc_dflt_lpt_geom(c, main_lebs, big_lpt); if (err) @@ -630,6 +632,10 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, /* Needed by 'ubifs_pack_lsave()' */ c->main_first = c->leb_cnt - *main_lebs; + desc = ubifs_hash_get_desc(c); + if (IS_ERR(desc)) + return PTR_ERR(desc); + lsave = kmalloc_array(c->lsave_cnt, sizeof(int), GFP_KERNEL); pnode = kzalloc(sizeof(struct ubifs_pnode), GFP_KERNEL); nnode = kzalloc(sizeof(struct ubifs_nnode), GFP_KERNEL); @@ -677,6 +683,10 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, /* Add first pnode */ ubifs_pack_pnode(c, p, pnode); + err = ubifs_shash_update(c, desc, p, c->pnode_sz); + if (err) + goto out; + p += c->pnode_sz; len = c->pnode_sz; pnode->num += 1; @@ -711,6 +721,10 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, len = 0; } ubifs_pack_pnode(c, p, pnode); + err = ubifs_shash_update(c, desc, p, c->pnode_sz); + if (err) + goto out; + p += c->pnode_sz; len += c->pnode_sz; /* @@ -830,6 +844,10 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, if (err) goto out; + err = ubifs_shash_final(c, desc, hash); + if (err) + goto out; + c->nhead_lnum = lnum; c->nhead_offs = ALIGN(len, c->min_io_size); @@ -853,6 +871,7 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, 
dbg_lp("LPT lsave is at %d:%d", c->lsave_lnum, c->lsave_offs); out: c->ltab = NULL; + kfree(desc); kfree(lsave); vfree(ltab); vfree(buf); @@ -1439,26 +1458,25 @@ struct ubifs_pnode *ubifs_get_pnode(struct ubifs_info *c, } /** - * ubifs_lpt_lookup - lookup LEB properties in the LPT. + * ubifs_pnode_lookup - lookup a pnode in the LPT. * @c: UBIFS file-system description object - * @lnum: LEB number to lookup + * @i: pnode number (0 to (main_lebs - 1) / UBIFS_LPT_FANOUT) * - * This function returns a pointer to the LEB properties on success or a - * negative error code on failure. + * This function returns a pointer to the pnode on success or a negative + * error code on failure. */ -struct ubifs_lprops *ubifs_lpt_lookup(struct ubifs_info *c, int lnum) +struct ubifs_pnode *ubifs_pnode_lookup(struct ubifs_info *c, int i) { - int err, i, h, iip, shft; + int err, h, iip, shft; struct ubifs_nnode *nnode; - struct ubifs_pnode *pnode; if (!c->nroot) { err = ubifs_read_nnode(c, NULL, 0); if (err) return ERR_PTR(err); } + i <<= UBIFS_LPT_FANOUT_SHIFT; nnode = c->nroot; - i = lnum - c->main_first; shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT; for (h = 1; h < c->lpt_hght; h++) { iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); @@ -1468,7 +1486,24 @@ struct ubifs_lprops *ubifs_lpt_lookup(struct ubifs_info *c, int lnum) return ERR_CAST(nnode); } iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); - pnode = ubifs_get_pnode(c, nnode, iip); + return ubifs_get_pnode(c, nnode, iip); +} + +/** + * ubifs_lpt_lookup - lookup LEB properties in the LPT. + * @c: UBIFS file-system description object + * @lnum: LEB number to lookup + * + * This function returns a pointer to the LEB properties on success or a + * negative error code on failure. + */ +struct ubifs_lprops *ubifs_lpt_lookup(struct ubifs_info *c, int lnum) +{ + int i, iip; + struct ubifs_pnode *pnode; + + i = lnum - c->main_first; + pnode = ubifs_pnode_lookup(c, i >> UBIFS_LPT_FANOUT_SHIFT); if (IS_ERR(pnode)) return ERR_CAST(pnode); iip = (i & (UBIFS_LPT_FANOUT - 1)); @@ -1620,6 +1655,131 @@ struct ubifs_lprops *ubifs_lpt_lookup_dirty(struct ubifs_info *c, int lnum) } /** + * ubifs_lpt_calc_hash - Calculate hash of the LPT pnodes + * @c: UBIFS file-system description object + * @hash: the returned hash of the LPT pnodes + * + * This function iterates over the LPT pnodes and creates a hash over them. + * Returns 0 for success or a negative error code otherwise. 
+ */ +int ubifs_lpt_calc_hash(struct ubifs_info *c, u8 *hash) +{ + struct ubifs_nnode *nnode, *nn; + struct ubifs_cnode *cnode; + struct shash_desc *desc; + int iip = 0, i; + int bufsiz = max_t(int, c->nnode_sz, c->pnode_sz); + void *buf; + int err; + + if (!ubifs_authenticated(c)) + return 0; + + desc = ubifs_hash_get_desc(c); + if (IS_ERR(desc)) + return PTR_ERR(desc); + + buf = kmalloc(bufsiz, GFP_NOFS); + if (!buf) { + err = -ENOMEM; + goto out; + } + + if (!c->nroot) { + err = ubifs_read_nnode(c, NULL, 0); + if (err) + return err; + } + + cnode = (struct ubifs_cnode *)c->nroot; + + while (cnode) { + nnode = cnode->parent; + nn = (struct ubifs_nnode *)cnode; + if (cnode->level > 1) { + while (iip < UBIFS_LPT_FANOUT) { + if (nn->nbranch[iip].lnum == 0) { + /* Go right */ + iip++; + continue; + } + + nnode = ubifs_get_nnode(c, nn, iip); + if (IS_ERR(nnode)) { + err = PTR_ERR(nnode); + goto out; + } + + /* Go down */ + iip = 0; + cnode = (struct ubifs_cnode *)nnode; + break; + } + if (iip < UBIFS_LPT_FANOUT) + continue; + } else { + struct ubifs_pnode *pnode; + + for (i = 0; i < UBIFS_LPT_FANOUT; i++) { + if (nn->nbranch[i].lnum == 0) + continue; + pnode = ubifs_get_pnode(c, nn, i); + if (IS_ERR(pnode)) { + err = PTR_ERR(pnode); + goto out; + } + + ubifs_pack_pnode(c, buf, pnode); + err = ubifs_shash_update(c, desc, buf, + c->pnode_sz); + if (err) + goto out; + } + } + /* Go up and to the right */ + iip = cnode->iip + 1; + cnode = (struct ubifs_cnode *)nnode; + } + + err = ubifs_shash_final(c, desc, hash); +out: + kfree(desc); + kfree(buf); + + return err; +} + +/** + * lpt_check_hash - check the hash of the LPT. + * @c: UBIFS file-system description object + * + * This function calculates a hash over all pnodes in the LPT and compares it with + * the hash stored in the master node. Returns %0 on success and a negative error + * code on failure. + */ +static int lpt_check_hash(struct ubifs_info *c) +{ + int err; + u8 hash[UBIFS_HASH_ARR_SZ]; + + if (!ubifs_authenticated(c)) + return 0; + + err = ubifs_lpt_calc_hash(c, hash); + if (err) + return err; + + if (ubifs_check_hash(c, c->mst_node->hash_lpt, hash)) { + err = -EPERM; + ubifs_err(c, "Failed to authenticate LPT"); + } else { + err = 0; + } + + return err; +} + +/** * lpt_init_rd - initialize the LPT for reading. * @c: UBIFS file-system description object * @@ -1660,6 +1820,10 @@ static int lpt_init_rd(struct ubifs_info *c) if (err) return err; + err = lpt_check_hash(c); + if (err) + return err; + dbg_lp("space_bits %d", c->space_bits); dbg_lp("lpt_lnum_bits %d", c->lpt_lnum_bits); dbg_lp("lpt_offs_bits %d", c->lpt_offs_bits); diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index 7ce30994bbba..1f88caffdf2a 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -619,38 +619,6 @@ static struct ubifs_pnode *next_pnode_to_dirty(struct ubifs_info *c, } /** - * pnode_lookup - lookup a pnode in the LPT. - * @c: UBIFS file-system description object - * @i: pnode number (0 to (main_lebs - 1) / UBIFS_LPT_FANOUT)) - * - * This function returns a pointer to the pnode on success or a negative - * error code on failure. 
- */ -static struct ubifs_pnode *pnode_lookup(struct ubifs_info *c, int i) -{ - int err, h, iip, shft; - struct ubifs_nnode *nnode; - - if (!c->nroot) { - err = ubifs_read_nnode(c, NULL, 0); - if (err) - return ERR_PTR(err); - } - i <<= UBIFS_LPT_FANOUT_SHIFT; - nnode = c->nroot; - shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT; - for (h = 1; h < c->lpt_hght; h++) { - iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); - shft -= UBIFS_LPT_FANOUT_SHIFT; - nnode = ubifs_get_nnode(c, nnode, iip); - if (IS_ERR(nnode)) - return ERR_CAST(nnode); - } - iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); - return ubifs_get_pnode(c, nnode, iip); -} - -/** * add_pnode_dirt - add dirty space to LPT LEB properties. * @c: UBIFS file-system description object * @pnode: pnode for which to add dirt @@ -702,7 +670,7 @@ static int make_tree_dirty(struct ubifs_info *c) { struct ubifs_pnode *pnode; - pnode = pnode_lookup(c, 0); + pnode = ubifs_pnode_lookup(c, 0); if (IS_ERR(pnode)) return PTR_ERR(pnode); @@ -956,7 +924,7 @@ static int make_pnode_dirty(struct ubifs_info *c, int node_num, int lnum, struct ubifs_pnode *pnode; struct ubifs_nbranch *branch; - pnode = pnode_lookup(c, node_num); + pnode = ubifs_pnode_lookup(c, node_num); if (IS_ERR(pnode)) return PTR_ERR(pnode); branch = &pnode->parent->nbranch[pnode->iip]; @@ -1279,6 +1247,10 @@ int ubifs_lpt_start_commit(struct ubifs_info *c) if (err) goto out; + err = ubifs_lpt_calc_hash(c, c->mst_node->hash_lpt); + if (err) + goto out; + /* Copy the LPT's own lprops for end commit to write */ memcpy(c->ltab_cmt, c->ltab, sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs); @@ -1558,7 +1530,7 @@ static int dbg_is_pnode_dirty(struct ubifs_info *c, int lnum, int offs) struct ubifs_nbranch *branch; cond_resched(); - pnode = pnode_lookup(c, i); + pnode = ubifs_pnode_lookup(c, i); if (IS_ERR(pnode)) return PTR_ERR(pnode); branch = &pnode->parent->nbranch[pnode->iip]; @@ -1710,7 +1682,7 @@ int dbg_check_ltab(struct ubifs_info *c) for (i = 0; i < cnt; i++) { struct ubifs_pnode *pnode; - pnode = pnode_lookup(c, i); + pnode = ubifs_pnode_lookup(c, i); if (IS_ERR(pnode)) return PTR_ERR(pnode); cond_resched(); diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c index 9df4a41bba52..5ea51bbd14c7 100644 --- a/fs/ubifs/master.c +++ b/fs/ubifs/master.c @@ -25,6 +25,42 @@ #include "ubifs.h" /** + * ubifs_compare_master_node - compare two UBIFS master nodes + * @c: UBIFS file-system description object + * @m1: the first node + * @m2: the second node + * + * This function compares two UBIFS master nodes. Returns 0 if they are equal + * and nonzero if not. + */ +int ubifs_compare_master_node(struct ubifs_info *c, void *m1, void *m2) +{ + int ret; + int behind; + int hmac_offs = offsetof(struct ubifs_mst_node, hmac); + + /* + * Do not compare the common node header since the sequence number and + * hence the CRC are different. + */ + ret = memcmp(m1 + UBIFS_CH_SZ, m2 + UBIFS_CH_SZ, + hmac_offs - UBIFS_CH_SZ); + if (ret) + return ret; + + /* + * Do not compare the embedded HMAC either, as it also must be different + * due to the different common node header. + */ + behind = hmac_offs + UBIFS_MAX_HMAC_LEN; + + if (UBIFS_MST_NODE_SZ > behind) + return memcmp(m1 + behind, m2 + behind, UBIFS_MST_NODE_SZ - behind); + + return 0; +} + +/** * scan_for_master - search the valid master node.
* @c: UBIFS file-system description object * @@ -37,7 +73,7 @@ static int scan_for_master(struct ubifs_info *c) { struct ubifs_scan_leb *sleb; struct ubifs_scan_node *snod; - int lnum, offs = 0, nodes_cnt; + int lnum, offs = 0, nodes_cnt, err; lnum = UBIFS_MST_LNUM; @@ -69,12 +105,23 @@ static int scan_for_master(struct ubifs_info *c) goto out_dump; if (snod->offs != offs) goto out; - if (memcmp((void *)c->mst_node + UBIFS_CH_SZ, - (void *)snod->node + UBIFS_CH_SZ, - UBIFS_MST_NODE_SZ - UBIFS_CH_SZ)) + if (ubifs_compare_master_node(c, c->mst_node, snod->node)) goto out; + c->mst_offs = offs; ubifs_scan_destroy(sleb); + + if (!ubifs_authenticated(c)) + return 0; + + err = ubifs_node_verify_hmac(c, c->mst_node, + sizeof(struct ubifs_mst_node), + offsetof(struct ubifs_mst_node, hmac)); + if (err) { + ubifs_err(c, "Failed to verify master node HMAC"); + return -EPERM; + } + return 0; out: @@ -305,6 +352,8 @@ int ubifs_read_master(struct ubifs_info *c) c->lst.total_dead = le64_to_cpu(c->mst_node->total_dead); c->lst.total_dark = le64_to_cpu(c->mst_node->total_dark); + ubifs_copy_hash(c, c->mst_node->hash_root_idx, c->zroot.hash); + c->calc_idx_sz = c->bi.old_idx_sz; if (c->mst_node->flags & cpu_to_le32(UBIFS_MST_NO_ORPHS)) @@ -378,7 +427,9 @@ int ubifs_write_master(struct ubifs_info *c) c->mst_offs = offs; c->mst_node->highest_inum = cpu_to_le64(c->highest_inum); - err = ubifs_write_node(c, c->mst_node, len, lnum, offs); + ubifs_copy_hash(c, c->zroot.hash, c->mst_node->hash_root_idx); + err = ubifs_write_node_hmac(c, c->mst_node, len, lnum, offs, + offsetof(struct ubifs_mst_node, hmac)); if (err) return err; @@ -389,7 +440,8 @@ int ubifs_write_master(struct ubifs_info *c) if (err) return err; } - err = ubifs_write_node(c, c->mst_node, len, lnum, offs); + err = ubifs_write_node_hmac(c, c->mst_node, len, lnum, offs, + offsetof(struct ubifs_mst_node, hmac)); return err; } diff --git a/fs/ubifs/misc.h b/fs/ubifs/misc.h index 21d35d7dd975..6f87237fdbf4 100644 --- a/fs/ubifs/misc.h +++ b/fs/ubifs/misc.h @@ -197,7 +197,8 @@ static inline int ubifs_return_leb(struct ubifs_info *c, int lnum) */ static inline int ubifs_idx_node_sz(const struct ubifs_info *c, int child_cnt) { - return UBIFS_IDX_NODE_SZ + (UBIFS_BRANCH_SZ + c->key_len) * child_cnt; + return UBIFS_IDX_NODE_SZ + (UBIFS_BRANCH_SZ + c->key_len + c->hash_len) + * child_cnt; } /** @@ -212,7 +213,7 @@ struct ubifs_branch *ubifs_idx_branch(const struct ubifs_info *c, int bnum) { return (struct ubifs_branch *)((void *)idx->branches + - (UBIFS_BRANCH_SZ + c->key_len) * bnum); + (UBIFS_BRANCH_SZ + c->key_len + c->hash_len) * bnum); } /** diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index 984e30e83c0b..8526b7ec4707 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c @@ -212,7 +212,10 @@ static int write_rcvrd_mst_node(struct ubifs_info *c, save_flags = mst->flags; mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY); - ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1); + err = ubifs_prepare_node_hmac(c, mst, UBIFS_MST_NODE_SZ, + offsetof(struct ubifs_mst_node, hmac), 1); + if (err) + goto out; err = ubifs_leb_change(c, lnum, mst, sz); if (err) goto out; @@ -264,9 +267,7 @@ int ubifs_recover_master_node(struct ubifs_info *c) offs2 = (void *)mst2 - buf2; if (offs1 == offs2) { /* Same offset, so must be the same */ - if (memcmp((void *)mst1 + UBIFS_CH_SZ, - (void *)mst2 + UBIFS_CH_SZ, - UBIFS_MST_NODE_SZ - UBIFS_CH_SZ)) + if (ubifs_compare_master_node(c, mst1, mst2)) goto out_err; mst = mst1; } else if (offs2 + sz == offs1) { @@ -1462,15 +1463,81 
@@ out: } /** + * inode_fix_size - fix inode size + * @c: UBIFS file-system description object + * @e: inode size information for recovery + */ +static int inode_fix_size(struct ubifs_info *c, struct size_entry *e) +{ + struct inode *inode; + struct ubifs_inode *ui; + int err; + + if (c->ro_mount) + ubifs_assert(c, !e->inode); + + if (e->inode) { + /* Remounting rw, pick up inode we stored earlier */ + inode = e->inode; + } else { + inode = ubifs_iget(c->vfs_sb, e->inum); + if (IS_ERR(inode)) + return PTR_ERR(inode); + + if (inode->i_size >= e->d_size) { + /* + * The original inode in the index already has a size + * big enough, nothing to do + */ + iput(inode); + return 0; + } + + dbg_rcvry("ino %lu size %lld -> %lld", + (unsigned long)e->inum, + inode->i_size, e->d_size); + + ui = ubifs_inode(inode); + + inode->i_size = e->d_size; + ui->ui_size = e->d_size; + ui->synced_i_size = e->d_size; + + e->inode = inode; + } + + /* + * In readonly mode just keep the inode pinned in memory until we go + * readwrite. In readwrite mode write the inode to the journal with the + * fixed size. + */ + if (c->ro_mount) + return 0; + + err = ubifs_jnl_write_inode(c, inode); + + iput(inode); + + if (err) + return err; + + rb_erase(&e->rb, &c->size_tree); + kfree(e); + + return 0; +} + +/** * ubifs_recover_size - recover inode size. * @c: UBIFS file-system description object + * @in_place: If true, do an in-place size fixup * * This function attempts to fix inode size discrepancies identified by the * 'ubifs_recover_size_accum()' function. * * This functions returns %0 on success and a negative error code on failure. */ -int ubifs_recover_size(struct ubifs_info *c) +int ubifs_recover_size(struct ubifs_info *c, bool in_place) { struct rb_node *this = rb_first(&c->size_tree); @@ -1479,6 +1546,9 @@ int ubifs_recover_size(struct ubifs_info *c) int err; e = rb_entry(this, struct size_entry, rb); + + this = rb_next(this); + if (!e->exists) { union ubifs_key key; @@ -1502,40 +1572,26 @@ int ubifs_recover_size(struct ubifs_info *c) } if (e->exists && e->i_size < e->d_size) { - if (c->ro_mount) { - /* Fix the inode size and pin it in memory */ - struct inode *inode; - struct ubifs_inode *ui; - - ubifs_assert(c, !e->inode); - - inode = ubifs_iget(c->vfs_sb, e->inum); - if (IS_ERR(inode)) - return PTR_ERR(inode); - - ui = ubifs_inode(inode); - if (inode->i_size < e->d_size) { - dbg_rcvry("ino %lu size %lld -> %lld", - (unsigned long)e->inum, - inode->i_size, e->d_size); - inode->i_size = e->d_size; - ui->ui_size = e->d_size; - ui->synced_i_size = e->d_size; - e->inode = inode; - this = rb_next(this); - continue; - } - iput(inode); - } else { - /* Fix the size in place */ + ubifs_assert(c, !(c->ro_mount && in_place)); + + /* + * We found data that is outside the found inode size, + * fix up the inode size + */ + + if (in_place) { err = fix_size_in_place(c, e); if (err) return err; iput(e->inode); + } else { + err = inode_fix_size(c, e); + if (err) + return err; + continue; + } } - this = rb_next(this); rb_erase(&e->rb, &c->size_tree); kfree(e); } diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c index 4844538eb926..75f961c4c044 100644 --- a/fs/ubifs/replay.c +++ b/fs/ubifs/replay.c @@ -34,6 +34,8 @@ #include "ubifs.h" #include <linux/list_sort.h> +#include <crypto/hash.h> +#include <crypto/algapi.h> /** * struct replay_entry - replay list entry.
@@ -56,6 +58,7 @@ struct replay_entry { int lnum; int offs; int len; + u8 hash[UBIFS_HASH_ARR_SZ]; unsigned int deletion:1; unsigned long long sqnum; struct list_head list; @@ -228,7 +231,7 @@ static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r) err = ubifs_tnc_remove_nm(c, &r->key, &r->nm); else err = ubifs_tnc_add_nm(c, &r->key, r->lnum, r->offs, - r->len, &r->nm); + r->len, r->hash, &r->nm); } else { if (r->deletion) switch (key_type(c, &r->key)) { @@ -248,7 +251,7 @@ static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r) } else err = ubifs_tnc_add(c, &r->key, r->lnum, r->offs, - r->len); + r->len, r->hash); if (err) return err; @@ -352,9 +355,9 @@ static void destroy_replay_list(struct ubifs_info *c) * in case of success and a negative error code in case of failure. */ static int insert_node(struct ubifs_info *c, int lnum, int offs, int len, - union ubifs_key *key, unsigned long long sqnum, - int deletion, int *used, loff_t old_size, - loff_t new_size) + const u8 *hash, union ubifs_key *key, + unsigned long long sqnum, int deletion, int *used, + loff_t old_size, loff_t new_size) { struct replay_entry *r; @@ -372,6 +375,7 @@ static int insert_node(struct ubifs_info *c, int lnum, int offs, int len, r->lnum = lnum; r->offs = offs; r->len = len; + ubifs_copy_hash(c, hash, r->hash); r->deletion = !!deletion; r->sqnum = sqnum; key_copy(c, key, &r->key); @@ -400,8 +404,9 @@ static int insert_node(struct ubifs_info *c, int lnum, int offs, int len, * negative error code in case of failure. */ static int insert_dent(struct ubifs_info *c, int lnum, int offs, int len, - union ubifs_key *key, const char *name, int nlen, - unsigned long long sqnum, int deletion, int *used) + const u8 *hash, union ubifs_key *key, + const char *name, int nlen, unsigned long long sqnum, + int deletion, int *used) { struct replay_entry *r; char *nbuf; @@ -425,6 +430,7 @@ static int insert_dent(struct ubifs_info *c, int lnum, int offs, int len, r->lnum = lnum; r->offs = offs; r->len = len; + ubifs_copy_hash(c, hash, r->hash); r->deletion = !!deletion; r->sqnum = sqnum; key_copy(c, key, &r->key); @@ -528,6 +534,105 @@ static int is_last_bud(struct ubifs_info *c, struct ubifs_bud *bud) } /** + * authenticate_sleb - authenticate one scan LEB + * @c: UBIFS file-system description object + * @sleb: the scan LEB to authenticate + * @log_hash: the log hash state up to this bud's reference node + * @is_last: if true, this is the last LEB + * + * This function iterates over the buds of a single LEB authenticating all buds + * with the authentication nodes on this LEB. Authentication nodes are written + * after some buds and contain an HMAC covering the authentication node itself + * and the buds between the last authentication node and the current + * authentication node. It can happen that the last buds cannot be authenticated + * because a powercut happened when some nodes were written but not the + * corresponding authentication node. This function returns the number of nodes + * that could be authenticated or a negative error code.
+ */ +static int authenticate_sleb(struct ubifs_info *c, struct ubifs_scan_leb *sleb, + struct shash_desc *log_hash, int is_last) +{ + int n_not_auth = 0; + struct ubifs_scan_node *snod; + int n_nodes = 0; + int err; + u8 *hash, *hmac; + + if (!ubifs_authenticated(c)) + return sleb->nodes_cnt; + + hash = kmalloc(crypto_shash_descsize(c->hash_tfm), GFP_NOFS); + hmac = kmalloc(c->hmac_desc_len, GFP_NOFS); + if (!hash || !hmac) { + err = -ENOMEM; + goto out; + } + + list_for_each_entry(snod, &sleb->nodes, list) { + + n_nodes++; + + if (snod->type == UBIFS_AUTH_NODE) { + struct ubifs_auth_node *auth = snod->node; + SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm); + SHASH_DESC_ON_STACK(hmac_desc, c->hmac_tfm); + + hash_desc->tfm = c->hash_tfm; + hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + ubifs_shash_copy_state(c, log_hash, hash_desc); + err = crypto_shash_final(hash_desc, hash); + if (err) + goto out; + + hmac_desc->tfm = c->hmac_tfm; + hmac_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + err = crypto_shash_digest(hmac_desc, hash, c->hash_len, + hmac); + if (err) + goto out; + + err = ubifs_check_hmac(c, auth->hmac, hmac); + if (err) { + err = -EPERM; + goto out; + } + n_not_auth = 0; + } else { + err = crypto_shash_update(log_hash, snod->node, + snod->len); + if (err) + goto out; + n_not_auth++; + } + } + + /* + * A powercut can happen when some nodes were written, but not yet + * the corresponding authentication node. This may only happen on + * the last bud though. + */ + if (n_not_auth) { + if (is_last) { + dbg_mnt("%d unauthenticated nodes found on LEB %d, Ignoring them", + n_not_auth, sleb->lnum); + err = 0; + } else { + dbg_mnt("%d unauthenticated nodes found on non-last LEB %d", + n_not_auth, sleb->lnum); + err = -EPERM; + } + } else { + err = 0; + } +out: + kfree(hash); + kfree(hmac); + + return err ? err : n_nodes - n_not_auth; +} + +/** * replay_bud - replay a bud logical eraseblock. * @c: UBIFS file-system description object * @b: bud entry which describes the bud @@ -540,6 +645,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b) { int is_last = is_last_bud(c, b->bud); int err = 0, used = 0, lnum = b->bud->lnum, offs = b->bud->start; + int n_nodes, n = 0; struct ubifs_scan_leb *sleb; struct ubifs_scan_node *snod; @@ -559,6 +665,15 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b) if (IS_ERR(sleb)) return PTR_ERR(sleb); + n_nodes = authenticate_sleb(c, sleb, b->bud->log_hash, is_last); + if (n_nodes < 0) { + err = n_nodes; + goto out; + } + + ubifs_shash_copy_state(c, b->bud->log_hash, + c->jheads[b->bud->jhead].log_hash); + /* * The bud does not have to start from offset zero - the beginning of * the 'lnum' LEB may contain previously committed data. 
One of the @@ -582,6 +697,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b) */ list_for_each_entry(snod, &sleb->nodes, list) { + u8 hash[UBIFS_HASH_ARR_SZ]; int deletion = 0; cond_resched(); @@ -591,6 +707,8 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b) goto out_dump; } + ubifs_node_calc_hash(c, snod->node, hash); + if (snod->sqnum > c->max_sqnum) c->max_sqnum = snod->sqnum; @@ -602,7 +720,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b) if (le32_to_cpu(ino->nlink) == 0) deletion = 1; - err = insert_node(c, lnum, snod->offs, snod->len, + err = insert_node(c, lnum, snod->offs, snod->len, hash, &snod->key, snod->sqnum, deletion, &used, 0, new_size); break; @@ -614,7 +732,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b) key_block(c, &snod->key) * UBIFS_BLOCK_SIZE; - err = insert_node(c, lnum, snod->offs, snod->len, + err = insert_node(c, lnum, snod->offs, snod->len, hash, &snod->key, snod->sqnum, deletion, &used, 0, new_size); break; @@ -628,7 +746,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b) if (err) goto out_dump; - err = insert_dent(c, lnum, snod->offs, snod->len, + err = insert_dent(c, lnum, snod->offs, snod->len, hash, &snod->key, dent->name, le16_to_cpu(dent->nlen), snod->sqnum, !le64_to_cpu(dent->inum), &used); @@ -654,11 +772,13 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b) * functions which expect nodes to have keys. */ trun_key_init(c, &key, le32_to_cpu(trun->inum)); - err = insert_node(c, lnum, snod->offs, snod->len, + err = insert_node(c, lnum, snod->offs, snod->len, hash, &key, snod->sqnum, 1, &used, old_size, new_size); break; } + case UBIFS_AUTH_NODE: + break; default: ubifs_err(c, "unexpected node type %d in bud LEB %d:%d", snod->type, lnum, snod->offs); @@ -667,6 +787,10 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b) } if (err) goto out; + + n++; + if (n == n_nodes) + break; } ubifs_assert(c, ubifs_search_bud(c, lnum)); @@ -745,6 +869,7 @@ static int add_replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead, { struct ubifs_bud *bud; struct bud_entry *b; + int err; dbg_mnt("add replay bud LEB %d:%d, head %d", lnum, offs, jhead); @@ -754,13 +879,21 @@ static int add_replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead, b = kmalloc(sizeof(struct bud_entry), GFP_KERNEL); if (!b) { - kfree(bud); - return -ENOMEM; + err = -ENOMEM; + goto out; } bud->lnum = lnum; bud->start = offs; bud->jhead = jhead; + bud->log_hash = ubifs_hash_get_desc(c); + if (IS_ERR(bud->log_hash)) { + err = PTR_ERR(bud->log_hash); + goto out; + } + + ubifs_shash_copy_state(c, c->log_hash, bud->log_hash); + ubifs_add_bud(c, bud); b->bud = bud; @@ -768,6 +901,11 @@ static int add_replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead, list_add_tail(&b->list, &c->replay_buds); return 0; +out: + kfree(bud); + kfree(b); + + return err; } /** @@ -873,6 +1011,14 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf) c->cs_sqnum = le64_to_cpu(node->ch.sqnum); dbg_mnt("commit start sqnum %llu", c->cs_sqnum); + + err = ubifs_shash_init(c, c->log_hash); + if (err) + goto out; + + err = ubifs_shash_update(c, c->log_hash, node, UBIFS_CS_NODE_SZ); + if (err < 0) + goto out; } if (snod->sqnum < c->cs_sqnum) { @@ -920,6 +1066,11 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf) if (err) goto out_dump; + err = ubifs_shash_update(c, c->log_hash, ref, + UBIFS_REF_NODE_SZ); + if (err) + goto 
out; + err = add_replay_bud(c, le32_to_cpu(ref->lnum), le32_to_cpu(ref->offs), le32_to_cpu(ref->jhead), diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c index bf17f58908ff..75a69dd26d6e 100644 --- a/fs/ubifs/sb.c +++ b/fs/ubifs/sb.c @@ -82,10 +82,13 @@ static int create_default_filesystem(struct ubifs_info *c) int err, tmp, jnl_lebs, log_lebs, max_buds, main_lebs, main_first; int lpt_lebs, lpt_first, orph_lebs, big_lpt, ino_waste, sup_flags = 0; int min_leb_cnt = UBIFS_MIN_LEB_CNT; + int idx_node_size; long long tmp64, main_bytes; __le64 tmp_le64; __le32 tmp_le32; struct timespec64 ts; + u8 hash[UBIFS_HASH_ARR_SZ]; + u8 hash_lpt[UBIFS_HASH_ARR_SZ]; /* Some functions called from here depend on the @c->key_len filed */ c->key_len = UBIFS_SK_LEN; @@ -147,7 +150,7 @@ static int create_default_filesystem(struct ubifs_info *c) c->lsave_cnt = DEFAULT_LSAVE_CNT; c->max_leb_cnt = c->leb_cnt; err = ubifs_create_dflt_lpt(c, &main_lebs, lpt_first, &lpt_lebs, - &big_lpt); + &big_lpt, hash_lpt); if (err) return err; @@ -156,17 +159,35 @@ static int create_default_filesystem(struct ubifs_info *c) main_first = c->leb_cnt - main_lebs; + sup = kzalloc(ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size), GFP_KERNEL); + mst = kzalloc(c->mst_node_alsz, GFP_KERNEL); + idx_node_size = ubifs_idx_node_sz(c, 1); + idx = kzalloc(ALIGN(tmp, c->min_io_size), GFP_KERNEL); + ino = kzalloc(ALIGN(UBIFS_INO_NODE_SZ, c->min_io_size), GFP_KERNEL); + cs = kzalloc(ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size), GFP_KERNEL); + + if (!sup || !mst || !idx || !ino || !cs) { + err = -ENOMEM; + goto out; + } + /* Create default superblock */ - tmp = ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size); - sup = kzalloc(tmp, GFP_KERNEL); - if (!sup) - return -ENOMEM; tmp64 = (long long)max_buds * c->leb_size; if (big_lpt) sup_flags |= UBIFS_FLG_BIGLPT; sup_flags |= UBIFS_FLG_DOUBLE_HASH; + if (ubifs_authenticated(c)) { + sup_flags |= UBIFS_FLG_AUTHENTICATION; + sup->hash_algo = cpu_to_le16(c->auth_hash_algo); + err = ubifs_hmac_wkm(c, sup->hmac_wkm); + if (err) + goto out; + } else { + sup->hash_algo = 0xffff; + } + sup->ch.node_type = UBIFS_SB_NODE; sup->key_hash = UBIFS_KEY_HASH_R5; sup->flags = cpu_to_le32(sup_flags); @@ -197,17 +218,9 @@ static int create_default_filesystem(struct ubifs_info *c) sup->rp_size = cpu_to_le64(tmp64); sup->ro_compat_version = cpu_to_le32(UBIFS_RO_COMPAT_VERSION); - err = ubifs_write_node(c, sup, UBIFS_SB_NODE_SZ, 0, 0); - kfree(sup); - if (err) - return err; - dbg_gen("default superblock created at LEB 0:0"); /* Create default master node */ - mst = kzalloc(c->mst_node_alsz, GFP_KERNEL); - if (!mst) - return -ENOMEM; mst->ch.node_type = UBIFS_MST_NODE; mst->log_lnum = cpu_to_le32(UBIFS_LOG_LNUM); @@ -233,6 +246,7 @@ static int create_default_filesystem(struct ubifs_info *c) mst->empty_lebs = cpu_to_le32(main_lebs - 2); mst->idx_lebs = cpu_to_le32(1); mst->leb_cnt = cpu_to_le32(c->leb_cnt); + ubifs_copy_hash(c, hash_lpt, mst->hash_lpt); /* Calculate lprops statistics */ tmp64 = main_bytes; @@ -253,24 +267,9 @@ static int create_default_filesystem(struct ubifs_info *c) mst->total_used = cpu_to_le64(UBIFS_INO_NODE_SZ); - err = ubifs_write_node(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM, 0); - if (err) { - kfree(mst); - return err; - } - err = ubifs_write_node(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM + 1, - 0); - kfree(mst); - if (err) - return err; - dbg_gen("default master node created at LEB %d:0", UBIFS_MST_LNUM); /* Create the root indexing node */ - tmp = ubifs_idx_node_sz(c, 1); - idx = kzalloc(ALIGN(tmp, c->min_io_size), GFP_KERNEL); - 
if (!idx) - return -ENOMEM; c->key_fmt = UBIFS_SIMPLE_KEY_FMT; c->key_hash = key_r5_hash; @@ -282,19 +281,11 @@ static int create_default_filesystem(struct ubifs_info *c) key_write_idx(c, &key, &br->key); br->lnum = cpu_to_le32(main_first + DEFAULT_DATA_LEB); br->len = cpu_to_le32(UBIFS_INO_NODE_SZ); - err = ubifs_write_node(c, idx, tmp, main_first + DEFAULT_IDX_LEB, 0); - kfree(idx); - if (err) - return err; dbg_gen("default root indexing node created LEB %d:0", main_first + DEFAULT_IDX_LEB); /* Create default root inode */ - tmp = ALIGN(UBIFS_INO_NODE_SZ, c->min_io_size); - ino = kzalloc(tmp, GFP_KERNEL); - if (!ino) - return -ENOMEM; ino_key_init_flash(c, &ino->key, UBIFS_ROOT_INO); ino->ch.node_type = UBIFS_INO_NODE; @@ -317,12 +308,6 @@ static int create_default_filesystem(struct ubifs_info *c) /* Set compression enabled by default */ ino->flags = cpu_to_le32(UBIFS_COMPR_FL); - err = ubifs_write_node(c, ino, UBIFS_INO_NODE_SZ, - main_first + DEFAULT_DATA_LEB, 0); - kfree(ino); - if (err) - return err; - dbg_gen("root inode created at LEB %d:0", main_first + DEFAULT_DATA_LEB); @@ -331,19 +316,54 @@ static int create_default_filesystem(struct ubifs_info *c) * always the case during normal file-system operation. Write a fake * commit start node to the log. */ - tmp = ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size); - cs = kzalloc(tmp, GFP_KERNEL); - if (!cs) - return -ENOMEM; cs->ch.node_type = UBIFS_CS_NODE; + + err = ubifs_write_node_hmac(c, sup, UBIFS_SB_NODE_SZ, 0, 0, + offsetof(struct ubifs_sb_node, hmac)); + if (err) + goto out; + + err = ubifs_write_node(c, ino, UBIFS_INO_NODE_SZ, + main_first + DEFAULT_DATA_LEB, 0); + if (err) + goto out; + + ubifs_node_calc_hash(c, ino, hash); + ubifs_copy_hash(c, hash, ubifs_branch_hash(c, br)); + + err = ubifs_write_node(c, idx, idx_node_size, main_first + DEFAULT_IDX_LEB, 0); + if (err) + goto out; + + ubifs_node_calc_hash(c, idx, hash); + ubifs_copy_hash(c, hash, mst->hash_root_idx); + + err = ubifs_write_node_hmac(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM, 0, + offsetof(struct ubifs_mst_node, hmac)); + if (err) + goto out; + + err = ubifs_write_node_hmac(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM + 1, + 0, offsetof(struct ubifs_mst_node, hmac)); + if (err) + goto out; + err = ubifs_write_node(c, cs, UBIFS_CS_NODE_SZ, UBIFS_LOG_LNUM, 0); - kfree(cs); if (err) - return err; + goto out; ubifs_msg(c, "default file-system created"); - return 0; + + err = 0; +out: + kfree(sup); + kfree(mst); + kfree(idx); + kfree(ino); + kfree(cs); + + return err; } /** @@ -498,7 +518,7 @@ failed: * code. Note, the user of this function is responsible of kfree()'ing the * returned superblock buffer. 
*/ -struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c) +static struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c) { struct ubifs_sb_node *sup; int err; @@ -517,6 +537,65 @@ struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c) return sup; } +static int authenticate_sb_node(struct ubifs_info *c, + const struct ubifs_sb_node *sup) +{ + unsigned int sup_flags = le32_to_cpu(sup->flags); + u8 hmac_wkm[UBIFS_HMAC_ARR_SZ]; + int authenticated = !!(sup_flags & UBIFS_FLG_AUTHENTICATION); + int hash_algo; + int err; + + if (c->authenticated && !authenticated) { + ubifs_err(c, "authenticated FS forced, but found FS without authentication"); + return -EINVAL; + } + + if (!c->authenticated && authenticated) { + ubifs_err(c, "authenticated FS found, but no key given"); + return -EINVAL; + } + + ubifs_msg(c, "Mounting in %sauthenticated mode", + c->authenticated ? "" : "un"); + + if (!c->authenticated) + return 0; + + if (!IS_ENABLED(CONFIG_UBIFS_FS_AUTHENTICATION)) + return -EOPNOTSUPP; + + hash_algo = le16_to_cpu(sup->hash_algo); + if (hash_algo >= HASH_ALGO__LAST) { + ubifs_err(c, "superblock uses unknown hash algo %d", + hash_algo); + return -EINVAL; + } + + if (strcmp(hash_algo_name[hash_algo], c->auth_hash_name)) { + ubifs_err(c, "This filesystem uses %s for hashing," + " but %s is specified", hash_algo_name[hash_algo], + c->auth_hash_name); + return -EINVAL; + } + + err = ubifs_hmac_wkm(c, hmac_wkm); + if (err) + return err; + + if (ubifs_check_hmac(c, hmac_wkm, sup->hmac_wkm)) { + ubifs_err(c, "provided key does not fit"); + return -ENOKEY; + } + + err = ubifs_node_verify_hmac(c, sup, sizeof(*sup), + offsetof(struct ubifs_sb_node, hmac)); + if (err) + ubifs_err(c, "Failed to authenticate superblock: %d", err); + + return err; +} + /** * ubifs_write_sb_node - write superblock node. 
* @c: UBIFS file-system description object @@ -527,8 +606,13 @@ struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c) int ubifs_write_sb_node(struct ubifs_info *c, struct ubifs_sb_node *sup) { int len = ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size); + int err; + + err = ubifs_prepare_node_hmac(c, sup, UBIFS_SB_NODE_SZ, + offsetof(struct ubifs_sb_node, hmac), 1); + if (err) + return err; - ubifs_prepare_node(c, sup, UBIFS_SB_NODE_SZ, 1); return ubifs_leb_change(c, UBIFS_SB_LNUM, sup, len); } @@ -555,6 +639,8 @@ int ubifs_read_superblock(struct ubifs_info *c) if (IS_ERR(sup)) return PTR_ERR(sup); + c->sup_node = sup; + c->fmt_version = le32_to_cpu(sup->fmt_version); c->ro_compat_version = le32_to_cpu(sup->ro_compat_version); @@ -603,7 +689,7 @@ int ubifs_read_superblock(struct ubifs_info *c) c->key_hash = key_test_hash; c->key_hash_type = UBIFS_KEY_HASH_TEST; break; - }; + } c->key_fmt = sup->key_fmt; @@ -640,6 +726,10 @@ int ubifs_read_superblock(struct ubifs_info *c) c->double_hash = !!(sup_flags & UBIFS_FLG_DOUBLE_HASH); c->encrypted = !!(sup_flags & UBIFS_FLG_ENCRYPTION); + err = authenticate_sb_node(c, sup); + if (err) + goto out; + if ((sup_flags & ~UBIFS_FLG_MASK) != 0) { ubifs_err(c, "Unknown feature flags found: %#x", sup_flags & ~UBIFS_FLG_MASK); @@ -686,7 +776,6 @@ int ubifs_read_superblock(struct ubifs_info *c) err = validate_sb(c, sup); out: - kfree(sup); return err; } @@ -815,7 +904,7 @@ out: int ubifs_fixup_free_space(struct ubifs_info *c) { int err; - struct ubifs_sb_node *sup; + struct ubifs_sb_node *sup = c->sup_node; ubifs_assert(c, c->space_fixup); ubifs_assert(c, !c->ro_mount); @@ -826,16 +915,11 @@ int ubifs_fixup_free_space(struct ubifs_info *c) if (err) return err; - sup = ubifs_read_sb_node(c); - if (IS_ERR(sup)) - return PTR_ERR(sup); - /* Free-space fixup is no longer required */ c->space_fixup = 0; sup->flags &= cpu_to_le32(~UBIFS_FLG_SPACE_FIXUP); err = ubifs_write_sb_node(c, sup); - kfree(sup); if (err) return err; @@ -846,7 +930,7 @@ int ubifs_fixup_free_space(struct ubifs_info *c) int ubifs_enable_encryption(struct ubifs_info *c) { int err; - struct ubifs_sb_node *sup; + struct ubifs_sb_node *sup = c->sup_node; if (c->encrypted) return 0; @@ -859,16 +943,11 @@ int ubifs_enable_encryption(struct ubifs_info *c) return -EINVAL; } - sup = ubifs_read_sb_node(c); - if (IS_ERR(sup)) - return PTR_ERR(sup); - sup->flags |= cpu_to_le32(UBIFS_FLG_ENCRYPTION); err = ubifs_write_sb_node(c, sup); if (!err) c->encrypted = 1; - kfree(sup); return err; } diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index fec62e9dfbe6..1fac1133dadd 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -579,6 +579,9 @@ static int init_constants_early(struct ubifs_info *c) c->ranges[UBIFS_REF_NODE].len = UBIFS_REF_NODE_SZ; c->ranges[UBIFS_TRUN_NODE].len = UBIFS_TRUN_NODE_SZ; c->ranges[UBIFS_CS_NODE].len = UBIFS_CS_NODE_SZ; + c->ranges[UBIFS_AUTH_NODE].min_len = UBIFS_AUTH_NODE_SZ; + c->ranges[UBIFS_AUTH_NODE].max_len = UBIFS_AUTH_NODE_SZ + + UBIFS_MAX_HMAC_LEN; c->ranges[UBIFS_INO_NODE].min_len = UBIFS_INO_NODE_SZ; c->ranges[UBIFS_INO_NODE].max_len = UBIFS_MAX_INO_NODE_SZ; @@ -816,6 +819,9 @@ static int alloc_wbufs(struct ubifs_info *c) c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback; c->jheads[i].wbuf.jhead = i; c->jheads[i].grouped = 1; + c->jheads[i].log_hash = ubifs_hash_get_desc(c); + if (IS_ERR(c->jheads[i].log_hash)) + goto out; } /* @@ -826,6 +832,12 @@ static int alloc_wbufs(struct ubifs_info *c) c->jheads[GCHD].grouped = 0; return 0; + +out: + while (i--) + 
kfree(c->jheads[i].log_hash); + + return err; } /** @@ -840,6 +852,7 @@ static void free_wbufs(struct ubifs_info *c) for (i = 0; i < c->jhead_cnt; i++) { kfree(c->jheads[i].wbuf.buf); kfree(c->jheads[i].wbuf.inodes); + kfree(c->jheads[i].log_hash); } kfree(c->jheads); c->jheads = NULL; @@ -924,6 +937,8 @@ static int check_volume_empty(struct ubifs_info *c) * Opt_no_chk_data_crc: do not check CRCs when reading data nodes * Opt_override_compr: override default compressor * Opt_assert: set ubifs_assert() action + * Opt_auth_key: The key name used for authentication + * Opt_auth_hash_name: The hash type used for authentication * Opt_err: just end of array marker */ enum { @@ -935,6 +950,8 @@ enum { Opt_no_chk_data_crc, Opt_override_compr, Opt_assert, + Opt_auth_key, + Opt_auth_hash_name, Opt_ignore, Opt_err, }; @@ -947,6 +964,8 @@ static const match_table_t tokens = { {Opt_chk_data_crc, "chk_data_crc"}, {Opt_no_chk_data_crc, "no_chk_data_crc"}, {Opt_override_compr, "compr=%s"}, + {Opt_auth_key, "auth_key=%s"}, + {Opt_auth_hash_name, "auth_hash_name=%s"}, {Opt_ignore, "ubi=%s"}, {Opt_ignore, "vol=%s"}, {Opt_assert, "assert=%s"}, @@ -1070,6 +1089,16 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options, kfree(act); break; } + case Opt_auth_key: + c->auth_key_name = kstrdup(args[0].from, GFP_KERNEL); + if (!c->auth_key_name) + return -ENOMEM; + break; + case Opt_auth_hash_name: + c->auth_hash_name = kstrdup(args[0].from, GFP_KERNEL); + if (!c->auth_hash_name) + return -ENOMEM; + break; case Opt_ignore: break; default: @@ -1249,6 +1278,19 @@ static int mount_ubifs(struct ubifs_info *c) c->mounting = 1; + if (c->auth_key_name) { + if (IS_ENABLED(CONFIG_UBIFS_FS_AUTHENTICATION)) { + err = ubifs_init_authentication(c); + if (err) + goto out_free; + } else { + ubifs_err(c, "auth_key_name, but UBIFS is built without" + " authentication support"); + err = -EINVAL; + goto out_free; + } + } + err = ubifs_read_superblock(c); if (err) goto out_free; @@ -1367,12 +1409,21 @@ static int mount_ubifs(struct ubifs_info *c) } if (c->need_recovery) { - err = ubifs_recover_size(c); - if (err) - goto out_orphans; + if (!ubifs_authenticated(c)) { + err = ubifs_recover_size(c, true); + if (err) + goto out_orphans; + } + err = ubifs_rcvry_gc_commit(c); if (err) goto out_orphans; + + if (ubifs_authenticated(c)) { + err = ubifs_recover_size(c, false); + if (err) + goto out_orphans; + } } else { err = take_gc_lnum(c); if (err) @@ -1391,7 +1442,7 @@ static int mount_ubifs(struct ubifs_info *c) if (err) goto out_orphans; } else if (c->need_recovery) { - err = ubifs_recover_size(c); + err = ubifs_recover_size(c, false); if (err) goto out_orphans; } else { @@ -1557,7 +1608,10 @@ static void ubifs_umount(struct ubifs_info *c) free_wbufs(c); free_orphans(c); ubifs_lpt_free(c, 0); + ubifs_exit_authentication(c); + kfree(c->auth_key_name); + kfree(c->auth_hash_name); kfree(c->cbuf); kfree(c->rcvrd_mst_node); kfree(c->mst_node); @@ -1605,16 +1659,10 @@ static int ubifs_remount_rw(struct ubifs_info *c) goto out; if (c->old_leb_cnt != c->leb_cnt) { - struct ubifs_sb_node *sup; + struct ubifs_sb_node *sup = c->sup_node; - sup = ubifs_read_sb_node(c); - if (IS_ERR(sup)) { - err = PTR_ERR(sup); - goto out; - } sup->leb_cnt = cpu_to_le32(c->leb_cnt); err = ubifs_write_sb_node(c, sup); - kfree(sup); if (err) goto out; } @@ -1624,9 +1672,11 @@ static int ubifs_remount_rw(struct ubifs_info *c) err = ubifs_write_rcvrd_mst_node(c); if (err) goto out; - err = ubifs_recover_size(c); - if (err) - goto out; + if 
(!ubifs_authenticated(c)) { + err = ubifs_recover_size(c, true); + if (err) + goto out; + } err = ubifs_clean_lebs(c, c->sbuf); if (err) goto out; @@ -1692,10 +1742,19 @@ static int ubifs_remount_rw(struct ubifs_info *c) goto out; } - if (c->need_recovery) + if (c->need_recovery) { err = ubifs_rcvry_gc_commit(c); - else + if (err) + goto out; + + if (ubifs_authenticated(c)) { + err = ubifs_recover_size(c, false); + if (err) + goto out; + } + } else { err = ubifs_leb_unmap(c, c->gc_lnum); + } if (err) goto out; diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index bf416e512743..25572ffea163 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -35,7 +35,7 @@ #include "ubifs.h" static int try_read_node(const struct ubifs_info *c, void *buf, int type, - int len, int lnum, int offs); + struct ubifs_zbranch *zbr); static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key, struct ubifs_zbranch *zbr, void *node); @@ -433,9 +433,7 @@ static int tnc_read_hashed_node(struct ubifs_info *c, struct ubifs_zbranch *zbr, * @c: UBIFS file-system description object * @buf: buffer to read to * @type: node type - * @len: node length (not aligned) - * @lnum: LEB number of node to read - * @offs: offset of node to read + * @zbr: the zbranch describing the node to read * * This function tries to read a node of known type and length, checks it and * stores it in @buf. This function returns %1 if a node is present and %0 if @@ -453,8 +451,11 @@ static int tnc_read_hashed_node(struct ubifs_info *c, struct ubifs_zbranch *zbr, * journal nodes may potentially be corrupted, so checking is required. */ static int try_read_node(const struct ubifs_info *c, void *buf, int type, - int len, int lnum, int offs) + struct ubifs_zbranch *zbr) { + int len = zbr->len; + int lnum = zbr->lnum; + int offs = zbr->offs; int err, node_len; struct ubifs_ch *ch = buf; uint32_t crc, node_crc; @@ -487,6 +488,12 @@ static int try_read_node(const struct ubifs_info *c, void *buf, int type, if (crc != node_crc) return 0; + err = ubifs_node_check_hash(c, buf, zbr->hash); + if (err) { + ubifs_bad_hash(c, buf, zbr->hash, lnum, offs); + return 0; + } + return 1; } @@ -507,8 +514,7 @@ static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key, dbg_tnck(key, "LEB %d:%d, key ", zbr->lnum, zbr->offs); - ret = try_read_node(c, node, key_type(c, key), zbr->len, zbr->lnum, - zbr->offs); + ret = try_read_node(c, node, key_type(c, key), zbr); if (ret == 1) { union ubifs_key node_key; struct ubifs_dent_node *dent = node; @@ -1713,6 +1719,12 @@ static int validate_data_node(struct ubifs_info *c, void *buf, goto out; } + err = ubifs_node_check_hash(c, buf, zbr->hash); + if (err) { + ubifs_bad_hash(c, buf, zbr->hash, zbr->lnum, zbr->offs); + return err; + } + len = le32_to_cpu(ch->len); if (len != zbr->len) { ubifs_err(c, "bad node length %d, expected %d", len, zbr->len); @@ -2260,13 +2272,14 @@ do_split: * @lnum: LEB number of node * @offs: node offset * @len: node length + * @hash: The hash over the node * * This function adds a node with key @key to TNC. The node may be new or it may * obsolete some existing one. Returns %0 on success or negative error code on * failure. 
*/ int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum, - int offs, int len) + int offs, int len, const u8 *hash) { int found, n, err = 0; struct ubifs_znode *znode; @@ -2281,6 +2294,7 @@ int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum, zbr.lnum = lnum; zbr.offs = offs; zbr.len = len; + ubifs_copy_hash(c, hash, zbr.hash); key_copy(c, key, &zbr.key); err = tnc_insert(c, znode, &zbr, n + 1); } else if (found == 1) { @@ -2291,6 +2305,7 @@ int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum, zbr->lnum = lnum; zbr->offs = offs; zbr->len = len; + ubifs_copy_hash(c, hash, zbr->hash); } else err = found; if (!err) @@ -2392,13 +2407,14 @@ out_unlock: * @lnum: LEB number of node * @offs: node offset * @len: node length + * @hash: The hash over the node * @nm: node name * * This is the same as 'ubifs_tnc_add()' but it should be used with keys which * may have collisions, like directory entry keys. */ int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key, - int lnum, int offs, int len, + int lnum, int offs, int len, const u8 *hash, const struct fscrypt_name *nm) { int found, n, err = 0; @@ -2441,6 +2457,7 @@ int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key, zbr->lnum = lnum; zbr->offs = offs; zbr->len = len; + ubifs_copy_hash(c, hash, zbr->hash); goto out_unlock; } } @@ -2452,6 +2469,7 @@ int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key, zbr.lnum = lnum; zbr.offs = offs; zbr.len = len; + ubifs_copy_hash(c, hash, zbr.hash); key_copy(c, key, &zbr.key); err = tnc_insert(c, znode, &zbr, n + 1); if (err) diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c index dba87d09b989..dbcd2c350b65 100644 --- a/fs/ubifs/tnc_commit.c +++ b/fs/ubifs/tnc_commit.c @@ -38,6 +38,7 @@ static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx, struct ubifs_znode *znode, int lnum, int offs, int len) { struct ubifs_znode *zp; + u8 hash[UBIFS_HASH_ARR_SZ]; int i, err; /* Make index node */ @@ -52,6 +53,7 @@ static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx, br->lnum = cpu_to_le32(zbr->lnum); br->offs = cpu_to_le32(zbr->offs); br->len = cpu_to_le32(zbr->len); + ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br)); if (!zbr->lnum || !zbr->len) { ubifs_err(c, "bad ref in znode"); ubifs_dump_znode(c, znode); @@ -62,6 +64,7 @@ static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx, } } ubifs_prepare_node(c, idx, len, 0); + ubifs_node_calc_hash(c, idx, hash); znode->lnum = lnum; znode->offs = offs; @@ -78,10 +81,12 @@ static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx, zbr->lnum = lnum; zbr->offs = offs; zbr->len = len; + ubifs_copy_hash(c, hash, zbr->hash); } else { c->zroot.lnum = lnum; c->zroot.offs = offs; c->zroot.len = len; + ubifs_copy_hash(c, hash, c->zroot.hash); } c->calc_idx_sz += ALIGN(len, 8); @@ -647,6 +652,8 @@ static int get_znodes_to_commit(struct ubifs_info *c) znode->cnext = c->cnext; break; } + znode->cparent = znode->parent; + znode->ciip = znode->iip; znode->cnext = cnext; znode = cnext; cnt += 1; @@ -840,6 +847,8 @@ static int write_index(struct ubifs_info *c) } while (1) { + u8 hash[UBIFS_HASH_ARR_SZ]; + cond_resched(); znode = cnext; @@ -857,6 +866,7 @@ static int write_index(struct ubifs_info *c) br->lnum = cpu_to_le32(zbr->lnum); br->offs = cpu_to_le32(zbr->offs); br->len = cpu_to_le32(zbr->len); + ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br)); if (!zbr->lnum || !zbr->len) 
{ ubifs_err(c, "bad ref in znode"); ubifs_dump_znode(c, znode); @@ -868,6 +878,23 @@ static int write_index(struct ubifs_info *c) } len = ubifs_idx_node_sz(c, znode->child_cnt); ubifs_prepare_node(c, idx, len, 0); + ubifs_node_calc_hash(c, idx, hash); + + mutex_lock(&c->tnc_mutex); + + if (znode->cparent) + ubifs_copy_hash(c, hash, + znode->cparent->zbranch[znode->ciip].hash); + + if (znode->parent) { + if (!ubifs_zn_obsolete(znode)) + ubifs_copy_hash(c, hash, + znode->parent->zbranch[znode->iip].hash); + } else { + ubifs_copy_hash(c, hash, c->zroot.hash); + } + + mutex_unlock(&c->tnc_mutex); /* Determine the index node position */ if (lnum == -1) { diff --git a/fs/ubifs/tnc_misc.c b/fs/ubifs/tnc_misc.c index d90ee01076a9..d1815e959007 100644 --- a/fs/ubifs/tnc_misc.c +++ b/fs/ubifs/tnc_misc.c @@ -265,9 +265,7 @@ long ubifs_destroy_tnc_subtree(const struct ubifs_info *c, /** * read_znode - read an indexing node from flash and fill znode. * @c: UBIFS file-system description object - * @lnum: LEB of the indexing node to read - * @offs: node offset - * @len: node length + * @zzbr: the zbranch describing the node to read * @znode: znode to read to * * This function reads an indexing node from the flash media and fills znode @@ -276,9 +274,12 @@ long ubifs_destroy_tnc_subtree(const struct ubifs_info *c, * is wrong with it, this function prints complaint messages and returns * %-EINVAL. */ -static int read_znode(struct ubifs_info *c, int lnum, int offs, int len, +static int read_znode(struct ubifs_info *c, struct ubifs_zbranch *zzbr, struct ubifs_znode *znode) { + int lnum = zzbr->lnum; + int offs = zzbr->offs; + int len = zzbr->len; int i, err, type, cmp; struct ubifs_idx_node *idx; @@ -292,6 +293,12 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len, return err; } + err = ubifs_node_check_hash(c, idx, zzbr->hash); + if (err) { + ubifs_bad_hash(c, idx, zzbr->hash, lnum, offs); + return err; + } + znode->child_cnt = le16_to_cpu(idx->child_cnt); znode->level = le16_to_cpu(idx->level); @@ -308,13 +315,14 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len, } for (i = 0; i < znode->child_cnt; i++) { - const struct ubifs_branch *br = ubifs_idx_branch(c, idx, i); + struct ubifs_branch *br = ubifs_idx_branch(c, idx, i); struct ubifs_zbranch *zbr = &znode->zbranch[i]; key_read(c, &br->key, &zbr->key); zbr->lnum = le32_to_cpu(br->lnum); zbr->offs = le32_to_cpu(br->offs); zbr->len = le32_to_cpu(br->len); + ubifs_copy_hash(c, ubifs_branch_hash(c, br), zbr->hash); zbr->znode = NULL; /* Validate branch */ @@ -425,7 +433,7 @@ struct ubifs_znode *ubifs_load_znode(struct ubifs_info *c, if (!znode) return ERR_PTR(-ENOMEM); - err = read_znode(c, zbr->lnum, zbr->offs, zbr->len, znode); + err = read_znode(c, zbr, znode); if (err) goto out; @@ -496,5 +504,11 @@ int ubifs_tnc_read_node(struct ubifs_info *c, struct ubifs_zbranch *zbr, return -EINVAL; } + err = ubifs_node_check_hash(c, node, zbr->hash); + if (err) { + ubifs_bad_hash(c, node, zbr->hash, zbr->lnum, zbr->offs); + return err; + } + return 0; } diff --git a/fs/ubifs/ubifs-media.h b/fs/ubifs/ubifs-media.h index e8c23c9d4f4a..8b7c1844014f 100644 --- a/fs/ubifs/ubifs-media.h +++ b/fs/ubifs/ubifs-media.h @@ -286,6 +286,7 @@ enum { #define UBIFS_IDX_NODE_SZ sizeof(struct ubifs_idx_node) #define UBIFS_CS_NODE_SZ sizeof(struct ubifs_cs_node) #define UBIFS_ORPH_NODE_SZ sizeof(struct ubifs_orph_node) +#define UBIFS_AUTH_NODE_SZ sizeof(struct ubifs_auth_node) /* Extended attribute entry nodes are identical to directory 
entry nodes */ #define UBIFS_XENT_NODE_SZ UBIFS_DENT_NODE_SZ /* Only this does not have to be multiple of 8 bytes */ @@ -300,6 +301,12 @@ enum { /* The largest UBIFS node */ #define UBIFS_MAX_NODE_SZ UBIFS_MAX_INO_NODE_SZ +/* The maximum size of a hash, enough for sha512 */ +#define UBIFS_MAX_HASH_LEN 64 + +/* The maximum size of a hmac, enough for hmac(sha512) */ +#define UBIFS_MAX_HMAC_LEN 64 + /* * xattr name of UBIFS encryption context, we don't use a prefix * nor a long name to not waste space on the flash. @@ -365,6 +372,7 @@ enum { * UBIFS_IDX_NODE: index node * UBIFS_CS_NODE: commit start node * UBIFS_ORPH_NODE: orphan node + * UBIFS_AUTH_NODE: authentication node * UBIFS_NODE_TYPES_CNT: count of supported node types * * Note, we index arrays by these numbers, so keep them low and contiguous. @@ -384,6 +392,7 @@ enum { UBIFS_IDX_NODE, UBIFS_CS_NODE, UBIFS_ORPH_NODE, + UBIFS_AUTH_NODE, UBIFS_NODE_TYPES_CNT, }; @@ -421,15 +430,19 @@ enum { * UBIFS_FLG_DOUBLE_HASH: store a 32bit cookie in directory entry nodes to * support 64bit cookies for lookups by hash * UBIFS_FLG_ENCRYPTION: this filesystem contains encrypted files + * UBIFS_FLG_AUTHENTICATION: this filesystem contains hashes for authentication */ enum { UBIFS_FLG_BIGLPT = 0x02, UBIFS_FLG_SPACE_FIXUP = 0x04, UBIFS_FLG_DOUBLE_HASH = 0x08, UBIFS_FLG_ENCRYPTION = 0x10, + UBIFS_FLG_AUTHENTICATION = 0x20, }; -#define UBIFS_FLG_MASK (UBIFS_FLG_BIGLPT|UBIFS_FLG_SPACE_FIXUP|UBIFS_FLG_DOUBLE_HASH|UBIFS_FLG_ENCRYPTION) +#define UBIFS_FLG_MASK (UBIFS_FLG_BIGLPT | UBIFS_FLG_SPACE_FIXUP | \ + UBIFS_FLG_DOUBLE_HASH | UBIFS_FLG_ENCRYPTION | \ + UBIFS_FLG_AUTHENTICATION) /** * struct ubifs_ch - common header node. @@ -633,6 +646,10 @@ struct ubifs_pad_node { * @time_gran: time granularity in nanoseconds * @uuid: UUID generated when the file system image was created * @ro_compat_version: UBIFS R/O compatibility version + * @hmac: HMAC to authenticate the superblock node + * @hmac_wkm: HMAC of a well known message (the string "UBIFS") as a convenience + * to the user to check if the correct key is passed. 
+ * @hash_algo: The hash algo used for this filesystem (one of enum hash_algo) */ struct ubifs_sb_node { struct ubifs_ch ch; @@ -660,7 +677,10 @@ struct ubifs_sb_node { __le32 time_gran; __u8 uuid[16]; __le32 ro_compat_version; - __u8 padding2[3968]; + __u8 hmac[UBIFS_MAX_HMAC_LEN]; + __u8 hmac_wkm[UBIFS_MAX_HMAC_LEN]; + __le16 hash_algo; + __u8 padding2[3838]; } __packed; /** @@ -695,6 +715,9 @@ struct ubifs_sb_node { * @empty_lebs: number of empty logical eraseblocks * @idx_lebs: number of indexing logical eraseblocks * @leb_cnt: count of LEBs used by file-system + * @hash_root_idx: the hash of the root index node + * @hash_lpt: the hash of the LPT + * @hmac: HMAC to authenticate the master node * @padding: reserved for future, zeroes */ struct ubifs_mst_node { @@ -727,7 +750,10 @@ struct ubifs_mst_node { __le32 empty_lebs; __le32 idx_lebs; __le32 leb_cnt; - __u8 padding[344]; + __u8 hash_root_idx[UBIFS_MAX_HASH_LEN]; + __u8 hash_lpt[UBIFS_MAX_HASH_LEN]; + __u8 hmac[UBIFS_MAX_HMAC_LEN]; + __u8 padding[152]; } __packed; /** @@ -747,11 +773,25 @@ struct ubifs_ref_node { } __packed; /** + * struct ubifs_auth_node - node for authenticating other nodes + * @ch: common header + * @hmac: The HMAC + */ +struct ubifs_auth_node { + struct ubifs_ch ch; + __u8 hmac[]; +} __packed; + +/** * struct ubifs_branch - key/reference/length branch * @lnum: LEB number of the target node * @offs: offset within @lnum * @len: target node length * @key: key + * + * In an authenticated UBIFS we have the hash of the referenced node after @key. + * This can't be added to the struct type definition because @key is a + * dynamically sized element already. */ struct ubifs_branch { __le32 lnum; diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 4368cde476b0..38401adaa00d 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -39,6 +39,9 @@ #include <linux/security.h> #include <linux/xattr.h> #include <linux/random.h> +#include <crypto/hash_info.h> +#include <crypto/hash.h> +#include <crypto/algapi.h> #define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_UBIFS_FS_ENCRYPTION) #include <linux/fscrypt.h> @@ -157,6 +160,14 @@ /* Maximum number of data nodes to bulk-read */ #define UBIFS_MAX_BULK_READ 32 +#ifdef CONFIG_UBIFS_FS_AUTHENTICATION +#define UBIFS_HASH_ARR_SZ UBIFS_MAX_HASH_LEN +#define UBIFS_HMAC_ARR_SZ UBIFS_MAX_HMAC_LEN +#else +#define UBIFS_HASH_ARR_SZ 0 +#define UBIFS_HMAC_ARR_SZ 0 +#endif + /* * Lockdep classes for UBIFS inode @ui_mutex. */ @@ -706,6 +717,7 @@ struct ubifs_wbuf { * @jhead: journal head number this bud belongs to * @list: link in the list buds belonging to the same journal head * @rb: link in the tree of all buds + * @log_hash: the log hash from the commit start node up to this bud */ struct ubifs_bud { int lnum; @@ -713,6 +725,7 @@ struct ubifs_bud { int jhead; struct list_head list; struct rb_node rb; + struct shash_desc *log_hash; }; /** @@ -720,6 +733,7 @@ struct ubifs_bud { * @wbuf: head's write-buffer * @buds_list: list of bud LEBs belonging to this journal head * @grouped: non-zero if UBIFS groups nodes when writing to this journal head + * @log_hash: the log hash from the commit start node up to this journal head * * Note, the @buds list is protected by the @c->buds_lock. 
*/ @@ -727,6 +741,7 @@ struct ubifs_jhead { struct ubifs_wbuf wbuf; struct list_head buds_list; unsigned int grouped:1; + struct shash_desc *log_hash; }; /** @@ -736,6 +751,7 @@ struct ubifs_jhead { * @lnum: LEB number of the target node (indexing node or data node) * @offs: target node offset within @lnum * @len: target node length + * @hash: the hash of the target node */ struct ubifs_zbranch { union ubifs_key key; @@ -746,12 +762,15 @@ struct ubifs_zbranch { int lnum; int offs; int len; + u8 hash[UBIFS_HASH_ARR_SZ]; }; /** * struct ubifs_znode - in-memory representation of an indexing node. * @parent: parent znode or NULL if it is the root * @cnext: next znode to commit + * @cparent: parent node for this commit + * @ciip: index in cparent's zbranch array * @flags: znode flags (%DIRTY_ZNODE, %COW_ZNODE or %OBSOLETE_ZNODE) * @time: last access time (seconds) * @level: level of the entry in the TNC tree @@ -769,6 +788,8 @@ struct ubifs_zbranch { struct ubifs_znode { struct ubifs_znode *parent; struct ubifs_znode *cnext; + struct ubifs_znode *cparent; + int ciip; unsigned long flags; time64_t time; int level; @@ -983,6 +1004,7 @@ struct ubifs_debug_info; * struct ubifs_info - UBIFS file-system description data structure * (per-superblock). * @vfs_sb: VFS @struct super_block object + * @sup_node: The super block node as read from the device * * @highest_inum: highest used inode number * @max_sqnum: current global sequence number @@ -1028,6 +1050,7 @@ struct ubifs_debug_info; * @default_compr: default compression algorithm (%UBIFS_COMPR_LZO, etc) * @rw_incompat: the media is not R/W compatible * @assert_action: action to take when a ubifs_assert() fails + * @authenticated: flag indicating the FS is mounted in authenticated mode * * @tnc_mutex: protects the Tree Node Cache (TNC), @zroot, @cnext, @enext, and * @calc_idx_sz @@ -1075,6 +1098,7 @@ struct ubifs_debug_info; * @key_hash: direntry key hash function * @key_fmt: key format * @key_len: key length + * @hash_len: The length of the index node hashes * @fanout: fanout of the index tree (number of links per indexing node) * * @min_io_size: minimal input/output unit size @@ -1210,6 +1234,15 @@ struct ubifs_debug_info; * @rp_uid: reserved pool user ID * @rp_gid: reserved pool group ID * + * @hash_tfm: the hash transformation used for hashing nodes + * @hmac_tfm: the HMAC transformation for this filesystem + * @hmac_desc_len: length of the HMAC used for authentication + * @auth_key_name: the authentication key name + * @auth_hash_name: the name of the hash algorithm used for authentication + * @auth_hash_algo: the authentication hash used for this fs + * @log_hash: the log hash from the commit start node up to the latest reference + * node. 
+ * * @empty: %1 if the UBI device is empty * @need_recovery: %1 if the file-system needs recovery * @replaying: %1 during journal replay @@ -1230,6 +1263,7 @@ struct ubifs_debug_info; */ struct ubifs_info { struct super_block *vfs_sb; + struct ubifs_sb_node *sup_node; ino_t highest_inum; unsigned long long max_sqnum; @@ -1270,6 +1304,7 @@ struct ubifs_info { unsigned int default_compr:2; unsigned int rw_incompat:1; unsigned int assert_action:2; + unsigned int authenticated:1; struct mutex tnc_mutex; struct ubifs_zbranch zroot; @@ -1314,6 +1349,7 @@ struct ubifs_info { uint32_t (*key_hash)(const char *str, int len); int key_fmt; int key_len; + int hash_len; int fanout; int min_io_size; @@ -1441,6 +1477,15 @@ struct ubifs_info { kuid_t rp_uid; kgid_t rp_gid; + struct crypto_shash *hash_tfm; + struct crypto_shash *hmac_tfm; + int hmac_desc_len; + char *auth_key_name; + char *auth_hash_name; + enum hash_algo auth_hash_algo; + + struct shash_desc *log_hash; + /* The below fields are used only during mounting and re-mounting */ unsigned int empty:1; unsigned int need_recovery:1; @@ -1471,6 +1516,195 @@ extern const struct inode_operations ubifs_dir_inode_operations; extern const struct inode_operations ubifs_symlink_inode_operations; extern struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT]; +/* auth.c */ +static inline int ubifs_authenticated(const struct ubifs_info *c) +{ + return (IS_ENABLED(CONFIG_UBIFS_FS_AUTHENTICATION)) && c->authenticated; +} + +struct shash_desc *__ubifs_hash_get_desc(const struct ubifs_info *c); +static inline struct shash_desc *ubifs_hash_get_desc(const struct ubifs_info *c) +{ + return ubifs_authenticated(c) ? __ubifs_hash_get_desc(c) : NULL; +} + +static inline int ubifs_shash_init(const struct ubifs_info *c, + struct shash_desc *desc) +{ + if (ubifs_authenticated(c)) + return crypto_shash_init(desc); + else + return 0; +} + +static inline int ubifs_shash_update(const struct ubifs_info *c, + struct shash_desc *desc, const void *buf, + unsigned int len) +{ + int err = 0; + + if (ubifs_authenticated(c)) { + err = crypto_shash_update(desc, buf, len); + if (err < 0) + return err; + } + + return 0; +} + +static inline int ubifs_shash_final(const struct ubifs_info *c, + struct shash_desc *desc, u8 *out) +{ + return ubifs_authenticated(c) ? crypto_shash_final(desc, out) : 0; +} + +int __ubifs_node_calc_hash(const struct ubifs_info *c, const void *buf, + u8 *hash); +static inline int ubifs_node_calc_hash(const struct ubifs_info *c, + const void *buf, u8 *hash) +{ + if (ubifs_authenticated(c)) + return __ubifs_node_calc_hash(c, buf, hash); + else + return 0; +} + +int ubifs_prepare_auth_node(struct ubifs_info *c, void *node, + struct shash_desc *inhash); + +/** + * ubifs_check_hash - compare two hashes + * @c: UBIFS file-system description object + * @expected: first hash + * @got: second hash + * + * Compare two hashes @expected and @got. Returns 0 when they are equal, a + * negative error code otherwise. + */ +static inline int ubifs_check_hash(const struct ubifs_info *c, + const u8 *expected, const u8 *got) +{ + return crypto_memneq(expected, got, c->hash_len); +} + +/** + * ubifs_check_hmac - compare two HMACs + * @c: UBIFS file-system description object + * @expected: first HMAC + * @got: second HMAC + * + * Compare two hashes @expected and @got. Returns 0 when they are equal, a + * negative error code otherwise. 
+ */ +static inline int ubifs_check_hmac(const struct ubifs_info *c, + const u8 *expected, const u8 *got) +{ + return crypto_memneq(expected, got, c->hmac_desc_len); +} + +void ubifs_bad_hash(const struct ubifs_info *c, const void *node, + const u8 *hash, int lnum, int offs); + +int __ubifs_node_check_hash(const struct ubifs_info *c, const void *buf, + const u8 *expected); +static inline int ubifs_node_check_hash(const struct ubifs_info *c, + const void *buf, const u8 *expected) +{ + if (ubifs_authenticated(c)) + return __ubifs_node_check_hash(c, buf, expected); + else + return 0; +} + +int ubifs_init_authentication(struct ubifs_info *c); +void __ubifs_exit_authentication(struct ubifs_info *c); +static inline void ubifs_exit_authentication(struct ubifs_info *c) +{ + if (ubifs_authenticated(c)) + __ubifs_exit_authentication(c); +} + +/** + * ubifs_branch_hash - returns a pointer to the hash of a branch + * @c: UBIFS file-system description object + * @br: branch to get the hash from + * + * This returns a pointer to the hash of a branch. Since the key already is a + * dynamically sized object we cannot use a struct member here. + */ +static inline u8 *ubifs_branch_hash(struct ubifs_info *c, + struct ubifs_branch *br) +{ + return (void *)br + sizeof(*br) + c->key_len; +} + +/** + * ubifs_copy_hash - copy a hash + * @c: UBIFS file-system description object + * @from: source hash + * @to: destination hash + * + * With authentication this copies a hash, otherwise does nothing. + */ +static inline void ubifs_copy_hash(const struct ubifs_info *c, const u8 *from, + u8 *to) +{ + if (ubifs_authenticated(c)) + memcpy(to, from, c->hash_len); +} + +int __ubifs_node_insert_hmac(const struct ubifs_info *c, void *buf, + int len, int ofs_hmac); +static inline int ubifs_node_insert_hmac(const struct ubifs_info *c, void *buf, + int len, int ofs_hmac) +{ + if (ubifs_authenticated(c)) + return __ubifs_node_insert_hmac(c, buf, len, ofs_hmac); + else + return 0; +} + +int __ubifs_node_verify_hmac(const struct ubifs_info *c, const void *buf, + int len, int ofs_hmac); +static inline int ubifs_node_verify_hmac(const struct ubifs_info *c, + const void *buf, int len, int ofs_hmac) +{ + if (ubifs_authenticated(c)) + return __ubifs_node_verify_hmac(c, buf, len, ofs_hmac); + else + return 0; +} + +/** + * ubifs_auth_node_sz - returns the size of an authentication node + * @c: UBIFS file-system description object + * + * This function returns the size of an authentication node which can + * be 0 for unauthenticated filesystems or the real size of an auth node if + * authentication is enabled. 
+ */ +static inline int ubifs_auth_node_sz(const struct ubifs_info *c) +{ + if (ubifs_authenticated(c)) + return sizeof(struct ubifs_auth_node) + c->hmac_desc_len; + else + return 0; +} + +int ubifs_hmac_wkm(struct ubifs_info *c, u8 *hmac); + +int __ubifs_shash_copy_state(const struct ubifs_info *c, struct shash_desc *src, + struct shash_desc *target); +static inline int ubifs_shash_copy_state(const struct ubifs_info *c, + struct shash_desc *src, + struct shash_desc *target) +{ + if (ubifs_authenticated(c)) + return __ubifs_shash_copy_state(c, src, target); + else + return 0; +} + /* io.c */ void ubifs_ro_mode(struct ubifs_info *c, int err); int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs, @@ -1490,9 +1724,15 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len, int lnum, int offs); int ubifs_write_node(struct ubifs_info *c, void *node, int len, int lnum, int offs); +int ubifs_write_node_hmac(struct ubifs_info *c, void *buf, int len, int lnum, + int offs, int hmac_offs); int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, int offs, int quiet, int must_chk_crc); +void ubifs_init_node(struct ubifs_info *c, void *buf, int len, int pad); +void ubifs_crc_node(struct ubifs_info *c, void *buf, int len); void ubifs_prepare_node(struct ubifs_info *c, void *buf, int len, int pad); +int ubifs_prepare_node_hmac(struct ubifs_info *c, void *node, int len, + int hmac_offs, int pad); void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last); int ubifs_io_init(struct ubifs_info *c); void ubifs_pad(const struct ubifs_info *c, void *buf, int pad); @@ -1592,11 +1832,12 @@ int ubifs_tnc_lookup_dh(struct ubifs_info *c, const union ubifs_key *key, int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key, void *node, int *lnum, int *offs); int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum, - int offs, int len); + int offs, int len, const u8 *hash); int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key, int old_lnum, int old_offs, int lnum, int offs, int len); int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key, - int lnum, int offs, int len, const struct fscrypt_name *nm); + int lnum, int offs, int len, const u8 *hash, + const struct fscrypt_name *nm); int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key); int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key, const struct fscrypt_name *nm); @@ -1659,12 +1900,12 @@ int ubifs_gc_should_commit(struct ubifs_info *c); void ubifs_wait_for_commit(struct ubifs_info *c); /* master.c */ +int ubifs_compare_master_node(struct ubifs_info *c, void *m1, void *m2); int ubifs_read_master(struct ubifs_info *c); int ubifs_write_master(struct ubifs_info *c); /* sb.c */ int ubifs_read_superblock(struct ubifs_info *c); -struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c); int ubifs_write_sb_node(struct ubifs_info *c, struct ubifs_sb_node *sup); int ubifs_fixup_free_space(struct ubifs_info *c); int ubifs_enable_encryption(struct ubifs_info *c); @@ -1693,7 +1934,7 @@ int ubifs_clear_orphans(struct ubifs_info *c); /* lpt.c */ int ubifs_calc_lpt_geom(struct ubifs_info *c); int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, - int *lpt_lebs, int *big_lpt); + int *lpt_lebs, int *big_lpt, u8 *hash); int ubifs_lpt_init(struct ubifs_info *c, int rd, int wr); struct ubifs_lprops *ubifs_lpt_lookup(struct ubifs_info *c, int lnum); struct 
ubifs_lprops *ubifs_lpt_lookup_dirty(struct ubifs_info *c, int lnum); @@ -1712,6 +1953,7 @@ struct ubifs_pnode *ubifs_get_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip); struct ubifs_nnode *ubifs_get_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip); +struct ubifs_pnode *ubifs_pnode_lookup(struct ubifs_info *c, int i); int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip); void ubifs_add_lpt_dirt(struct ubifs_info *c, int lnum, int dirty); void ubifs_add_nnode_dirt(struct ubifs_info *c, struct ubifs_nnode *nnode); @@ -1720,6 +1962,7 @@ struct ubifs_nnode *ubifs_first_nnode(struct ubifs_info *c, int *hght); /* Needed only in debugging code in lpt_commit.c */ int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf, struct ubifs_nnode *nnode); +int ubifs_lpt_calc_hash(struct ubifs_info *c, u8 *hash); /* lpt_commit.c */ int ubifs_lpt_start_commit(struct ubifs_info *c); @@ -1807,7 +2050,7 @@ int ubifs_clean_lebs(struct ubifs_info *c, void *sbuf); int ubifs_rcvry_gc_commit(struct ubifs_info *c); int ubifs_recover_size_accum(struct ubifs_info *c, union ubifs_key *key, int deletion, loff_t new_size); -int ubifs_recover_size(struct ubifs_info *c); +int ubifs_recover_size(struct ubifs_info *c, bool in_place); void ubifs_destroy_size_tree(struct ubifs_info *c); /* ioctl.c */ diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 61a5ad2600e8..53c9ab8fb777 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -919,28 +919,67 @@ out_unlock: return error; } -STATIC int -xfs_file_clone_range( - struct file *file_in, - loff_t pos_in, - struct file *file_out, - loff_t pos_out, - u64 len) -{ - return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out, - len, false); -} -STATIC int -xfs_file_dedupe_range( - struct file *file_in, - loff_t pos_in, - struct file *file_out, - loff_t pos_out, - u64 len) +loff_t +xfs_file_remap_range( + struct file *file_in, + loff_t pos_in, + struct file *file_out, + loff_t pos_out, + loff_t len, + unsigned int remap_flags) { - return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out, - len, true); + struct inode *inode_in = file_inode(file_in); + struct xfs_inode *src = XFS_I(inode_in); + struct inode *inode_out = file_inode(file_out); + struct xfs_inode *dest = XFS_I(inode_out); + struct xfs_mount *mp = src->i_mount; + loff_t remapped = 0; + xfs_extlen_t cowextsize; + int ret; + + if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY)) + return -EINVAL; + + if (!xfs_sb_version_hasreflink(&mp->m_sb)) + return -EOPNOTSUPP; + + if (XFS_FORCED_SHUTDOWN(mp)) + return -EIO; + + /* Prepare and then clone file data. */ + ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out, + &len, remap_flags); + if (ret < 0 || len == 0) + return ret; + + trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out); + + ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len, + &remapped); + if (ret) + goto out_unlock; + + /* + * Carry the cowextsize hint from src to dest if we're sharing the + * entire source file to the entire destination file, the source file + * has a cowextsize hint, and the destination file does not. 
+ */ + cowextsize = 0; + if (pos_in == 0 && len == i_size_read(inode_in) && + (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) && + pos_out == 0 && len >= i_size_read(inode_out) && + !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)) + cowextsize = src->i_d.di_cowextsize; + + ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize, + remap_flags); + +out_unlock: + xfs_reflink_remap_unlock(file_in, file_out); + if (ret) + trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_); + return remapped > 0 ? remapped : ret; } STATIC int @@ -1175,8 +1214,7 @@ const struct file_operations xfs_file_operations = { .fsync = xfs_file_fsync, .get_unmapped_area = thp_get_unmapped_area, .fallocate = xfs_file_fallocate, - .clone_file_range = xfs_file_clone_range, - .dedupe_file_range = xfs_file_dedupe_range, + .remap_file_range = xfs_file_remap_range, }; const struct file_operations xfs_dir_file_operations = { diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 8eaeec9d58ed..ecdb086bc23e 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -913,18 +913,18 @@ out_error: /* * Update destination inode size & cowextsize hint, if necessary. */ -STATIC int +int xfs_reflink_update_dest( struct xfs_inode *dest, xfs_off_t newlen, xfs_extlen_t cowextsize, - bool is_dedupe) + unsigned int remap_flags) { struct xfs_mount *mp = dest->i_mount; struct xfs_trans *tp; int error; - if (is_dedupe && newlen <= i_size_read(VFS_I(dest)) && cowextsize == 0) + if (newlen <= i_size_read(VFS_I(dest)) && cowextsize == 0) return 0; error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp); @@ -945,10 +945,6 @@ xfs_reflink_update_dest( dest->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE; } - if (!is_dedupe) { - xfs_trans_ichgtime(tp, dest, - XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); - } xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE); error = xfs_trans_commit(tp); @@ -1112,19 +1108,28 @@ out: /* * Iteratively remap one file's extents (and holes) to another's. 
*/ -STATIC int +int xfs_reflink_remap_blocks( struct xfs_inode *src, - xfs_fileoff_t srcoff, + loff_t pos_in, struct xfs_inode *dest, - xfs_fileoff_t destoff, - xfs_filblks_t len, - xfs_off_t new_isize) + loff_t pos_out, + loff_t remap_len, + loff_t *remapped) { struct xfs_bmbt_irec imap; + xfs_fileoff_t srcoff; + xfs_fileoff_t destoff; + xfs_filblks_t len; + xfs_filblks_t range_len; + xfs_filblks_t remapped_len = 0; + xfs_off_t new_isize = pos_out + remap_len; int nimaps; int error = 0; - xfs_filblks_t range_len; + + destoff = XFS_B_TO_FSBT(src->i_mount, pos_out); + srcoff = XFS_B_TO_FSBT(src->i_mount, pos_in); + len = XFS_B_TO_FSB(src->i_mount, remap_len); /* drange = (destoff, destoff + len); srange = (srcoff, srcoff + len) */ while (len) { @@ -1139,7 +1144,7 @@ xfs_reflink_remap_blocks( error = xfs_bmapi_read(src, srcoff, len, &imap, &nimaps, 0); xfs_iunlock(src, lock_mode); if (error) - goto err; + break; ASSERT(nimaps == 1); trace_xfs_reflink_remap_imap(src, srcoff, len, XFS_IO_OVERWRITE, @@ -1153,23 +1158,24 @@ xfs_reflink_remap_blocks( error = xfs_reflink_remap_extent(dest, &imap, destoff, new_isize); if (error) - goto err; + break; if (fatal_signal_pending(current)) { error = -EINTR; - goto err; + break; } /* Advance drange/srange */ srcoff += range_len; destoff += range_len; len -= range_len; + remapped_len += range_len; } - return 0; - -err: - trace_xfs_reflink_remap_blocks_error(dest, error, _RET_IP_); + if (error) + trace_xfs_reflink_remap_blocks_error(dest, error, _RET_IP_); + *remapped = min_t(loff_t, remap_len, + XFS_FSB_TO_B(src->i_mount, remapped_len)); return error; } @@ -1218,7 +1224,7 @@ retry: } /* Unlock both inodes after they've been prepped for a range clone. */ -STATIC void +void xfs_reflink_remap_unlock( struct file *file_in, struct file *file_out) @@ -1286,21 +1292,20 @@ xfs_reflink_zero_posteof( * stale data in the destination file. Hence we reject these clone attempts with * -EINVAL in this case. */ -STATIC int +int xfs_reflink_remap_prep( struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, - u64 *len, - bool is_dedupe) + loff_t *len, + unsigned int remap_flags) { struct inode *inode_in = file_inode(file_in); struct xfs_inode *src = XFS_I(inode_in); struct inode *inode_out = file_inode(file_out); struct xfs_inode *dest = XFS_I(inode_out); bool same_inode = (inode_in == inode_out); - u64 blkmask = i_blocksize(inode_in) - 1; ssize_t ret; /* Lock both files against IO */ @@ -1323,29 +1328,11 @@ xfs_reflink_remap_prep( if (IS_DAX(inode_in) || IS_DAX(inode_out)) goto out_unlock; - ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out, - len, is_dedupe); - if (ret <= 0) + ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out, + len, remap_flags); + if (ret < 0 || *len == 0) goto out_unlock; - /* - * If the dedupe data matches, chop off the partial EOF block - * from the source file so we don't try to dedupe the partial - * EOF block. - */ - if (is_dedupe) { - *len &= ~blkmask; - } else if (*len & blkmask) { - /* - * The user is attempting to share a partial EOF block, - * if it's inside the destination EOF then reject it. - */ - if (pos_out + *len < i_size_read(inode_out)) { - ret = -EINVAL; - goto out_unlock; - } - } - /* Attach dquots to dest inode before changing block map */ ret = xfs_qm_dqattach(dest); if (ret) @@ -1365,31 +1352,9 @@ xfs_reflink_remap_prep( goto out_unlock; /* Zap any page cache for the destination file's range. 
*/ - truncate_inode_pages_range(&inode_out->i_data, pos_out, - PAGE_ALIGN(pos_out + *len) - 1); - - /* If we're altering the file contents... */ - if (!is_dedupe) { - /* - * ...update the timestamps (which will grab the ilock again - * from xfs_fs_dirty_inode, so we have to call it before we - * take the ilock). - */ - if (!(file_out->f_mode & FMODE_NOCMTIME)) { - ret = file_update_time(file_out); - if (ret) - goto out_unlock; - } - - /* - * ...clear the security bits if the process is not being run - * by root. This keeps people from modifying setuid and setgid - * binaries. - */ - ret = file_remove_privs(file_out); - if (ret) - goto out_unlock; - } + truncate_inode_pages_range(&inode_out->i_data, + round_down(pos_out, PAGE_SIZE), + round_up(pos_out + *len, PAGE_SIZE) - 1); return 1; out_unlock: @@ -1398,72 +1363,6 @@ out_unlock: } /* - * Link a range of blocks from one file to another. - */ -int -xfs_reflink_remap_range( - struct file *file_in, - loff_t pos_in, - struct file *file_out, - loff_t pos_out, - u64 len, - bool is_dedupe) -{ - struct inode *inode_in = file_inode(file_in); - struct xfs_inode *src = XFS_I(inode_in); - struct inode *inode_out = file_inode(file_out); - struct xfs_inode *dest = XFS_I(inode_out); - struct xfs_mount *mp = src->i_mount; - xfs_fileoff_t sfsbno, dfsbno; - xfs_filblks_t fsblen; - xfs_extlen_t cowextsize; - ssize_t ret; - - if (!xfs_sb_version_hasreflink(&mp->m_sb)) - return -EOPNOTSUPP; - - if (XFS_FORCED_SHUTDOWN(mp)) - return -EIO; - - /* Prepare and then clone file data. */ - ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out, - &len, is_dedupe); - if (ret <= 0) - return ret; - - trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out); - - dfsbno = XFS_B_TO_FSBT(mp, pos_out); - sfsbno = XFS_B_TO_FSBT(mp, pos_in); - fsblen = XFS_B_TO_FSB(mp, len); - ret = xfs_reflink_remap_blocks(src, sfsbno, dest, dfsbno, fsblen, - pos_out + len); - if (ret) - goto out_unlock; - - /* - * Carry the cowextsize hint from src to dest if we're sharing the - * entire source file to the entire destination file, the source file - * has a cowextsize hint, and the destination file does not. - */ - cowextsize = 0; - if (pos_in == 0 && len == i_size_read(inode_in) && - (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) && - pos_out == 0 && len >= i_size_read(inode_out) && - !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)) - cowextsize = src->i_d.di_cowextsize; - - ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize, - is_dedupe); - -out_unlock: - xfs_reflink_remap_unlock(file_in, file_out); - if (ret) - trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_); - return ret; -} - -/* * The user wants to preemptively CoW all shared blocks in this file, * which enables us to turn off the reflink flag. 
Iterate all * extents which are not prealloc/delalloc to see which ranges are diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h index 7f47202b5639..6d73daef1f13 100644 --- a/fs/xfs/xfs_reflink.h +++ b/fs/xfs/xfs_reflink.h @@ -27,13 +27,24 @@ extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset, extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset, xfs_off_t count); extern int xfs_reflink_recover_cow(struct xfs_mount *mp); -extern int xfs_reflink_remap_range(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, u64 len, bool is_dedupe); +extern loff_t xfs_reflink_remap_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, loff_t len, + unsigned int remap_flags); extern int xfs_reflink_inode_has_shared_extents(struct xfs_trans *tp, struct xfs_inode *ip, bool *has_shared); extern int xfs_reflink_clear_inode_flag(struct xfs_inode *ip, struct xfs_trans **tpp); extern int xfs_reflink_unshare(struct xfs_inode *ip, xfs_off_t offset, xfs_off_t len); +extern int xfs_reflink_remap_prep(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, loff_t *len, + unsigned int remap_flags); +extern int xfs_reflink_remap_blocks(struct xfs_inode *src, loff_t pos_in, + struct xfs_inode *dest, loff_t pos_out, loff_t remap_len, + loff_t *remapped); +extern int xfs_reflink_update_dest(struct xfs_inode *dest, xfs_off_t newlen, + xfs_extlen_t cowextsize, unsigned int remap_flags); +extern void xfs_reflink_remap_unlock(struct file *file_in, + struct file *file_out); #endif /* __XFS_REFLINK_H */ diff --git a/include/crypto/asym_tpm_subtype.h b/include/crypto/asym_tpm_subtype.h new file mode 100644 index 000000000000..48198c36d6b9 --- /dev/null +++ b/include/crypto/asym_tpm_subtype.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef _LINUX_ASYM_TPM_SUBTYPE_H +#define _LINUX_ASYM_TPM_SUBTYPE_H + +#include <linux/keyctl.h> + +struct tpm_key { + void *blob; + u32 blob_len; + uint16_t key_len; /* Size in bits of the key */ + const void *pub_key; /* pointer inside blob to the public key bytes */ + uint16_t pub_key_len; /* length of the public key */ +}; + +struct tpm_key *tpm_key_create(const void *blob, uint32_t blob_len); + +extern struct asymmetric_key_subtype asym_tpm_subtype; + +#endif /* _LINUX_ASYM_TPM_SUBTYPE_H */ diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h index e0b681a717ba..be626eac9113 100644 --- a/include/crypto/public_key.h +++ b/include/crypto/public_key.h @@ -14,6 +14,8 @@ #ifndef _LINUX_PUBLIC_KEY_H #define _LINUX_PUBLIC_KEY_H +#include <linux/keyctl.h> + /* * Cryptographic data for the public-key subtype of the asymmetric key type. 
* @@ -23,6 +25,7 @@ struct public_key { void *key; u32 keylen; + bool key_is_private; const char *id_type; const char *pkey_algo; }; @@ -40,6 +43,7 @@ struct public_key_signature { u8 digest_size; /* Number of bytes in digest */ const char *pkey_algo; const char *hash_algo; + const char *encoding; }; extern void public_key_signature_free(struct public_key_signature *sig); @@ -65,8 +69,14 @@ extern int restrict_link_by_key_or_keyring_chain(struct key *trust_keyring, const union key_payload *payload, struct key *trusted); -extern int verify_signature(const struct key *key, - const struct public_key_signature *sig); +extern int query_asymmetric_key(const struct kernel_pkey_params *, + struct kernel_pkey_query *); + +extern int encrypt_blob(struct kernel_pkey_params *, const void *, void *); +extern int decrypt_blob(struct kernel_pkey_params *, const void *, void *); +extern int create_signature(struct kernel_pkey_params *, const void *, void *); +extern int verify_signature(const struct key *, + const struct public_key_signature *); int public_key_verify_signature(const struct public_key *pkey, const struct public_key_signature *sig); diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 91a877fa00cb..9ccad6b062f2 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -82,6 +82,53 @@ enum drm_connector_status { connector_status_unknown = 3, }; +/** + * enum drm_connector_registration_status - userspace registration status for + * a &drm_connector + * + * This enum is used to track the status of initializing a connector and + * registering it with userspace, so that DRM can prevent bogus modesets on + * connectors that no longer exist. + */ +enum drm_connector_registration_state { + /** + * @DRM_CONNECTOR_INITIALIZING: The connector has just been created, + * but has yet to be exposed to userspace. There should be no + * additional restrictions to how the state of this connector may be + * modified. + */ + DRM_CONNECTOR_INITIALIZING = 0, + + /** + * @DRM_CONNECTOR_REGISTERED: The connector has been fully initialized + * and registered with sysfs, as such it has been exposed to + * userspace. There should be no additional restrictions to how the + * state of this connector may be modified. + */ + DRM_CONNECTOR_REGISTERED = 1, + + /** + * @DRM_CONNECTOR_UNREGISTERED: The connector has either been exposed + * to userspace and has since been unregistered and removed from + * userspace, or the connector was unregistered before it had a chance + * to be exposed to userspace (e.g. still in the + * @DRM_CONNECTOR_INITIALIZING state). When a connector is + * unregistered, there are additional restrictions to how its state + * may be modified: + * + * - An unregistered connector may only have its DPMS changed from + * On->Off. Once DPMS is changed to Off, it may not be switched back + * to On. + * - Modesets are not allowed on unregistered connectors, unless they + * would result in disabling its assigned CRTCs. This means + * disabling a CRTC on an unregistered connector is OK, but enabling + * one is not. + * - Removing a CRTC from an unregistered connector is OK, but new + * CRTCs may never be assigned to an unregistered connector. + */ + DRM_CONNECTOR_UNREGISTERED = 2, +}; + enum subpixel_order { SubPixelUnknown = 0, SubPixelHorizontalRGB, @@ -853,10 +900,12 @@ struct drm_connector { bool ycbcr_420_allowed; /** - * @registered: Is this connector exposed (registered) with userspace? 
+ * @registration_state: Is this connector initializing, exposed + * (registered) with userspace, or unregistered? + * * Protected by @mutex. */ - bool registered; + enum drm_connector_registration_state registration_state; /** * @modes: @@ -1166,6 +1215,24 @@ static inline void drm_connector_unreference(struct drm_connector *connector) drm_connector_put(connector); } +/** + * drm_connector_is_unregistered - has the connector been unregistered from + * userspace? + * @connector: DRM connector + * + * Checks whether or not @connector has been unregistered from userspace. + * + * Returns: + * True if the connector was unregistered, false if the connector is + * registered or has not yet been registered with userspace. + */ +static inline bool +drm_connector_is_unregistered(struct drm_connector *connector) +{ + return READ_ONCE(connector->registration_state) == + DRM_CONNECTOR_UNREGISTERED; +} + const char *drm_get_connector_status_name(enum drm_connector_status status); const char *drm_get_subpixel_order_name(enum subpixel_order order); const char *drm_get_dpms_name(int val); diff --git a/include/dt-bindings/clock/am3.h b/include/dt-bindings/clock/am3.h index b396f00e481d..86a8806e2140 100644 --- a/include/dt-bindings/clock/am3.h +++ b/include/dt-bindings/clock/am3.h @@ -16,6 +16,8 @@ #define AM3_CLKCTRL_OFFSET 0x0 #define AM3_CLKCTRL_INDEX(offset) ((offset) - AM3_CLKCTRL_OFFSET) +/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */ + /* l4_per clocks */ #define AM3_L4_PER_CLKCTRL_OFFSET 0x14 #define AM3_L4_PER_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_PER_CLKCTRL_OFFSET) @@ -105,4 +107,121 @@ #define AM3_L4_CEFUSE_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_CEFUSE_CLKCTRL_OFFSET) #define AM3_CEFUSE_CLKCTRL AM3_L4_CEFUSE_CLKCTRL_INDEX(0x20) +/* XXX: Compatibility part end */ + +/* l4ls clocks */ +#define AM3_L4LS_CLKCTRL_OFFSET 0x38 +#define AM3_L4LS_CLKCTRL_INDEX(offset) ((offset) - AM3_L4LS_CLKCTRL_OFFSET) +#define AM3_L4LS_UART6_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x38) +#define AM3_L4LS_MMC1_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x3c) +#define AM3_L4LS_ELM_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x40) +#define AM3_L4LS_I2C3_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x44) +#define AM3_L4LS_I2C2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x48) +#define AM3_L4LS_SPI0_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x4c) +#define AM3_L4LS_SPI1_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x50) +#define AM3_L4LS_L4_LS_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x60) +#define AM3_L4LS_UART2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x6c) +#define AM3_L4LS_UART3_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x70) +#define AM3_L4LS_UART4_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x74) +#define AM3_L4LS_UART5_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x78) +#define AM3_L4LS_TIMER7_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x7c) +#define AM3_L4LS_TIMER2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x80) +#define AM3_L4LS_TIMER3_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x84) +#define AM3_L4LS_TIMER4_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x88) +#define AM3_L4LS_RNG_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x90) +#define AM3_L4LS_GPIO2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xac) +#define AM3_L4LS_GPIO3_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xb0) +#define AM3_L4LS_GPIO4_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xb4) +#define AM3_L4LS_D_CAN0_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xc0) +#define AM3_L4LS_D_CAN1_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xc4) +#define AM3_L4LS_EPWMSS1_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xcc) +#define AM3_L4LS_EPWMSS0_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xd4) +#define AM3_L4LS_EPWMSS2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xd8) +#define AM3_L4LS_TIMER5_CLKCTRL 
AM3_L4LS_CLKCTRL_INDEX(0xec) +#define AM3_L4LS_TIMER6_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xf0) +#define AM3_L4LS_MMC2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xf4) +#define AM3_L4LS_SPINLOCK_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x10c) +#define AM3_L4LS_MAILBOX_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x110) +#define AM3_L4LS_OCPWP_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x130) + +/* l3s clocks */ +#define AM3_L3S_CLKCTRL_OFFSET 0x1c +#define AM3_L3S_CLKCTRL_INDEX(offset) ((offset) - AM3_L3S_CLKCTRL_OFFSET) +#define AM3_L3S_USB_OTG_HS_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0x1c) +#define AM3_L3S_GPMC_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0x30) +#define AM3_L3S_MCASP0_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0x34) +#define AM3_L3S_MCASP1_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0x68) +#define AM3_L3S_MMC3_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0xf8) + +/* l3 clocks */ +#define AM3_L3_CLKCTRL_OFFSET 0x24 +#define AM3_L3_CLKCTRL_INDEX(offset) ((offset) - AM3_L3_CLKCTRL_OFFSET) +#define AM3_L3_TPTC0_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x24) +#define AM3_L3_EMIF_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x28) +#define AM3_L3_OCMCRAM_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x2c) +#define AM3_L3_AES_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x94) +#define AM3_L3_SHAM_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xa0) +#define AM3_L3_TPCC_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xbc) +#define AM3_L3_L3_INSTR_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xdc) +#define AM3_L3_L3_MAIN_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xe0) +#define AM3_L3_TPTC1_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xfc) +#define AM3_L3_TPTC2_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x100) + +/* l4hs clocks */ +#define AM3_L4HS_CLKCTRL_OFFSET 0x120 +#define AM3_L4HS_CLKCTRL_INDEX(offset) ((offset) - AM3_L4HS_CLKCTRL_OFFSET) +#define AM3_L4HS_L4_HS_CLKCTRL AM3_L4HS_CLKCTRL_INDEX(0x120) + +/* pruss_ocp clocks */ +#define AM3_PRUSS_OCP_CLKCTRL_OFFSET 0xe8 +#define AM3_PRUSS_OCP_CLKCTRL_INDEX(offset) ((offset) - AM3_PRUSS_OCP_CLKCTRL_OFFSET) +#define AM3_PRUSS_OCP_PRUSS_CLKCTRL AM3_PRUSS_OCP_CLKCTRL_INDEX(0xe8) + +/* cpsw_125mhz clocks */ +#define AM3_CPSW_125MHZ_CPGMAC0_CLKCTRL AM3_CLKCTRL_INDEX(0x14) + +/* lcdc clocks */ +#define AM3_LCDC_CLKCTRL_OFFSET 0x18 +#define AM3_LCDC_CLKCTRL_INDEX(offset) ((offset) - AM3_LCDC_CLKCTRL_OFFSET) +#define AM3_LCDC_LCDC_CLKCTRL AM3_LCDC_CLKCTRL_INDEX(0x18) + +/* clk_24mhz clocks */ +#define AM3_CLK_24MHZ_CLKCTRL_OFFSET 0x14c +#define AM3_CLK_24MHZ_CLKCTRL_INDEX(offset) ((offset) - AM3_CLK_24MHZ_CLKCTRL_OFFSET) +#define AM3_CLK_24MHZ_CLKDIV32K_CLKCTRL AM3_CLK_24MHZ_CLKCTRL_INDEX(0x14c) + +/* l4_wkup clocks */ +#define AM3_L4_WKUP_CONTROL_CLKCTRL AM3_CLKCTRL_INDEX(0x4) +#define AM3_L4_WKUP_GPIO1_CLKCTRL AM3_CLKCTRL_INDEX(0x8) +#define AM3_L4_WKUP_L4_WKUP_CLKCTRL AM3_CLKCTRL_INDEX(0xc) +#define AM3_L4_WKUP_UART1_CLKCTRL AM3_CLKCTRL_INDEX(0xb4) +#define AM3_L4_WKUP_I2C1_CLKCTRL AM3_CLKCTRL_INDEX(0xb8) +#define AM3_L4_WKUP_ADC_TSC_CLKCTRL AM3_CLKCTRL_INDEX(0xbc) +#define AM3_L4_WKUP_SMARTREFLEX0_CLKCTRL AM3_CLKCTRL_INDEX(0xc0) +#define AM3_L4_WKUP_TIMER1_CLKCTRL AM3_CLKCTRL_INDEX(0xc4) +#define AM3_L4_WKUP_SMARTREFLEX1_CLKCTRL AM3_CLKCTRL_INDEX(0xc8) +#define AM3_L4_WKUP_WD_TIMER2_CLKCTRL AM3_CLKCTRL_INDEX(0xd4) + +/* l3_aon clocks */ +#define AM3_L3_AON_CLKCTRL_OFFSET 0x14 +#define AM3_L3_AON_CLKCTRL_INDEX(offset) ((offset) - AM3_L3_AON_CLKCTRL_OFFSET) +#define AM3_L3_AON_DEBUGSS_CLKCTRL AM3_L3_AON_CLKCTRL_INDEX(0x14) + +/* l4_wkup_aon clocks */ +#define AM3_L4_WKUP_AON_CLKCTRL_OFFSET 0xb0 +#define AM3_L4_WKUP_AON_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_WKUP_AON_CLKCTRL_OFFSET) +#define AM3_L4_WKUP_AON_WKUP_M3_CLKCTRL AM3_L4_WKUP_AON_CLKCTRL_INDEX(0xb0) + +/* mpu clocks */ +#define 
AM3_MPU_MPU_CLKCTRL AM3_CLKCTRL_INDEX(0x4) + +/* l4_rtc clocks */ +#define AM3_L4_RTC_RTC_CLKCTRL AM3_CLKCTRL_INDEX(0x0) + +/* gfx_l3 clocks */ +#define AM3_GFX_L3_GFX_CLKCTRL AM3_CLKCTRL_INDEX(0x4) + +/* l4_cefuse clocks */ +#define AM3_L4_CEFUSE_CEFUSE_CLKCTRL AM3_CLKCTRL_INDEX(0x20) + #endif diff --git a/include/dt-bindings/clock/am4.h b/include/dt-bindings/clock/am4.h index d21df00b3270..0f545b5afd60 100644 --- a/include/dt-bindings/clock/am4.h +++ b/include/dt-bindings/clock/am4.h @@ -16,6 +16,8 @@ #define AM4_CLKCTRL_OFFSET 0x20 #define AM4_CLKCTRL_INDEX(offset) ((offset) - AM4_CLKCTRL_OFFSET) +/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */ + /* l4_wkup clocks */ #define AM4_ADC_TSC_CLKCTRL AM4_CLKCTRL_INDEX(0x120) #define AM4_L4_WKUP_CLKCTRL AM4_CLKCTRL_INDEX(0x220) @@ -110,4 +112,134 @@ #define AM4_DSS_CORE_CLKCTRL AM4_CLKCTRL_INDEX(0xa20) #define AM4_CPGMAC0_CLKCTRL AM4_CLKCTRL_INDEX(0xb20) +/* XXX: Compatibility part end. */ + +/* l3s_tsc clocks */ +#define AM4_L3S_TSC_CLKCTRL_OFFSET 0x120 +#define AM4_L3S_TSC_CLKCTRL_INDEX(offset) ((offset) - AM4_L3S_TSC_CLKCTRL_OFFSET) +#define AM4_L3S_TSC_ADC_TSC_CLKCTRL AM4_L3S_TSC_CLKCTRL_INDEX(0x120) + +/* l4_wkup_aon clocks */ +#define AM4_L4_WKUP_AON_CLKCTRL_OFFSET 0x228 +#define AM4_L4_WKUP_AON_CLKCTRL_INDEX(offset) ((offset) - AM4_L4_WKUP_AON_CLKCTRL_OFFSET) +#define AM4_L4_WKUP_AON_WKUP_M3_CLKCTRL AM4_L4_WKUP_AON_CLKCTRL_INDEX(0x228) +#define AM4_L4_WKUP_AON_COUNTER_32K_CLKCTRL AM4_L4_WKUP_AON_CLKCTRL_INDEX(0x230) + +/* l4_wkup clocks */ +#define AM4_L4_WKUP_CLKCTRL_OFFSET 0x220 +#define AM4_L4_WKUP_CLKCTRL_INDEX(offset) ((offset) - AM4_L4_WKUP_CLKCTRL_OFFSET) +#define AM4_L4_WKUP_L4_WKUP_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x220) +#define AM4_L4_WKUP_TIMER1_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x328) +#define AM4_L4_WKUP_WD_TIMER2_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x338) +#define AM4_L4_WKUP_I2C1_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x340) +#define AM4_L4_WKUP_UART1_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x348) +#define AM4_L4_WKUP_SMARTREFLEX0_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x350) +#define AM4_L4_WKUP_SMARTREFLEX1_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x358) +#define AM4_L4_WKUP_CONTROL_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x360) +#define AM4_L4_WKUP_GPIO1_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x368) + +/* mpu clocks */ +#define AM4_MPU_MPU_CLKCTRL AM4_CLKCTRL_INDEX(0x20) + +/* gfx_l3 clocks */ +#define AM4_GFX_L3_GFX_CLKCTRL AM4_CLKCTRL_INDEX(0x20) + +/* l4_rtc clocks */ +#define AM4_L4_RTC_RTC_CLKCTRL AM4_CLKCTRL_INDEX(0x20) + +/* l3 clocks */ +#define AM4_L3_L3_MAIN_CLKCTRL AM4_CLKCTRL_INDEX(0x20) +#define AM4_L3_AES_CLKCTRL AM4_CLKCTRL_INDEX(0x28) +#define AM4_L3_DES_CLKCTRL AM4_CLKCTRL_INDEX(0x30) +#define AM4_L3_L3_INSTR_CLKCTRL AM4_CLKCTRL_INDEX(0x40) +#define AM4_L3_OCMCRAM_CLKCTRL AM4_CLKCTRL_INDEX(0x50) +#define AM4_L3_SHAM_CLKCTRL AM4_CLKCTRL_INDEX(0x58) +#define AM4_L3_TPCC_CLKCTRL AM4_CLKCTRL_INDEX(0x78) +#define AM4_L3_TPTC0_CLKCTRL AM4_CLKCTRL_INDEX(0x80) +#define AM4_L3_TPTC1_CLKCTRL AM4_CLKCTRL_INDEX(0x88) +#define AM4_L3_TPTC2_CLKCTRL AM4_CLKCTRL_INDEX(0x90) +#define AM4_L3_L4_HS_CLKCTRL AM4_CLKCTRL_INDEX(0xa0) + +/* l3s clocks */ +#define AM4_L3S_CLKCTRL_OFFSET 0x68 +#define AM4_L3S_CLKCTRL_INDEX(offset) ((offset) - AM4_L3S_CLKCTRL_OFFSET) +#define AM4_L3S_VPFE0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x68) +#define AM4_L3S_VPFE1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x70) +#define AM4_L3S_GPMC_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x220) +#define AM4_L3S_MCASP0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x238) +#define 
AM4_L3S_MCASP1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x240) +#define AM4_L3S_MMC3_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x248) +#define AM4_L3S_QSPI_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x258) +#define AM4_L3S_USB_OTG_SS0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x260) +#define AM4_L3S_USB_OTG_SS1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x268) + +/* pruss_ocp clocks */ +#define AM4_PRUSS_OCP_CLKCTRL_OFFSET 0x320 +#define AM4_PRUSS_OCP_CLKCTRL_INDEX(offset) ((offset) - AM4_PRUSS_OCP_CLKCTRL_OFFSET) +#define AM4_PRUSS_OCP_PRUSS_CLKCTRL AM4_PRUSS_OCP_CLKCTRL_INDEX(0x320) + +/* l4ls clocks */ +#define AM4_L4LS_CLKCTRL_OFFSET 0x420 +#define AM4_L4LS_CLKCTRL_INDEX(offset) ((offset) - AM4_L4LS_CLKCTRL_OFFSET) +#define AM4_L4LS_L4_LS_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x420) +#define AM4_L4LS_D_CAN0_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x428) +#define AM4_L4LS_D_CAN1_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x430) +#define AM4_L4LS_EPWMSS0_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x438) +#define AM4_L4LS_EPWMSS1_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x440) +#define AM4_L4LS_EPWMSS2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x448) +#define AM4_L4LS_EPWMSS3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x450) +#define AM4_L4LS_EPWMSS4_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x458) +#define AM4_L4LS_EPWMSS5_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x460) +#define AM4_L4LS_ELM_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x468) +#define AM4_L4LS_GPIO2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x478) +#define AM4_L4LS_GPIO3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x480) +#define AM4_L4LS_GPIO4_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x488) +#define AM4_L4LS_GPIO5_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x490) +#define AM4_L4LS_GPIO6_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x498) +#define AM4_L4LS_HDQ1W_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4a0) +#define AM4_L4LS_I2C2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4a8) +#define AM4_L4LS_I2C3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4b0) +#define AM4_L4LS_MAILBOX_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4b8) +#define AM4_L4LS_MMC1_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4c0) +#define AM4_L4LS_MMC2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4c8) +#define AM4_L4LS_RNG_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4e0) +#define AM4_L4LS_SPI0_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x500) +#define AM4_L4LS_SPI1_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x508) +#define AM4_L4LS_SPI2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x510) +#define AM4_L4LS_SPI3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x518) +#define AM4_L4LS_SPI4_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x520) +#define AM4_L4LS_SPINLOCK_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x528) +#define AM4_L4LS_TIMER2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x530) +#define AM4_L4LS_TIMER3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x538) +#define AM4_L4LS_TIMER4_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x540) +#define AM4_L4LS_TIMER5_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x548) +#define AM4_L4LS_TIMER6_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x550) +#define AM4_L4LS_TIMER7_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x558) +#define AM4_L4LS_TIMER8_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x560) +#define AM4_L4LS_TIMER9_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x568) +#define AM4_L4LS_TIMER10_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x570) +#define AM4_L4LS_TIMER11_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x578) +#define AM4_L4LS_UART2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x580) +#define AM4_L4LS_UART3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x588) +#define AM4_L4LS_UART4_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x590) +#define AM4_L4LS_UART5_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x598) +#define AM4_L4LS_UART6_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x5a0) +#define AM4_L4LS_OCP2SCP0_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x5b8) +#define AM4_L4LS_OCP2SCP1_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x5c0) + +/* emif clocks */ +#define AM4_EMIF_CLKCTRL_OFFSET 0x720 +#define 
AM4_EMIF_CLKCTRL_INDEX(offset) ((offset) - AM4_EMIF_CLKCTRL_OFFSET) +#define AM4_EMIF_EMIF_CLKCTRL AM4_EMIF_CLKCTRL_INDEX(0x720) + +/* dss clocks */ +#define AM4_DSS_CLKCTRL_OFFSET 0xa20 +#define AM4_DSS_CLKCTRL_INDEX(offset) ((offset) - AM4_DSS_CLKCTRL_OFFSET) +#define AM4_DSS_DSS_CORE_CLKCTRL AM4_DSS_CLKCTRL_INDEX(0xa20) + +/* cpsw_125mhz clocks */ +#define AM4_CPSW_125MHZ_CLKCTRL_OFFSET 0xb20 +#define AM4_CPSW_125MHZ_CLKCTRL_INDEX(offset) ((offset) - AM4_CPSW_125MHZ_CLKCTRL_OFFSET) +#define AM4_CPSW_125MHZ_CPGMAC0_CLKCTRL AM4_CPSW_125MHZ_CLKCTRL_INDEX(0xb20) + #endif diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h index ab3ee241d10c..ed30da28d820 100644 --- a/include/dt-bindings/clock/at91.h +++ b/include/dt-bindings/clock/at91.h @@ -9,6 +9,20 @@ #ifndef _DT_BINDINGS_CLK_AT91_H #define _DT_BINDINGS_CLK_AT91_H +#define PMC_TYPE_CORE 0 +#define PMC_TYPE_SYSTEM 1 +#define PMC_TYPE_PERIPHERAL 2 +#define PMC_TYPE_GCK 3 + +#define PMC_SLOW 0 +#define PMC_MCK 1 +#define PMC_UTMI 2 +#define PMC_MAIN 3 +#define PMC_MCK2 4 +#define PMC_I2S0_MUX 5 +#define PMC_I2S1_MUX 6 + +#ifndef AT91_PMC_MOSCS #define AT91_PMC_MOSCS 0 /* MOSCS Flag */ #define AT91_PMC_LOCKA 1 /* PLLA Lock */ #define AT91_PMC_LOCKB 2 /* PLLB Lock */ @@ -19,5 +33,6 @@ #define AT91_PMC_MOSCRCS 17 /* Main On-Chip RC */ #define AT91_PMC_CFDEV 18 /* Clock Failure Detector Event */ #define AT91_PMC_GCKRDY 24 /* Generated Clocks */ +#endif #endif diff --git a/include/dt-bindings/clock/dra7.h b/include/dt-bindings/clock/dra7.h index d7549c57cac3..ec969b5aeb25 100644 --- a/include/dt-bindings/clock/dra7.h +++ b/include/dt-bindings/clock/dra7.h @@ -16,19 +16,21 @@ #define DRA7_CLKCTRL_OFFSET 0x20 #define DRA7_CLKCTRL_INDEX(offset) ((offset) - DRA7_CLKCTRL_OFFSET) +/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */ + /* mpu clocks */ #define DRA7_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) /* ipu clocks */ -#define DRA7_IPU_CLKCTRL_OFFSET 0x40 -#define DRA7_IPU_CLKCTRL_INDEX(offset) ((offset) - DRA7_IPU_CLKCTRL_OFFSET) -#define DRA7_MCASP1_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x50) -#define DRA7_TIMER5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x58) -#define DRA7_TIMER6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x60) -#define DRA7_TIMER7_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x68) -#define DRA7_TIMER8_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x70) -#define DRA7_I2C5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x78) -#define DRA7_UART6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x80) +#define _DRA7_IPU_CLKCTRL_OFFSET 0x40 +#define _DRA7_IPU_CLKCTRL_INDEX(offset) ((offset) - _DRA7_IPU_CLKCTRL_OFFSET) +#define DRA7_MCASP1_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x50) +#define DRA7_TIMER5_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x58) +#define DRA7_TIMER6_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x60) +#define DRA7_TIMER7_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x68) +#define DRA7_TIMER8_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x70) +#define DRA7_I2C5_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x78) +#define DRA7_UART6_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x80) /* rtc clocks */ #define DRA7_RTC_CLKCTRL_OFFSET 0x40 @@ -99,65 +101,65 @@ #define DRA7_USB_OTG_SS1_CLKCTRL DRA7_CLKCTRL_INDEX(0xf0) /* l4per clocks */ -#define DRA7_L4PER_CLKCTRL_OFFSET 0x0 -#define DRA7_L4PER_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4PER_CLKCTRL_OFFSET) -#define DRA7_L4_PER2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc) -#define DRA7_L4_PER3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x14) -#define DRA7_TIMER10_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x28) -#define DRA7_TIMER11_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x30) -#define DRA7_TIMER2_CLKCTRL 
DRA7_L4PER_CLKCTRL_INDEX(0x38) -#define DRA7_TIMER3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x40) -#define DRA7_TIMER4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x48) -#define DRA7_TIMER9_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x50) -#define DRA7_ELM_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x58) -#define DRA7_GPIO2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x60) -#define DRA7_GPIO3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x68) -#define DRA7_GPIO4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x70) -#define DRA7_GPIO5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x78) -#define DRA7_GPIO6_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x80) -#define DRA7_HDQ1W_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x88) -#define DRA7_EPWMSS1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x90) -#define DRA7_EPWMSS2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x98) -#define DRA7_I2C1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa0) -#define DRA7_I2C2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa8) -#define DRA7_I2C3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb0) -#define DRA7_I2C4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb8) -#define DRA7_L4_PER1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc0) -#define DRA7_EPWMSS0_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc4) -#define DRA7_TIMER13_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc8) -#define DRA7_TIMER14_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xd0) -#define DRA7_TIMER15_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xd8) -#define DRA7_MCSPI1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf0) -#define DRA7_MCSPI2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf8) -#define DRA7_MCSPI3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x100) -#define DRA7_MCSPI4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x108) -#define DRA7_GPIO7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x110) -#define DRA7_GPIO8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x118) -#define DRA7_MMC3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x120) -#define DRA7_MMC4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x128) -#define DRA7_TIMER16_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x130) -#define DRA7_QSPI_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x138) -#define DRA7_UART1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x140) -#define DRA7_UART2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x148) -#define DRA7_UART3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x150) -#define DRA7_UART4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x158) -#define DRA7_MCASP2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x160) -#define DRA7_MCASP3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x168) -#define DRA7_UART5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x170) -#define DRA7_MCASP5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x178) -#define DRA7_MCASP8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x190) -#define DRA7_MCASP4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x198) -#define DRA7_AES1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1a0) -#define DRA7_AES2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1a8) -#define DRA7_DES_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1b0) -#define DRA7_RNG_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1c0) -#define DRA7_SHAM_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1c8) -#define DRA7_UART7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1d0) -#define DRA7_UART8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1e0) -#define DRA7_UART9_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1e8) -#define DRA7_DCAN2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1f0) -#define DRA7_MCASP6_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x204) -#define DRA7_MCASP7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x208) +#define _DRA7_L4PER_CLKCTRL_OFFSET 0x0 +#define _DRA7_L4PER_CLKCTRL_INDEX(offset) ((offset) - _DRA7_L4PER_CLKCTRL_OFFSET) +#define DRA7_L4_PER2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc) +#define DRA7_L4_PER3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x14) +#define DRA7_TIMER10_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x28) +#define DRA7_TIMER11_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x30) +#define DRA7_TIMER2_CLKCTRL 
_DRA7_L4PER_CLKCTRL_INDEX(0x38) +#define DRA7_TIMER3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x40) +#define DRA7_TIMER4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x48) +#define DRA7_TIMER9_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x50) +#define DRA7_ELM_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x58) +#define DRA7_GPIO2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x60) +#define DRA7_GPIO3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x68) +#define DRA7_GPIO4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x70) +#define DRA7_GPIO5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x78) +#define DRA7_GPIO6_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x80) +#define DRA7_HDQ1W_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x88) +#define DRA7_EPWMSS1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x90) +#define DRA7_EPWMSS2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x98) +#define DRA7_I2C1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xa0) +#define DRA7_I2C2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xa8) +#define DRA7_I2C3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xb0) +#define DRA7_I2C4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xb8) +#define DRA7_L4_PER1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc0) +#define DRA7_EPWMSS0_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc4) +#define DRA7_TIMER13_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc8) +#define DRA7_TIMER14_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xd0) +#define DRA7_TIMER15_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xd8) +#define DRA7_MCSPI1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xf0) +#define DRA7_MCSPI2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xf8) +#define DRA7_MCSPI3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x100) +#define DRA7_MCSPI4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x108) +#define DRA7_GPIO7_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x110) +#define DRA7_GPIO8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x118) +#define DRA7_MMC3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x120) +#define DRA7_MMC4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x128) +#define DRA7_TIMER16_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x130) +#define DRA7_QSPI_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x138) +#define DRA7_UART1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x140) +#define DRA7_UART2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x148) +#define DRA7_UART3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x150) +#define DRA7_UART4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x158) +#define DRA7_MCASP2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x160) +#define DRA7_MCASP3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x168) +#define DRA7_UART5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x170) +#define DRA7_MCASP5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x178) +#define DRA7_MCASP8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x190) +#define DRA7_MCASP4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x198) +#define DRA7_AES1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1a0) +#define DRA7_AES2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1a8) +#define DRA7_DES_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1b0) +#define DRA7_RNG_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1c0) +#define DRA7_SHAM_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1c8) +#define DRA7_UART7_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1d0) +#define DRA7_UART8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1e0) +#define DRA7_UART9_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1e8) +#define DRA7_DCAN2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1f0) +#define DRA7_MCASP6_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x204) +#define DRA7_MCASP7_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x208) /* wkupaon clocks */ #define DRA7_L4_WKUP_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) @@ -170,4 +172,192 @@ #define DRA7_DCAN1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) #define DRA7_ADC_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0) +/* XXX: Compatibility part end. 
*/ + +/* mpu clocks */ +#define DRA7_MPU_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* dsp1 clocks */ +#define DRA7_DSP1_MMU0_DSP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* ipu1 clocks */ +#define DRA7_IPU1_MMU_IPU1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* ipu clocks */ +#define DRA7_IPU_CLKCTRL_OFFSET 0x50 +#define DRA7_IPU_CLKCTRL_INDEX(offset) ((offset) - DRA7_IPU_CLKCTRL_OFFSET) +#define DRA7_IPU_MCASP1_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x50) +#define DRA7_IPU_TIMER5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x58) +#define DRA7_IPU_TIMER6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x60) +#define DRA7_IPU_TIMER7_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x68) +#define DRA7_IPU_TIMER8_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x70) +#define DRA7_IPU_I2C5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x78) +#define DRA7_IPU_UART6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x80) + +/* dsp2 clocks */ +#define DRA7_DSP2_MMU0_DSP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* rtc clocks */ +#define DRA7_RTC_RTCSS_CLKCTRL DRA7_CLKCTRL_INDEX(0x44) + +/* coreaon clocks */ +#define DRA7_COREAON_SMARTREFLEX_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_COREAON_SMARTREFLEX_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x38) + +/* l3main1 clocks */ +#define DRA7_L3MAIN1_L3_MAIN_1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_L3MAIN1_GPMC_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_L3MAIN1_TPCC_CLKCTRL DRA7_CLKCTRL_INDEX(0x70) +#define DRA7_L3MAIN1_TPTC0_CLKCTRL DRA7_CLKCTRL_INDEX(0x78) +#define DRA7_L3MAIN1_TPTC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x80) +#define DRA7_L3MAIN1_VCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_L3MAIN1_VCP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x90) + +/* ipu2 clocks */ +#define DRA7_IPU2_MMU_IPU2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* dma clocks */ +#define DRA7_DMA_DMA_SYSTEM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* emif clocks */ +#define DRA7_EMIF_DMM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* atl clocks */ +#define DRA7_ATL_CLKCTRL_OFFSET 0x0 +#define DRA7_ATL_CLKCTRL_INDEX(offset) ((offset) - DRA7_ATL_CLKCTRL_OFFSET) +#define DRA7_ATL_ATL_CLKCTRL DRA7_ATL_CLKCTRL_INDEX(0x0) + +/* l4cfg clocks */ +#define DRA7_L4CFG_L4_CFG_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_L4CFG_SPINLOCK_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_L4CFG_MAILBOX1_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) +#define DRA7_L4CFG_MAILBOX2_CLKCTRL DRA7_CLKCTRL_INDEX(0x48) +#define DRA7_L4CFG_MAILBOX3_CLKCTRL DRA7_CLKCTRL_INDEX(0x50) +#define DRA7_L4CFG_MAILBOX4_CLKCTRL DRA7_CLKCTRL_INDEX(0x58) +#define DRA7_L4CFG_MAILBOX5_CLKCTRL DRA7_CLKCTRL_INDEX(0x60) +#define DRA7_L4CFG_MAILBOX6_CLKCTRL DRA7_CLKCTRL_INDEX(0x68) +#define DRA7_L4CFG_MAILBOX7_CLKCTRL DRA7_CLKCTRL_INDEX(0x70) +#define DRA7_L4CFG_MAILBOX8_CLKCTRL DRA7_CLKCTRL_INDEX(0x78) +#define DRA7_L4CFG_MAILBOX9_CLKCTRL DRA7_CLKCTRL_INDEX(0x80) +#define DRA7_L4CFG_MAILBOX10_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_L4CFG_MAILBOX11_CLKCTRL DRA7_CLKCTRL_INDEX(0x90) +#define DRA7_L4CFG_MAILBOX12_CLKCTRL DRA7_CLKCTRL_INDEX(0x98) +#define DRA7_L4CFG_MAILBOX13_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0) + +/* l3instr clocks */ +#define DRA7_L3INSTR_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_L3INSTR_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) + +/* dss clocks */ +#define DRA7_DSS_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_DSS_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) + +/* l3init clocks */ +#define DRA7_L3INIT_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_L3INIT_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) +#define DRA7_L3INIT_USB_OTG_SS2_CLKCTRL DRA7_CLKCTRL_INDEX(0x40) +#define DRA7_L3INIT_USB_OTG_SS3_CLKCTRL DRA7_CLKCTRL_INDEX(0x48) +#define 
DRA7_L3INIT_USB_OTG_SS4_CLKCTRL DRA7_CLKCTRL_INDEX(0x50) +#define DRA7_L3INIT_SATA_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_L3INIT_OCP2SCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0xe0) +#define DRA7_L3INIT_OCP2SCP3_CLKCTRL DRA7_CLKCTRL_INDEX(0xe8) +#define DRA7_L3INIT_USB_OTG_SS1_CLKCTRL DRA7_CLKCTRL_INDEX(0xf0) + +/* pcie clocks */ +#define DRA7_PCIE_CLKCTRL_OFFSET 0xb0 +#define DRA7_PCIE_CLKCTRL_INDEX(offset) ((offset) - DRA7_PCIE_CLKCTRL_OFFSET) +#define DRA7_PCIE_PCIE1_CLKCTRL DRA7_PCIE_CLKCTRL_INDEX(0xb0) +#define DRA7_PCIE_PCIE2_CLKCTRL DRA7_PCIE_CLKCTRL_INDEX(0xb8) + +/* gmac clocks */ +#define DRA7_GMAC_CLKCTRL_OFFSET 0xd0 +#define DRA7_GMAC_CLKCTRL_INDEX(offset) ((offset) - DRA7_GMAC_CLKCTRL_OFFSET) +#define DRA7_GMAC_GMAC_CLKCTRL DRA7_GMAC_CLKCTRL_INDEX(0xd0) + +/* l4per clocks */ +#define DRA7_L4PER_CLKCTRL_OFFSET 0x28 +#define DRA7_L4PER_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4PER_CLKCTRL_OFFSET) +#define DRA7_L4PER_TIMER10_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x28) +#define DRA7_L4PER_TIMER11_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x30) +#define DRA7_L4PER_TIMER2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x38) +#define DRA7_L4PER_TIMER3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x40) +#define DRA7_L4PER_TIMER4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x48) +#define DRA7_L4PER_TIMER9_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x50) +#define DRA7_L4PER_ELM_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x58) +#define DRA7_L4PER_GPIO2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x60) +#define DRA7_L4PER_GPIO3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x68) +#define DRA7_L4PER_GPIO4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x70) +#define DRA7_L4PER_GPIO5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x78) +#define DRA7_L4PER_GPIO6_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x80) +#define DRA7_L4PER_HDQ1W_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x88) +#define DRA7_L4PER_I2C1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa0) +#define DRA7_L4PER_I2C2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa8) +#define DRA7_L4PER_I2C3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb0) +#define DRA7_L4PER_I2C4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb8) +#define DRA7_L4PER_L4_PER1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc0) +#define DRA7_L4PER_MCSPI1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf0) +#define DRA7_L4PER_MCSPI2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf8) +#define DRA7_L4PER_MCSPI3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x100) +#define DRA7_L4PER_MCSPI4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x108) +#define DRA7_L4PER_GPIO7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x110) +#define DRA7_L4PER_GPIO8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x118) +#define DRA7_L4PER_MMC3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x120) +#define DRA7_L4PER_MMC4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x128) +#define DRA7_L4PER_UART1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x140) +#define DRA7_L4PER_UART2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x148) +#define DRA7_L4PER_UART3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x150) +#define DRA7_L4PER_UART4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x158) +#define DRA7_L4PER_UART5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x170) + +/* l4sec clocks */ +#define DRA7_L4SEC_CLKCTRL_OFFSET 0x1a0 +#define DRA7_L4SEC_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4SEC_CLKCTRL_OFFSET) +#define DRA7_L4SEC_AES1_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1a0) +#define DRA7_L4SEC_AES2_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1a8) +#define DRA7_L4SEC_DES_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1b0) +#define DRA7_L4SEC_RNG_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1c0) +#define DRA7_L4SEC_SHAM_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1c8) + +/* l4per2 clocks */ +#define DRA7_L4PER2_CLKCTRL_OFFSET 0xc +#define DRA7_L4PER2_CLKCTRL_INDEX(offset) ((offset) - 
DRA7_L4PER2_CLKCTRL_OFFSET) +#define DRA7_L4PER2_L4_PER2_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0xc) +#define DRA7_L4PER2_PRUSS1_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x18) +#define DRA7_L4PER2_PRUSS2_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x20) +#define DRA7_L4PER2_EPWMSS1_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x90) +#define DRA7_L4PER2_EPWMSS2_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x98) +#define DRA7_L4PER2_EPWMSS0_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0xc4) +#define DRA7_L4PER2_QSPI_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x138) +#define DRA7_L4PER2_MCASP2_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x160) +#define DRA7_L4PER2_MCASP3_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x168) +#define DRA7_L4PER2_MCASP5_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x178) +#define DRA7_L4PER2_MCASP8_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x190) +#define DRA7_L4PER2_MCASP4_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x198) +#define DRA7_L4PER2_UART7_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x1d0) +#define DRA7_L4PER2_UART8_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x1e0) +#define DRA7_L4PER2_UART9_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x1e8) +#define DRA7_L4PER2_DCAN2_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x1f0) +#define DRA7_L4PER2_MCASP6_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x204) +#define DRA7_L4PER2_MCASP7_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x208) + +/* l4per3 clocks */ +#define DRA7_L4PER3_CLKCTRL_OFFSET 0x14 +#define DRA7_L4PER3_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4PER3_CLKCTRL_OFFSET) +#define DRA7_L4PER3_L4_PER3_CLKCTRL DRA7_L4PER3_CLKCTRL_INDEX(0x14) +#define DRA7_L4PER3_TIMER13_CLKCTRL DRA7_L4PER3_CLKCTRL_INDEX(0xc8) +#define DRA7_L4PER3_TIMER14_CLKCTRL DRA7_L4PER3_CLKCTRL_INDEX(0xd0) +#define DRA7_L4PER3_TIMER15_CLKCTRL DRA7_L4PER3_CLKCTRL_INDEX(0xd8) +#define DRA7_L4PER3_TIMER16_CLKCTRL DRA7_L4PER3_CLKCTRL_INDEX(0x130) + +/* wkupaon clocks */ +#define DRA7_WKUPAON_L4_WKUP_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_WKUPAON_WD_TIMER2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) +#define DRA7_WKUPAON_GPIO1_CLKCTRL DRA7_CLKCTRL_INDEX(0x38) +#define DRA7_WKUPAON_TIMER1_CLKCTRL DRA7_CLKCTRL_INDEX(0x40) +#define DRA7_WKUPAON_TIMER12_CLKCTRL DRA7_CLKCTRL_INDEX(0x48) +#define DRA7_WKUPAON_COUNTER_32K_CLKCTRL DRA7_CLKCTRL_INDEX(0x50) +#define DRA7_WKUPAON_UART10_CLKCTRL DRA7_CLKCTRL_INDEX(0x80) +#define DRA7_WKUPAON_DCAN1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_WKUPAON_ADC_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0) + #endif diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h index 5b1d68512360..a0439ce8e8d3 100644 --- a/include/dt-bindings/clock/exynos4.h +++ b/include/dt-bindings/clock/exynos4.h @@ -187,32 +187,6 @@ #define CLK_MIPI_HSI 349 /* Exynos4210 only */ #define CLK_PIXELASYNCM0 351 #define CLK_PIXELASYNCM1 352 -#define CLK_FIMC_LITE0 353 /* Exynos4x12 only */ -#define CLK_FIMC_LITE1 354 /* Exynos4x12 only */ -#define CLK_PPMUISPX 355 /* Exynos4x12 only */ -#define CLK_PPMUISPMX 356 /* Exynos4x12 only */ -#define CLK_FIMC_ISP 357 /* Exynos4x12 only */ -#define CLK_FIMC_DRC 358 /* Exynos4x12 only */ -#define CLK_FIMC_FD 359 /* Exynos4x12 only */ -#define CLK_MCUISP 360 /* Exynos4x12 only */ -#define CLK_GICISP 361 /* Exynos4x12 only */ -#define CLK_SMMU_ISP 362 /* Exynos4x12 only */ -#define CLK_SMMU_DRC 363 /* Exynos4x12 only */ -#define CLK_SMMU_FD 364 /* Exynos4x12 only */ -#define CLK_SMMU_LITE0 365 /* Exynos4x12 only */ -#define CLK_SMMU_LITE1 366 /* Exynos4x12 only */ -#define CLK_MCUCTL_ISP 367 /* Exynos4x12 only */ -#define CLK_MPWM_ISP 368 /* Exynos4x12 only */ -#define CLK_I2C0_ISP 369 /* Exynos4x12 only */ -#define CLK_I2C1_ISP 370 /* Exynos4x12 only 
*/ -#define CLK_MTCADC_ISP 371 /* Exynos4x12 only */ -#define CLK_PWM_ISP 372 /* Exynos4x12 only */ -#define CLK_WDT_ISP 373 /* Exynos4x12 only */ -#define CLK_UART_ISP 374 /* Exynos4x12 only */ -#define CLK_ASYNCAXIM 375 /* Exynos4x12 only */ -#define CLK_SMMU_ISPCX 376 /* Exynos4x12 only */ -#define CLK_SPI0_ISP 377 /* Exynos4x12 only */ -#define CLK_SPI1_ISP 378 /* Exynos4x12 only */ #define CLK_PWM_ISP_SCLK 379 /* Exynos4x12 only */ #define CLK_SPI0_ISP_SCLK 380 /* Exynos4x12 only */ #define CLK_SPI1_ISP_SCLK 381 /* Exynos4x12 only */ @@ -254,10 +228,6 @@ #define CLK_PPMUACP 415 /* div clocks */ -#define CLK_DIV_ISP0 450 /* Exynos4x12 only */ -#define CLK_DIV_ISP1 451 /* Exynos4x12 only */ -#define CLK_DIV_MCUISP0 452 /* Exynos4x12 only */ -#define CLK_DIV_MCUISP1 453 /* Exynos4x12 only */ #define CLK_DIV_ACLK200 454 /* Exynos4x12 only */ #define CLK_DIV_ACLK400_MCUISP 455 /* Exynos4x12 only */ #define CLK_DIV_ACP 456 diff --git a/include/dt-bindings/clock/hi3670-clock.h b/include/dt-bindings/clock/hi3670-clock.h new file mode 100644 index 000000000000..fa48583f87d6 --- /dev/null +++ b/include/dt-bindings/clock/hi3670-clock.h @@ -0,0 +1,348 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Device Tree binding constants for HiSilicon Hi3670 SoC + * + * Copyright (c) 2001-2021, Huawei Tech. Co., Ltd. + * Copyright (c) 2018 Linaro Ltd. + */ + +#ifndef __DT_BINDINGS_CLOCK_HI3670_H +#define __DT_BINDINGS_CLOCK_HI3670_H + +/* clk in stub clock */ +#define HI3670_CLK_STUB_CLUSTER0 0 +#define HI3670_CLK_STUB_CLUSTER1 1 +#define HI3670_CLK_STUB_GPU 2 +#define HI3670_CLK_STUB_DDR 3 +#define HI3670_CLK_STUB_DDR_VOTE 4 +#define HI3670_CLK_STUB_DDR_LIMIT 5 +#define HI3670_CLK_STUB_NUM 6 + +/* clk in crg clock */ +#define HI3670_CLKIN_SYS 0 +#define HI3670_CLKIN_REF 1 +#define HI3670_CLK_FLL_SRC 2 +#define HI3670_CLK_PPLL0 3 +#define HI3670_CLK_PPLL1 4 +#define HI3670_CLK_PPLL2 5 +#define HI3670_CLK_PPLL3 6 +#define HI3670_CLK_PPLL4 7 +#define HI3670_CLK_PPLL6 8 +#define HI3670_CLK_PPLL7 9 +#define HI3670_CLK_PPLL_PCIE 10 +#define HI3670_CLK_PCIEPLL_REV 11 +#define HI3670_CLK_SCPLL 12 +#define HI3670_PCLK 13 +#define HI3670_CLK_UART0_DBG 14 +#define HI3670_CLK_UART6 15 +#define HI3670_OSC32K 16 +#define HI3670_OSC19M 17 +#define HI3670_CLK_480M 18 +#define HI3670_CLK_INVALID 19 +#define HI3670_CLK_DIV_SYSBUS 20 +#define HI3670_CLK_FACTOR_MMC 21 +#define HI3670_CLK_SD_SYS 22 +#define HI3670_CLK_SDIO_SYS 23 +#define HI3670_CLK_DIV_A53HPM 24 +#define HI3670_CLK_DIV_320M 25 +#define HI3670_PCLK_GATE_UART0 26 +#define HI3670_CLK_FACTOR_UART0 27 +#define HI3670_CLK_FACTOR_USB3PHY_PLL 28 +#define HI3670_CLK_GATE_ABB_USB 29 +#define HI3670_CLK_GATE_UFSPHY_REF 30 +#define HI3670_ICS_VOLT_HIGH 31 +#define HI3670_ICS_VOLT_MIDDLE 32 +#define HI3670_VENC_VOLT_HOLD 33 +#define HI3670_VDEC_VOLT_HOLD 34 +#define HI3670_EDC_VOLT_HOLD 35 +#define HI3670_CLK_ISP_SNCLK_FAC 36 +#define HI3670_CLK_FACTOR_RXDPHY 37 +#define HI3670_AUTODIV_SYSBUS 38 +#define HI3670_AUTODIV_EMMC0BUS 39 +#define HI3670_PCLK_ANDGT_MMC1_PCIE 40 +#define HI3670_CLK_GATE_VCODECBUS_GT 41 +#define HI3670_CLK_ANDGT_SD 42 +#define HI3670_CLK_SD_SYS_GT 43 +#define HI3670_CLK_ANDGT_SDIO 44 +#define HI3670_CLK_SDIO_SYS_GT 45 +#define HI3670_CLK_A53HPM_ANDGT 46 +#define HI3670_CLK_320M_PLL_GT 47 +#define HI3670_CLK_ANDGT_UARTH 48 +#define HI3670_CLK_ANDGT_UARTL 49 +#define HI3670_CLK_ANDGT_UART0 50 +#define HI3670_CLK_ANDGT_SPI 51 +#define HI3670_CLK_ANDGT_PCIEAXI 52 +#define HI3670_CLK_DIV_AO_ASP_GT 53 +#define HI3670_CLK_GATE_CSI_TRANS 54 +#define 
HI3670_CLK_GATE_DSI_TRANS 55 +#define HI3670_CLK_ANDGT_PTP 56 +#define HI3670_CLK_ANDGT_OUT0 57 +#define HI3670_CLK_ANDGT_OUT1 58 +#define HI3670_CLKGT_DP_AUDIO_PLL_AO 59 +#define HI3670_CLK_ANDGT_VDEC 60 +#define HI3670_CLK_ANDGT_VENC 61 +#define HI3670_CLK_ISP_SNCLK_ANGT 62 +#define HI3670_CLK_ANDGT_RXDPHY 63 +#define HI3670_CLK_ANDGT_ICS 64 +#define HI3670_AUTODIV_DMABUS 65 +#define HI3670_CLK_MUX_SYSBUS 66 +#define HI3670_CLK_MUX_VCODECBUS 67 +#define HI3670_CLK_MUX_SD_SYS 68 +#define HI3670_CLK_MUX_SD_PLL 69 +#define HI3670_CLK_MUX_SDIO_SYS 70 +#define HI3670_CLK_MUX_SDIO_PLL 71 +#define HI3670_CLK_MUX_A53HPM 72 +#define HI3670_CLK_MUX_320M 73 +#define HI3670_CLK_MUX_UARTH 74 +#define HI3670_CLK_MUX_UARTL 75 +#define HI3670_CLK_MUX_UART0 76 +#define HI3670_CLK_MUX_I2C 77 +#define HI3670_CLK_MUX_SPI 78 +#define HI3670_CLK_MUX_PCIEAXI 79 +#define HI3670_CLK_MUX_AO_ASP 80 +#define HI3670_CLK_MUX_VDEC 81 +#define HI3670_CLK_MUX_VENC 82 +#define HI3670_CLK_ISP_SNCLK_MUX0 83 +#define HI3670_CLK_ISP_SNCLK_MUX1 84 +#define HI3670_CLK_ISP_SNCLK_MUX2 85 +#define HI3670_CLK_MUX_RXDPHY_CFG 86 +#define HI3670_CLK_MUX_ICS 87 +#define HI3670_CLK_DIV_CFGBUS 88 +#define HI3670_CLK_DIV_MMC0BUS 89 +#define HI3670_CLK_DIV_MMC1BUS 90 +#define HI3670_PCLK_DIV_MMC1_PCIE 91 +#define HI3670_CLK_DIV_VCODECBUS 92 +#define HI3670_CLK_DIV_SD 93 +#define HI3670_CLK_DIV_SDIO 94 +#define HI3670_CLK_DIV_UARTH 95 +#define HI3670_CLK_DIV_UARTL 96 +#define HI3670_CLK_DIV_UART0 97 +#define HI3670_CLK_DIV_I2C 98 +#define HI3670_CLK_DIV_SPI 99 +#define HI3670_CLK_DIV_PCIEAXI 100 +#define HI3670_CLK_DIV_AO_ASP 101 +#define HI3670_CLK_DIV_CSI_TRANS 102 +#define HI3670_CLK_DIV_DSI_TRANS 103 +#define HI3670_CLK_DIV_PTP 104 +#define HI3670_CLK_DIV_CLKOUT0_PLL 105 +#define HI3670_CLK_DIV_CLKOUT1_PLL 106 +#define HI3670_CLKDIV_DP_AUDIO_PLL_AO 107 +#define HI3670_CLK_DIV_VDEC 108 +#define HI3670_CLK_DIV_VENC 109 +#define HI3670_CLK_ISP_SNCLK_DIV0 110 +#define HI3670_CLK_ISP_SNCLK_DIV1 111 +#define HI3670_CLK_ISP_SNCLK_DIV2 112 +#define HI3670_CLK_DIV_ICS 113 +#define HI3670_PPLL1_EN_ACPU 114 +#define HI3670_PPLL2_EN_ACPU 115 +#define HI3670_PPLL3_EN_ACPU 116 +#define HI3670_PPLL1_GT_CPU 117 +#define HI3670_PPLL2_GT_CPU 118 +#define HI3670_PPLL3_GT_CPU 119 +#define HI3670_CLK_GATE_PPLL2_MEDIA 120 +#define HI3670_CLK_GATE_PPLL3_MEDIA 121 +#define HI3670_CLK_GATE_PPLL4_MEDIA 122 +#define HI3670_CLK_GATE_PPLL6_MEDIA 123 +#define HI3670_CLK_GATE_PPLL7_MEDIA 124 +#define HI3670_PCLK_GPIO0 125 +#define HI3670_PCLK_GPIO1 126 +#define HI3670_PCLK_GPIO2 127 +#define HI3670_PCLK_GPIO3 128 +#define HI3670_PCLK_GPIO4 129 +#define HI3670_PCLK_GPIO5 130 +#define HI3670_PCLK_GPIO6 131 +#define HI3670_PCLK_GPIO7 132 +#define HI3670_PCLK_GPIO8 133 +#define HI3670_PCLK_GPIO9 134 +#define HI3670_PCLK_GPIO10 135 +#define HI3670_PCLK_GPIO11 136 +#define HI3670_PCLK_GPIO12 137 +#define HI3670_PCLK_GPIO13 138 +#define HI3670_PCLK_GPIO14 139 +#define HI3670_PCLK_GPIO15 140 +#define HI3670_PCLK_GPIO16 141 +#define HI3670_PCLK_GPIO17 142 +#define HI3670_PCLK_GPIO20 143 +#define HI3670_PCLK_GPIO21 144 +#define HI3670_PCLK_GATE_DSI0 145 +#define HI3670_PCLK_GATE_DSI1 146 +#define HI3670_HCLK_GATE_USB3OTG 147 +#define HI3670_ACLK_GATE_USB3DVFS 148 +#define HI3670_HCLK_GATE_SDIO 149 +#define HI3670_PCLK_GATE_PCIE_SYS 150 +#define HI3670_PCLK_GATE_PCIE_PHY 151 +#define HI3670_PCLK_GATE_MMC1_PCIE 152 +#define HI3670_PCLK_GATE_MMC0_IOC 153 +#define HI3670_PCLK_GATE_MMC1_IOC 154 +#define HI3670_CLK_GATE_DMAC 155 +#define HI3670_CLK_GATE_VCODECBUS2DDR 156 +#define 
HI3670_CLK_CCI400_BYPASS 157 +#define HI3670_CLK_GATE_CCI400 158 +#define HI3670_CLK_GATE_SD 159 +#define HI3670_HCLK_GATE_SD 160 +#define HI3670_CLK_GATE_SDIO 161 +#define HI3670_CLK_GATE_A57HPM 162 +#define HI3670_CLK_GATE_A53HPM 163 +#define HI3670_CLK_GATE_PA_A53 164 +#define HI3670_CLK_GATE_PA_A57 165 +#define HI3670_CLK_GATE_PA_G3D 166 +#define HI3670_CLK_GATE_GPUHPM 167 +#define HI3670_CLK_GATE_PERIHPM 168 +#define HI3670_CLK_GATE_AOHPM 169 +#define HI3670_CLK_GATE_UART1 170 +#define HI3670_CLK_GATE_UART4 171 +#define HI3670_PCLK_GATE_UART1 172 +#define HI3670_PCLK_GATE_UART4 173 +#define HI3670_CLK_GATE_UART2 174 +#define HI3670_CLK_GATE_UART5 175 +#define HI3670_PCLK_GATE_UART2 176 +#define HI3670_PCLK_GATE_UART5 177 +#define HI3670_CLK_GATE_UART0 178 +#define HI3670_CLK_GATE_I2C3 179 +#define HI3670_CLK_GATE_I2C4 180 +#define HI3670_CLK_GATE_I2C7 181 +#define HI3670_PCLK_GATE_I2C3 182 +#define HI3670_PCLK_GATE_I2C4 183 +#define HI3670_PCLK_GATE_I2C7 184 +#define HI3670_CLK_GATE_SPI1 185 +#define HI3670_CLK_GATE_SPI4 186 +#define HI3670_PCLK_GATE_SPI1 187 +#define HI3670_PCLK_GATE_SPI4 188 +#define HI3670_CLK_GATE_USB3OTG_REF 189 +#define HI3670_CLK_GATE_USB2PHY_REF 190 +#define HI3670_CLK_GATE_PCIEAUX 191 +#define HI3670_ACLK_GATE_PCIE 192 +#define HI3670_CLK_GATE_MMC1_PCIEAXI 193 +#define HI3670_CLK_GATE_PCIEPHY_REF 194 +#define HI3670_CLK_GATE_PCIE_DEBOUNCE 195 +#define HI3670_CLK_GATE_PCIEIO 196 +#define HI3670_CLK_GATE_PCIE_HP 197 +#define HI3670_CLK_GATE_AO_ASP 198 +#define HI3670_PCLK_GATE_PCTRL 199 +#define HI3670_CLK_CSI_TRANS_GT 200 +#define HI3670_CLK_DSI_TRANS_GT 201 +#define HI3670_CLK_GATE_PWM 202 +#define HI3670_ABB_AUDIO_EN0 203 +#define HI3670_ABB_AUDIO_EN1 204 +#define HI3670_ABB_AUDIO_GT_EN0 205 +#define HI3670_ABB_AUDIO_GT_EN1 206 +#define HI3670_CLK_GATE_DP_AUDIO_PLL_AO 207 +#define HI3670_PERI_VOLT_HOLD 208 +#define HI3670_PERI_VOLT_MIDDLE 209 +#define HI3670_CLK_GATE_ISP_SNCLK0 210 +#define HI3670_CLK_GATE_ISP_SNCLK1 211 +#define HI3670_CLK_GATE_ISP_SNCLK2 212 +#define HI3670_CLK_GATE_RXDPHY0_CFG 213 +#define HI3670_CLK_GATE_RXDPHY1_CFG 214 +#define HI3670_CLK_GATE_RXDPHY2_CFG 215 +#define HI3670_CLK_GATE_TXDPHY0_CFG 216 +#define HI3670_CLK_GATE_TXDPHY0_REF 217 +#define HI3670_CLK_GATE_TXDPHY1_CFG 218 +#define HI3670_CLK_GATE_TXDPHY1_REF 219 +#define HI3670_CLK_GATE_MEDIA_TCXO 220 + +/* clk in sctrl */ +#define HI3670_CLK_ANDGT_IOPERI 0 +#define HI3670_CLKANDGT_ASP_SUBSYS_PERI 1 +#define HI3670_CLK_ANGT_ASP_SUBSYS 2 +#define HI3670_CLK_MUX_UFS_SUBSYS 3 +#define HI3670_CLK_MUX_CLKOUT0 4 +#define HI3670_CLK_MUX_CLKOUT1 5 +#define HI3670_CLK_MUX_ASP_SUBSYS_PERI 6 +#define HI3670_CLK_MUX_ASP_PLL 7 +#define HI3670_CLK_DIV_AOBUS 8 +#define HI3670_CLK_DIV_UFS_SUBSYS 9 +#define HI3670_CLK_DIV_IOPERI 10 +#define HI3670_CLK_DIV_CLKOUT0_TCXO 11 +#define HI3670_CLK_DIV_CLKOUT1_TCXO 12 +#define HI3670_CLK_ASP_SUBSYS_PERI_DIV 13 +#define HI3670_CLK_DIV_ASP_SUBSYS 14 +#define HI3670_PPLL0_EN_ACPU 15 +#define HI3670_PPLL0_GT_CPU 16 +#define HI3670_CLK_GATE_PPLL0_MEDIA 17 +#define HI3670_PCLK_GPIO18 18 +#define HI3670_PCLK_GPIO19 19 +#define HI3670_CLK_GATE_SPI 20 +#define HI3670_PCLK_GATE_SPI 21 +#define HI3670_CLK_GATE_UFS_SUBSYS 22 +#define HI3670_CLK_GATE_UFSIO_REF 23 +#define HI3670_PCLK_AO_GPIO0 24 +#define HI3670_PCLK_AO_GPIO1 25 +#define HI3670_PCLK_AO_GPIO2 26 +#define HI3670_PCLK_AO_GPIO3 27 +#define HI3670_PCLK_AO_GPIO4 28 +#define HI3670_PCLK_AO_GPIO5 29 +#define HI3670_PCLK_AO_GPIO6 30 +#define HI3670_CLK_GATE_OUT0 31 +#define HI3670_CLK_GATE_OUT1 32 +#define 
HI3670_PCLK_GATE_SYSCNT 33 +#define HI3670_CLK_GATE_SYSCNT 34 +#define HI3670_CLK_GATE_ASP_SUBSYS_PERI 35 +#define HI3670_CLK_GATE_ASP_SUBSYS 36 +#define HI3670_CLK_GATE_ASP_TCXO 37 +#define HI3670_CLK_GATE_DP_AUDIO_PLL 38 + +/* clk in pmuctrl */ +#define HI3670_GATE_ABB_192 0 + +/* clk in pctrl */ +#define HI3670_GATE_UFS_TCXO_EN 0 +#define HI3670_GATE_USB_TCXO_EN 1 + +/* clk in iomcu */ +#define HI3670_CLK_GATE_I2C0 0 +#define HI3670_CLK_GATE_I2C1 1 +#define HI3670_CLK_GATE_I2C2 2 +#define HI3670_CLK_GATE_SPI0 3 +#define HI3670_CLK_GATE_SPI2 4 +#define HI3670_CLK_GATE_UART3 5 +#define HI3670_CLK_I2C0_GATE_IOMCU 6 +#define HI3670_CLK_I2C1_GATE_IOMCU 7 +#define HI3670_CLK_I2C2_GATE_IOMCU 8 +#define HI3670_CLK_SPI0_GATE_IOMCU 9 +#define HI3670_CLK_SPI2_GATE_IOMCU 10 +#define HI3670_CLK_UART3_GATE_IOMCU 11 +#define HI3670_CLK_GATE_PERI0_IOMCU 12 + +/* clk in media1 */ +#define HI3670_CLK_GATE_VIVOBUS_ANDGT 0 +#define HI3670_CLK_ANDGT_EDC0 1 +#define HI3670_CLK_ANDGT_LDI0 2 +#define HI3670_CLK_ANDGT_LDI1 3 +#define HI3670_CLK_MMBUF_PLL_ANDGT 4 +#define HI3670_PCLK_MMBUF_ANDGT 5 +#define HI3670_CLK_MUX_VIVOBUS 6 +#define HI3670_CLK_MUX_EDC0 7 +#define HI3670_CLK_MUX_LDI0 8 +#define HI3670_CLK_MUX_LDI1 9 +#define HI3670_CLK_SW_MMBUF 10 +#define HI3670_CLK_DIV_VIVOBUS 11 +#define HI3670_CLK_DIV_EDC0 12 +#define HI3670_CLK_DIV_LDI0 13 +#define HI3670_CLK_DIV_LDI1 14 +#define HI3670_ACLK_DIV_MMBUF 15 +#define HI3670_PCLK_DIV_MMBUF 16 +#define HI3670_ACLK_GATE_NOC_DSS 17 +#define HI3670_PCLK_GATE_NOC_DSS_CFG 18 +#define HI3670_PCLK_GATE_MMBUF_CFG 19 +#define HI3670_PCLK_GATE_DISP_NOC_SUBSYS 20 +#define HI3670_ACLK_GATE_DISP_NOC_SUBSYS 21 +#define HI3670_PCLK_GATE_DSS 22 +#define HI3670_ACLK_GATE_DSS 23 +#define HI3670_CLK_GATE_VIVOBUSFREQ 24 +#define HI3670_CLK_GATE_EDC0 25 +#define HI3670_CLK_GATE_LDI0 26 +#define HI3670_CLK_GATE_LDI1FREQ 27 +#define HI3670_CLK_GATE_BRG 28 +#define HI3670_ACLK_GATE_ASC 29 +#define HI3670_CLK_GATE_DSS_AXI_MM 30 +#define HI3670_CLK_GATE_MMBUF 31 +#define HI3670_PCLK_GATE_MMBUF 32 +#define HI3670_CLK_GATE_ATDIV_VIVO 33 + +/* clk in media2 */ +#define HI3670_CLK_GATE_VDECFREQ 0 +#define HI3670_CLK_GATE_VENCFREQ 1 +#define HI3670_CLK_GATE_ICSFREQ 2 + +#endif /* __DT_BINDINGS_CLOCK_HI3670_H */ diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h index 7ad171b8f3bf..87b068f4a998 100644 --- a/include/dt-bindings/clock/imx6qdl-clock.h +++ b/include/dt-bindings/clock/imx6qdl-clock.h @@ -273,6 +273,7 @@ #define IMX6QDL_CLK_MLB_PODF 260 #define IMX6QDL_CLK_EPIT1 261 #define IMX6QDL_CLK_EPIT2 262 -#define IMX6QDL_CLK_END 263 +#define IMX6QDL_CLK_MMDC_P0_IPG 263 +#define IMX6QDL_CLK_END 264 #endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */ diff --git a/include/dt-bindings/clock/imx6sl-clock.h b/include/dt-bindings/clock/imx6sl-clock.h index e14573e293c5..cfbfc39d1878 100644 --- a/include/dt-bindings/clock/imx6sl-clock.h +++ b/include/dt-bindings/clock/imx6sl-clock.h @@ -175,6 +175,8 @@ #define IMX6SL_CLK_SSI2_IPG 162 #define IMX6SL_CLK_SSI3_IPG 163 #define IMX6SL_CLK_SPDIF_GCLK 164 -#define IMX6SL_CLK_END 165 +#define IMX6SL_CLK_MMDC_P0_IPG 165 +#define IMX6SL_CLK_MMDC_P1_IPG 166 +#define IMX6SL_CLK_END 167 #endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */ diff --git a/include/dt-bindings/clock/imx6sll-clock.h b/include/dt-bindings/clock/imx6sll-clock.h index 1036475f997d..f446710fe63d 100644 --- a/include/dt-bindings/clock/imx6sll-clock.h +++ b/include/dt-bindings/clock/imx6sll-clock.h @@ -203,7 +203,8 @@ #define IMX6SLL_CLK_GPIO4 176 #define 
IMX6SLL_CLK_GPIO5 177 #define IMX6SLL_CLK_GPIO6 178 +#define IMX6SLL_CLK_MMDC_P1_IPG 179 -#define IMX6SLL_CLK_END 179 +#define IMX6SLL_CLK_END 180 #endif /* __DT_BINDINGS_CLOCK_IMX6SLL_H */ diff --git a/include/dt-bindings/clock/imx6sx-clock.h b/include/dt-bindings/clock/imx6sx-clock.h index cd2d6c570e86..fb420c734774 100644 --- a/include/dt-bindings/clock/imx6sx-clock.h +++ b/include/dt-bindings/clock/imx6sx-clock.h @@ -279,6 +279,7 @@ #define IMX6SX_CLK_LVDS2_OUT 266 #define IMX6SX_CLK_LVDS2_IN 267 #define IMX6SX_CLK_ANACLK2 268 -#define IMX6SX_CLK_CLK_END 269 +#define IMX6SX_CLK_MMDC_P1_IPG 269 +#define IMX6SX_CLK_CLK_END 270 #endif /* __DT_BINDINGS_CLOCK_IMX6SX_H */ diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h index f8e0476a3a0e..f718aac9b9da 100644 --- a/include/dt-bindings/clock/imx6ul-clock.h +++ b/include/dt-bindings/clock/imx6ul-clock.h @@ -259,7 +259,8 @@ #define IMX6UL_CLK_GPIO3 246 #define IMX6UL_CLK_GPIO4 247 #define IMX6UL_CLK_GPIO5 248 +#define IMX6UL_CLK_MMDC_P1_IPG 249 -#define IMX6UL_CLK_END 249 +#define IMX6UL_CLK_END 250 #endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */ diff --git a/include/dt-bindings/clock/jz4725b-cgu.h b/include/dt-bindings/clock/jz4725b-cgu.h new file mode 100644 index 000000000000..460bbeff6ab8 --- /dev/null +++ b/include/dt-bindings/clock/jz4725b-cgu.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides clock numbers for the ingenic,jz4725b-cgu DT binding. + */ + +#ifndef __DT_BINDINGS_CLOCK_JZ4725B_CGU_H__ +#define __DT_BINDINGS_CLOCK_JZ4725B_CGU_H__ + +#define JZ4725B_CLK_EXT 0 +#define JZ4725B_CLK_OSC32K 1 +#define JZ4725B_CLK_PLL 2 +#define JZ4725B_CLK_PLL_HALF 3 +#define JZ4725B_CLK_CCLK 4 +#define JZ4725B_CLK_HCLK 5 +#define JZ4725B_CLK_PCLK 6 +#define JZ4725B_CLK_MCLK 7 +#define JZ4725B_CLK_IPU 8 +#define JZ4725B_CLK_LCD 9 +#define JZ4725B_CLK_I2S 10 +#define JZ4725B_CLK_SPI 11 +#define JZ4725B_CLK_MMC_MUX 12 +#define JZ4725B_CLK_UDC 13 +#define JZ4725B_CLK_UART 14 +#define JZ4725B_CLK_DMA 15 +#define JZ4725B_CLK_ADC 16 +#define JZ4725B_CLK_I2C 17 +#define JZ4725B_CLK_AIC 18 +#define JZ4725B_CLK_MMC0 19 +#define JZ4725B_CLK_MMC1 20 +#define JZ4725B_CLK_BCH 21 +#define JZ4725B_CLK_TCU 22 +#define JZ4725B_CLK_EXT512 23 +#define JZ4725B_CLK_RTC 24 + +#endif /* __DT_BINDINGS_CLOCK_JZ4725B_CGU_H__ */ diff --git a/include/dt-bindings/clock/maxim,max77686.h b/include/dt-bindings/clock/maxim,max77686.h index 7b28b0905869..af8261dcace1 100644 --- a/include/dt-bindings/clock/maxim,max77686.h +++ b/include/dt-bindings/clock/maxim,max77686.h @@ -1,10 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2014 Google, Inc * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Device Tree binding constants clocks for the Maxim 77686 PMIC. */ diff --git a/include/dt-bindings/clock/maxim,max77802.h b/include/dt-bindings/clock/maxim,max77802.h index 997312edcbb5..51adcbaed697 100644 --- a/include/dt-bindings/clock/maxim,max77802.h +++ b/include/dt-bindings/clock/maxim,max77802.h @@ -1,10 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2014 Google, Inc * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Device Tree binding constants clocks for the Maxim 77802 PMIC. 
*/ diff --git a/include/dt-bindings/clock/qcom,camcc-sdm845.h b/include/dt-bindings/clock/qcom,camcc-sdm845.h new file mode 100644 index 000000000000..4f7a2d2320bf --- /dev/null +++ b/include/dt-bindings/clock/qcom,camcc-sdm845.h @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_SDM_CAM_CC_SDM845_H +#define _DT_BINDINGS_CLK_SDM_CAM_CC_SDM845_H + +/* CAM_CC clock registers */ +#define CAM_CC_BPS_AHB_CLK 0 +#define CAM_CC_BPS_AREG_CLK 1 +#define CAM_CC_BPS_AXI_CLK 2 +#define CAM_CC_BPS_CLK 3 +#define CAM_CC_BPS_CLK_SRC 4 +#define CAM_CC_CAMNOC_ATB_CLK 5 +#define CAM_CC_CAMNOC_AXI_CLK 6 +#define CAM_CC_CCI_CLK 7 +#define CAM_CC_CCI_CLK_SRC 8 +#define CAM_CC_CPAS_AHB_CLK 9 +#define CAM_CC_CPHY_RX_CLK_SRC 10 +#define CAM_CC_CSI0PHYTIMER_CLK 11 +#define CAM_CC_CSI0PHYTIMER_CLK_SRC 12 +#define CAM_CC_CSI1PHYTIMER_CLK 13 +#define CAM_CC_CSI1PHYTIMER_CLK_SRC 14 +#define CAM_CC_CSI2PHYTIMER_CLK 15 +#define CAM_CC_CSI2PHYTIMER_CLK_SRC 16 +#define CAM_CC_CSI3PHYTIMER_CLK 17 +#define CAM_CC_CSI3PHYTIMER_CLK_SRC 18 +#define CAM_CC_CSIPHY0_CLK 19 +#define CAM_CC_CSIPHY1_CLK 20 +#define CAM_CC_CSIPHY2_CLK 21 +#define CAM_CC_CSIPHY3_CLK 22 +#define CAM_CC_FAST_AHB_CLK_SRC 23 +#define CAM_CC_FD_CORE_CLK 24 +#define CAM_CC_FD_CORE_CLK_SRC 25 +#define CAM_CC_FD_CORE_UAR_CLK 26 +#define CAM_CC_ICP_APB_CLK 27 +#define CAM_CC_ICP_ATB_CLK 28 +#define CAM_CC_ICP_CLK 29 +#define CAM_CC_ICP_CLK_SRC 30 +#define CAM_CC_ICP_CTI_CLK 31 +#define CAM_CC_ICP_TS_CLK 32 +#define CAM_CC_IFE_0_AXI_CLK 33 +#define CAM_CC_IFE_0_CLK 34 +#define CAM_CC_IFE_0_CLK_SRC 35 +#define CAM_CC_IFE_0_CPHY_RX_CLK 36 +#define CAM_CC_IFE_0_CSID_CLK 37 +#define CAM_CC_IFE_0_CSID_CLK_SRC 38 +#define CAM_CC_IFE_0_DSP_CLK 39 +#define CAM_CC_IFE_1_AXI_CLK 40 +#define CAM_CC_IFE_1_CLK 41 +#define CAM_CC_IFE_1_CLK_SRC 42 +#define CAM_CC_IFE_1_CPHY_RX_CLK 43 +#define CAM_CC_IFE_1_CSID_CLK 44 +#define CAM_CC_IFE_1_CSID_CLK_SRC 45 +#define CAM_CC_IFE_1_DSP_CLK 46 +#define CAM_CC_IFE_LITE_CLK 47 +#define CAM_CC_IFE_LITE_CLK_SRC 48 +#define CAM_CC_IFE_LITE_CPHY_RX_CLK 49 +#define CAM_CC_IFE_LITE_CSID_CLK 50 +#define CAM_CC_IFE_LITE_CSID_CLK_SRC 51 +#define CAM_CC_IPE_0_AHB_CLK 52 +#define CAM_CC_IPE_0_AREG_CLK 53 +#define CAM_CC_IPE_0_AXI_CLK 54 +#define CAM_CC_IPE_0_CLK 55 +#define CAM_CC_IPE_0_CLK_SRC 56 +#define CAM_CC_IPE_1_AHB_CLK 57 +#define CAM_CC_IPE_1_AREG_CLK 58 +#define CAM_CC_IPE_1_AXI_CLK 59 +#define CAM_CC_IPE_1_CLK 60 +#define CAM_CC_IPE_1_CLK_SRC 61 +#define CAM_CC_JPEG_CLK 62 +#define CAM_CC_JPEG_CLK_SRC 63 +#define CAM_CC_LRME_CLK 64 +#define CAM_CC_LRME_CLK_SRC 65 +#define CAM_CC_MCLK0_CLK 66 +#define CAM_CC_MCLK0_CLK_SRC 67 +#define CAM_CC_MCLK1_CLK 68 +#define CAM_CC_MCLK1_CLK_SRC 69 +#define CAM_CC_MCLK2_CLK 70 +#define CAM_CC_MCLK2_CLK_SRC 71 +#define CAM_CC_MCLK3_CLK 72 +#define CAM_CC_MCLK3_CLK_SRC 73 +#define CAM_CC_PLL0 74 +#define CAM_CC_PLL0_OUT_EVEN 75 +#define CAM_CC_PLL1 76 +#define CAM_CC_PLL1_OUT_EVEN 77 +#define CAM_CC_PLL2 78 +#define CAM_CC_PLL2_OUT_EVEN 79 +#define CAM_CC_PLL3 80 +#define CAM_CC_PLL3_OUT_EVEN 81 +#define CAM_CC_SLOW_AHB_CLK_SRC 82 +#define CAM_CC_SOC_AHB_CLK 83 +#define CAM_CC_SYS_TMR_CLK 84 + +/* CAM_CC Resets */ +#define TITAN_CAM_CC_CCI_BCR 0 +#define TITAN_CAM_CC_CPAS_BCR 1 +#define TITAN_CAM_CC_CSI0PHY_BCR 2 +#define TITAN_CAM_CC_CSI1PHY_BCR 3 +#define TITAN_CAM_CC_CSI2PHY_BCR 4 +#define TITAN_CAM_CC_MCLK0_BCR 5 +#define TITAN_CAM_CC_MCLK1_BCR 6 +#define TITAN_CAM_CC_MCLK2_BCR 7 +#define 
TITAN_CAM_CC_MCLK3_BCR 8 +#define TITAN_CAM_CC_TITAN_TOP_BCR 9 + +/* CAM_CC GDSCRs */ +#define BPS_GDSC 0 +#define IPE_0_GDSC 1 +#define IPE_1_GDSC 2 +#define IFE_0_GDSC 3 +#define IFE_1_GDSC 4 +#define TITAN_TOP_GDSC 5 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8960.h b/include/dt-bindings/clock/qcom,gcc-msm8960.h index 7d20eedfee98..e02742fc81cc 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8960.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8960.h @@ -319,5 +319,7 @@ #define CE3_SRC 303 #define CE3_CORE_CLK 304 #define CE3_H_CLK 305 +#define PLL16 306 +#define PLL17 307 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8996.h b/include/dt-bindings/clock/qcom,gcc-msm8996.h index 75b07cf5eed0..db80f2ee571b 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8996.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8996.h @@ -235,6 +235,15 @@ #define GCC_RX1_USB2_CLKREF_CLK 218 #define GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CLK 219 #define GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK 220 +#define GCC_EDP_CLKREF_CLK 221 +#define GCC_MSS_CFG_AHB_CLK 222 +#define GCC_MSS_Q6_BIMC_AXI_CLK 223 +#define GCC_MSS_SNOC_AXI_CLK 224 +#define GCC_MSS_MNOC_BIMC_AXI_CLK 225 +#define GCC_DCC_AHB_CLK 226 +#define GCC_AGGRE0_NOC_MPU_CFG_AHB_CLK 227 +#define GCC_MMSS_GPLL0_DIV_CLK 228 +#define GCC_MSS_GPLL0_DIV_CLK 229 #define GCC_SYSTEM_NOC_BCR 0 #define GCC_CONFIG_NOC_BCR 1 diff --git a/include/dt-bindings/clock/qcom,gcc-qcs404.h b/include/dt-bindings/clock/qcom,gcc-qcs404.h new file mode 100644 index 000000000000..6ceb55ed72c6 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-qcs404.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_QCS404_H +#define _DT_BINDINGS_CLK_QCOM_GCC_QCS404_H + +#define GCC_APSS_AHB_CLK_SRC 0 +#define GCC_BLSP1_QUP0_I2C_APPS_CLK_SRC 1 +#define GCC_BLSP1_QUP0_SPI_APPS_CLK_SRC 2 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC 3 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC 4 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC 5 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC 6 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC 7 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC 8 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC 9 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC 10 +#define GCC_BLSP1_UART0_APPS_CLK_SRC 11 +#define GCC_BLSP1_UART1_APPS_CLK_SRC 12 +#define GCC_BLSP1_UART2_APPS_CLK_SRC 13 +#define GCC_BLSP1_UART3_APPS_CLK_SRC 14 +#define GCC_BLSP2_QUP0_I2C_APPS_CLK_SRC 15 +#define GCC_BLSP2_QUP0_SPI_APPS_CLK_SRC 16 +#define GCC_BLSP2_UART0_APPS_CLK_SRC 17 +#define GCC_BYTE0_CLK_SRC 18 +#define GCC_EMAC_CLK_SRC 19 +#define GCC_EMAC_PTP_CLK_SRC 20 +#define GCC_ESC0_CLK_SRC 21 +#define GCC_APSS_AHB_CLK 22 +#define GCC_APSS_AXI_CLK 23 +#define GCC_BIMC_APSS_AXI_CLK 24 +#define GCC_BIMC_GFX_CLK 25 +#define GCC_BIMC_MDSS_CLK 26 +#define GCC_BLSP1_AHB_CLK 27 +#define GCC_BLSP1_QUP0_I2C_APPS_CLK 28 +#define GCC_BLSP1_QUP0_SPI_APPS_CLK 29 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 30 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 31 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 32 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 33 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 34 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 35 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 36 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 37 +#define GCC_BLSP1_UART0_APPS_CLK 38 +#define GCC_BLSP1_UART1_APPS_CLK 39 +#define GCC_BLSP1_UART2_APPS_CLK 40 +#define GCC_BLSP1_UART3_APPS_CLK 41 +#define GCC_BLSP2_AHB_CLK 42 +#define GCC_BLSP2_QUP0_I2C_APPS_CLK 43 +#define GCC_BLSP2_QUP0_SPI_APPS_CLK 44 
+#define GCC_BLSP2_UART0_APPS_CLK 45 +#define GCC_BOOT_ROM_AHB_CLK 46 +#define GCC_DCC_CLK 47 +#define GCC_GENI_IR_H_CLK 48 +#define GCC_ETH_AXI_CLK 49 +#define GCC_ETH_PTP_CLK 50 +#define GCC_ETH_RGMII_CLK 51 +#define GCC_ETH_SLAVE_AHB_CLK 52 +#define GCC_GENI_IR_S_CLK 53 +#define GCC_GP1_CLK 54 +#define GCC_GP2_CLK 55 +#define GCC_GP3_CLK 56 +#define GCC_MDSS_AHB_CLK 57 +#define GCC_MDSS_AXI_CLK 58 +#define GCC_MDSS_BYTE0_CLK 59 +#define GCC_MDSS_ESC0_CLK 60 +#define GCC_MDSS_HDMI_APP_CLK 61 +#define GCC_MDSS_HDMI_PCLK_CLK 62 +#define GCC_MDSS_MDP_CLK 63 +#define GCC_MDSS_PCLK0_CLK 64 +#define GCC_MDSS_VSYNC_CLK 65 +#define GCC_OXILI_AHB_CLK 66 +#define GCC_OXILI_GFX3D_CLK 67 +#define GCC_PCIE_0_AUX_CLK 68 +#define GCC_PCIE_0_CFG_AHB_CLK 69 +#define GCC_PCIE_0_MSTR_AXI_CLK 70 +#define GCC_PCIE_0_PIPE_CLK 71 +#define GCC_PCIE_0_SLV_AXI_CLK 72 +#define GCC_PCNOC_USB2_CLK 73 +#define GCC_PCNOC_USB3_CLK 74 +#define GCC_PDM2_CLK 75 +#define GCC_PDM_AHB_CLK 76 +#define GCC_VSYNC_CLK_SRC 77 +#define GCC_PRNG_AHB_CLK 78 +#define GCC_PWM0_XO512_CLK 79 +#define GCC_PWM1_XO512_CLK 80 +#define GCC_PWM2_XO512_CLK 81 +#define GCC_SDCC1_AHB_CLK 82 +#define GCC_SDCC1_APPS_CLK 83 +#define GCC_SDCC1_ICE_CORE_CLK 84 +#define GCC_SDCC2_AHB_CLK 85 +#define GCC_SDCC2_APPS_CLK 86 +#define GCC_SYS_NOC_USB3_CLK 87 +#define GCC_USB20_MOCK_UTMI_CLK 88 +#define GCC_USB2A_PHY_SLEEP_CLK 89 +#define GCC_USB30_MASTER_CLK 90 +#define GCC_USB30_MOCK_UTMI_CLK 91 +#define GCC_USB30_SLEEP_CLK 92 +#define GCC_USB3_PHY_AUX_CLK 93 +#define GCC_USB3_PHY_PIPE_CLK 94 +#define GCC_USB_HS_PHY_CFG_AHB_CLK 95 +#define GCC_USB_HS_SYSTEM_CLK 96 +#define GCC_GFX3D_CLK_SRC 97 +#define GCC_GP1_CLK_SRC 98 +#define GCC_GP2_CLK_SRC 99 +#define GCC_GP3_CLK_SRC 100 +#define GCC_GPLL0_OUT_MAIN 101 +#define GCC_GPLL1_OUT_MAIN 102 +#define GCC_GPLL3_OUT_MAIN 103 +#define GCC_GPLL4_OUT_MAIN 104 +#define GCC_HDMI_APP_CLK_SRC 105 +#define GCC_HDMI_PCLK_CLK_SRC 106 +#define GCC_MDP_CLK_SRC 107 +#define GCC_PCIE_0_AUX_CLK_SRC 108 +#define GCC_PCIE_0_PIPE_CLK_SRC 109 +#define GCC_PCLK0_CLK_SRC 110 +#define GCC_PDM2_CLK_SRC 111 +#define GCC_SDCC1_APPS_CLK_SRC 112 +#define GCC_SDCC1_ICE_CORE_CLK_SRC 113 +#define GCC_SDCC2_APPS_CLK_SRC 114 +#define GCC_USB20_MOCK_UTMI_CLK_SRC 115 +#define GCC_USB30_MASTER_CLK_SRC 116 +#define GCC_USB30_MOCK_UTMI_CLK_SRC 117 +#define GCC_USB3_PHY_AUX_CLK_SRC 118 +#define GCC_USB_HS_SYSTEM_CLK_SRC 119 +#define GCC_GPLL0_AO_CLK_SRC 120 +#define GCC_USB_HS_INACTIVITY_TIMERS_CLK 122 +#define GCC_GPLL0_AO_OUT_MAIN 123 +#define GCC_GPLL0_SLEEP_CLK_SRC 124 +#define GCC_GPLL6 125 +#define GCC_GPLL6_OUT_AUX 126 +#define GCC_MDSS_MDP_VOTE_CLK 127 +#define GCC_MDSS_ROTATOR_VOTE_CLK 128 +#define GCC_BIMC_GPU_CLK 129 +#define GCC_GTCU_AHB_CLK 130 +#define GCC_GFX_TCU_CLK 131 +#define GCC_GFX_TBU_CLK 132 +#define GCC_SMMU_CFG_CLK 133 +#define GCC_APSS_TCU_CLK 134 +#define GCC_CRYPTO_AHB_CLK 135 +#define GCC_CRYPTO_AXI_CLK 136 +#define GCC_CRYPTO_CLK 137 +#define GCC_MDP_TBU_CLK 138 +#define GCC_QDSS_DAP_CLK 139 +#define GCC_DCC_XO_CLK 140 + +#define GCC_GENI_IR_BCR 0 +#define GCC_USB_HS_BCR 1 +#define GCC_USB2_HS_PHY_ONLY_BCR 2 +#define GCC_QUSB2_PHY_BCR 3 +#define GCC_USB_HS_PHY_CFG_AHB_BCR 4 +#define GCC_USB2A_PHY_BCR 5 +#define GCC_USB3_PHY_BCR 6 +#define GCC_USB_30_BCR 7 +#define GCC_USB3PHY_PHY_BCR 8 +#define GCC_PCIE_0_BCR 9 +#define GCC_PCIE_0_PHY_BCR 10 +#define GCC_PCIE_0_LINK_DOWN_BCR 11 +#define GCC_PCIEPHY_0_PHY_BCR 12 +#define GCC_EMAC_BCR 13 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-sdm660.h 
b/include/dt-bindings/clock/qcom,gcc-sdm660.h new file mode 100644 index 000000000000..468302282913 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-sdm660.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2018, Craig Tatlor. + */ + +#ifndef _DT_BINDINGS_CLK_MSM_GCC_660_H +#define _DT_BINDINGS_CLK_MSM_GCC_660_H + +#define BLSP1_QUP1_I2C_APPS_CLK_SRC 0 +#define BLSP1_QUP1_SPI_APPS_CLK_SRC 1 +#define BLSP1_QUP2_I2C_APPS_CLK_SRC 2 +#define BLSP1_QUP2_SPI_APPS_CLK_SRC 3 +#define BLSP1_QUP3_I2C_APPS_CLK_SRC 4 +#define BLSP1_QUP3_SPI_APPS_CLK_SRC 5 +#define BLSP1_QUP4_I2C_APPS_CLK_SRC 6 +#define BLSP1_QUP4_SPI_APPS_CLK_SRC 7 +#define BLSP1_UART1_APPS_CLK_SRC 8 +#define BLSP1_UART2_APPS_CLK_SRC 9 +#define BLSP2_QUP1_I2C_APPS_CLK_SRC 10 +#define BLSP2_QUP1_SPI_APPS_CLK_SRC 11 +#define BLSP2_QUP2_I2C_APPS_CLK_SRC 12 +#define BLSP2_QUP2_SPI_APPS_CLK_SRC 13 +#define BLSP2_QUP3_I2C_APPS_CLK_SRC 14 +#define BLSP2_QUP3_SPI_APPS_CLK_SRC 15 +#define BLSP2_QUP4_I2C_APPS_CLK_SRC 16 +#define BLSP2_QUP4_SPI_APPS_CLK_SRC 17 +#define BLSP2_UART1_APPS_CLK_SRC 18 +#define BLSP2_UART2_APPS_CLK_SRC 19 +#define GCC_AGGRE2_UFS_AXI_CLK 20 +#define GCC_AGGRE2_USB3_AXI_CLK 21 +#define GCC_BIMC_GFX_CLK 22 +#define GCC_BIMC_HMSS_AXI_CLK 23 +#define GCC_BIMC_MSS_Q6_AXI_CLK 24 +#define GCC_BLSP1_AHB_CLK 25 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 26 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 27 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 28 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 29 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 30 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 31 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 32 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 33 +#define GCC_BLSP1_UART1_APPS_CLK 34 +#define GCC_BLSP1_UART2_APPS_CLK 35 +#define GCC_BLSP2_AHB_CLK 36 +#define GCC_BLSP2_QUP1_I2C_APPS_CLK 37 +#define GCC_BLSP2_QUP1_SPI_APPS_CLK 38 +#define GCC_BLSP2_QUP2_I2C_APPS_CLK 39 +#define GCC_BLSP2_QUP2_SPI_APPS_CLK 40 +#define GCC_BLSP2_QUP3_I2C_APPS_CLK 41 +#define GCC_BLSP2_QUP3_SPI_APPS_CLK 42 +#define GCC_BLSP2_QUP4_I2C_APPS_CLK 43 +#define GCC_BLSP2_QUP4_SPI_APPS_CLK 44 +#define GCC_BLSP2_UART1_APPS_CLK 45 +#define GCC_BLSP2_UART2_APPS_CLK 46 +#define GCC_BOOT_ROM_AHB_CLK 47 +#define GCC_CFG_NOC_USB2_AXI_CLK 48 +#define GCC_CFG_NOC_USB3_AXI_CLK 49 +#define GCC_DCC_AHB_CLK 50 +#define GCC_GP1_CLK 51 +#define GCC_GP2_CLK 52 +#define GCC_GP3_CLK 53 +#define GCC_GPU_BIMC_GFX_CLK 54 +#define GCC_GPU_CFG_AHB_CLK 55 +#define GCC_GPU_GPLL0_CLK 56 +#define GCC_GPU_GPLL0_DIV_CLK 57 +#define GCC_HMSS_DVM_BUS_CLK 58 +#define GCC_HMSS_RBCPR_CLK 59 +#define GCC_MMSS_GPLL0_CLK 60 +#define GCC_MMSS_GPLL0_DIV_CLK 61 +#define GCC_MMSS_NOC_CFG_AHB_CLK 62 +#define GCC_MMSS_SYS_NOC_AXI_CLK 63 +#define GCC_MSS_CFG_AHB_CLK 64 +#define GCC_MSS_GPLL0_DIV_CLK 65 +#define GCC_MSS_MNOC_BIMC_AXI_CLK 66 +#define GCC_MSS_Q6_BIMC_AXI_CLK 67 +#define GCC_MSS_SNOC_AXI_CLK 68 +#define GCC_PDM2_CLK 69 +#define GCC_PDM_AHB_CLK 70 +#define GCC_PRNG_AHB_CLK 71 +#define GCC_QSPI_AHB_CLK 72 +#define GCC_QSPI_SER_CLK 73 +#define GCC_SDCC1_AHB_CLK 74 +#define GCC_SDCC1_APPS_CLK 75 +#define GCC_SDCC1_ICE_CORE_CLK 76 +#define GCC_SDCC2_AHB_CLK 77 +#define GCC_SDCC2_APPS_CLK 78 +#define GCC_UFS_AHB_CLK 79 +#define GCC_UFS_AXI_CLK 80 +#define GCC_UFS_CLKREF_CLK 81 +#define GCC_UFS_ICE_CORE_CLK 82 +#define GCC_UFS_PHY_AUX_CLK 83 +#define GCC_UFS_RX_SYMBOL_0_CLK 84 +#define GCC_UFS_RX_SYMBOL_1_CLK 85 +#define GCC_UFS_TX_SYMBOL_0_CLK 86 +#define GCC_UFS_UNIPRO_CORE_CLK 87 +#define 
GCC_USB20_MASTER_CLK 88 +#define GCC_USB20_MOCK_UTMI_CLK 89 +#define GCC_USB20_SLEEP_CLK 90 +#define GCC_USB30_MASTER_CLK 91 +#define GCC_USB30_MOCK_UTMI_CLK 92 +#define GCC_USB30_SLEEP_CLK 93 +#define GCC_USB3_CLKREF_CLK 94 +#define GCC_USB3_PHY_AUX_CLK 95 +#define GCC_USB3_PHY_PIPE_CLK 96 +#define GCC_USB_PHY_CFG_AHB2PHY_CLK 97 +#define GP1_CLK_SRC 98 +#define GP2_CLK_SRC 99 +#define GP3_CLK_SRC 100 +#define GPLL0 101 +#define GPLL0_EARLY 102 +#define GPLL1 103 +#define GPLL1_EARLY 104 +#define GPLL4 105 +#define GPLL4_EARLY 106 +#define HMSS_GPLL0_CLK_SRC 107 +#define HMSS_GPLL4_CLK_SRC 108 +#define HMSS_RBCPR_CLK_SRC 109 +#define PDM2_CLK_SRC 110 +#define QSPI_SER_CLK_SRC 111 +#define SDCC1_APPS_CLK_SRC 112 +#define SDCC1_ICE_CORE_CLK_SRC 113 +#define SDCC2_APPS_CLK_SRC 114 +#define UFS_AXI_CLK_SRC 115 +#define UFS_ICE_CORE_CLK_SRC 116 +#define UFS_PHY_AUX_CLK_SRC 117 +#define UFS_UNIPRO_CORE_CLK_SRC 118 +#define USB20_MASTER_CLK_SRC 119 +#define USB20_MOCK_UTMI_CLK_SRC 120 +#define USB30_MASTER_CLK_SRC 121 +#define USB30_MOCK_UTMI_CLK_SRC 122 +#define USB3_PHY_AUX_CLK_SRC 123 +#define GPLL0_OUT_MSSCC 124 +#define GCC_UFS_AXI_HW_CTL_CLK 125 +#define GCC_UFS_ICE_CORE_HW_CTL_CLK 126 +#define GCC_UFS_PHY_AUX_HW_CTL_CLK 127 +#define GCC_UFS_UNIPRO_CORE_HW_CTL_CLK 128 +#define GCC_RX0_USB2_CLKREF_CLK 129 +#define GCC_RX1_USB2_CLKREF_CLK 130 + +#define PCIE_0_GDSC 0 +#define UFS_GDSC 1 +#define USB_30_GDSC 2 + +#define GCC_QUSB2PHY_PRIM_BCR 0 +#define GCC_QUSB2PHY_SEC_BCR 1 +#define GCC_UFS_BCR 2 +#define GCC_USB3_DP_PHY_BCR 3 +#define GCC_USB3_PHY_BCR 4 +#define GCC_USB3PHY_PHY_BCR 5 +#define GCC_USB_20_BCR 6 +#define GCC_USB_30_BCR 7 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 8 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h index f96fc2dbf60e..b8eae5a76503 100644 --- a/include/dt-bindings/clock/qcom,gcc-sdm845.h +++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h @@ -194,6 +194,9 @@ #define GPLL4 184 #define GCC_CPUSS_DVM_BUS_CLK 185 #define GCC_CPUSS_GNOC_CLK 186 +#define GCC_QSPI_CORE_CLK_SRC 187 +#define GCC_QSPI_CORE_CLK 188 +#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 189 /* GCC Resets */ #define GCC_MMSS_BCR 0 diff --git a/include/dt-bindings/clock/r7s72100-clock.h b/include/dt-bindings/clock/r7s72100-clock.h index 0dcb3e87d44c..a267ac250143 100644 --- a/include/dt-bindings/clock/r7s72100-clock.h +++ b/include/dt-bindings/clock/r7s72100-clock.h @@ -1,10 +1,7 @@ -/* +/* SPDX-License-Identifier: GPL-2.0 + * * Copyright (C) 2014 Renesas Solutions Corp. * Copyright (C) 2014 Wolfram Sang, Sang Engineering <wsa@sang-engineering.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. */ #ifndef __DT_BINDINGS_CLOCK_R7S72100_H__ diff --git a/include/dt-bindings/clock/r7s9210-cpg-mssr.h b/include/dt-bindings/clock/r7s9210-cpg-mssr.h new file mode 100644 index 000000000000..b6f85ca149aa --- /dev/null +++ b/include/dt-bindings/clock/r7s9210-cpg-mssr.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Renesas Electronics Corp. 
+ * + */ + +#ifndef __DT_BINDINGS_CLOCK_R7S9210_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R7S9210_CPG_MSSR_H__ + +#include <dt-bindings/clock/renesas-cpg-mssr.h> + +/* R7S9210 CPG Core Clocks */ +#define R7S9210_CLK_I 0 +#define R7S9210_CLK_G 1 +#define R7S9210_CLK_B 2 +#define R7S9210_CLK_P1 3 +#define R7S9210_CLK_P1C 4 +#define R7S9210_CLK_P0 5 + +#endif /* __DT_BINDINGS_CLOCK_R7S9210_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a7743-cpg-mssr.h b/include/dt-bindings/clock/r8a7743-cpg-mssr.h index e1d1f3c6a99e..3ba936029d9f 100644 --- a/include/dt-bindings/clock/r8a7743-cpg-mssr.h +++ b/include/dt-bindings/clock/r8a7743-cpg-mssr.h @@ -1,10 +1,6 @@ -/* - * Copyright (C) 2016 Cogent Embedded Inc. +/* SPDX-License-Identifier: GPL-2.0+ * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * Copyright (C) 2016 Cogent Embedded Inc. */ #ifndef __DT_BINDINGS_CLOCK_R8A7743_CPG_MSSR_H__ #define __DT_BINDINGS_CLOCK_R8A7743_CPG_MSSR_H__ diff --git a/include/dt-bindings/clock/r8a7744-cpg-mssr.h b/include/dt-bindings/clock/r8a7744-cpg-mssr.h new file mode 100644 index 000000000000..2690be0c3e22 --- /dev/null +++ b/include/dt-bindings/clock/r8a7744-cpg-mssr.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Renesas Electronics Corp. + */ +#ifndef __DT_BINDINGS_CLOCK_R8A7744_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A7744_CPG_MSSR_H__ + +#include <dt-bindings/clock/renesas-cpg-mssr.h> + +/* r8a7744 CPG Core Clocks */ +#define R8A7744_CLK_Z 0 +#define R8A7744_CLK_ZG 1 +#define R8A7744_CLK_ZTR 2 +#define R8A7744_CLK_ZTRD2 3 +#define R8A7744_CLK_ZT 4 +#define R8A7744_CLK_ZX 5 +#define R8A7744_CLK_ZS 6 +#define R8A7744_CLK_HP 7 +#define R8A7744_CLK_B 9 +#define R8A7744_CLK_LB 10 +#define R8A7744_CLK_P 11 +#define R8A7744_CLK_CL 12 +#define R8A7744_CLK_M2 13 +#define R8A7744_CLK_ZB3 15 +#define R8A7744_CLK_ZB3D2 16 +#define R8A7744_CLK_DDR 17 +#define R8A7744_CLK_SDH 18 +#define R8A7744_CLK_SD0 19 +#define R8A7744_CLK_SD2 20 +#define R8A7744_CLK_SD3 21 +#define R8A7744_CLK_MMC0 22 +#define R8A7744_CLK_MP 23 +#define R8A7744_CLK_QSPI 26 +#define R8A7744_CLK_CP 27 +#define R8A7744_CLK_RCAN 28 +#define R8A7744_CLK_R 29 +#define R8A7744_CLK_OSC 30 + +#endif /* __DT_BINDINGS_CLOCK_R8A7744_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a7745-cpg-mssr.h b/include/dt-bindings/clock/r8a7745-cpg-mssr.h index 56ad6f0c6760..f81066c9d192 100644 --- a/include/dt-bindings/clock/r8a7745-cpg-mssr.h +++ b/include/dt-bindings/clock/r8a7745-cpg-mssr.h @@ -1,10 +1,6 @@ -/* - * Copyright (C) 2016 Cogent Embedded Inc. +/* SPDX-License-Identifier: GPL-2.0+ * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * Copyright (C) 2016 Cogent Embedded Inc. 
*/ #ifndef __DT_BINDINGS_CLOCK_R8A7745_CPG_MSSR_H__ #define __DT_BINDINGS_CLOCK_R8A7745_CPG_MSSR_H__ diff --git a/include/dt-bindings/clock/r8a774a1-cpg-mssr.h b/include/dt-bindings/clock/r8a774a1-cpg-mssr.h new file mode 100644 index 000000000000..9bc5d45ff4b5 --- /dev/null +++ b/include/dt-bindings/clock/r8a774a1-cpg-mssr.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Renesas Electronics Corp. + */ +#ifndef __DT_BINDINGS_CLOCK_R8A774A1_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A774A1_CPG_MSSR_H__ + +#include <dt-bindings/clock/renesas-cpg-mssr.h> + +/* r8a774a1 CPG Core Clocks */ +#define R8A774A1_CLK_Z 0 +#define R8A774A1_CLK_Z2 1 +#define R8A774A1_CLK_ZG 2 +#define R8A774A1_CLK_ZTR 3 +#define R8A774A1_CLK_ZTRD2 4 +#define R8A774A1_CLK_ZT 5 +#define R8A774A1_CLK_ZX 6 +#define R8A774A1_CLK_S0D1 7 +#define R8A774A1_CLK_S0D2 8 +#define R8A774A1_CLK_S0D3 9 +#define R8A774A1_CLK_S0D4 10 +#define R8A774A1_CLK_S0D6 11 +#define R8A774A1_CLK_S0D8 12 +#define R8A774A1_CLK_S0D12 13 +#define R8A774A1_CLK_S1D2 14 +#define R8A774A1_CLK_S1D4 15 +#define R8A774A1_CLK_S2D1 16 +#define R8A774A1_CLK_S2D2 17 +#define R8A774A1_CLK_S2D4 18 +#define R8A774A1_CLK_S3D1 19 +#define R8A774A1_CLK_S3D2 20 +#define R8A774A1_CLK_S3D4 21 +#define R8A774A1_CLK_LB 22 +#define R8A774A1_CLK_CL 23 +#define R8A774A1_CLK_ZB3 24 +#define R8A774A1_CLK_ZB3D2 25 +#define R8A774A1_CLK_ZB3D4 26 +#define R8A774A1_CLK_CR 27 +#define R8A774A1_CLK_CRD2 28 +#define R8A774A1_CLK_SD0H 29 +#define R8A774A1_CLK_SD0 30 +#define R8A774A1_CLK_SD1H 31 +#define R8A774A1_CLK_SD1 32 +#define R8A774A1_CLK_SD2H 33 +#define R8A774A1_CLK_SD2 34 +#define R8A774A1_CLK_SD3H 35 +#define R8A774A1_CLK_SD3 36 +#define R8A774A1_CLK_RPC 37 +#define R8A774A1_CLK_RPCD2 38 +#define R8A774A1_CLK_MSO 39 +#define R8A774A1_CLK_HDMI 40 +#define R8A774A1_CLK_CSI0 41 +#define R8A774A1_CLK_CP 42 +#define R8A774A1_CLK_CPEX 43 +#define R8A774A1_CLK_R 44 +#define R8A774A1_CLK_OSC 45 + +#endif /* __DT_BINDINGS_CLOCK_R8A774A1_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a774c0-cpg-mssr.h b/include/dt-bindings/clock/r8a774c0-cpg-mssr.h new file mode 100644 index 000000000000..8fe51b6aca28 --- /dev/null +++ b/include/dt-bindings/clock/r8a774c0-cpg-mssr.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Renesas Electronics Corp. 
+ */ +#ifndef __DT_BINDINGS_CLOCK_R8A774C0_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A774C0_CPG_MSSR_H__ + +#include <dt-bindings/clock/renesas-cpg-mssr.h> + +/* r8a774c0 CPG Core Clocks */ +#define R8A774C0_CLK_Z2 0 +#define R8A774C0_CLK_ZG 1 +#define R8A774C0_CLK_ZTR 2 +#define R8A774C0_CLK_ZT 3 +#define R8A774C0_CLK_ZX 4 +#define R8A774C0_CLK_S0D1 5 +#define R8A774C0_CLK_S0D3 6 +#define R8A774C0_CLK_S0D6 7 +#define R8A774C0_CLK_S0D12 8 +#define R8A774C0_CLK_S0D24 9 +#define R8A774C0_CLK_S1D1 10 +#define R8A774C0_CLK_S1D2 11 +#define R8A774C0_CLK_S1D4 12 +#define R8A774C0_CLK_S2D1 13 +#define R8A774C0_CLK_S2D2 14 +#define R8A774C0_CLK_S2D4 15 +#define R8A774C0_CLK_S3D1 16 +#define R8A774C0_CLK_S3D2 17 +#define R8A774C0_CLK_S3D4 18 +#define R8A774C0_CLK_S0D6C 19 +#define R8A774C0_CLK_S3D1C 20 +#define R8A774C0_CLK_S3D2C 21 +#define R8A774C0_CLK_S3D4C 22 +#define R8A774C0_CLK_LB 23 +#define R8A774C0_CLK_CL 24 +#define R8A774C0_CLK_ZB3 25 +#define R8A774C0_CLK_ZB3D2 26 +#define R8A774C0_CLK_CR 27 +#define R8A774C0_CLK_CRD2 28 +#define R8A774C0_CLK_SD0H 29 +#define R8A774C0_CLK_SD0 30 +#define R8A774C0_CLK_SD1H 31 +#define R8A774C0_CLK_SD1 32 +#define R8A774C0_CLK_SD3H 33 +#define R8A774C0_CLK_SD3 34 +#define R8A774C0_CLK_RPC 35 +#define R8A774C0_CLK_RPCD2 36 +#define R8A774C0_CLK_ZA2 37 +#define R8A774C0_CLK_ZA8 38 +#define R8A774C0_CLK_Z2D 39 +#define R8A774C0_CLK_MSO 40 +#define R8A774C0_CLK_R 41 +#define R8A774C0_CLK_OSC 42 +#define R8A774C0_CLK_LV0 43 +#define R8A774C0_CLK_LV1 44 +#define R8A774C0_CLK_CSI0 45 +#define R8A774C0_CLK_CP 46 +#define R8A774C0_CLK_CPEX 47 + +#endif /* __DT_BINDINGS_CLOCK_R8A774C0_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/r8a7790-cpg-mssr.h b/include/dt-bindings/clock/r8a7790-cpg-mssr.h index 1625b8bf3482..c5955b56b36d 100644 --- a/include/dt-bindings/clock/r8a7790-cpg-mssr.h +++ b/include/dt-bindings/clock/r8a7790-cpg-mssr.h @@ -1,10 +1,6 @@ -/* - * Copyright (C) 2015 Renesas Electronics Corp. +/* SPDX-License-Identifier: GPL-2.0+ * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * Copyright (C) 2015 Renesas Electronics Corp. */ #ifndef __DT_BINDINGS_CLOCK_R8A7790_CPG_MSSR_H__ diff --git a/include/dt-bindings/clock/r8a7791-cpg-mssr.h b/include/dt-bindings/clock/r8a7791-cpg-mssr.h index e8823410c01c..aadd06c566c0 100644 --- a/include/dt-bindings/clock/r8a7791-cpg-mssr.h +++ b/include/dt-bindings/clock/r8a7791-cpg-mssr.h @@ -1,10 +1,6 @@ -/* - * Copyright (C) 2015 Renesas Electronics Corp. +/* SPDX-License-Identifier: GPL-2.0+ * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * Copyright (C) 2015 Renesas Electronics Corp. */ #ifndef __DT_BINDINGS_CLOCK_R8A7791_CPG_MSSR_H__ diff --git a/include/dt-bindings/clock/r8a7792-cpg-mssr.h b/include/dt-bindings/clock/r8a7792-cpg-mssr.h index 72ce85cb2f94..829c44db0271 100644 --- a/include/dt-bindings/clock/r8a7792-cpg-mssr.h +++ b/include/dt-bindings/clock/r8a7792-cpg-mssr.h @@ -1,10 +1,6 @@ -/* - * Copyright (C) 2015 Renesas Electronics Corp. 
+/* SPDX-License-Identifier: GPL-2.0+ * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * Copyright (C) 2015 Renesas Electronics Corp. */ #ifndef __DT_BINDINGS_CLOCK_R8A7792_CPG_MSSR_H__ diff --git a/include/dt-bindings/clock/r8a7793-clock.h b/include/dt-bindings/clock/r8a7793-clock.h index 7318d45d4e7e..49c66d8ed178 100644 --- a/include/dt-bindings/clock/r8a7793-clock.h +++ b/include/dt-bindings/clock/r8a7793-clock.h @@ -1,16 +1,8 @@ -/* +/* SPDX-License-Identifier: GPL-2.0 + * * r8a7793 clock definition * * Copyright (C) 2014 Renesas Electronics Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef __DT_BINDINGS_CLOCK_R8A7793_H__ diff --git a/include/dt-bindings/clock/r8a7793-cpg-mssr.h b/include/dt-bindings/clock/r8a7793-cpg-mssr.h index 8809b0f62d61..d1ff646c31f2 100644 --- a/include/dt-bindings/clock/r8a7793-cpg-mssr.h +++ b/include/dt-bindings/clock/r8a7793-cpg-mssr.h @@ -1,10 +1,6 @@ -/* - * Copyright (C) 2015 Renesas Electronics Corp. +/* SPDX-License-Identifier: GPL-2.0+ * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * Copyright (C) 2015 Renesas Electronics Corp. */ #ifndef __DT_BINDINGS_CLOCK_R8A7793_CPG_MSSR_H__ diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h index 93e99c3ffc8d..649f005782d0 100644 --- a/include/dt-bindings/clock/r8a7794-clock.h +++ b/include/dt-bindings/clock/r8a7794-clock.h @@ -1,11 +1,7 @@ -/* +/* SPDX-License-Identifier: GPL-2.0+ + * * Copyright (C) 2014 Renesas Electronics Corporation * Copyright 2013 Ideas On Board SPRL - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef __DT_BINDINGS_CLOCK_R8A7794_H__ diff --git a/include/dt-bindings/clock/r8a7794-cpg-mssr.h b/include/dt-bindings/clock/r8a7794-cpg-mssr.h index 9d720311ae3a..6314e23b51af 100644 --- a/include/dt-bindings/clock/r8a7794-cpg-mssr.h +++ b/include/dt-bindings/clock/r8a7794-cpg-mssr.h @@ -1,10 +1,6 @@ -/* - * Copyright (C) 2015 Renesas Electronics Corp. +/* SPDX-License-Identifier: GPL-2.0+ * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * Copyright (C) 2015 Renesas Electronics Corp. 
*/ #ifndef __DT_BINDINGS_CLOCK_R8A7794_CPG_MSSR_H__ diff --git a/include/dt-bindings/clock/r8a7795-cpg-mssr.h b/include/dt-bindings/clock/r8a7795-cpg-mssr.h index f047eaf261f3..948389641565 100644 --- a/include/dt-bindings/clock/r8a7795-cpg-mssr.h +++ b/include/dt-bindings/clock/r8a7795-cpg-mssr.h @@ -1,10 +1,6 @@ -/* - * Copyright (C) 2015 Renesas Electronics Corp. +/* SPDX-License-Identifier: GPL-2.0+ * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * Copyright (C) 2015 Renesas Electronics Corp. */ #ifndef __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__ #define __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__ diff --git a/include/dt-bindings/clock/r8a7796-cpg-mssr.h b/include/dt-bindings/clock/r8a7796-cpg-mssr.h index 1e5942695f0d..e6087f2f7e3a 100644 --- a/include/dt-bindings/clock/r8a7796-cpg-mssr.h +++ b/include/dt-bindings/clock/r8a7796-cpg-mssr.h @@ -1,10 +1,6 @@ -/* - * Copyright (C) 2016 Renesas Electronics Corp. +/* SPDX-License-Identifier: GPL-2.0+ * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * Copyright (C) 2016 Renesas Electronics Corp. */ #ifndef __DT_BINDINGS_CLOCK_R8A7796_CPG_MSSR_H__ #define __DT_BINDINGS_CLOCK_R8A7796_CPG_MSSR_H__ diff --git a/include/dt-bindings/clock/r8a77970-cpg-mssr.h b/include/dt-bindings/clock/r8a77970-cpg-mssr.h index 4146395595b1..6145ebe66361 100644 --- a/include/dt-bindings/clock/r8a77970-cpg-mssr.h +++ b/include/dt-bindings/clock/r8a77970-cpg-mssr.h @@ -1,11 +1,7 @@ -/* +/* SPDX-License-Identifier: GPL-2.0+ + * * Copyright (C) 2016 Renesas Electronics Corp. * Copyright (C) 2017 Cogent Embedded, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef __DT_BINDINGS_CLOCK_R8A77970_CPG_MSSR_H__ #define __DT_BINDINGS_CLOCK_R8A77970_CPG_MSSR_H__ diff --git a/include/dt-bindings/clock/r8a77995-cpg-mssr.h b/include/dt-bindings/clock/r8a77995-cpg-mssr.h index 4e8ae3dee590..1eb11acfa563 100644 --- a/include/dt-bindings/clock/r8a77995-cpg-mssr.h +++ b/include/dt-bindings/clock/r8a77995-cpg-mssr.h @@ -1,10 +1,6 @@ -/* - * Copyright (C) 2017 Glider bvba +/* SPDX-License-Identifier: GPL-2.0+ * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * Copyright (C) 2017 Glider bvba */ #ifndef __DT_BINDINGS_CLOCK_R8A77995_CPG_MSSR_H__ #define __DT_BINDINGS_CLOCK_R8A77995_CPG_MSSR_H__ diff --git a/include/dt-bindings/clock/renesas-cpg-mssr.h b/include/dt-bindings/clock/renesas-cpg-mssr.h index 569a3cc33ffb..8169ad063f0a 100644 --- a/include/dt-bindings/clock/renesas-cpg-mssr.h +++ b/include/dt-bindings/clock/renesas-cpg-mssr.h @@ -1,10 +1,6 @@ -/* - * Copyright (C) 2015 Renesas Electronics Corp. 
+/* SPDX-License-Identifier: GPL-2.0+ * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * Copyright (C) 2015 Renesas Electronics Corp. */ #ifndef __DT_BINDINGS_CLOCK_RENESAS_CPG_MSSR_H__ #define __DT_BINDINGS_CLOCK_RENESAS_CPG_MSSR_H__ diff --git a/include/dt-bindings/clock/rk3188-cru-common.h b/include/dt-bindings/clock/rk3188-cru-common.h index b9462b7d3dfe..dc2101a634be 100644 --- a/include/dt-bindings/clock/rk3188-cru-common.h +++ b/include/dt-bindings/clock/rk3188-cru-common.h @@ -139,8 +139,9 @@ #define HCLK_CIF1 470 #define HCLK_VEPU 471 #define HCLK_VDPU 472 +#define HCLK_HDMI 473 -#define CLK_NR_CLKS (HCLK_VDPU + 1) +#define CLK_NR_CLKS (HCLK_HDMI + 1) /* soft-reset indices */ #define SRST_MCORE 2 diff --git a/include/dt-bindings/clock/samsung,s2mps11.h b/include/dt-bindings/clock/samsung,s2mps11.h index b903d7de27c9..5ece35d429ff 100644 --- a/include/dt-bindings/clock/samsung,s2mps11.h +++ b/include/dt-bindings/clock/samsung,s2mps11.h @@ -1,10 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2015 Markus Reichl * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Device Tree binding constants clocks for the Samsung S2MPS11 PMIC. */ diff --git a/include/dt-bindings/clock/samsung,s3c64xx-clock.h b/include/dt-bindings/clock/samsung,s3c64xx-clock.h index ad95c7f50090..19d233f37e2f 100644 --- a/include/dt-bindings/clock/samsung,s3c64xx-clock.h +++ b/include/dt-bindings/clock/samsung,s3c64xx-clock.h @@ -1,12 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2013 Tomasz Figa <tomasz.figa at gmail.com> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Device Tree binding constants for Samsung S3C64xx clock controller. -*/ + */ #ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S3C64XX_CLOCK_H #define _DT_BINDINGS_CLOCK_SAMSUNG_S3C64XX_CLOCK_H diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h index d66432c6e675..a8ac4cfcdcbc 100644 --- a/include/dt-bindings/clock/sun50i-a64-ccu.h +++ b/include/dt-bindings/clock/sun50i-a64-ccu.h @@ -43,6 +43,7 @@ #ifndef _DT_BINDINGS_CLK_SUN50I_A64_H_ #define _DT_BINDINGS_CLK_SUN50I_A64_H_ +#define CLK_PLL_VIDEO0 7 #define CLK_PLL_PERIPH0 11 #define CLK_BUS_MIPI_DSI 28 diff --git a/include/dt-bindings/reset/actions,s700-reset.h b/include/dt-bindings/reset/actions,s700-reset.h new file mode 100644 index 000000000000..5e3b16b8ef53 --- /dev/null +++ b/include/dt-bindings/reset/actions,s700-reset.h @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT) +// +// Device Tree binding constants for Actions Semi S700 Reset Management Unit +// +// Copyright (c) 2018 Linaro Ltd. 
+ +#ifndef __DT_BINDINGS_ACTIONS_S700_RESET_H +#define __DT_BINDINGS_ACTIONS_S700_RESET_H + +#define RESET_AUDIO 0 +#define RESET_CSI 1 +#define RESET_DE 2 +#define RESET_DSI 3 +#define RESET_GPIO 4 +#define RESET_I2C0 5 +#define RESET_I2C1 6 +#define RESET_I2C2 7 +#define RESET_I2C3 8 +#define RESET_KEY 9 +#define RESET_LCD0 10 +#define RESET_SI 11 +#define RESET_SPI0 12 +#define RESET_SPI1 13 +#define RESET_SPI2 14 +#define RESET_SPI3 15 +#define RESET_UART0 16 +#define RESET_UART1 17 +#define RESET_UART2 18 +#define RESET_UART3 19 +#define RESET_UART4 20 +#define RESET_UART5 21 +#define RESET_UART6 22 + +#endif /* __DT_BINDINGS_ACTIONS_S700_RESET_H */ diff --git a/include/dt-bindings/reset/actions,s900-reset.h b/include/dt-bindings/reset/actions,s900-reset.h new file mode 100644 index 000000000000..42c19d02e43b --- /dev/null +++ b/include/dt-bindings/reset/actions,s900-reset.h @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT) +// +// Device Tree binding constants for Actions Semi S900 Reset Management Unit +// +// Copyright (c) 2018 Linaro Ltd. + +#ifndef __DT_BINDINGS_ACTIONS_S900_RESET_H +#define __DT_BINDINGS_ACTIONS_S900_RESET_H + +#define RESET_CHIPID 0 +#define RESET_CPU_SCNT 1 +#define RESET_SRAMI 2 +#define RESET_DDR_CTL_PHY 3 +#define RESET_DMAC 4 +#define RESET_GPIO 5 +#define RESET_BISP_AXI 6 +#define RESET_CSI0 7 +#define RESET_CSI1 8 +#define RESET_DE 9 +#define RESET_DSI 10 +#define RESET_GPU3D_PA 11 +#define RESET_GPU3D_PB 12 +#define RESET_HDE 13 +#define RESET_I2C0 14 +#define RESET_I2C1 15 +#define RESET_I2C2 16 +#define RESET_I2C3 17 +#define RESET_I2C4 18 +#define RESET_I2C5 19 +#define RESET_IMX 20 +#define RESET_NANDC0 21 +#define RESET_NANDC1 22 +#define RESET_SD0 23 +#define RESET_SD1 24 +#define RESET_SD2 25 +#define RESET_SD3 26 +#define RESET_SPI0 27 +#define RESET_SPI1 28 +#define RESET_SPI2 29 +#define RESET_SPI3 30 +#define RESET_UART0 31 +#define RESET_UART1 32 +#define RESET_UART2 33 +#define RESET_UART3 34 +#define RESET_UART4 35 +#define RESET_UART5 36 +#define RESET_UART6 37 +#define RESET_HDMI 38 +#define RESET_LVDS 39 +#define RESET_EDP 40 +#define RESET_USB2HUB 41 +#define RESET_USB2HSIC 42 +#define RESET_USB3 43 +#define RESET_PCM1 44 +#define RESET_AUDIO 45 +#define RESET_PCM0 46 +#define RESET_SE 47 +#define RESET_GIC 48 +#define RESET_DDR_CTL_PHY_AXI 49 +#define RESET_CMU_DDR 50 +#define RESET_DMM 51 +#define RESET_HDCP2TX 52 +#define RESET_ETHERNET 53 + +#endif /* __DT_BINDINGS_ACTIONS_S900_RESET_H */ diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h index e0a9c2368872..9ce2f0fae57e 100644 --- a/include/keys/asymmetric-subtype.h +++ b/include/keys/asymmetric-subtype.h @@ -17,6 +17,8 @@ #include <linux/seq_file.h> #include <keys/asymmetric-type.h> +struct kernel_pkey_query; +struct kernel_pkey_params; struct public_key_signature; /* @@ -34,6 +36,13 @@ struct asymmetric_key_subtype { /* Destroy a key of this subtype */ void (*destroy)(void *payload_crypto, void *payload_auth); + int (*query)(const struct kernel_pkey_params *params, + struct kernel_pkey_query *info); + + /* Encrypt/decrypt/sign data */ + int (*eds_op)(struct kernel_pkey_params *params, + const void *in, void *out); + /* Verify the signature on a key of this subtype (optional) */ int (*verify_signature)(const struct key *key, const struct public_key_signature *sig); diff --git a/security/keys/trusted.h b/include/keys/trusted.h index 8d5fe9eafb22..adbcb6817826 100644 --- a/security/keys/trusted.h +++ b/include/keys/trusted.h @@ 
-3,7 +3,7 @@ #define __TRUSTED_KEY_H /* implementation specific TPM constants */ -#define MAX_BUF_SIZE 512 +#define MAX_BUF_SIZE 1024 #define TPM_GETRANDOM_SIZE 14 #define TPM_OSAP_SIZE 36 #define TPM_OIAP_SIZE 10 @@ -36,6 +36,18 @@ enum { SRK_keytype = 4 }; +int TSS_authhmac(unsigned char *digest, const unsigned char *key, + unsigned int keylen, unsigned char *h1, + unsigned char *h2, unsigned char h3, ...); +int TSS_checkhmac1(unsigned char *buffer, + const uint32_t command, + const unsigned char *ononce, + const unsigned char *key, + unsigned int keylen, ...); + +int trusted_tpm_send(unsigned char *cmd, size_t buflen); +int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce); + #define TPM_DEBUG 0 #if TPM_DEBUG diff --git a/include/linux/adxl.h b/include/linux/adxl.h index 2a629acb4c3f..2d29f55923e3 100644 --- a/include/linux/adxl.h +++ b/include/linux/adxl.h @@ -7,7 +7,12 @@ #ifndef _LINUX_ADXL_H #define _LINUX_ADXL_H +#ifdef CONFIG_ACPI_ADXL const char * const *adxl_get_component_names(void); int adxl_decode(u64 addr, u64 component_values[]); +#else +static inline const char * const *adxl_get_component_names(void) { return NULL; } +static inline int adxl_decode(u64 addr, u64 component_values[]) { return -EOPNOTSUPP; } +#endif #endif /* _LINUX_ADXL_H */ diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 2c9756bd9c4c..b2488055fd1d 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -62,13 +62,19 @@ /* Error Codes */ enum virtchnl_status_code { VIRTCHNL_STATUS_SUCCESS = 0, - VIRTCHNL_ERR_PARAM = -5, + VIRTCHNL_STATUS_ERR_PARAM = -5, + VIRTCHNL_STATUS_ERR_NO_MEMORY = -18, VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38, VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39, VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40, - VIRTCHNL_STATUS_NOT_SUPPORTED = -64, + VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64, }; +/* Backward compatibility */ +#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM +#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED + #define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1 #define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2 #define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3 @@ -831,7 +837,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, case VIRTCHNL_OP_EVENT: case VIRTCHNL_OP_UNKNOWN: default: - return VIRTCHNL_ERR_PARAM; + return VIRTCHNL_STATUS_ERR_PARAM; } /* few more checks */ if (err_msg_format || valid_len != msglen) diff --git a/include/linux/bio.h b/include/linux/bio.h index b47c7f716731..056fb627edb3 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -503,31 +503,23 @@ do { \ disk_devt((bio)->bi_disk) #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) -int bio_associate_blkg_from_page(struct bio *bio, struct page *page); +int bio_associate_blkcg_from_page(struct bio *bio, struct page *page); #else -static inline int bio_associate_blkg_from_page(struct bio *bio, - struct page *page) { return 0; } +static inline int bio_associate_blkcg_from_page(struct bio *bio, + struct page *page) { return 0; } #endif #ifdef CONFIG_BLK_CGROUP +int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg); -int bio_associate_blkg_from_css(struct bio *bio, - struct cgroup_subsys_state *css); -int bio_associate_create_blkg(struct request_queue *q, struct bio *bio); -int bio_reassociate_blkg(struct request_queue *q, struct bio *bio); void 
bio_disassociate_task(struct bio *bio); -void bio_clone_blkg_association(struct bio *dst, struct bio *src); +void bio_clone_blkcg_association(struct bio *dst, struct bio *src); #else /* CONFIG_BLK_CGROUP */ -static inline int bio_associate_blkg_from_css(struct bio *bio, - struct cgroup_subsys_state *css) -{ return 0; } -static inline int bio_associate_create_blkg(struct request_queue *q, - struct bio *bio) { return 0; } -static inline int bio_reassociate_blkg(struct request_queue *q, struct bio *bio) -{ return 0; } +static inline int bio_associate_blkcg(struct bio *bio, + struct cgroup_subsys_state *blkcg_css) { return 0; } static inline void bio_disassociate_task(struct bio *bio) { } -static inline void bio_clone_blkg_association(struct bio *dst, - struct bio *src) { } +static inline void bio_clone_blkcg_association(struct bio *dst, + struct bio *src) { } #endif /* CONFIG_BLK_CGROUP */ #ifdef CONFIG_HIGHMEM diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 7ddb1349394d..705f7c442691 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -236,33 +236,33 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr, #ifdef __KERNEL__ #ifndef set_mask_bits -#define set_mask_bits(ptr, _mask, _bits) \ +#define set_mask_bits(ptr, mask, bits) \ ({ \ - const typeof(*ptr) mask = (_mask), bits = (_bits); \ - typeof(*ptr) old, new; \ + const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \ + typeof(*(ptr)) old__, new__; \ \ do { \ - old = READ_ONCE(*ptr); \ - new = (old & ~mask) | bits; \ - } while (cmpxchg(ptr, old, new) != old); \ + old__ = READ_ONCE(*(ptr)); \ + new__ = (old__ & ~mask__) | bits__; \ + } while (cmpxchg(ptr, old__, new__) != old__); \ \ - new; \ + new__; \ }) #endif #ifndef bit_clear_unless -#define bit_clear_unless(ptr, _clear, _test) \ +#define bit_clear_unless(ptr, clear, test) \ ({ \ - const typeof(*ptr) clear = (_clear), test = (_test); \ - typeof(*ptr) old, new; \ + const typeof(*(ptr)) clear__ = (clear), test__ = (test);\ + typeof(*(ptr)) old__, new__; \ \ do { \ - old = READ_ONCE(*ptr); \ - new = old & ~clear; \ - } while (!(old & test) && \ - cmpxchg(ptr, old, new) != old); \ + old__ = READ_ONCE(*(ptr)); \ + new__ = old__ & ~clear__; \ + } while (!(old__ & test__) && \ + cmpxchg(ptr, old__, new__) != old__); \ \ - !(old & test); \ + !(old__ & test__); \ }) #endif diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 1e76ceebeb5d..6d766a19f2bb 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -126,7 +126,7 @@ struct blkcg_gq { struct request_list rl; /* reference count */ - struct percpu_ref refcnt; + atomic_t refcnt; /* is this blkg online? protected by both blkcg and q locks */ bool online; @@ -184,8 +184,6 @@ extern struct cgroup_subsys_state * const blkcg_root_css; struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, struct request_queue *q, bool update_hint); -struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, - struct request_queue *q); struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, struct request_queue *q); int blkcg_init_queue(struct request_queue *q); @@ -232,59 +230,22 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, char *input, struct blkg_conf_ctx *ctx); void blkg_conf_finish(struct blkg_conf_ctx *ctx); -/** - * blkcg_css - find the current css - * - * Find the css associated with either the kthread or the current task. 
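For illustration, a minimal sketch of how the reworked set_mask_bits() above might be used; the flag word and the MY_STATE_* values are hypothetical:

#include <linux/bitops.h>

#define MY_STATE_MASK	0x0fUL	/* hypothetical: low nibble holds a state */
#define MY_STATE_READY	0x03UL

static unsigned long my_flags;

static void my_set_ready(void)
{
	/* new = (old & ~mask) | bits, retried with cmpxchg() until it sticks */
	set_mask_bits(&my_flags, MY_STATE_MASK, MY_STATE_READY);
}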
- * This may return a dying css, so it is up to the caller to use tryget logic - * to confirm it is alive and well. - */ -static inline struct cgroup_subsys_state *blkcg_css(void) -{ - struct cgroup_subsys_state *css; - - css = kthread_blkcg(); - if (css) - return css; - return task_css(current, io_cgrp_id); -} static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css) { return css ? container_of(css, struct blkcg, css) : NULL; } -/** - * __bio_blkcg - internal version of bio_blkcg for bfq and cfq - * - * DO NOT USE. - * There is a flaw using this version of the function. In particular, this was - * used in a broken paradigm where association was called on the given css. It - * is possible though that the returned css from task_css() is in the process - * of dying due to migration of the current task. So it is improper to assume - * *_get() is going to succeed. Both BFQ and CFQ rely on this logic and will - * take additional work to handle more gracefully. - */ -static inline struct blkcg *__bio_blkcg(struct bio *bio) -{ - if (bio && bio->bi_blkg) - return bio->bi_blkg->blkcg; - return css_to_blkcg(blkcg_css()); -} - -/** - * bio_blkcg - grab the blkcg associated with a bio - * @bio: target bio - * - * This returns the blkcg associated with a bio, NULL if not associated. - * Callers are expected to either handle NULL or know association has been - * done prior to calling this. - */ static inline struct blkcg *bio_blkcg(struct bio *bio) { - if (bio && bio->bi_blkg) - return bio->bi_blkg->blkcg; - return NULL; + struct cgroup_subsys_state *css; + + if (bio && bio->bi_css) + return css_to_blkcg(bio->bi_css); + css = kthread_blkcg(); + if (css) + return css_to_blkcg(css); + return css_to_blkcg(task_css(current, io_cgrp_id)); } static inline bool blk_cgroup_congested(void) @@ -490,35 +451,26 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) */ static inline void blkg_get(struct blkcg_gq *blkg) { - percpu_ref_get(&blkg->refcnt); + WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); + atomic_inc(&blkg->refcnt); } /** - * blkg_tryget - try and get a blkg reference + * blkg_try_get - try and get a blkg reference * @blkg: blkg to get * * This is for use when doing an RCU lookup of the blkg. We may be in the midst * of freeing this blkg, so we can only use it if the refcnt is not zero. */ -static inline bool blkg_tryget(struct blkcg_gq *blkg) +static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg) { - return percpu_ref_tryget(&blkg->refcnt); + if (atomic_inc_not_zero(&blkg->refcnt)) + return blkg; + return NULL; } -/** - * blkg_tryget_closest - try and get a blkg ref on the closet blkg - * @blkg: blkg to get - * - * This walks up the blkg tree to find the closest non-dying blkg and returns - * the blkg that it did association with as it may not be the passed in blkg. 
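As a sketch of the lookup pattern the atomic_t-based helpers support, a hypothetical caller might take a reference under RCU as follows:

#include <linux/blk-cgroup.h>

/* Illustrative helper, assuming the caller pairs a non-NULL result with blkg_put(). */
static struct blkcg_gq *my_blkg_get(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		blkg = blkg_try_get(blkg);	/* NULL once the refcnt has hit zero */
	rcu_read_unlock();

	return blkg;
}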
- */ -static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg) -{ - while (!percpu_ref_tryget(&blkg->refcnt)) - blkg = blkg->parent; - return blkg; -} +void __blkg_release_rcu(struct rcu_head *rcu); /** * blkg_put - put a blkg reference @@ -526,7 +478,9 @@ static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg) */ static inline void blkg_put(struct blkcg_gq *blkg) { - percpu_ref_put(&blkg->refcnt); + WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); + if (atomic_dec_and_test(&blkg->refcnt)) + call_rcu(&blkg->rcu_head, __blkg_release_rcu); } /** @@ -579,36 +533,25 @@ static inline struct request_list *blk_get_rl(struct request_queue *q, rcu_read_lock(); - if (bio && bio->bi_blkg) { - blkcg = bio->bi_blkg->blkcg; - if (blkcg == &blkcg_root) - goto rl_use_root; - - blkg_get(bio->bi_blkg); - rcu_read_unlock(); - return &bio->bi_blkg->rl; - } + blkcg = bio_blkcg(bio); - blkcg = css_to_blkcg(blkcg_css()); + /* bypass blkg lookup and use @q->root_rl directly for root */ if (blkcg == &blkcg_root) - goto rl_use_root; + goto root_rl; + /* + * Try to use blkg->rl. blkg lookup may fail under memory pressure + * or if either the blkcg or queue is going away. Fall back to + * root_rl in such cases. + */ blkg = blkg_lookup(blkcg, q); if (unlikely(!blkg)) - blkg = __blkg_lookup_create(blkcg, q); - - if (blkg->blkcg == &blkcg_root || !blkg_tryget(blkg)) - goto rl_use_root; + goto root_rl; + blkg_get(blkg); rcu_read_unlock(); return &blkg->rl; - - /* - * Each blkg has its own request_list, however, the root blkcg - * uses the request_queue's root_rl. This is to avoid most - * overhead for the root blkcg. - */ -rl_use_root: +root_rl: rcu_read_unlock(); return &q->root_rl; } @@ -854,26 +797,32 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg struct bio *bio) { return false; } #endif - -static inline void blkcg_bio_issue_init(struct bio *bio) -{ - bio_issue_init(&bio->bi_issue, bio_sectors(bio)); -} - static inline bool blkcg_bio_issue_check(struct request_queue *q, struct bio *bio) { + struct blkcg *blkcg; struct blkcg_gq *blkg; bool throtl = false; rcu_read_lock(); + blkcg = bio_blkcg(bio); + + /* associate blkcg if bio hasn't attached one */ + bio_associate_blkcg(bio, &blkcg->css); - bio_associate_create_blkg(q, bio); - blkg = bio->bi_blkg; + blkg = blkg_lookup(blkcg, q); + if (unlikely(!blkg)) { + spin_lock_irq(q->queue_lock); + blkg = blkg_lookup_create(blkcg, q); + if (IS_ERR(blkg)) + blkg = NULL; + spin_unlock_irq(q->queue_lock); + } throtl = blk_throtl_bio(q, blkg, bio); if (!throtl) { + blkg = blkg ?: q->root_blkg; /* * If the bio is flagged with BIO_QUEUE_ENTERED it means this * is a split bio and we would have already accounted for the @@ -885,8 +834,6 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1); } - blkcg_bio_issue_init(bio); - rcu_read_unlock(); return !throtl; } @@ -983,7 +930,6 @@ static inline int blkcg_activate_policy(struct request_queue *q, static inline void blkcg_deactivate_policy(struct request_queue *q, const struct blkcg_policy *pol) { } -static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; } static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, @@ -999,7 +945,6 @@ static inline void blk_put_rl(struct request_list *rl) { } static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { } static inline struct request_list 
*blk_rq_rl(struct request *rq) { return &rq->q->root_rl; } -static inline void blkcg_bio_issue_init(struct bio *bio) { } static inline bool blkcg_bio_issue_check(struct request_queue *q, struct bio *bio) { return true; } diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 093a818c5b68..1dcf652ba0aa 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -178,6 +178,7 @@ struct bio { * release. Read comment on top of bio_associate_current(). */ struct io_context *bi_ioc; + struct cgroup_subsys_state *bi_css; struct blkcg_gq *bi_blkg; struct bio_issue bi_issue; #endif diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 9e8056ec20fa..d93e89761a8b 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -51,6 +51,9 @@ struct bpf_reg_state { * PTR_TO_MAP_VALUE_OR_NULL */ struct bpf_map *map_ptr; + + /* Max size from any of the above. */ + unsigned long raw; }; /* Fixed part of pointer offset, pointer types only */ s32 off; diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 49c93b9308d7..68bb09c29ce8 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -81,7 +81,13 @@ struct ceph_options { #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) #define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) -#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024) + +/* + * Handle the largest possible rbd object in one message. + * There is no limit on the size of cephfs objects, but it has to obey + * rsize and wsize mount options anyway. + */ +#define CEPH_MSG_MAX_DATA_LEN (32*1024*1024) #define CEPH_AUTH_NAME_DEFAULT "guest" diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index fc2b4491ee0a..800a2128d411 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -82,22 +82,6 @@ enum ceph_msg_data_type { CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */ }; -static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type) -{ - switch (type) { - case CEPH_MSG_DATA_NONE: - case CEPH_MSG_DATA_PAGES: - case CEPH_MSG_DATA_PAGELIST: -#ifdef CONFIG_BLOCK - case CEPH_MSG_DATA_BIO: -#endif /* CONFIG_BLOCK */ - case CEPH_MSG_DATA_BVECS: - return true; - default: - return false; - } -} - #ifdef CONFIG_BLOCK struct ceph_bio_iter { @@ -181,7 +165,6 @@ struct ceph_bvec_iter { } while (0) struct ceph_msg_data { - struct list_head links; /* ceph_msg->data */ enum ceph_msg_data_type type; union { #ifdef CONFIG_BLOCK @@ -202,7 +185,6 @@ struct ceph_msg_data { struct ceph_msg_data_cursor { size_t total_resid; /* across all data items */ - struct list_head *data_head; /* = &ceph_msg->data */ struct ceph_msg_data *data; /* current data item */ size_t resid; /* bytes not yet consumed */ @@ -240,7 +222,9 @@ struct ceph_msg { struct ceph_buffer *middle; size_t data_length; - struct list_head data; + struct ceph_msg_data *data; + int num_data_items; + int max_data_items; struct ceph_msg_data_cursor cursor; struct ceph_connection *con; @@ -381,6 +365,8 @@ void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos, void ceph_msg_data_add_bvecs(struct ceph_msg *msg, struct ceph_bvec_iter *bvec_pos); +struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items, + gfp_t flags, bool can_fail); extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, bool can_fail); diff --git a/include/linux/ceph/msgpool.h b/include/linux/ceph/msgpool.h index 76c98a512758..729cdf700eae 100644 --- 
a/include/linux/ceph/msgpool.h +++ b/include/linux/ceph/msgpool.h @@ -13,14 +13,15 @@ struct ceph_msgpool { mempool_t *pool; int type; /* preallocated message type */ int front_len; /* preallocated payload size */ + int max_data_items; }; -extern int ceph_msgpool_init(struct ceph_msgpool *pool, int type, - int front_len, int size, bool blocking, - const char *name); +int ceph_msgpool_init(struct ceph_msgpool *pool, int type, + int front_len, int max_data_items, int size, + const char *name); extern void ceph_msgpool_destroy(struct ceph_msgpool *pool); -extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *, - int front_len); +struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len, + int max_data_items); extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *); #endif diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 02096da01845..7a2af5034278 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -136,6 +136,13 @@ struct ceph_osd_req_op { u64 expected_object_size; u64 expected_write_size; } alloc_hint; + struct { + u64 snapid; + u64 src_version; + u8 flags; + u32 src_fadvise_flags; + struct ceph_osd_data osd_data; + } copy_from; }; }; @@ -444,9 +451,8 @@ extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages); -extern int osd_req_op_cls_init(struct ceph_osd_request *osd_req, - unsigned int which, u16 opcode, - const char *class, const char *method); +int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, + const char *class, const char *method); extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, const char *name, const void *value, size_t size, u8 cmp_op, u8 cmp_mode); @@ -511,6 +517,16 @@ extern int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct timespec64 *mtime, struct page **pages, int nr_pages); +int ceph_osdc_copy_from(struct ceph_osd_client *osdc, + u64 src_snapid, u64 src_version, + struct ceph_object_id *src_oid, + struct ceph_object_locator *src_oloc, + u32 src_fadvise_flags, + struct ceph_object_id *dst_oid, + struct ceph_object_locator *dst_oloc, + u32 dst_fadvise_flags, + u8 copy_from_flags); + /* watch/notify */ struct ceph_osd_linger_request * ceph_osdc_watch(struct ceph_osd_client *osdc, diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h index d0223364349f..5dead8486fd8 100644 --- a/include/linux/ceph/pagelist.h +++ b/include/linux/ceph/pagelist.h @@ -23,16 +23,7 @@ struct ceph_pagelist_cursor { size_t room; /* room remaining to reset to */ }; -static inline void ceph_pagelist_init(struct ceph_pagelist *pl) -{ - INIT_LIST_HEAD(&pl->head); - pl->mapped_tail = NULL; - pl->length = 0; - pl->room = 0; - INIT_LIST_HEAD(&pl->free_list); - pl->num_pages_free = 0; - refcount_set(&pl->refcnt, 1); -} +struct ceph_pagelist *ceph_pagelist_alloc(gfp_t gfp_flags); extern void ceph_pagelist_release(struct ceph_pagelist *pl); diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h index f1988387c5ad..3eb0e55665b4 100644 --- a/include/linux/ceph/rados.h +++ b/include/linux/ceph/rados.h @@ -410,6 +410,14 @@ enum { enum { CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */ CEPH_OSD_OP_FLAG_FAILOK = 2, /* continue despite failure */ + CEPH_OSD_OP_FLAG_FADVISE_RANDOM = 0x4, /* the op is random */ + CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL = 0x8, /* the op is sequential */ + 
CEPH_OSD_OP_FLAG_FADVISE_WILLNEED = 0x10,/* data will be accessed in + the near future */ + CEPH_OSD_OP_FLAG_FADVISE_DONTNEED = 0x20,/* data will not be accessed + in the near future */ + CEPH_OSD_OP_FLAG_FADVISE_NOCACHE = 0x40,/* data will be accessed only + once by this client */ }; #define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/ @@ -432,6 +440,15 @@ enum { }; enum { + CEPH_OSD_COPY_FROM_FLAG_FLUSH = 1, /* part of a flush operation */ + CEPH_OSD_COPY_FROM_FLAG_IGNORE_OVERLAY = 2, /* ignore pool overlay */ + CEPH_OSD_COPY_FROM_FLAG_IGNORE_CACHE = 4, /* ignore osd cache logic */ + CEPH_OSD_COPY_FROM_FLAG_MAP_SNAP_CLONE = 8, /* map snap direct to + * cloneid */ + CEPH_OSD_COPY_FROM_FLAG_RWORDERED = 16, /* order with write */ +}; + +enum { CEPH_OSD_WATCH_OP_UNWATCH = 0, CEPH_OSD_WATCH_OP_LEGACY_WATCH = 1, /* note: use only ODD ids to prevent pre-giant code from @@ -497,6 +514,17 @@ struct ceph_osd_op { __le64 expected_object_size; __le64 expected_write_size; } __attribute__ ((packed)) alloc_hint; + struct { + __le64 snapid; + __le64 src_version; + __u8 flags; /* CEPH_OSD_COPY_FROM_FLAG_* */ + /* + * CEPH_OSD_OP_FLAG_FADVISE_*: fadvise flags + * for src object, flags for dest object are in + * ceph_osd_op::flags. + */ + __le32 src_fadvise_flags; + } __attribute__ ((packed)) copy_from; }; __le32 payload_len; } __attribute__ ((packed)); diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 9968332cceed..9d12757a65b0 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -93,8 +93,6 @@ extern struct css_set init_css_set; bool css_has_online_children(struct cgroup_subsys_state *css); struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss); -struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup, - struct cgroup_subsys *ss); struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, struct cgroup_subsys *ss); struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 08b1aa70a38d..60c51871b04b 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -119,6 +119,11 @@ struct clk_duty { * Called with enable_lock held. This function must not * sleep. * + * @save_context: Save the context of the clock in prepration for poweroff. + * + * @restore_context: Restore the context of the clock after a restoration + * of power. + * * @recalc_rate Recalculate the rate of this clock, by querying hardware. The * parent rate is an input parameter. It is up to the caller to * ensure that the prepare_mutex is held across this call. 
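A minimal sketch of a clock driver wiring up the new callbacks; the register layout and the my_gate_* names are hypothetical:

#include <linux/clk-provider.h>
#include <linux/io.h>

struct my_gate_clk {
	struct clk_hw hw;
	void __iomem *reg;
	u32 saved;		/* register image kept across power loss */
};

static int my_gate_save_context(struct clk_hw *hw)
{
	struct my_gate_clk *gate = container_of(hw, struct my_gate_clk, hw);

	gate->saved = readl(gate->reg);
	return 0;
}

static void my_gate_restore_context(struct clk_hw *hw)
{
	struct my_gate_clk *gate = container_of(hw, struct my_gate_clk, hw);

	writel(gate->saved, gate->reg);
}

static const struct clk_ops my_gate_ops = {
	/* .enable/.disable/.recalc_rate etc. as before */
	.save_context	 = my_gate_save_context,
	.restore_context = my_gate_restore_context,
};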
@@ -223,6 +228,8 @@ struct clk_ops { void (*disable)(struct clk_hw *hw); int (*is_enabled)(struct clk_hw *hw); void (*disable_unused)(struct clk_hw *hw); + int (*save_context)(struct clk_hw *hw); + void (*restore_context)(struct clk_hw *hw); unsigned long (*recalc_rate)(struct clk_hw *hw, unsigned long parent_rate); long (*round_rate)(struct clk_hw *hw, unsigned long rate, @@ -1011,5 +1018,7 @@ static inline void clk_writel(u32 val, u32 __iomem *reg) #endif /* platform dependent I/O accessors */ +void clk_gate_restore_context(struct clk_hw *hw); + #endif /* CONFIG_COMMON_CLK */ #endif /* CLK_PROVIDER_H */ diff --git a/include/linux/clk.h b/include/linux/clk.h index 4f750c481b82..a7773b5c0b9f 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -312,7 +312,26 @@ struct clk *clk_get(struct device *dev, const char *id); */ int __must_check clk_bulk_get(struct device *dev, int num_clks, struct clk_bulk_data *clks); - +/** + * clk_bulk_get_all - lookup and obtain all available references to clock + * producer. + * @dev: device for clock "consumer" + * @clks: pointer to the clk_bulk_data table of consumer + * + * This helper function allows drivers to get all clk consumers in one + * operation. If any of the clk cannot be acquired then any clks + * that were obtained will be freed before returning to the caller. + * + * Returns a positive value for the number of clocks obtained while the + * clock references are stored in the clk_bulk_data table in @clks field. + * Returns 0 if there're none and a negative value if something failed. + * + * Drivers must assume that the clock source is not enabled. + * + * clk_bulk_get should not be called from within interrupt context. + */ +int __must_check clk_bulk_get_all(struct device *dev, + struct clk_bulk_data **clks); /** * devm_clk_bulk_get - managed get multiple clk consumers * @dev: device for clock "consumer" @@ -327,6 +346,22 @@ int __must_check clk_bulk_get(struct device *dev, int num_clks, */ int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, struct clk_bulk_data *clks); +/** + * devm_clk_bulk_get_all - managed get multiple clk consumers + * @dev: device for clock "consumer" + * @clks: pointer to the clk_bulk_data table of consumer + * + * Returns a positive value for the number of clocks obtained while the + * clock references are stored in the clk_bulk_data table in @clks field. + * Returns 0 if there're none and a negative value if something failed. + * + * This helper function allows drivers to get several clk + * consumers in one operation with management, the clks will + * automatically be freed when the device is unbound. + */ + +int __must_check devm_clk_bulk_get_all(struct device *dev, + struct clk_bulk_data **clks); /** * devm_clk_get - lookup and obtain a managed reference to a clock producer. @@ -488,6 +523,19 @@ void clk_put(struct clk *clk); void clk_bulk_put(int num_clks, struct clk_bulk_data *clks); /** + * clk_bulk_put_all - "free" all the clock source + * @num_clks: the number of clk_bulk_data + * @clks: the clk_bulk_data table of consumer + * + * Note: drivers must ensure that all clk_bulk_enable calls made on this + * clock source are balanced by clk_bulk_disable calls prior to calling + * this function. + * + * clk_bulk_put_all should not be called from within interrupt context. 
+ */ +void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks); + +/** * devm_clk_put - "free" a managed clock source * @dev: device used to acquire the clock * @clk: clock source acquired with devm_clk_get() @@ -629,6 +677,23 @@ struct clk *clk_get_parent(struct clk *clk); */ struct clk *clk_get_sys(const char *dev_id, const char *con_id); +/** + * clk_save_context - save clock context for poweroff + * + * Saves the context of the clock register for powerstates in which the + * contents of the registers will be lost. Occurs deep within the suspend + * code so locking is not necessary. + */ +int clk_save_context(void); + +/** + * clk_restore_context - restore clock context after poweroff + * + * This occurs with all clocks enabled. Occurs deep within the resume code + * so locking is not necessary. + */ +void clk_restore_context(void); + #else /* !CONFIG_HAVE_CLK */ static inline struct clk *clk_get(struct device *dev, const char *id) @@ -642,6 +707,12 @@ static inline int __must_check clk_bulk_get(struct device *dev, int num_clks, return 0; } +static inline int __must_check clk_bulk_get_all(struct device *dev, + struct clk_bulk_data **clks) +{ + return 0; +} + static inline struct clk *devm_clk_get(struct device *dev, const char *id) { return NULL; @@ -653,6 +724,13 @@ static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clk return 0; } +static inline int __must_check devm_clk_bulk_get_all(struct device *dev, + struct clk_bulk_data **clks) +{ + + return 0; +} + static inline struct clk *devm_get_clk_from_child(struct device *dev, struct device_node *np, const char *con_id) { @@ -663,6 +741,8 @@ static inline void clk_put(struct clk *clk) {} static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {} +static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {} + static inline void devm_clk_put(struct device *dev, struct clk *clk) {} @@ -728,6 +808,14 @@ static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id) { return NULL; } + +static inline int clk_save_context(void) +{ + return 0; +} + +static inline void clk_restore_context(void) {} + #endif /* clk_prepare_enable helps cases using clk_enable in non-atomic context. */ diff --git a/include/linux/clk/renesas.h b/include/linux/clk/renesas.h index 9ebf1f8243bb..0ebbe2f0b45e 100644 --- a/include/linux/clk/renesas.h +++ b/include/linux/clk/renesas.h @@ -1,14 +1,10 @@ -/* +/* SPDX-License-Identifier: GPL-2.0+ + * * Copyright 2013 Ideas On Board SPRL * Copyright 2013, 2014 Horms Solutions Ltd. * * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * Contact: Simon Horman <horms@verge.net.au> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
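A sketch of the consumer pattern the new bulk helpers are meant for; the probe function is hypothetical, and clk_bulk_prepare_enable() is a pre-existing helper not shown in this hunk:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct clk_bulk_data *clks;
	int num_clks, ret;

	/* One call obtains every clock listed for the device; devm unwinds it. */
	num_clks = devm_clk_bulk_get_all(&pdev->dev, &clks);
	if (num_clks < 0)
		return num_clks;

	ret = clk_bulk_prepare_enable(num_clks, clks);
	if (ret)
		return ret;

	return 0;
}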
*/ #ifndef __LINUX_CLK_RENESAS_H_ diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index a8faa38b1ed6..eacc5df57b99 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -159,6 +159,7 @@ struct clk_hw_omap { const char *clkdm_name; struct clockdomain *clkdm; const struct clk_hw_omap_ops *ops; + u32 context; }; /* @@ -290,9 +291,15 @@ struct ti_clk_features { #define TI_CLK_DPLL4_DENY_REPROGRAM BIT(1) #define TI_CLK_DISABLE_CLKDM_CONTROL BIT(2) #define TI_CLK_ERRATA_I810 BIT(3) +#define TI_CLK_CLKCTRL_COMPAT BIT(4) void ti_clk_setup_features(struct ti_clk_features *features); const struct ti_clk_features *ti_clk_get_features(void); +int omap3_noncore_dpll_save_context(struct clk_hw *hw); +void omap3_noncore_dpll_restore_context(struct clk_hw *hw); + +int omap3_core_dpll_save_context(struct clk_hw *hw); +void omap3_core_dpll_restore_context(struct clk_hw *hw); extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll; diff --git a/include/linux/compat.h b/include/linux/compat.h index 06e77473f175..88720b443cd6 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -1032,9 +1032,9 @@ int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz, #else /* !CONFIG_COMPAT */ #define is_compat_task() (0) -#ifndef in_compat_syscall +/* Ensure no one redefines in_compat_syscall() under !CONFIG_COMPAT */ +#define in_compat_syscall in_compat_syscall static inline bool in_compat_syscall(void) { return false; } -#endif #endif /* CONFIG_COMPAT */ diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index b1ce500fe8b3..3e7dafb3ea80 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -21,8 +21,6 @@ #define __SANITIZE_ADDRESS__ #endif -#define __no_sanitize_address __attribute__((no_sanitize("address"))) - /* * Not all versions of clang implement the the type-generic versions * of the builtin overflow checkers. Fortunately, clang implements @@ -41,6 +39,3 @@ * compilers, like ICC. */ #define barrier() __asm__ __volatile__("" : : : "memory") -#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) -#define __assume_aligned(a, ...) 
\ - __attribute__((__assume_aligned__(a, ## __VA_ARGS__))) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 90ddfefb6c2b..c0f5db3a9621 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -68,31 +68,20 @@ */ #define uninitialized_var(x) x = x -#ifdef __CHECKER__ -#define __must_be_array(a) 0 -#else -/* &a[0] degrades to a pointer: a different type from an array */ -#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) -#endif - #ifdef RETPOLINE -#define __noretpoline __attribute__((indirect_branch("keep"))) +#define __noretpoline __attribute__((__indirect_branch__("keep"))) #endif #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) -#define __optimize(level) __attribute__((__optimize__(level))) - #define __compiletime_object_size(obj) __builtin_object_size(obj, 0) -#ifndef __CHECKER__ -#define __compiletime_warning(message) __attribute__((warning(message))) -#define __compiletime_error(message) __attribute__((error(message))) +#define __compiletime_warning(message) __attribute__((__warning__(message))) +#define __compiletime_error(message) __attribute__((__error__(message))) -#ifdef LATENT_ENTROPY_PLUGIN +#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) #define __latent_entropy __attribute__((latent_entropy)) #endif -#endif /* __CHECKER__ */ /* * calling noreturn functions, __builtin_unreachable() and __builtin_trap() @@ -107,10 +96,6 @@ * Mark a position in code as unreachable. This can be used to * suppress control flow warnings after asm blocks that transfer * control elsewhere. - * - * Early snapshots of gcc 4.5 don't support this and we can't detect - * this in the preprocessor, but we can live with this because they're - * unreleased. Really, we need to have autoconf for the kernel. */ #define unreachable() \ do { \ @@ -119,9 +104,6 @@ __builtin_unreachable(); \ } while (0) -/* Mark a function definition as prohibited from being cloned. */ -#define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) - #if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__) #define __randomize_layout __attribute__((randomize_layout)) #define __no_randomize_layout __attribute__((no_randomize_layout)) @@ -131,32 +113,6 @@ #endif /* - * When used with Link Time Optimization, gcc can optimize away C functions or - * variables which are referenced only from assembly code. __visible tells the - * optimizer that something else uses this function or variable, thus preventing - * this. - */ -#define __visible __attribute__((externally_visible)) - -/* gcc version specific checks */ - -#if GCC_VERSION >= 40900 && !defined(__CHECKER__) -/* - * __assume_aligned(n, k): Tell the optimizer that the returned - * pointer can be assumed to be k modulo n. The second argument is - * optional (default 0), so we use a variadic macro to make the - * shorthand. - * - * Beware: Do not apply this to functions which may return - * ERR_PTRs. Also, it is probably unwise to apply it to functions - * returning extra information in the low bits (but in that case the - * compiler should see some alignment anyway, when the return value is - * massaged by 'flags = ptr & 3; ptr &= ~3;'). - */ -#define __assume_aligned(a, ...) 
__attribute__((__assume_aligned__(a, ## __VA_ARGS__))) -#endif - -/* * GCC 'asm goto' miscompiles certain code sequences: * * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 @@ -187,39 +143,22 @@ #define KASAN_ABI_VERSION 3 #endif -#if GCC_VERSION >= 40902 /* - * Tell the compiler that address safety instrumentation (KASAN) - * should not be applied to that function. - * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + * Because __no_sanitize_address conflicts with inlining: + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + * we do one or the other. */ -#define __no_sanitize_address __attribute__((no_sanitize_address)) #ifdef CONFIG_KASAN #define __no_sanitize_address_or_inline \ __no_sanitize_address __maybe_unused notrace #else #define __no_sanitize_address_or_inline inline #endif -#endif #if GCC_VERSION >= 50100 -/* - * Mark structures as requiring designated initializers. - * https://gcc.gnu.org/onlinedocs/gcc/Designated-Inits.html - */ -#define __designated_init __attribute__((designated_init)) #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif -#if !defined(__noclone) -#define __noclone /* not needed */ -#endif - -#if !defined(__no_sanitize_address) -#define __no_sanitize_address -#define __no_sanitize_address_or_inline inline -#endif - /* * Turn individual warnings and errors on and off locally, depending * on version. diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index 4c7f9befa9f6..517bd14e1222 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h @@ -29,17 +29,8 @@ */ #define OPTIMIZER_HIDE_VAR(var) barrier() -/* Intel ECC compiler doesn't support __builtin_types_compatible_p() */ -#define __must_be_array(a) 0 - #endif /* icc has this, but it's called _bswap16 */ #define __HAVE_BUILTIN_BSWAP16__ #define __builtin_bswap16 _bswap16 - -/* The following are for compatibility with GCC, from compiler-gcc.h, - * and may be redefined here because they should not be shared with other - * compilers, like clang. 
- */ -#define __visible __attribute__((externally_visible)) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 4170fcee5adb..18c80cfa4fc4 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -23,8 +23,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, #define __branch_check__(x, expect, is_constant) ({ \ long ______r; \ static struct ftrace_likely_data \ - __attribute__((__aligned__(4))) \ - __attribute__((section("_ftrace_annotated_branch"))) \ + __aligned(4) \ + __section("_ftrace_annotated_branch") \ ______f = { \ .data.func = __func__, \ .data.file = __FILE__, \ @@ -59,8 +59,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, ({ \ int ______r; \ static struct ftrace_branch_data \ - __attribute__((__aligned__(4))) \ - __attribute__((section("_ftrace_branch"))) \ + __aligned(4) \ + __section("_ftrace_branch") \ ______f = { \ .func = __func__, \ .file = __FILE__, \ @@ -115,7 +115,10 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, # define ASM_UNREACHABLE #endif #ifndef unreachable -# define unreachable() do { annotate_reachable(); do { } while (1); } while (0) +# define unreachable() do { \ + annotate_unreachable(); \ + __builtin_unreachable(); \ +} while (0) #endif /* @@ -137,7 +140,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, extern typeof(sym) sym; \ static const unsigned long __kentry_##sym \ __used \ - __attribute__((section("___kentry" "+" #sym ), used)) \ + __section("___kentry" "+" #sym ) \ = (unsigned long)&sym; #endif @@ -278,7 +281,7 @@ unsigned long read_word_at_a_time(const void *addr) * visible to the compiler. */ #define __ADDRESSABLE(sym) \ - static void * __attribute__((section(".discard.addressable"), used)) \ + static void * __section(".discard.addressable") __used \ __PASTE(__addressable_##sym, __LINE__) = (void *)&sym; /** @@ -331,10 +334,6 @@ static inline void *offset_to_ptr(const int *off) #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ -#ifndef __optimize -# define __optimize(level) -#endif - /* Compile time object size, -1 for unknown */ #ifndef __compiletime_object_size # define __compiletime_object_size(obj) -1 @@ -376,4 +375,7 @@ static inline void *offset_to_ptr(const int *off) compiletime_assert(__native_word(t), \ "Need native word sized stores/loads for atomicity.") +/* &a[0] degrades to a pointer: a different type from an array */ +#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) + #endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h new file mode 100644 index 000000000000..6b28c1b7310c --- /dev/null +++ b/include/linux/compiler_attributes.h @@ -0,0 +1,258 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_COMPILER_ATTRIBUTES_H +#define __LINUX_COMPILER_ATTRIBUTES_H + +/* + * The attributes in this file are unconditionally defined and they directly + * map to compiler attribute(s) -- except those that are optional. + * + * Any other "attributes" (i.e. those that depend on a configuration option, + * on a compiler, on an architecture, on plugins, on other attributes...) + * should be defined elsewhere (e.g. compiler_types.h or compiler-*.h). + * + * This file is meant to be sorted (by actual attribute name, + * not by #define identifier). Use the __attribute__((__name__)) syntax + * (i.e. with underscores) to avoid future collisions with other macros. + * If an attribute is optional, state the reason in the comment. 
+ */ + +/* + * To check for optional attributes, we use __has_attribute, which is supported + * on gcc >= 5, clang >= 2.9 and icc >= 17. In the meantime, to support + * 4.6 <= gcc < 5, we implement __has_attribute by hand. + * + * sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__ + * depending on the compiler used to build it; however, these attributes have + * no semantic effects for sparse, so it does not matter. Also note that, + * in order to avoid sparse's warnings, even the unsupported ones must be + * defined to 0. + */ +#ifndef __has_attribute +# define __has_attribute(x) __GCC4_has_attribute_##x +# define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9) +# define __GCC4_has_attribute___designated_init__ 0 +# define __GCC4_has_attribute___externally_visible__ 1 +# define __GCC4_has_attribute___noclone__ 1 +# define __GCC4_has_attribute___optimize__ 1 +# define __GCC4_has_attribute___nonstring__ 0 +# define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8) +#endif + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alias-function-attribute + */ +#define __alias(symbol) __attribute__((__alias__(#symbol))) + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-aligned-function-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-aligned-type-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-aligned-variable-attribute + */ +#define __aligned(x) __attribute__((__aligned__(x))) +#define __aligned_largest __attribute__((__aligned__)) + +/* + * Note: users of __always_inline currently do not write "inline" themselves, + * which seems to be required by gcc to apply the attribute according + * to its docs (and also "warning: always_inline function might not be + * inlinable [-Wattributes]" is emitted). + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-always_005finline-function-attribute + * clang: mentioned + */ +#define __always_inline inline __attribute__((__always_inline__)) + +/* + * The second argument is optional (default 0), so we use a variadic macro + * to make the shorthand. + * + * Beware: Do not apply this to functions which may return + * ERR_PTRs. Also, it is probably unwise to apply it to functions + * returning extra information in the low bits (but in that case the + * compiler should see some alignment anyway, when the return value is + * massaged by 'flags = ptr & 3; ptr &= ~3;'). + * + * Optional: only supported since gcc >= 4.9 + * Optional: not supported by icc + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-assume_005faligned-function-attribute + * clang: https://clang.llvm.org/docs/AttributeReference.html#assume-aligned + */ +#if __has_attribute(__assume_aligned__) +# define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__))) +#else +# define __assume_aligned(a, ...) +#endif + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute + */ +#define __cold __attribute__((__cold__)) + +/* + * Note the long name. + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-const-function-attribute + */ +#define __attribute_const__ __attribute__((__const__)) + +/* + * Don't. Just don't. 
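As an aside, a minimal sketch of how the optional __assume_aligned attribute defined above degrades gracefully (the pool and accessor below are hypothetical, not part of the patch):

/* Hypothetical 64-byte-aligned backing store. */
static unsigned char example_pool[4096] __aligned(64);

/*
 * On gcc >= 4.9 and clang the optimizer may assume the returned pointer
 * is 64-byte aligned; on compilers without the attribute,
 * __assume_aligned(64) expands to nothing and the code still builds.
 */
__assume_aligned(64) static void *example_pool_base(void)
{
	return example_pool;
}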
See commit 771c035372a0 ("deprecate the '__deprecated' + * attribute warnings entirely and for good") for more information. + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-deprecated-function-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-deprecated-type-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-deprecated-variable-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Enumerator-Attributes.html#index-deprecated-enumerator-attribute + * clang: https://clang.llvm.org/docs/AttributeReference.html#deprecated + */ +#define __deprecated + +/* + * Optional: only supported since gcc >= 5.1 + * Optional: not supported by clang + * Optional: not supported by icc + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-designated_005finit-type-attribute + */ +#if __has_attribute(__designated_init__) +# define __designated_init __attribute__((__designated_init__)) +#else +# define __designated_init +#endif + +/* + * Optional: not supported by clang + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-externally_005fvisible-function-attribute + */ +#if __has_attribute(__externally_visible__) +# define __visible __attribute__((__externally_visible__)) +#else +# define __visible +#endif + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-format-function-attribute + * clang: https://clang.llvm.org/docs/AttributeReference.html#format + */ +#define __printf(a, b) __attribute__((__format__(printf, a, b))) +#define __scanf(a, b) __attribute__((__format__(scanf, a, b))) + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-gnu_005finline-function-attribute + * clang: https://clang.llvm.org/docs/AttributeReference.html#gnu-inline + */ +#define __gnu_inline __attribute__((__gnu_inline__)) + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-malloc-function-attribute + */ +#define __malloc __attribute__((__malloc__)) + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-mode-type-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-mode-variable-attribute + */ +#define __mode(x) __attribute__((__mode__(x))) + +/* + * Optional: not supported by clang + * Note: icc does not recognize gcc's no-tracer + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noclone-function-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-optimize-function-attribute + */ +#if __has_attribute(__noclone__) +# if __has_attribute(__optimize__) +# define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) +# else +# define __noclone __attribute__((__noclone__)) +# endif +#else +# define __noclone +#endif + +/* + * Note the missing underscores. 
+ * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noinline-function-attribute + * clang: mentioned + */ +#define noinline __attribute__((__noinline__)) + +/* + * Optional: only supported since gcc >= 8 + * Optional: not supported by clang + * Optional: not supported by icc + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-nonstring-variable-attribute + */ +#if __has_attribute(__nonstring__) +# define __nonstring __attribute__((__nonstring__)) +#else +# define __nonstring +#endif + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noreturn-function-attribute + * clang: https://clang.llvm.org/docs/AttributeReference.html#noreturn + * clang: https://clang.llvm.org/docs/AttributeReference.html#id1 + */ +#define __noreturn __attribute__((__noreturn__)) + +/* + * Optional: only supported since gcc >= 4.8 + * Optional: not supported by icc + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-no_005fsanitize_005faddress-function-attribute + * clang: https://clang.llvm.org/docs/AttributeReference.html#no-sanitize-address-no-address-safety-analysis + */ +#if __has_attribute(__no_sanitize_address__) +# define __no_sanitize_address __attribute__((__no_sanitize_address__)) +#else +# define __no_sanitize_address +#endif + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-packed-type-attribute + * clang: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-packed-variable-attribute + */ +#define __packed __attribute__((__packed__)) + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-pure-function-attribute + */ +#define __pure __attribute__((__pure__)) + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-section-function-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-section-variable-attribute + * clang: https://clang.llvm.org/docs/AttributeReference.html#section-declspec-allocate + */ +#define __section(S) __attribute__((__section__(#S))) + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-unused-function-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-unused-type-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-unused-variable-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-unused-label-attribute + * clang: https://clang.llvm.org/docs/AttributeReference.html#maybe-unused-unused + */ +#define __always_unused __attribute__((__unused__)) +#define __maybe_unused __attribute__((__unused__)) + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-used-function-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-used-variable-attribute + */ +#define __used __attribute__((__used__)) + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute + */ +#define __weak __attribute__((__weak__)) + +#endif /* __LINUX_COMPILER_ATTRIBUTES_H */ diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 97cfe29b3f0a..3439d7d0249a 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -1,3 
+1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_COMPILER_TYPES_H #define __LINUX_COMPILER_TYPES_H @@ -54,6 +55,9 @@ extern void __chk_io_ptr(const volatile void __iomem *); #ifdef __KERNEL__ +/* Attributes */ +#include <linux/compiler_attributes.h> + /* Compiler specific macros. */ #ifdef __clang__ #include <linux/compiler-clang.h> @@ -78,12 +82,6 @@ extern void __chk_io_ptr(const volatile void __iomem *); #include <asm/compiler.h> #endif -/* - * Generic compiler-independent macros required for kernel - * build go below this comment. Actual compiler/compiler version - * specific implementations come from the above header files - */ - struct ftrace_branch_data { const char *func; const char *file; @@ -106,10 +104,6 @@ struct ftrace_likely_data { unsigned long constant; }; -/* Don't. Just don't. */ -#define __deprecated -#define __deprecated_for_modules - #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ @@ -119,10 +113,6 @@ struct ftrace_likely_data { * compilers. We don't consider that to be an error, so set them to nothing. * For example, some of them are for compiler specific plugins. */ -#ifndef __designated_init -# define __designated_init -#endif - #ifndef __latent_entropy # define __latent_entropy #endif @@ -140,17 +130,6 @@ struct ftrace_likely_data { # define randomized_struct_fields_end #endif -#ifndef __visible -#define __visible -#endif - -/* - * Assume alignment of return value. - */ -#ifndef __assume_aligned -#define __assume_aligned(a, ...) -#endif - /* Are two types/vars the same type (ignoring qualifiers)? */ #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) @@ -159,14 +138,6 @@ struct ftrace_likely_data { (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \ sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) -#ifndef __attribute_const__ -#define __attribute_const__ __attribute__((__const__)) -#endif - -#ifndef __noclone -#define __noclone -#endif - /* Helpers for emitting diagnostics in pragmas. */ #ifndef __diag #define __diag(string) @@ -186,43 +157,16 @@ struct ftrace_likely_data { #define __diag_error(compiler, version, option, comment) \ __diag_ ## compiler(version, error, option) -/* - * From the GCC manual: - * - * Many functions have no effects except the return value and their - * return value depends only on the parameters and/or global - * variables. Such a function can be subject to common subexpression - * elimination and loop optimization just as an arithmetic operator - * would be. - * [...] 
- */ -#define __pure __attribute__((pure)) -#define __aligned(x) __attribute__((aligned(x))) -#define __printf(a, b) __attribute__((format(printf, a, b))) -#define __scanf(a, b) __attribute__((format(scanf, a, b))) -#define __maybe_unused __attribute__((unused)) -#define __always_unused __attribute__((unused)) -#define __mode(x) __attribute__((mode(x))) -#define __malloc __attribute__((__malloc__)) -#define __used __attribute__((__used__)) -#define __noreturn __attribute__((noreturn)) -#define __packed __attribute__((packed)) -#define __weak __attribute__((weak)) -#define __alias(symbol) __attribute__((alias(#symbol))) -#define __cold __attribute__((cold)) -#define __section(S) __attribute__((__section__(#S))) - - #ifdef CONFIG_ENABLE_MUST_CHECK -#define __must_check __attribute__((warn_unused_result)) +#define __must_check __attribute__((__warn_unused_result__)) #else #define __must_check #endif -#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__) +#if defined(CC_USING_HOTPATCH) #define notrace __attribute__((hotpatch(0, 0))) #else -#define notrace __attribute__((no_instrument_function)) +#define notrace __attribute__((__no_instrument_function__)) #endif /* @@ -231,23 +175,11 @@ struct ftrace_likely_data { * stack and frame pointer being set up and there is no chance to * restore the lr register to the value before mcount was called. */ -#define __naked __attribute__((naked)) notrace +#define __naked __attribute__((__naked__)) notrace #define __compiler_offsetof(a, b) __builtin_offsetof(a, b) /* - * Feature detection for gnu_inline (gnu89 extern inline semantics). Either - * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics, - * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not - * defined so the gnu89 semantics are the default. - */ -#ifdef __GNUC_STDC_INLINE__ -# define __gnu_inline __attribute__((gnu_inline)) -#else -# define __gnu_inline -#endif - -/* * Force always-inline if the user requests it so via the .config. * GCC does not warn about unused static inline functions for * -Wunused-function. This turns out to avoid the need for complex #ifdef @@ -258,22 +190,20 @@ struct ftrace_likely_data { * semantics rather than c99. This prevents multiple symbol definition errors * of extern inline functions at link time. * A lot of inline functions can cause havoc with function tracing. + * Do not use __always_inline here, since currently it expands to inline again + * (which would break users of __always_inline). 
*/ #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ !defined(CONFIG_OPTIMIZE_INLINING) -#define inline \ - inline __attribute__((always_inline, unused)) notrace __gnu_inline +#define inline inline __attribute__((__always_inline__)) __gnu_inline \ + __maybe_unused notrace #else -#define inline inline __attribute__((unused)) notrace __gnu_inline +#define inline inline __gnu_inline \ + __maybe_unused notrace #endif #define __inline__ inline -#define __inline inline -#define noinline __attribute__((noinline)) - -#ifndef __always_inline -#define __always_inline inline __attribute__((always_inline)) -#endif +#define __inline inline /* * Rather then using noinline to prevent stack consumption, use diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index caf40ad0bbc6..e0cd2baa8380 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -126,6 +126,7 @@ enum cpuhp_state { CPUHP_AP_MIPS_GIC_TIMER_STARTING, CPUHP_AP_ARC_TIMER_STARTING, CPUHP_AP_RISCV_TIMER_STARTING, + CPUHP_AP_CSKY_TIMER_STARTING, CPUHP_AP_KVM_STARTING, CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING, CPUHP_AP_KVM_ARM_VGIC_STARTING, diff --git a/include/linux/fs.h b/include/linux/fs.h index 8252df30b9a1..c95c0807471f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1752,6 +1752,25 @@ struct block_device_operations; #define NOMMU_VMFLAGS \ (NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC) +/* + * These flags control the behavior of the remap_file_range function pointer. + * If it is called with len == 0 that means "remap to end of source file". + * See Documentation/filesystems/vfs.txt for more details about this call. + * + * REMAP_FILE_DEDUP: only remap if contents identical (i.e. deduplicate) + * REMAP_FILE_CAN_SHORTEN: caller can handle a shortened request + */ +#define REMAP_FILE_DEDUP (1 << 0) +#define REMAP_FILE_CAN_SHORTEN (1 << 1) + +/* + * These flags signal that the caller is ok with altering various aspects of + * the behavior of the remap operation. The changes must be made by the + * implementation; the vfs remap helper functions can take advantage of them. + * Flags in this category exist to preserve the quirky behavior of the hoisted + * btrfs clone/dedupe ioctls. 
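To illustrate the flag semantics described above, a hedged sketch of a filesystem hook (hypothetical handler; locking, range checks and error handling omitted):

static loff_t example_remap_file_range(struct file *file_in, loff_t pos_in,
				       struct file *file_out, loff_t pos_out,
				       loff_t len, unsigned int remap_flags)
{
	/* len == 0 means "remap to end of source file". */
	if (len == 0)
		len = i_size_read(file_inode(file_in)) - pos_in;

	if (remap_flags & REMAP_FILE_DEDUP) {
		/* Only remap ranges whose contents are byte-identical. */
	}

	if (remap_flags & REMAP_FILE_CAN_SHORTEN) {
		/* The caller accepts remapping fewer than len bytes. */
	}

	return len;	/* bytes remapped, or a negative errno */
}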
+ */ +#define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN) struct iov_iter; @@ -1790,10 +1809,9 @@ struct file_operations { #endif ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int); - int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, - u64); - int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, - u64); + loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t len, unsigned int remap_flags); int (*fadvise)(struct file *, loff_t, loff_t, int); } __randomize_layout; @@ -1856,21 +1874,21 @@ extern ssize_t vfs_readv(struct file *, const struct iovec __user *, unsigned long, loff_t *, rwf_t); extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, loff_t, size_t, unsigned int); -extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in, - struct inode *inode_out, loff_t pos_out, - u64 *len, bool is_dedupe); -extern int do_clone_file_range(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, u64 len); -extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, u64 len); -extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff, - struct inode *dest, loff_t destoff, - loff_t len, bool *is_same); +extern int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t *count, + unsigned int remap_flags); +extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t len, unsigned int remap_flags); +extern loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t len, unsigned int remap_flags); extern int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same); -extern int vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos, - struct file *dst_file, loff_t dst_pos, - u64 len); +extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos, + struct file *dst_file, loff_t dst_pos, + loff_t len, unsigned int remap_flags); struct super_operations { @@ -2998,6 +3016,9 @@ extern int sb_min_blocksize(struct super_block *, int); extern int generic_file_mmap(struct file *, struct vm_area_struct *); extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *); +extern int generic_remap_checks(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t *count, unsigned int remap_flags); extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 24bcc5eec6b4..76f8db0b0e71 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -510,22 +510,18 @@ alloc_pages(gfp_t gfp_mask, unsigned int order) } extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, struct vm_area_struct *vma, unsigned long addr, - int node, bool hugepage); -#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ - alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true) + int node); #else #define alloc_pages(gfp_mask, order) \ alloc_pages_node(numa_node_id(), gfp_mask, order) -#define 
alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\ - alloc_pages(gfp_mask, order) -#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ +#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\ alloc_pages(gfp_mask, order) #endif #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) #define alloc_page_vma(gfp_mask, vma, addr) \ - alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false) + alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id()) #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ - alloc_pages_vma(gfp_mask, 0, vma, addr, node, false) + alloc_pages_vma(gfp_mask, 0, vma, addr, node) extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); extern unsigned long get_zeroed_page(gfp_t gfp_mask); diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index c759d1cbcedd..a64f21a97369 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -37,7 +37,9 @@ struct in_device { unsigned long mr_v1_seen; unsigned long mr_v2_seen; unsigned long mr_maxdelay; - unsigned char mr_qrv; + unsigned long mr_qi; /* Query Interval */ + unsigned long mr_qri; /* Query Response Interval */ + unsigned char mr_qrv; /* Query Robustness Variable */ unsigned char mr_gq_running; unsigned char mr_ifc_count; struct timer_list mr_gq_timer; /* general query timer */ diff --git a/include/linux/key-type.h b/include/linux/key-type.h index 05d8fb5a06c4..bc9af551fc83 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -17,6 +17,9 @@ #ifdef CONFIG_KEYS +struct kernel_pkey_query; +struct kernel_pkey_params; + /* * key under-construction record * - passed to the request_key actor if supplied @@ -155,6 +158,14 @@ struct key_type { */ struct key_restriction *(*lookup_restriction)(const char *params); + /* Asymmetric key accessor functions. */ + int (*asym_query)(const struct kernel_pkey_params *params, + struct kernel_pkey_query *info); + int (*asym_eds_op)(struct kernel_pkey_params *params, + const void *in, void *out); + int (*asym_verify_signature)(struct kernel_pkey_params *params, + const void *in, const void *in2); + /* internal fields */ struct list_head link; /* link in types list */ struct lock_class_key lock_class; /* key->sem lock class */ diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h new file mode 100644 index 000000000000..c7c48c79ce0e --- /dev/null +++ b/include/linux/keyctl.h @@ -0,0 +1,46 @@ +/* keyctl kernel bits + * + * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef __LINUX_KEYCTL_H +#define __LINUX_KEYCTL_H + +#include <uapi/linux/keyctl.h> + +struct kernel_pkey_query { + __u32 supported_ops; /* Which ops are supported */ + __u32 key_size; /* Size of the key in bits */ + __u16 max_data_size; /* Maximum size of raw data to sign in bytes */ + __u16 max_sig_size; /* Maximum size of signature in bytes */ + __u16 max_enc_size; /* Maximum size of encrypted blob in bytes */ + __u16 max_dec_size; /* Maximum size of decrypted blob in bytes */ +}; + +enum kernel_pkey_operation { + kernel_pkey_encrypt, + kernel_pkey_decrypt, + kernel_pkey_sign, + kernel_pkey_verify, +}; + +struct kernel_pkey_params { + struct key *key; + const char *encoding; /* Encoding (eg. 
"oaep" or "raw" for none) */ + const char *hash_algo; /* Digest algorithm used (eg. "sha1") or NULL if N/A */ + char *info; /* Modified info string to be released later */ + __u32 in_len; /* Input data size */ + union { + __u32 out_len; /* Output buffer size (enc/dec/sign) */ + __u32 in2_len; /* 2nd input data size (verify) */ + }; + enum kernel_pkey_operation op : 8; +}; + +#endif /* __LINUX_KEYCTL_H */ diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 5228c62af416..bac395f1d00a 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -139,6 +139,8 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, struct mempolicy *get_task_policy(struct task_struct *p); struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, unsigned long addr); +struct mempolicy *get_vma_policy(struct vm_area_struct *vma, + unsigned long addr); bool vma_policy_mof(struct vm_area_struct *vma); extern void numa_default_policy(void); diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index 20949dde35cd..e44e3ec8a9c7 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -36,7 +36,7 @@ * I2C requires 1 additional byte for requests. * I2C requires 2 additional bytes for responses. * SPI requires up to 32 additional bytes for responses. - * */ + */ #define EC_PROTO_VERSION_UNKNOWN 0 #define EC_MAX_REQUEST_OVERHEAD 1 #define EC_MAX_RESPONSE_OVERHEAD 32 @@ -58,13 +58,14 @@ enum { EC_MAX_MSG_BYTES = 64 * 1024, }; -/* - * @version: Command version number (often 0) - * @command: Command to send (EC_CMD_...) - * @outsize: Outgoing length in bytes - * @insize: Max number of bytes to accept from EC - * @result: EC's response to the command (separate from communication failure) - * @data: Where to put the incoming data from EC and outgoing data to EC +/** + * struct cros_ec_command - Information about a ChromeOS EC command. + * @version: Command version number (often 0). + * @command: Command to send (EC_CMD_...). + * @outsize: Outgoing length in bytes. + * @insize: Max number of bytes to accept from the EC. + * @result: EC's response to the command (separate from communication failure). + * @data: Where to put the incoming data from EC and outgoing data to EC. */ struct cros_ec_command { uint32_t version; @@ -76,48 +77,55 @@ struct cros_ec_command { }; /** - * struct cros_ec_device - Information about a ChromeOS EC device - * - * @phys_name: name of physical comms layer (e.g. 'i2c-4') + * struct cros_ec_device - Information about a ChromeOS EC device. + * @phys_name: Name of physical comms layer (e.g. 'i2c-4'). * @dev: Device pointer for physical comms device - * @was_wake_device: true if this device was set to wake the system from - * sleep at the last suspend - * @cmd_readmem: direct read of the EC memory-mapped region, if supported - * @offset is within EC_LPC_ADDR_MEMMAP region. - * @bytes: number of bytes to read. zero means "read a string" (including - * the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be read. - * Caller must ensure that the buffer is large enough for the result when - * reading a string. - * - * @priv: Private data - * @irq: Interrupt to use - * @id: Device id - * @din: input buffer (for data from EC) - * @dout: output buffer (for data to EC) - * \note - * These two buffers will always be dword-aligned and include enough - * space for up to 7 word-alignment bytes also, so we can ensure that - * the body of the message is always dword-aligned (64-bit). 
- * We use this alignment to keep ARM and x86 happy. Probably word - * alignment would be OK, there might be a small performance advantage - * to using dword. - * @din_size: size of din buffer to allocate (zero to use static din) - * @dout_size: size of dout buffer to allocate (zero to use static dout) - * @wake_enabled: true if this device can wake the system from sleep - * @suspended: true if this device had been suspended - * @cmd_xfer: send command to EC and get response - * Returns the number of bytes received if the communication succeeded, but - * that doesn't mean the EC was happy with the command. The caller - * should check msg.result for the EC's result code. - * @pkt_xfer: send packet to EC and get response - * @lock: one transaction at a time - * @mkbp_event_supported: true if this EC supports the MKBP event protocol. - * @event_notifier: interrupt event notifier for transport devices. - * @event_data: raw payload transferred with the MKBP event. - * @event_size: size in bytes of the event data. + * @was_wake_device: True if this device was set to wake the system from + * sleep at the last suspend. + * @cros_class: The class structure for this device. + * @cmd_readmem: Direct read of the EC memory-mapped region, if supported. + * @offset: Is within EC_LPC_ADDR_MEMMAP region. + * @bytes: Number of bytes to read. zero means "read a string" (including + * the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be + * read. Caller must ensure that the buffer is large enough for the + * result when reading a string. + * @max_request: Max size of message requested. + * @max_response: Max size of message response. + * @max_passthru: Max sice of passthru message. + * @proto_version: The protocol version used for this device. + * @priv: Private data. + * @irq: Interrupt to use. + * @id: Device id. + * @din: Input buffer (for data from EC). This buffer will always be + * dword-aligned and include enough space for up to 7 word-alignment + * bytes also, so we can ensure that the body of the message is always + * dword-aligned (64-bit). We use this alignment to keep ARM and x86 + * happy. Probably word alignment would be OK, there might be a small + * performance advantage to using dword. + * @dout: Output buffer (for data to EC). This buffer will always be + * dword-aligned and include enough space for up to 7 word-alignment + * bytes also, so we can ensure that the body of the message is always + * dword-aligned (64-bit). We use this alignment to keep ARM and x86 + * happy. Probably word alignment would be OK, there might be a small + * performance advantage to using dword. + * @din_size: Size of din buffer to allocate (zero to use static din). + * @dout_size: Size of dout buffer to allocate (zero to use static dout). + * @wake_enabled: True if this device can wake the system from sleep. + * @suspended: True if this device had been suspended. + * @cmd_xfer: Send command to EC and get response. + * Returns the number of bytes received if the communication + * succeeded, but that doesn't mean the EC was happy with the + * command. The caller should check msg.result for the EC's result + * code. + * @pkt_xfer: Send packet to EC and get response. + * @lock: One transaction at a time. + * @mkbp_event_supported: True if this EC supports the MKBP event protocol. + * @event_notifier: Interrupt event notifier for transport devices. + * @event_data: Raw payload transferred with the MKBP event. + * @event_size: Size in bytes of the event data. 
+ * @host_event_wake_mask: Mask of host events that cause wake from suspend. */ struct cros_ec_device { - /* These are used by other drivers that want to talk to the EC */ const char *phys_name; struct device *dev; @@ -153,20 +161,19 @@ struct cros_ec_device { }; /** - * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information - * + * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information. * @sensor_num: Id of the sensor, as reported by the EC. */ struct cros_ec_sensor_platform { u8 sensor_num; }; -/* struct cros_ec_platform - ChromeOS EC platform information - * - * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...) - * used in /dev/ and sysfs. - * @cmd_offset: offset to apply for each command. Set when - * registering a devicde behind another one. +/** + * struct cros_ec_platform - ChromeOS EC platform information. + * @ec_name: Name of EC device (e.g. 'cros-ec', 'cros-pd', ...) + * used in /dev/ and sysfs. + * @cmd_offset: Offset to apply for each command. Set when + * registering a device behind another one. */ struct cros_ec_platform { const char *ec_name; @@ -175,16 +182,16 @@ struct cros_ec_platform { struct cros_ec_debugfs; -/* - * struct cros_ec_dev - ChromeOS EC device entry point - * - * @class_dev: Device structure used in sysfs - * @cdev: Character device structure in /dev - * @ec_dev: cros_ec_device structure to talk to the physical device - * @dev: pointer to the platform device - * @debug_info: cros_ec_debugfs structure for debugging information - * @has_kb_wake_angle: true if at least 2 accelerometer are connected to the EC. - * @cmd_offset: offset to apply for each command. +/** + * struct cros_ec_dev - ChromeOS EC device entry point. + * @class_dev: Device structure used in sysfs. + * @cdev: Character device structure in /dev. + * @ec_dev: cros_ec_device structure to talk to the physical device. + * @dev: Pointer to the platform device. + * @debug_info: cros_ec_debugfs structure for debugging information. + * @has_kb_wake_angle: True if at least 2 accelerometer are connected to the EC. + * @cmd_offset: Offset to apply for each command. + * @features: Features supported by the EC. */ struct cros_ec_dev { struct device class_dev; @@ -200,124 +207,129 @@ struct cros_ec_dev { #define to_cros_ec_dev(dev) container_of(dev, struct cros_ec_dev, class_dev) /** - * cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device + * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device. + * @ec_dev: Device to suspend. * * This can be called by drivers to handle a suspend event. * - * ec_dev: Device to suspend - * @return 0 if ok, -ve on error + * Return: 0 on success or negative error code. */ int cros_ec_suspend(struct cros_ec_device *ec_dev); /** - * cros_ec_resume - Handle a resume operation for the ChromeOS EC device + * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device. + * @ec_dev: Device to resume. * * This can be called by drivers to handle a resume event. * - * @ec_dev: Device to resume - * @return 0 if ok, -ve on error + * Return: 0 on success or negative error code. */ int cros_ec_resume(struct cros_ec_device *ec_dev); /** - * cros_ec_prepare_tx - Prepare an outgoing message in the output buffer + * cros_ec_prepare_tx() - Prepare an outgoing message in the output buffer. + * @ec_dev: Device to register. + * @msg: Message to write. * * This is intended to be used by all ChromeOS EC drivers, but at present * only SPI uses it. Once LPC uses the same protocol it can start using it. 
* I2C could use it now, with a refactor of the existing code. * - * @ec_dev: Device to register - * @msg: Message to write + * Return: 0 on success or negative error code. */ int cros_ec_prepare_tx(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); /** - * cros_ec_check_result - Check ec_msg->result + * cros_ec_check_result() - Check ec_msg->result. + * @ec_dev: EC device. + * @msg: Message to check. * * This is used by ChromeOS EC drivers to check the ec_msg->result for * errors and to warn about them. * - * @ec_dev: EC device - * @msg: Message to check + * Return: 0 on success or negative error code. */ int cros_ec_check_result(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); /** - * cros_ec_cmd_xfer - Send a command to the ChromeOS EC + * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC. + * @ec_dev: EC device. + * @msg: Message to write. * * Call this to send a command to the ChromeOS EC. This should be used * instead of calling the EC's cmd_xfer() callback directly. * - * @ec_dev: EC device - * @msg: Message to write + * Return: 0 on success or negative error code. */ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); /** - * cros_ec_cmd_xfer_status - Send a command to the ChromeOS EC + * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC. + * @ec_dev: EC device. + * @msg: Message to write. * * This function is identical to cros_ec_cmd_xfer, except it returns success * status only if both the command was transmitted successfully and the EC * replied with success status. It's not necessary to check msg->result when * using this function. * - * @ec_dev: EC device - * @msg: Message to write - * @return: Num. of bytes transferred on success, <0 on failure + * Return: The number of bytes transferred on success or negative error code. */ int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); /** - * cros_ec_remove - Remove a ChromeOS EC + * cros_ec_remove() - Remove a ChromeOS EC. + * @ec_dev: Device to register. * * Call this to deregister a ChromeOS EC, then clean up any private data. * - * @ec_dev: Device to register - * @return 0 if ok, -ve on error + * Return: 0 on success or negative error code. */ int cros_ec_remove(struct cros_ec_device *ec_dev); /** - * cros_ec_register - Register a new ChromeOS EC, using the provided info + * cros_ec_register() - Register a new ChromeOS EC, using the provided info. + * @ec_dev: Device to register. * * Before calling this, allocate a pointer to a new device and then fill * in all the fields up to the --private-- marker. * - * @ec_dev: Device to register - * @return 0 if ok, -ve on error + * Return: 0 on success or negative error code. */ int cros_ec_register(struct cros_ec_device *ec_dev); /** - * cros_ec_query_all - Query the protocol version supported by the ChromeOS EC + * cros_ec_query_all() - Query the protocol version supported by the + * ChromeOS EC. + * @ec_dev: Device to register. * - * @ec_dev: Device to register - * @return 0 if ok, -ve on error + * Return: 0 on success or negative error code. */ int cros_ec_query_all(struct cros_ec_device *ec_dev); /** - * cros_ec_get_next_event - Fetch next event from the ChromeOS EC - * - * @ec_dev: Device to fetch event from + * cros_ec_get_next_event() - Fetch next event from the ChromeOS EC. + * @ec_dev: Device to fetch event from. * @wake_event: Pointer to a bool set to true upon return if the event might be * treated as a wake event. Ignored if null. 
* - * Returns: 0 on success, Linux error number on failure + * Return: 0 on success or negative error code. */ int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event); /** - * cros_ec_get_host_event - Return a mask of event set by the EC. + * cros_ec_get_host_event() - Return a mask of event set by the ChromeOS EC. + * @ec_dev: Device to fetch event from. * - * When MKBP is supported, when the EC raises an interrupt, - * We collect the events raised and call the functions in the ec notifier. + * When MKBP is supported, when the EC raises an interrupt, we collect the + * events raised and call the functions in the ec notifier. This function + * is a helper to know which events are raised. * - * This function is a helper to know which events are raised. + * Return: 0 on success or negative error code. */ u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev); diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h index 5fd0e429f472..9a9631f0559e 100644 --- a/include/linux/mfd/cros_ec_commands.h +++ b/include/linux/mfd/cros_ec_commands.h @@ -306,15 +306,18 @@ enum host_event_code { /* Host event mask */ #define EC_HOST_EVENT_MASK(event_code) (1UL << ((event_code) - 1)) -/* Arguments at EC_LPC_ADDR_HOST_ARGS */ +/** + * struct ec_lpc_host_args - Arguments at EC_LPC_ADDR_HOST_ARGS + * @flags: The host argument flags. + * @command_version: Command version. + * @data_size: The length of data. + * @checksum: Checksum; sum of command + flags + command_version + data_size + + * all params/response data bytes. + */ struct ec_lpc_host_args { uint8_t flags; uint8_t command_version; uint8_t data_size; - /* - * Checksum; sum of command + flags + command_version + data_size + - * all params/response data bytes. - */ uint8_t checksum; } __packed; @@ -468,54 +471,43 @@ struct ec_lpc_host_args { #define EC_HOST_REQUEST_VERSION 3 -/* Version 3 request from host */ +/** + * struct ec_host_request - Version 3 request from host. + * @struct_version: Should be 3. The EC will return EC_RES_INVALID_HEADER if it + * receives a header with a version it doesn't know how to + * parse. + * @checksum: Checksum of request and data; sum of all bytes including checksum + * should total to 0. + * @command: Command to send (EC_CMD_...) + * @command_version: Command version. + * @reserved: Unused byte in current protocol version; set to 0. + * @data_len: Length of data which follows this header. + */ struct ec_host_request { - /* Struct version (=3) - * - * EC will return EC_RES_INVALID_HEADER if it receives a header with a - * version it doesn't know how to parse. - */ uint8_t struct_version; - - /* - * Checksum of request and data; sum of all bytes including checksum - * should total to 0. - */ uint8_t checksum; - - /* Command code */ uint16_t command; - - /* Command version */ uint8_t command_version; - - /* Unused byte in current protocol version; set to 0 */ uint8_t reserved; - - /* Length of data which follows this header */ uint16_t data_len; } __packed; #define EC_HOST_RESPONSE_VERSION 3 -/* Version 3 response from EC */ +/** + * struct ec_host_response - Version 3 response from EC. + * @struct_version: Struct version (=3). + * @checksum: Checksum of response and data; sum of all bytes including + * checksum should total to 0. + * @result: EC's response to the command (separate from communication failure) + * @data_len: Length of data which follows this header. + * @reserved: Unused bytes in current protocol version; set to 0. 
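The framing rule spelled out above (all header and payload bytes, including the checksum field, must sum to zero) can be sketched with an illustrative helper that is not part of the patch; it assumes req->checksum is zero while summing:

static uint8_t example_host_request_csum(const struct ec_host_request *req,
					 const uint8_t *data)
{
	const uint8_t *p = (const uint8_t *)req;
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < sizeof(*req); i++)
		sum += p[i];
	for (i = 0; i < req->data_len; i++)
		sum += data[i];

	/* Store this value so the total wraps to 0 modulo 256. */
	return (uint8_t)-sum;
}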
+ */ struct ec_host_response { - /* Struct version (=3) */ uint8_t struct_version; - - /* - * Checksum of response and data; sum of all bytes including checksum - * should total to 0. - */ uint8_t checksum; - - /* Result code (EC_RES_*) */ uint16_t result; - - /* Length of data which follows this header */ uint16_t data_len; - - /* Unused bytes in current protocol version; set to 0 */ uint16_t reserved; } __packed; @@ -540,6 +532,10 @@ struct ec_host_response { */ #define EC_CMD_PROTO_VERSION 0x00 +/** + * struct ec_response_proto_version - Response to the proto version command. + * @version: The protocol version. + */ struct ec_response_proto_version { uint32_t version; } __packed; @@ -550,12 +546,20 @@ struct ec_response_proto_version { */ #define EC_CMD_HELLO 0x01 +/** + * struct ec_params_hello - Parameters to the hello command. + * @in_data: Pass anything here. + */ struct ec_params_hello { - uint32_t in_data; /* Pass anything here */ + uint32_t in_data; } __packed; +/** + * struct ec_response_hello - Response to the hello command. + * @out_data: Output will be in_data + 0x01020304. + */ struct ec_response_hello { - uint32_t out_data; /* Output will be in_data + 0x01020304 */ + uint32_t out_data; } __packed; /* Get version number */ @@ -567,22 +571,37 @@ enum ec_current_image { EC_IMAGE_RW }; +/** + * struct ec_response_get_version - Response to the get version command. + * @version_string_ro: Null-terminated RO firmware version string. + * @version_string_rw: Null-terminated RW firmware version string. + * @reserved: Unused bytes; was previously RW-B firmware version string. + * @current_image: One of ec_current_image. + */ struct ec_response_get_version { - /* Null-terminated version strings for RO, RW */ char version_string_ro[32]; char version_string_rw[32]; - char reserved[32]; /* Was previously RW-B string */ - uint32_t current_image; /* One of ec_current_image */ + char reserved[32]; + uint32_t current_image; } __packed; /* Read test */ #define EC_CMD_READ_TEST 0x03 +/** + * struct ec_params_read_test - Parameters for the read test command. + * @offset: Starting value for read buffer. + * @size: Size to read in bytes. + */ struct ec_params_read_test { - uint32_t offset; /* Starting value for read buffer */ - uint32_t size; /* Size to read in bytes */ + uint32_t offset; + uint32_t size; } __packed; +/** + * struct ec_response_read_test - Response to the read test command. + * @data: Data returned by the read test command. + */ struct ec_response_read_test { uint32_t data[32]; } __packed; @@ -597,18 +616,27 @@ struct ec_response_read_test { /* Get chip info */ #define EC_CMD_GET_CHIP_INFO 0x05 +/** + * struct ec_response_get_chip_info - Response to the get chip info command. + * @vendor: Null-terminated string for chip vendor. + * @name: Null-terminated string for chip name. + * @revision: Null-terminated string for chip mask version. + */ struct ec_response_get_chip_info { - /* Null-terminated strings */ char vendor[32]; char name[32]; - char revision[32]; /* Mask version */ + char revision[32]; } __packed; /* Get board HW version */ #define EC_CMD_GET_BOARD_VERSION 0x06 +/** + * struct ec_response_board_version - Response to the board version command. + * @board_version: A monotonously incrementing number. + */ struct ec_response_board_version { - uint16_t board_version; /* A monotonously incrementing number. 
*/ + uint16_t board_version; } __packed; /* @@ -621,27 +649,42 @@ struct ec_response_board_version { */ #define EC_CMD_READ_MEMMAP 0x07 +/** + * struct ec_params_read_memmap - Parameters for the read memory map command. + * @offset: Offset in memmap (EC_MEMMAP_*). + * @size: Size to read in bytes. + */ struct ec_params_read_memmap { - uint8_t offset; /* Offset in memmap (EC_MEMMAP_*) */ - uint8_t size; /* Size to read in bytes */ + uint8_t offset; + uint8_t size; } __packed; /* Read versions supported for a command */ #define EC_CMD_GET_CMD_VERSIONS 0x08 +/** + * struct ec_params_get_cmd_versions - Parameters for the get command versions. + * @cmd: Command to check. + */ struct ec_params_get_cmd_versions { - uint8_t cmd; /* Command to check */ + uint8_t cmd; } __packed; +/** + * struct ec_params_get_cmd_versions_v1 - Parameters for the get command + * versions (v1) + * @cmd: Command to check. + */ struct ec_params_get_cmd_versions_v1 { - uint16_t cmd; /* Command to check */ + uint16_t cmd; } __packed; +/** + * struct ec_response_get_cmd_version - Response to the get command versions. + * @version_mask: Mask of supported versions; use EC_VER_MASK() to compare with + * a desired version. + */ struct ec_response_get_cmd_versions { - /* - * Mask of supported versions; use EC_VER_MASK() to compare with a - * desired version. - */ uint32_t version_mask; } __packed; @@ -659,6 +702,11 @@ enum ec_comms_status { EC_COMMS_STATUS_PROCESSING = 1 << 0, /* Processing cmd */ }; +/** + * struct ec_response_get_comms_status - Response to the get comms status + * command. + * @flags: Mask of enum ec_comms_status. + */ struct ec_response_get_comms_status { uint32_t flags; /* Mask of enum ec_comms_status */ } __packed; @@ -685,19 +733,19 @@ struct ec_response_test_protocol { /* EC_RES_IN_PROGRESS may be returned if a command is slow */ #define EC_PROTOCOL_INFO_IN_PROGRESS_SUPPORTED (1 << 0) +/** + * struct ec_response_get_protocol_info - Response to the get protocol info. + * @protocol_versions: Bitmask of protocol versions supported (1 << n means + * version n). + * @max_request_packet_size: Maximum request packet size in bytes. + * @max_response_packet_size: Maximum response packet size in bytes. + * @flags: see EC_PROTOCOL_INFO_* + */ struct ec_response_get_protocol_info { /* Fields which exist if at least protocol version 3 supported */ - - /* Bitmask of protocol versions supported (1 << n means version n)*/ uint32_t protocol_versions; - - /* Maximum request packet size, in bytes */ uint16_t max_request_packet_size; - - /* Maximum response packet size, in bytes */ uint16_t max_response_packet_size; - - /* Flags; see EC_PROTOCOL_INFO_* */ uint32_t flags; } __packed; @@ -708,8 +756,10 @@ struct ec_response_get_protocol_info { /* The upper byte of .flags tells what to do (nothing means "get") */ #define EC_GSV_SET 0x80000000 -/* The lower three bytes of .flags identifies the parameter, if that has - meaning for an individual command. */ +/* + * The lower three bytes of .flags identifies the parameter, if that has + * meaning for an individual command. 
+ */ #define EC_GSV_PARAM_MASK 0x00ffffff struct ec_params_get_set_value { @@ -810,6 +860,7 @@ enum ec_feature_code { #define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32)) #define EC_FEATURE_MASK_1(event_code) (1UL << (event_code - 32)) + struct ec_response_get_features { uint32_t flags[2]; } __packed; @@ -820,24 +871,22 @@ struct ec_response_get_features { /* Get flash info */ #define EC_CMD_FLASH_INFO 0x10 -/* Version 0 returns these fields */ +/** + * struct ec_response_flash_info - Response to the flash info command. + * @flash_size: Usable flash size in bytes. + * @write_block_size: Write block size. Write offset and size must be a + * multiple of this. + * @erase_block_size: Erase block size. Erase offset and size must be a + * multiple of this. + * @protect_block_size: Protection block size. Protection offset and size + * must be a multiple of this. + * + * Version 0 returns these fields. + */ struct ec_response_flash_info { - /* Usable flash size, in bytes */ uint32_t flash_size; - /* - * Write block size. Write offset and size must be a multiple - * of this. - */ uint32_t write_block_size; - /* - * Erase block size. Erase offset and size must be a multiple - * of this. - */ uint32_t erase_block_size; - /* - * Protection block size. Protection offset and size must be a - * multiple of this. - */ uint32_t protect_block_size; } __packed; @@ -845,7 +894,22 @@ struct ec_response_flash_info { /* EC flash erases bits to 0 instead of 1 */ #define EC_FLASH_INFO_ERASE_TO_0 (1 << 0) -/* +/** + * struct ec_response_flash_info_1 - Response to the flash info v1 command. + * @flash_size: Usable flash size in bytes. + * @write_block_size: Write block size. Write offset and size must be a + * multiple of this. + * @erase_block_size: Erase block size. Erase offset and size must be a + * multiple of this. + * @protect_block_size: Protection block size. Protection offset and size + * must be a multiple of this. + * @write_ideal_size: Ideal write size in bytes. Writes will be fastest if + * size is exactly this and offset is a multiple of this. + * For example, an EC may have a write buffer which can do + * half-page operations if data is aligned, and a slower + * word-at-a-time write mode. + * @flags: Flags; see EC_FLASH_INFO_* + * * Version 1 returns the same initial fields as version 0, with additional * fields following. * @@ -860,15 +924,7 @@ struct ec_response_flash_info_1 { uint32_t protect_block_size; /* Version 1 adds these fields: */ - /* - * Ideal write size in bytes. Writes will be fastest if size is - * exactly this and offset is a multiple of this. For example, an EC - * may have a write buffer which can do half-page operations if data is - * aligned, and a slower word-at-a-time write mode. - */ uint32_t write_ideal_size; - - /* Flags; see EC_FLASH_INFO_* */ uint32_t flags; } __packed; @@ -879,9 +935,14 @@ struct ec_response_flash_info_1 { */ #define EC_CMD_FLASH_READ 0x11 +/** + * struct ec_params_flash_read - Parameters for the flash read command. + * @offset: Byte offset to read. + * @size: Size to read in bytes. + */ struct ec_params_flash_read { - uint32_t offset; /* Byte offset to read */ - uint32_t size; /* Size to read in bytes */ + uint32_t offset; + uint32_t size; } __packed; /* Write flash */ @@ -891,18 +952,28 @@ struct ec_params_flash_read { /* Version 0 of the flash command supported only 64 bytes of data */ #define EC_FLASH_WRITE_VER0_SIZE 64 +/** + * struct ec_params_flash_write - Parameters for the flash write command. + * @offset: Byte offset to write. 
+ * @size: Size to write in bytes. + */ struct ec_params_flash_write { - uint32_t offset; /* Byte offset to write */ - uint32_t size; /* Size to write in bytes */ + uint32_t offset; + uint32_t size; /* Followed by data to write */ } __packed; /* Erase flash */ #define EC_CMD_FLASH_ERASE 0x13 +/** + * struct ec_params_flash_erase - Parameters for the flash erase command. + * @offset: Byte offset to erase. + * @size: Size to erase in bytes. + */ struct ec_params_flash_erase { - uint32_t offset; /* Byte offset to erase */ - uint32_t size; /* Size to erase in bytes */ + uint32_t offset; + uint32_t size; } __packed; /* @@ -941,21 +1012,28 @@ struct ec_params_flash_erase { /* Entile flash code protected when the EC boots */ #define EC_FLASH_PROTECT_ALL_AT_BOOT (1 << 6) +/** + * struct ec_params_flash_protect - Parameters for the flash protect command. + * @mask: Bits in flags to apply. + * @flags: New flags to apply. + */ struct ec_params_flash_protect { - uint32_t mask; /* Bits in flags to apply */ - uint32_t flags; /* New flags to apply */ + uint32_t mask; + uint32_t flags; } __packed; +/** + * struct ec_response_flash_protect - Response to the flash protect command. + * @flags: Current value of flash protect flags. + * @valid_flags: Flags which are valid on this platform. This allows the + * caller to distinguish between flags which aren't set vs. flags + * which can't be set on this platform. + * @writable_flags: Flags which can be changed given the current protection + * state. + */ struct ec_response_flash_protect { - /* Current value of flash protect flags */ uint32_t flags; - /* - * Flags which are valid on this platform. This allows the caller - * to distinguish between flags which aren't set vs. flags which can't - * be set on this platform. - */ uint32_t valid_flags; - /* Flags which can be changed given the current protection state */ uint32_t writable_flags; } __packed; @@ -982,8 +1060,13 @@ enum ec_flash_region { EC_FLASH_REGION_COUNT, }; +/** + * struct ec_params_flash_region_info - Parameters for the flash region info + * command. + * @region: Flash region; see EC_FLASH_REGION_* + */ struct ec_params_flash_region_info { - uint32_t region; /* enum ec_flash_region */ + uint32_t region; } __packed; struct ec_response_flash_region_info { @@ -1094,7 +1177,9 @@ struct rgb_s { }; #define LB_BATTERY_LEVELS 4 -/* List of tweakable parameters. NOTE: It's __packed so it can be sent in a + +/* + * List of tweakable parameters. NOTE: It's __packed so it can be sent in a * host command, but the alignment is the same regardless. Keep it that way. 
*/ struct lightbar_params_v0 { diff --git a/include/linux/notifier.h b/include/linux/notifier.h index f35c7bf76143..0096a05395e3 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -122,8 +122,7 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); #ifdef CONFIG_TREE_SRCU #define _SRCU_NOTIFIER_HEAD(name, mod) \ - static DEFINE_PER_CPU(struct srcu_data, \ - name##_head_srcu_data); \ + static DEFINE_PER_CPU(struct srcu_data, name##_head_srcu_data); \ mod struct srcu_notifier_head name = \ SRCU_NOTIFIER_INIT(name, name##_head_srcu_data) diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h new file mode 100644 index 000000000000..53dfc2541960 --- /dev/null +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PLATFORM_DATA_X86_ASUS_WMI_H +#define __PLATFORM_DATA_X86_ASUS_WMI_H + +#include <linux/errno.h> +#include <linux/types.h> + +/* WMI Methods */ +#define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */ +#define ASUS_WMI_METHODID_SFBD 0x44424653 /* Set First Boot Device */ +#define ASUS_WMI_METHODID_GLCD 0x44434C47 /* Get LCD status */ +#define ASUS_WMI_METHODID_GPID 0x44495047 /* Get Panel ID?? (Resol) */ +#define ASUS_WMI_METHODID_QMOD 0x444F4D51 /* Quiet MODe */ +#define ASUS_WMI_METHODID_SPLV 0x4C425053 /* Set Panel Light Value */ +#define ASUS_WMI_METHODID_AGFN 0x4E464741 /* FaN? */ +#define ASUS_WMI_METHODID_SFUN 0x4E554653 /* FUNCtionalities */ +#define ASUS_WMI_METHODID_SDSP 0x50534453 /* Set DiSPlay output */ +#define ASUS_WMI_METHODID_GDSP 0x50534447 /* Get DiSPlay output */ +#define ASUS_WMI_METHODID_DEVP 0x50564544 /* DEVice Policy */ +#define ASUS_WMI_METHODID_OSVR 0x5256534F /* OS VeRsion */ +#define ASUS_WMI_METHODID_DSTS 0x53544344 /* Device STatuS */ +#define ASUS_WMI_METHODID_DSTS2 0x53545344 /* Device STatuS #2*/ +#define ASUS_WMI_METHODID_BSTS 0x53545342 /* Bios STatuS ? */ +#define ASUS_WMI_METHODID_DEVS 0x53564544 /* DEVice Set */ +#define ASUS_WMI_METHODID_CFVS 0x53564643 /* CPU Frequency Volt Set */ +#define ASUS_WMI_METHODID_KBFT 0x5446424B /* KeyBoard FilTer */ +#define ASUS_WMI_METHODID_INIT 0x54494E49 /* INITialize */ +#define ASUS_WMI_METHODID_HKEY 0x59454B48 /* Hot KEY ?? */ + +#define ASUS_WMI_UNSUPPORTED_METHOD 0xFFFFFFFE + +/* Wireless */ +#define ASUS_WMI_DEVID_HW_SWITCH 0x00010001 +#define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002 +#define ASUS_WMI_DEVID_CWAP 0x00010003 +#define ASUS_WMI_DEVID_WLAN 0x00010011 +#define ASUS_WMI_DEVID_WLAN_LED 0x00010012 +#define ASUS_WMI_DEVID_BLUETOOTH 0x00010013 +#define ASUS_WMI_DEVID_GPS 0x00010015 +#define ASUS_WMI_DEVID_WIMAX 0x00010017 +#define ASUS_WMI_DEVID_WWAN3G 0x00010019 +#define ASUS_WMI_DEVID_UWB 0x00010021 + +/* Leds */ +/* 0x000200XX and 0x000400XX */ +#define ASUS_WMI_DEVID_LED1 0x00020011 +#define ASUS_WMI_DEVID_LED2 0x00020012 +#define ASUS_WMI_DEVID_LED3 0x00020013 +#define ASUS_WMI_DEVID_LED4 0x00020014 +#define ASUS_WMI_DEVID_LED5 0x00020015 +#define ASUS_WMI_DEVID_LED6 0x00020016 + +/* Backlight and Brightness */ +#define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */ +#define ASUS_WMI_DEVID_BACKLIGHT 0x00050011 +#define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012 +#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021 +#define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? 
*/ +#define ASUS_WMI_DEVID_LIGHTBAR 0x00050025 + +/* Misc */ +#define ASUS_WMI_DEVID_CAMERA 0x00060013 + +/* Storage */ +#define ASUS_WMI_DEVID_CARDREADER 0x00080013 + +/* Input */ +#define ASUS_WMI_DEVID_TOUCHPAD 0x00100011 +#define ASUS_WMI_DEVID_TOUCHPAD_LED 0x00100012 + +/* Fan, Thermal */ +#define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011 +#define ASUS_WMI_DEVID_FAN_CTRL 0x00110012 + +/* Power */ +#define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012 + +/* Deep S3 / Resume on LID open */ +#define ASUS_WMI_DEVID_LID_RESUME 0x00120031 + +/* DSTS masks */ +#define ASUS_WMI_DSTS_STATUS_BIT 0x00000001 +#define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002 +#define ASUS_WMI_DSTS_PRESENCE_BIT 0x00010000 +#define ASUS_WMI_DSTS_USER_BIT 0x00020000 +#define ASUS_WMI_DSTS_BIOS_BIT 0x00040000 +#define ASUS_WMI_DSTS_BRIGHTNESS_MASK 0x000000FF +#define ASUS_WMI_DSTS_MAX_BRIGTH_MASK 0x0000FF00 +#define ASUS_WMI_DSTS_LIGHTBAR_MASK 0x0000000F + +#if IS_REACHABLE(CONFIG_ASUS_WMI) +int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval); +#else +static inline int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, + u32 *retval) +{ + return -ENODEV; +} +#endif + +#endif /* __PLATFORM_DATA_X86_ASUS_WMI_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 8f8a5418b627..a51c13c2b1a0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1200,6 +1200,11 @@ struct task_struct { void *security; #endif +#ifdef CONFIG_GCC_PLUGIN_STACKLEAK + unsigned long lowest_stack; + unsigned long prev_lowest_stack; +#endif + /* * New fields for task_struct should be added above here, so that * they are included in the randomized portion of task_struct. diff --git a/include/linux/stackleak.h b/include/linux/stackleak.h new file mode 100644 index 000000000000..3d5c3271a9a8 --- /dev/null +++ b/include/linux/stackleak.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_STACKLEAK_H +#define _LINUX_STACKLEAK_H + +#include <linux/sched.h> +#include <linux/sched/task_stack.h> + +/* + * Check that the poison value points to the unused hole in the + * virtual memory map for your platform. 
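+ * As a worked example (not part of the patch text): -0xBEEF stored as an
+ * unsigned long is 0xffffffffffff4111 on a 64-bit platform and 0xffff4111
+ * on 32-bit; the point of the check above is that this address must fall
+ * in such an unused hole on the architecture being ported to.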
+ */ +#define STACKLEAK_POISON -0xBEEF +#define STACKLEAK_SEARCH_DEPTH 128 + +#ifdef CONFIG_GCC_PLUGIN_STACKLEAK +#include <asm/stacktrace.h> + +static inline void stackleak_task_init(struct task_struct *t) +{ + t->lowest_stack = (unsigned long)end_of_stack(t) + sizeof(unsigned long); +# ifdef CONFIG_STACKLEAK_METRICS + t->prev_lowest_stack = t->lowest_stack; +# endif +} + +#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE +int stack_erasing_sysctl(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); +#endif + +#else /* !CONFIG_GCC_PLUGIN_STACKLEAK */ +static inline void stackleak_task_init(struct task_struct *t) { } +#endif + +#endif diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h index 131424cefc6a..02c0412e368c 100644 --- a/include/linux/sunrpc/gss_krb5.h +++ b/include/linux/sunrpc/gss_krb5.h @@ -107,8 +107,8 @@ struct krb5_ctx { u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */ u8 cksum[GSS_KRB5_MAX_KEYLEN]; s32 endtime; - u32 seq_send; - u64 seq_send64; + atomic_t seq_send; + atomic64_t seq_send64; struct xdr_netobj mech_used; u8 initiator_sign[GSS_KRB5_MAX_KEYLEN]; u8 acceptor_sign[GSS_KRB5_MAX_KEYLEN]; @@ -118,9 +118,6 @@ struct krb5_ctx { u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN]; }; -extern u32 gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx); -extern u64 gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx); - /* The length of the Kerberos GSS token header */ #define GSS_KRB5_TOK_HDR_LEN (16) diff --git a/include/linux/uio.h b/include/linux/uio.h index 422b1c01ee0d..55ce99ddb912 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -21,15 +21,16 @@ struct kvec { size_t iov_len; }; -enum { +enum iter_type { ITER_IOVEC = 0, ITER_KVEC = 2, ITER_BVEC = 4, ITER_PIPE = 8, + ITER_DISCARD = 16, }; struct iov_iter { - int type; + unsigned int type; size_t iov_offset; size_t count; union { @@ -47,6 +48,41 @@ struct iov_iter { }; }; +static inline enum iter_type iov_iter_type(const struct iov_iter *i) +{ + return i->type & ~(READ | WRITE); +} + +static inline bool iter_is_iovec(const struct iov_iter *i) +{ + return iov_iter_type(i) == ITER_IOVEC; +} + +static inline bool iov_iter_is_kvec(const struct iov_iter *i) +{ + return iov_iter_type(i) == ITER_KVEC; +} + +static inline bool iov_iter_is_bvec(const struct iov_iter *i) +{ + return iov_iter_type(i) == ITER_BVEC; +} + +static inline bool iov_iter_is_pipe(const struct iov_iter *i) +{ + return iov_iter_type(i) == ITER_PIPE; +} + +static inline bool iov_iter_is_discard(const struct iov_iter *i) +{ + return iov_iter_type(i) == ITER_DISCARD; +} + +static inline unsigned char iov_iter_rw(const struct iov_iter *i) +{ + return i->type & (READ | WRITE); +} + /* * Total number of bytes covered by an iovec. 
* @@ -74,7 +110,8 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter) } #define iov_for_each(iov, iter, start) \ - if (!((start).type & (ITER_BVEC | ITER_PIPE))) \ + if (iov_iter_type(start) == ITER_IOVEC || \ + iov_iter_type(start) == ITER_KVEC) \ for (iter = (start); \ (iter).count && \ ((iov = iov_iter_iovec(&(iter))), 1); \ @@ -181,14 +218,15 @@ size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i) size_t iov_iter_zero(size_t bytes, struct iov_iter *); unsigned long iov_iter_alignment(const struct iov_iter *i); unsigned long iov_iter_gap_alignment(const struct iov_iter *i); -void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, +void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov, unsigned long nr_segs, size_t count); -void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec, +void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec, unsigned long nr_segs, size_t count); -void iov_iter_bvec(struct iov_iter *i, int direction, const struct bio_vec *bvec, +void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec, unsigned long nr_segs, size_t count); -void iov_iter_pipe(struct iov_iter *i, int direction, struct pipe_inode_info *pipe, +void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe, size_t count); +void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count); ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start); ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, @@ -202,19 +240,6 @@ static inline size_t iov_iter_count(const struct iov_iter *i) return i->count; } -static inline bool iter_is_iovec(const struct iov_iter *i) -{ - return !(i->type & (ITER_BVEC | ITER_KVEC | ITER_PIPE)); -} - -/* - * Get one of READ or WRITE out of iter->type without any other flags OR'd in - * with it. - * - * The ?: is just for type safety. - */ -#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & (READ | WRITE)) - /* * Cap the iov_iter by given limit; note that the second argument is * *not* the new size - it's upper limit for such. Passing it a value diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 738a0c24874f..fdfd04e348f6 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -246,8 +246,7 @@ static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc, * * @bio is a part of the writeback in progress controlled by @wbc. Perform * writeback specific initialization. This is used to apply the cgroup - * writeback context. Must be called after the bio has been associated with - * a device. + * writeback context. */ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio) { @@ -258,7 +257,7 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio) * regular writeback instead of writing things out itself. 
*/ if (wbc->wb) - bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css); + bio_associate_blkcg(bio, wbc->wb->blkcg_css); } #else /* CONFIG_CGROUP_WRITEBACK */ diff --git a/include/net/af_unix.h b/include/net/af_unix.h index e2695c4bf358..ddbba838d048 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -13,7 +13,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp); void unix_gc(void); void wait_for_unix_gc(void); struct sock *unix_get_socket(struct file *filp); -struct sock *unix_peer_get(struct sock *); +struct sock *unix_peer_get(struct sock *sk); #define UNIX_HASH_SIZE 256 #define UNIX_HASH_BITS 8 @@ -40,7 +40,7 @@ struct unix_skb_parms { u32 consumed; } __randomize_layout; -#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb)) +#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb)) #define unix_state_lock(s) spin_lock(&unix_sk(s)->lock) #define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock) diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index d0a341bc4540..33d291888ba9 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -54,6 +54,35 @@ enum afs_fs_operation { afs_FS_StoreData64 = 65538, /* AFS Store file data */ afs_FS_GiveUpAllCallBacks = 65539, /* AFS Give up all our callbacks on a server */ afs_FS_GetCapabilities = 65540, /* AFS Get FS server capabilities */ + + yfs_FS_FetchData = 130, /* YFS Fetch file data */ + yfs_FS_FetchACL = 64131, /* YFS Fetch file ACL */ + yfs_FS_FetchStatus = 64132, /* YFS Fetch file status */ + yfs_FS_StoreACL = 64134, /* YFS Store file ACL */ + yfs_FS_StoreStatus = 64135, /* YFS Store file status */ + yfs_FS_RemoveFile = 64136, /* YFS Remove a file */ + yfs_FS_CreateFile = 64137, /* YFS Create a file */ + yfs_FS_Rename = 64138, /* YFS Rename or move a file or directory */ + yfs_FS_Symlink = 64139, /* YFS Create a symbolic link */ + yfs_FS_Link = 64140, /* YFS Create a hard link */ + yfs_FS_MakeDir = 64141, /* YFS Create a directory */ + yfs_FS_RemoveDir = 64142, /* YFS Remove a directory */ + yfs_FS_GetVolumeStatus = 64149, /* YFS Get volume status information */ + yfs_FS_SetVolumeStatus = 64150, /* YFS Set volume status information */ + yfs_FS_SetLock = 64156, /* YFS Request a file lock */ + yfs_FS_ExtendLock = 64157, /* YFS Extend a file lock */ + yfs_FS_ReleaseLock = 64158, /* YFS Release a file lock */ + yfs_FS_Lookup = 64161, /* YFS lookup file in directory */ + yfs_FS_FlushCPS = 64165, + yfs_FS_FetchOpaqueACL = 64168, + yfs_FS_WhoAmI = 64170, + yfs_FS_RemoveACL = 64171, + yfs_FS_RemoveFile2 = 64173, + yfs_FS_StoreOpaqueACL2 = 64174, + yfs_FS_InlineBulkStatus = 64536, /* YFS Fetch multiple file statuses with errors */ + yfs_FS_FetchData64 = 64537, /* YFS Fetch file data */ + yfs_FS_StoreData64 = 64538, /* YFS Store file data */ + yfs_FS_UpdateSymlink = 64540, }; enum afs_vl_operation { @@ -84,6 +113,44 @@ enum afs_edit_dir_reason { afs_edit_dir_for_unlink, }; +enum afs_eproto_cause { + afs_eproto_bad_status, + afs_eproto_cb_count, + afs_eproto_cb_fid_count, + afs_eproto_file_type, + afs_eproto_ibulkst_cb_count, + afs_eproto_ibulkst_count, + afs_eproto_motd_len, + afs_eproto_offline_msg_len, + afs_eproto_volname_len, + afs_eproto_yvl_fsendpt4_len, + afs_eproto_yvl_fsendpt6_len, + afs_eproto_yvl_fsendpt_num, + afs_eproto_yvl_fsendpt_type, + afs_eproto_yvl_vlendpt4_len, + afs_eproto_yvl_vlendpt6_len, + afs_eproto_yvl_vlendpt_type, +}; + +enum afs_io_error { + afs_io_error_cm_reply, + afs_io_error_extract, + afs_io_error_fs_probe_fail, + 
afs_io_error_vl_lookup_fail, + afs_io_error_vl_probe_fail, +}; + +enum afs_file_error { + afs_file_error_dir_bad_magic, + afs_file_error_dir_big, + afs_file_error_dir_missing_page, + afs_file_error_dir_over_end, + afs_file_error_dir_small, + afs_file_error_dir_unmarked_ext, + afs_file_error_mntpt, + afs_file_error_writeback_fail, +}; + #endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */ /* @@ -119,7 +186,34 @@ enum afs_edit_dir_reason { EM(afs_FS_FetchData64, "FS.FetchData64") \ EM(afs_FS_StoreData64, "FS.StoreData64") \ EM(afs_FS_GiveUpAllCallBacks, "FS.GiveUpAllCallBacks") \ - E_(afs_FS_GetCapabilities, "FS.GetCapabilities") + EM(afs_FS_GetCapabilities, "FS.GetCapabilities") \ + EM(yfs_FS_FetchACL, "YFS.FetchACL") \ + EM(yfs_FS_FetchStatus, "YFS.FetchStatus") \ + EM(yfs_FS_StoreACL, "YFS.StoreACL") \ + EM(yfs_FS_StoreStatus, "YFS.StoreStatus") \ + EM(yfs_FS_RemoveFile, "YFS.RemoveFile") \ + EM(yfs_FS_CreateFile, "YFS.CreateFile") \ + EM(yfs_FS_Rename, "YFS.Rename") \ + EM(yfs_FS_Symlink, "YFS.Symlink") \ + EM(yfs_FS_Link, "YFS.Link") \ + EM(yfs_FS_MakeDir, "YFS.MakeDir") \ + EM(yfs_FS_RemoveDir, "YFS.RemoveDir") \ + EM(yfs_FS_GetVolumeStatus, "YFS.GetVolumeStatus") \ + EM(yfs_FS_SetVolumeStatus, "YFS.SetVolumeStatus") \ + EM(yfs_FS_SetLock, "YFS.SetLock") \ + EM(yfs_FS_ExtendLock, "YFS.ExtendLock") \ + EM(yfs_FS_ReleaseLock, "YFS.ReleaseLock") \ + EM(yfs_FS_Lookup, "YFS.Lookup") \ + EM(yfs_FS_FlushCPS, "YFS.FlushCPS") \ + EM(yfs_FS_FetchOpaqueACL, "YFS.FetchOpaqueACL") \ + EM(yfs_FS_WhoAmI, "YFS.WhoAmI") \ + EM(yfs_FS_RemoveACL, "YFS.RemoveACL") \ + EM(yfs_FS_RemoveFile2, "YFS.RemoveFile2") \ + EM(yfs_FS_StoreOpaqueACL2, "YFS.StoreOpaqueACL2") \ + EM(yfs_FS_InlineBulkStatus, "YFS.InlineBulkStatus") \ + EM(yfs_FS_FetchData64, "YFS.FetchData64") \ + EM(yfs_FS_StoreData64, "YFS.StoreData64") \ + E_(yfs_FS_UpdateSymlink, "YFS.UpdateSymlink") #define afs_vl_operations \ EM(afs_VL_GetEntryByNameU, "VL.GetEntryByNameU") \ @@ -146,6 +240,40 @@ enum afs_edit_dir_reason { EM(afs_edit_dir_for_symlink, "Symlnk") \ E_(afs_edit_dir_for_unlink, "Unlink") +#define afs_eproto_causes \ + EM(afs_eproto_bad_status, "BadStatus") \ + EM(afs_eproto_cb_count, "CbCount") \ + EM(afs_eproto_cb_fid_count, "CbFidCount") \ + EM(afs_eproto_file_type, "FileTYpe") \ + EM(afs_eproto_ibulkst_cb_count, "IBS.CbCount") \ + EM(afs_eproto_ibulkst_count, "IBS.FidCount") \ + EM(afs_eproto_motd_len, "MotdLen") \ + EM(afs_eproto_offline_msg_len, "OfflineMsgLen") \ + EM(afs_eproto_volname_len, "VolNameLen") \ + EM(afs_eproto_yvl_fsendpt4_len, "YVL.FsEnd4Len") \ + EM(afs_eproto_yvl_fsendpt6_len, "YVL.FsEnd6Len") \ + EM(afs_eproto_yvl_fsendpt_num, "YVL.FsEndCount") \ + EM(afs_eproto_yvl_fsendpt_type, "YVL.FsEndType") \ + EM(afs_eproto_yvl_vlendpt4_len, "YVL.VlEnd4Len") \ + EM(afs_eproto_yvl_vlendpt6_len, "YVL.VlEnd6Len") \ + E_(afs_eproto_yvl_vlendpt_type, "YVL.VlEndType") + +#define afs_io_errors \ + EM(afs_io_error_cm_reply, "CM_REPLY") \ + EM(afs_io_error_extract, "EXTRACT") \ + EM(afs_io_error_fs_probe_fail, "FS_PROBE_FAIL") \ + EM(afs_io_error_vl_lookup_fail, "VL_LOOKUP_FAIL") \ + E_(afs_io_error_vl_probe_fail, "VL_PROBE_FAIL") + +#define afs_file_errors \ + EM(afs_file_error_dir_bad_magic, "DIR_BAD_MAGIC") \ + EM(afs_file_error_dir_big, "DIR_BIG") \ + EM(afs_file_error_dir_missing_page, "DIR_MISSING_PAGE") \ + EM(afs_file_error_dir_over_end, "DIR_ENT_OVER_END") \ + EM(afs_file_error_dir_small, "DIR_SMALL") \ + EM(afs_file_error_dir_unmarked_ext, "DIR_UNMARKED_EXT") \ + EM(afs_file_error_mntpt, "MNTPT_READ_FAILED") \ + 
E_(afs_file_error_writeback_fail, "WRITEBACK_FAILED") /* * Export enum symbols via userspace. @@ -160,6 +288,9 @@ afs_fs_operations; afs_vl_operations; afs_edit_dir_ops; afs_edit_dir_reasons; +afs_eproto_causes; +afs_io_errors; +afs_file_errors; /* * Now redefine the EM() and E_() macros to map the enums to the strings that @@ -170,17 +301,16 @@ afs_edit_dir_reasons; #define EM(a, b) { a, b }, #define E_(a, b) { a, b } -TRACE_EVENT(afs_recv_data, - TP_PROTO(struct afs_call *call, unsigned count, unsigned offset, +TRACE_EVENT(afs_receive_data, + TP_PROTO(struct afs_call *call, struct iov_iter *iter, bool want_more, int ret), - TP_ARGS(call, count, offset, want_more, ret), + TP_ARGS(call, iter, want_more, ret), TP_STRUCT__entry( + __field(loff_t, remain ) __field(unsigned int, call ) __field(enum afs_call_state, state ) - __field(unsigned int, count ) - __field(unsigned int, offset ) __field(unsigned short, unmarshall ) __field(bool, want_more ) __field(int, ret ) @@ -190,17 +320,18 @@ TRACE_EVENT(afs_recv_data, __entry->call = call->debug_id; __entry->state = call->state; __entry->unmarshall = call->unmarshall; - __entry->count = count; - __entry->offset = offset; + __entry->remain = iov_iter_count(iter); __entry->want_more = want_more; __entry->ret = ret; ), - TP_printk("c=%08x s=%u u=%u %u/%u wm=%u ret=%d", + TP_printk("c=%08x r=%llu u=%u w=%u s=%u ret=%d", __entry->call, - __entry->state, __entry->unmarshall, - __entry->offset, __entry->count, - __entry->want_more, __entry->ret) + __entry->remain, + __entry->unmarshall, + __entry->want_more, + __entry->state, + __entry->ret) ); TRACE_EVENT(afs_notify_call, @@ -301,7 +432,7 @@ TRACE_EVENT(afs_make_fs_call, } ), - TP_printk("c=%08x %06x:%06x:%06x %s", + TP_printk("c=%08x %06llx:%06llx:%06x %s", __entry->call, __entry->fid.vid, __entry->fid.vnode, @@ -555,24 +686,70 @@ TRACE_EVENT(afs_edit_dir, ); TRACE_EVENT(afs_protocol_error, - TP_PROTO(struct afs_call *call, int error, const void *where), + TP_PROTO(struct afs_call *call, int error, enum afs_eproto_cause cause), + + TP_ARGS(call, error, cause), + + TP_STRUCT__entry( + __field(unsigned int, call ) + __field(int, error ) + __field(enum afs_eproto_cause, cause ) + ), + + TP_fast_assign( + __entry->call = call ? call->debug_id : 0; + __entry->error = error; + __entry->cause = cause; + ), + + TP_printk("c=%08x r=%d %s", + __entry->call, __entry->error, + __print_symbolic(__entry->cause, afs_eproto_causes)) + ); + +TRACE_EVENT(afs_io_error, + TP_PROTO(unsigned int call, int error, enum afs_io_error where), TP_ARGS(call, error, where), TP_STRUCT__entry( __field(unsigned int, call ) __field(int, error ) - __field(const void *, where ) + __field(enum afs_io_error, where ) ), TP_fast_assign( - __entry->call = call ? 
call->debug_id : 0; + __entry->call = call; + __entry->error = error; + __entry->where = where; + ), + + TP_printk("c=%08x r=%d %s", + __entry->call, __entry->error, + __print_symbolic(__entry->where, afs_io_errors)) + ); + +TRACE_EVENT(afs_file_error, + TP_PROTO(struct afs_vnode *vnode, int error, enum afs_file_error where), + + TP_ARGS(vnode, error, where), + + TP_STRUCT__entry( + __field_struct(struct afs_fid, fid ) + __field(int, error ) + __field(enum afs_file_error, where ) + ), + + TP_fast_assign( + __entry->fid = vnode->fid; __entry->error = error; __entry->where = where; ), - TP_printk("c=%08x r=%d sp=%pSR", - __entry->call, __entry->error, __entry->where) + TP_printk("%llx:%llx:%x r=%d %s", + __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique, + __entry->error, + __print_symbolic(__entry->where, afs_file_errors)) ); TRACE_EVENT(afs_cm_no_server, diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h index 31aa10178335..93722e60204c 100644 --- a/include/uapi/linux/elf-em.h +++ b/include/uapi/linux/elf-em.h @@ -41,6 +41,7 @@ #define EM_TILEPRO 188 /* Tilera TILEPro */ #define EM_MICROBLAZE 189 /* Xilinx MicroBlaze */ #define EM_TILEGX 191 /* Tilera TILE-Gx */ +#define EM_RISCV 243 /* RISC-V */ #define EM_BPF 247 /* Linux BPF - in-kernel virtual machine */ #define EM_FRV 0x5441 /* Fujitsu FR-V */ diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 92fa24c24c92..b4967d48bfda 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -116,6 +116,12 @@ * * 7.27 * - add FUSE_ABORT_ERROR + * + * 7.28 + * - add FUSE_COPY_FILE_RANGE + * - add FOPEN_CACHE_DIR + * - add FUSE_MAX_PAGES, add max_pages to init_out + * - add FUSE_CACHE_SYMLINKS */ #ifndef _LINUX_FUSE_H @@ -151,7 +157,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 27 +#define FUSE_KERNEL_MINOR_VERSION 28 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -219,10 +225,12 @@ struct fuse_file_lock { * FOPEN_DIRECT_IO: bypass page cache for this open file * FOPEN_KEEP_CACHE: don't invalidate the data cache on open * FOPEN_NONSEEKABLE: the file is not seekable + * FOPEN_CACHE_DIR: allow caching this directory */ #define FOPEN_DIRECT_IO (1 << 0) #define FOPEN_KEEP_CACHE (1 << 1) #define FOPEN_NONSEEKABLE (1 << 2) +#define FOPEN_CACHE_DIR (1 << 3) /** * INIT request/reply flags @@ -249,6 +257,8 @@ struct fuse_file_lock { * FUSE_HANDLE_KILLPRIV: fs handles killing suid/sgid/cap on write/chown/trunc * FUSE_POSIX_ACL: filesystem supports posix acls * FUSE_ABORT_ERROR: reading the device after abort returns ECONNABORTED + * FUSE_MAX_PAGES: init_out.max_pages contains the max number of req pages + * FUSE_CACHE_SYMLINKS: cache READLINK responses */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -272,6 +282,8 @@ struct fuse_file_lock { #define FUSE_HANDLE_KILLPRIV (1 << 19) #define FUSE_POSIX_ACL (1 << 20) #define FUSE_ABORT_ERROR (1 << 21) +#define FUSE_MAX_PAGES (1 << 22) +#define FUSE_CACHE_SYMLINKS (1 << 23) /** * CUSE INIT request/reply flags @@ -337,53 +349,54 @@ struct fuse_file_lock { #define FUSE_POLL_SCHEDULE_NOTIFY (1 << 0) enum fuse_opcode { - FUSE_LOOKUP = 1, - FUSE_FORGET = 2, /* no reply */ - FUSE_GETATTR = 3, - FUSE_SETATTR = 4, - FUSE_READLINK = 5, - FUSE_SYMLINK = 6, - FUSE_MKNOD = 8, - FUSE_MKDIR = 9, - FUSE_UNLINK = 10, - FUSE_RMDIR = 11, - FUSE_RENAME = 12, - FUSE_LINK = 13, - FUSE_OPEN = 14, - FUSE_READ = 15, - FUSE_WRITE = 16, - FUSE_STATFS = 17, - FUSE_RELEASE 
= 18, - FUSE_FSYNC = 20, - FUSE_SETXATTR = 21, - FUSE_GETXATTR = 22, - FUSE_LISTXATTR = 23, - FUSE_REMOVEXATTR = 24, - FUSE_FLUSH = 25, - FUSE_INIT = 26, - FUSE_OPENDIR = 27, - FUSE_READDIR = 28, - FUSE_RELEASEDIR = 29, - FUSE_FSYNCDIR = 30, - FUSE_GETLK = 31, - FUSE_SETLK = 32, - FUSE_SETLKW = 33, - FUSE_ACCESS = 34, - FUSE_CREATE = 35, - FUSE_INTERRUPT = 36, - FUSE_BMAP = 37, - FUSE_DESTROY = 38, - FUSE_IOCTL = 39, - FUSE_POLL = 40, - FUSE_NOTIFY_REPLY = 41, - FUSE_BATCH_FORGET = 42, - FUSE_FALLOCATE = 43, - FUSE_READDIRPLUS = 44, - FUSE_RENAME2 = 45, - FUSE_LSEEK = 46, + FUSE_LOOKUP = 1, + FUSE_FORGET = 2, /* no reply */ + FUSE_GETATTR = 3, + FUSE_SETATTR = 4, + FUSE_READLINK = 5, + FUSE_SYMLINK = 6, + FUSE_MKNOD = 8, + FUSE_MKDIR = 9, + FUSE_UNLINK = 10, + FUSE_RMDIR = 11, + FUSE_RENAME = 12, + FUSE_LINK = 13, + FUSE_OPEN = 14, + FUSE_READ = 15, + FUSE_WRITE = 16, + FUSE_STATFS = 17, + FUSE_RELEASE = 18, + FUSE_FSYNC = 20, + FUSE_SETXATTR = 21, + FUSE_GETXATTR = 22, + FUSE_LISTXATTR = 23, + FUSE_REMOVEXATTR = 24, + FUSE_FLUSH = 25, + FUSE_INIT = 26, + FUSE_OPENDIR = 27, + FUSE_READDIR = 28, + FUSE_RELEASEDIR = 29, + FUSE_FSYNCDIR = 30, + FUSE_GETLK = 31, + FUSE_SETLK = 32, + FUSE_SETLKW = 33, + FUSE_ACCESS = 34, + FUSE_CREATE = 35, + FUSE_INTERRUPT = 36, + FUSE_BMAP = 37, + FUSE_DESTROY = 38, + FUSE_IOCTL = 39, + FUSE_POLL = 40, + FUSE_NOTIFY_REPLY = 41, + FUSE_BATCH_FORGET = 42, + FUSE_FALLOCATE = 43, + FUSE_READDIRPLUS = 44, + FUSE_RENAME2 = 45, + FUSE_LSEEK = 46, + FUSE_COPY_FILE_RANGE = 47, /* CUSE specific operations */ - CUSE_INIT = 4096, + CUSE_INIT = 4096, }; enum fuse_notify_code { @@ -610,7 +623,9 @@ struct fuse_init_out { uint16_t congestion_threshold; uint32_t max_write; uint32_t time_gran; - uint32_t unused[9]; + uint16_t max_pages; + uint16_t padding; + uint32_t unused[8]; }; #define CUSE_INIT_INFO_MAX 4096 @@ -792,4 +807,14 @@ struct fuse_lseek_out { uint64_t offset; }; +struct fuse_copy_file_range_in { + uint64_t fh_in; + uint64_t off_in; + uint64_t nodeid_out; + uint64_t fh_out; + uint64_t off_out; + uint64_t len; + uint64_t flags; +}; + #endif /* _LINUX_FUSE_H */ diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h index 0f3cb13db8e9..f45ee0f69c0c 100644 --- a/include/uapi/linux/keyctl.h +++ b/include/uapi/linux/keyctl.h @@ -61,6 +61,11 @@ #define KEYCTL_INVALIDATE 21 /* invalidate a key */ #define KEYCTL_GET_PERSISTENT 22 /* get a user's persistent keyring */ #define KEYCTL_DH_COMPUTE 23 /* Compute Diffie-Hellman values */ +#define KEYCTL_PKEY_QUERY 24 /* Query public key parameters */ +#define KEYCTL_PKEY_ENCRYPT 25 /* Encrypt a blob using a public key */ +#define KEYCTL_PKEY_DECRYPT 26 /* Decrypt a blob using a public key */ +#define KEYCTL_PKEY_SIGN 27 /* Create a public key signature */ +#define KEYCTL_PKEY_VERIFY 28 /* Verify a public key signature */ #define KEYCTL_RESTRICT_KEYRING 29 /* Restrict keys allowed to link to a keyring */ /* keyctl structures */ @@ -82,4 +87,29 @@ struct keyctl_kdf_params { __u32 __spare[8]; }; +#define KEYCTL_SUPPORTS_ENCRYPT 0x01 +#define KEYCTL_SUPPORTS_DECRYPT 0x02 +#define KEYCTL_SUPPORTS_SIGN 0x04 +#define KEYCTL_SUPPORTS_VERIFY 0x08 + +struct keyctl_pkey_query { + __u32 supported_ops; /* Which ops are supported */ + __u32 key_size; /* Size of the key in bits */ + __u16 max_data_size; /* Maximum size of raw data to sign in bytes */ + __u16 max_sig_size; /* Maximum size of signature in bytes */ + __u16 max_enc_size; /* Maximum size of encrypted blob in bytes */ + __u16 max_dec_size; /* Maximum size of decrypted 
blob in bytes */ + __u32 __spare[10]; +}; + +struct keyctl_pkey_params { + __s32 key_id; /* Serial no. of public key to use */ + __u32 in_len; /* Input data size */ + union { + __u32 out_len; /* Output buffer size (encrypt/decrypt/sign) */ + __u32 in2_len; /* 2nd input data size (verify) */ + }; + __u32 __spare[7]; +}; + #endif /* _LINUX_KEYCTL_H */ diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index f35eb72739c0..9de8780ac8d9 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -646,10 +646,12 @@ struct perf_event_mmap_page { * * PERF_RECORD_MISC_MMAP_DATA - PERF_RECORD_MMAP* events * PERF_RECORD_MISC_COMM_EXEC - PERF_RECORD_COMM event + * PERF_RECORD_MISC_FORK_EXEC - PERF_RECORD_FORK event (perf internal) * PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events */ #define PERF_RECORD_MISC_MMAP_DATA (1 << 13) #define PERF_RECORD_MISC_COMM_EXEC (1 << 13) +#define PERF_RECORD_MISC_FORK_EXEC (1 << 13) #define PERF_RECORD_MISC_SWITCH_OUT (1 << 13) /* * These PERF_RECORD_MISC_* flags below are safely reused diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index f378b9802d8b..813102810f53 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -303,6 +303,56 @@ struct vfio_region_info_cap_type { #define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG (2) #define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG (3) +#define VFIO_REGION_TYPE_GFX (1) +#define VFIO_REGION_SUBTYPE_GFX_EDID (1) + +/** + * struct vfio_region_gfx_edid - EDID region layout. + * + * Set display link state and EDID blob. + * + * The EDID blob has monitor information such as brand, name, serial + * number, physical size, supported video modes and more. + * + * This special region allows userspace (typically qemu) to set a virtual + * EDID for the virtual monitor, which allows a flexible display + * configuration. + * + * For the edid blob spec look here: + * https://en.wikipedia.org/wiki/Extended_Display_Identification_Data + * + * On Linux systems you can find the EDID blob in sysfs: + * /sys/class/drm/${card}/${connector}/edid + * + * You can use the edid-decode utility (comes with xorg-x11-utils) to + * decode the EDID blob. + * + * @edid_offset: location of the edid blob, relative to the + * start of the region (readonly). + * @edid_max_size: max size of the edid blob (readonly). + * @edid_size: actual edid size (read/write). + * @link_state: display link state (read/write). + * VFIO_DEVICE_GFX_LINK_STATE_UP: Monitor is turned on. + * VFIO_DEVICE_GFX_LINK_STATE_DOWN: Monitor is turned off. + * @max_xres: max display width (0 == no limitation, readonly). + * @max_yres: max display height (0 == no limitation, readonly). + * + * EDID update protocol: + * (1) set link-state to down. + * (2) update edid blob and size. + * (3) set link-state to up.
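+ *
+ * A rough userspace sketch of that protocol (illustrative only, not from
+ * the patch; device_fd, region_offset, edid_buf and the __u32 edid_len are
+ * assumed names, and the region is assumed to have been located already via
+ * VFIO_DEVICE_GET_REGION_INFO, with edid_offset read back from the region):
+ *
+ *	__u32 state = VFIO_DEVICE_GFX_LINK_STATE_DOWN;
+ *
+ *	pwrite(device_fd, &state, sizeof(state), region_offset +
+ *	       offsetof(struct vfio_region_gfx_edid, link_state));
+ *	pwrite(device_fd, edid_buf, edid_len, region_offset + edid_offset);
+ *	pwrite(device_fd, &edid_len, sizeof(edid_len), region_offset +
+ *	       offsetof(struct vfio_region_gfx_edid, edid_size));
+ *	state = VFIO_DEVICE_GFX_LINK_STATE_UP;
+ *	pwrite(device_fd, &state, sizeof(state), region_offset +
+ *	       offsetof(struct vfio_region_gfx_edid, link_state));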
+ */ +struct vfio_region_gfx_edid { + __u32 edid_offset; + __u32 edid_max_size; + __u32 edid_size; + __u32 max_xres; + __u32 max_yres; + __u32 link_state; +#define VFIO_DEVICE_GFX_LINK_STATE_UP 1 +#define VFIO_DEVICE_GFX_LINK_STATE_DOWN 2 +}; + /* * The MSIX mappable capability informs that MSIX data of a BAR can be mmapped * which allows direct access to non-MSIX registers which happened to be within diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h index 13b8cb563892..a1966cd7b677 100644 --- a/include/uapi/linux/virtio_balloon.h +++ b/include/uapi/linux/virtio_balloon.h @@ -34,15 +34,23 @@ #define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */ #define VIRTIO_BALLOON_F_STATS_VQ 1 /* Memory Stats virtqueue */ #define VIRTIO_BALLOON_F_DEFLATE_ON_OOM 2 /* Deflate balloon on OOM */ +#define VIRTIO_BALLOON_F_FREE_PAGE_HINT 3 /* VQ to report free pages */ +#define VIRTIO_BALLOON_F_PAGE_POISON 4 /* Guest is using page poisoning */ /* Size of a PFN in the balloon interface. */ #define VIRTIO_BALLOON_PFN_SHIFT 12 +#define VIRTIO_BALLOON_CMD_ID_STOP 0 +#define VIRTIO_BALLOON_CMD_ID_DONE 1 struct virtio_balloon_config { /* Number of pages host wants Guest to give up. */ __u32 num_pages; /* Number of pages we've actually got in balloon. */ __u32 actual; + /* Free page report command id, readonly by guest */ + __u32 free_page_report_cmd_id; + /* Stores PAGE_POISON if page poisoning is in use */ + __u32 poison_val; }; #define VIRTIO_BALLOON_S_SWAP_IN 0 /* Amount of memory swapped in */ diff --git a/include/video/udlfb.h b/include/video/udlfb.h index 3abd327bada6..7d09e54ae54e 100644 --- a/include/video/udlfb.h +++ b/include/video/udlfb.h @@ -36,12 +36,9 @@ struct dlfb_data { struct usb_device *udev; struct fb_info *info; struct urb_list urbs; - struct kref kref; char *backing_buffer; int fb_count; bool virtualized; /* true when physical usb device not present */ - struct delayed_work init_framebuffer_work; - struct delayed_work free_framebuffer_work; atomic_t usb_active; /* 0 = update virtual buffer, but no usb traffic */ atomic_t lost_pixels; /* 1 = a render op failed. 
Need screen refresh */ char *edid; /* null until we read edid from hw or get from sysfs */ diff --git a/kernel/Makefile b/kernel/Makefile index 7a63d567fdb5..7343b3a9bff0 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -117,6 +117,10 @@ obj-$(CONFIG_HAS_IOMEM) += iomem.o obj-$(CONFIG_ZONE_DEVICE) += memremap.o obj-$(CONFIG_RSEQ) += rseq.o +obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o +KASAN_SANITIZE_stackleak.o := n +KCOV_INSTRUMENT_stackleak.o := n + $(obj)/configs.o: $(obj)/config_data.h targets += config_data.gz diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 171a2c88e77d..1971ca325fb4 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2852,10 +2852,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn regs[BPF_REG_0].type = NOT_INIT; } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL || fn->ret_type == RET_PTR_TO_MAP_VALUE) { - if (fn->ret_type == RET_PTR_TO_MAP_VALUE) - regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; - else - regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; /* There is no offset yet applied, variable or fixed */ mark_reg_known_zero(env, regs, BPF_REG_0); /* remember map_ptr, so that check_map_access() @@ -2868,7 +2864,12 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn return -EINVAL; } regs[BPF_REG_0].map_ptr = meta.map_ptr; - regs[BPF_REG_0].id = ++env->id_gen; + if (fn->ret_type == RET_PTR_TO_MAP_VALUE) { + regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; + } else { + regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; + regs[BPF_REG_0].id = ++env->id_gen; + } } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { int id = acquire_reference_state(env, insn_idx); if (id < 0) @@ -3046,7 +3047,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->off = ptr_reg->off + smin_val; - dst_reg->range = ptr_reg->range; + dst_reg->raw = ptr_reg->raw; break; } /* A new variable offset is created. Note that off_reg->off @@ -3076,10 +3077,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, } dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; + dst_reg->raw = ptr_reg->raw; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ - dst_reg->range = 0; + dst_reg->raw = 0; } break; case BPF_SUB: @@ -3108,7 +3110,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, dst_reg->var_off = ptr_reg->var_off; dst_reg->id = ptr_reg->id; dst_reg->off = ptr_reg->off - smin_val; - dst_reg->range = ptr_reg->range; + dst_reg->raw = ptr_reg->raw; break; } /* A new variable offset is created. 
If the subtrahend is known @@ -3134,11 +3136,12 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, } dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; + dst_reg->raw = ptr_reg->raw; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ if (smin_val < 0) - dst_reg->range = 0; + dst_reg->raw = 0; } break; case BPF_AND: diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 8b79318810ad..6aaf5dd5383b 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -493,7 +493,7 @@ static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp, } /** - * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss + * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem * @cgrp: the cgroup of interest * @ss: the subsystem of interest (%NULL returns @cgrp->self) * @@ -502,8 +502,8 @@ static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp, * enabled. If @ss is associated with the hierarchy @cgrp is on, this * function is guaranteed to return non-NULL css. */ -static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp, - struct cgroup_subsys *ss) +static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp, + struct cgroup_subsys *ss) { lockdep_assert_held(&cgroup_mutex); @@ -524,35 +524,6 @@ static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp, } /** - * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem - * @cgrp: the cgroup of interest - * @ss: the subsystem of interest - * - * Find and get the effective css of @cgrp for @ss. The effective css is - * defined as the matching css of the nearest ancestor including self which - * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on, - * the root css is returned, so this function always returns a valid css. - * - * The returned css is not guaranteed to be online, and therefore it is the - * callers responsiblity to tryget a reference for it. - */ -struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp, - struct cgroup_subsys *ss) -{ - struct cgroup_subsys_state *css; - - do { - css = cgroup_css(cgrp, ss); - - if (css) - return css; - cgrp = cgroup_parent(cgrp); - } while (cgrp); - - return init_css_set.subsys[ss->id]; -} - -/** * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem * @cgrp: the cgroup of interest * @ss: the subsystem of interest @@ -634,11 +605,10 @@ EXPORT_SYMBOL_GPL(of_css); * * Should be called under cgroup_[tree_]mutex. */ -#define for_each_e_css(css, ssid, cgrp) \ - for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \ - if (!((css) = cgroup_e_css_by_mask(cgrp, \ - cgroup_subsys[(ssid)]))) \ - ; \ +#define for_each_e_css(css, ssid, cgrp) \ + for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \ + if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \ + ; \ else /** @@ -1037,7 +1007,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset, * @ss is in this hierarchy, so we want the * effective css from @cgrp. 
*/ - template[i] = cgroup_e_css_by_mask(cgrp, ss); + template[i] = cgroup_e_css(cgrp, ss); } else { /* * @ss is not in this hierarchy, so we don't want @@ -3054,7 +3024,7 @@ static int cgroup_apply_control(struct cgroup *cgrp) return ret; /* - * At this point, cgroup_e_css_by_mask() results reflect the new csses + * At this point, cgroup_e_css() results reflect the new csses * making the following cgroup_update_dfl_csses() properly update * css associations of all tasks in the subtree. */ diff --git a/kernel/configs/kvm_guest.config b/kernel/configs/kvm_guest.config index 108fecc20fc1..208481d91090 100644 --- a/kernel/configs/kvm_guest.config +++ b/kernel/configs/kvm_guest.config @@ -20,6 +20,7 @@ CONFIG_PARAVIRT=y CONFIG_KVM_GUEST=y CONFIG_S390_GUEST=y CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_BLK=y CONFIG_VIRTIO_CONSOLE=y diff --git a/kernel/events/core.c b/kernel/events/core.c index 8c490130c4fb..84530ab358c3 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -750,7 +750,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event) /* * Do not update time when cgroup is not active */ - if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) + if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) __update_cgrp_time(event->cgrp); } diff --git a/kernel/fork.c b/kernel/fork.c index 8f82a3bdcb8f..07cddff89c7b 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -91,6 +91,7 @@ #include <linux/kcov.h> #include <linux/livepatch.h> #include <linux/thread_info.h> +#include <linux/stackleak.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> @@ -1926,6 +1927,8 @@ static __latent_entropy struct task_struct *copy_process( if (retval) goto bad_fork_cleanup_io; + stackleak_task_init(p); + if (pid != &init_struct_pid) { pid = alloc_pid(p->nsproxy->pid_ns_for_children); if (IS_ERR(pid)) { diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c index 6e6d467f3dec..1f0985adf193 100644 --- a/kernel/irq/matrix.c +++ b/kernel/irq/matrix.c @@ -8,7 +8,7 @@ #include <linux/cpu.h> #include <linux/irq.h> -#define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS) * sizeof(unsigned long)) +#define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS)) struct cpumap { unsigned int available; diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index c6a3b6851372..35cf0ad29718 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -25,8 +25,6 @@ #include <linux/elf.h> #include <linux/elfcore.h> #include <linux/kernel.h> -#include <linux/kexec.h> -#include <linux/slab.h> #include <linux/syscalls.h> #include <linux/vmalloc.h> #include "kexec_internal.h" diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 2e2955a8cf8f..a21ea6021929 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1561,7 +1561,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) /* * We may dequeue prev's rt_rq in put_prev_task(). - * So, we update time before rt_nr_running check. + * So, we update time before rt_queued check. 
*/ if (prev->sched_class == &rt_sched_class) update_curr_rt(rq); diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 9d74371e4aad..8d7f15ba5916 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1337,7 +1337,7 @@ void sched_init_numa(void) int level = 0; int i, j, k; - sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); + sched_domains_numa_distance = kzalloc(sizeof(int) * (nr_node_ids + 1), GFP_KERNEL); if (!sched_domains_numa_distance) return; diff --git a/kernel/stackleak.c b/kernel/stackleak.c new file mode 100644 index 000000000000..e42892926244 --- /dev/null +++ b/kernel/stackleak.c @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This code fills the used part of the kernel stack with a poison value + * before returning to userspace. It's part of the STACKLEAK feature + * ported from grsecurity/PaX. + * + * Author: Alexander Popov <alex.popov@linux.com> + * + * STACKLEAK reduces the information which kernel stack leak bugs can + * reveal and blocks some uninitialized stack variable attacks. + */ + +#include <linux/stackleak.h> + +#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE +#include <linux/jump_label.h> +#include <linux/sysctl.h> + +static DEFINE_STATIC_KEY_FALSE(stack_erasing_bypass); + +int stack_erasing_sysctl(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret = 0; + int state = !static_branch_unlikely(&stack_erasing_bypass); + int prev_state = state; + + table->data = &state; + table->maxlen = sizeof(int); + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + state = !!state; + if (ret || !write || state == prev_state) + return ret; + + if (state) + static_branch_disable(&stack_erasing_bypass); + else + static_branch_enable(&stack_erasing_bypass); + + pr_warn("stackleak: kernel stack erasing is %s\n", + state ? "enabled" : "disabled"); + return ret; +} + +#define skip_erasing() static_branch_unlikely(&stack_erasing_bypass) +#else +#define skip_erasing() false +#endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */ + +asmlinkage void stackleak_erase(void) +{ + /* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */ + unsigned long kstack_ptr = current->lowest_stack; + unsigned long boundary = (unsigned long)end_of_stack(current); + unsigned int poison_count = 0; + const unsigned int depth = STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long); + + if (skip_erasing()) + return; + + /* Check that 'lowest_stack' value is sane */ + if (unlikely(kstack_ptr - boundary >= THREAD_SIZE)) + kstack_ptr = boundary; + + /* Search for the poison value in the kernel stack */ + while (kstack_ptr > boundary && poison_count <= depth) { + if (*(unsigned long *)kstack_ptr == STACKLEAK_POISON) + poison_count++; + else + poison_count = 0; + + kstack_ptr -= sizeof(unsigned long); + } + + /* + * One 'long int' at the bottom of the thread stack is reserved and + * should not be poisoned (see CONFIG_SCHED_STACK_END_CHECK=y). + */ + if (kstack_ptr == boundary) + kstack_ptr += sizeof(unsigned long); + +#ifdef CONFIG_STACKLEAK_METRICS + current->prev_lowest_stack = kstack_ptr; +#endif + + /* + * Now write the poison value to the kernel stack. Start from + * 'kstack_ptr' and move up till the new 'boundary'. We assume that + * the stack pointer doesn't change when we write poison. 
+ */ + if (on_thread_stack()) + boundary = current_stack_pointer; + else + boundary = current_top_of_stack(); + + while (kstack_ptr < boundary) { + *(unsigned long *)kstack_ptr = STACKLEAK_POISON; + kstack_ptr += sizeof(unsigned long); + } + + /* Reset the 'lowest_stack' value for the next syscall */ + current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64; +} + +void __used stackleak_track_stack(void) +{ + /* + * N.B. stackleak_erase() fills the kernel stack with the poison value, + * which has the register width. That code assumes that the value + * of 'lowest_stack' is aligned on the register width boundary. + * + * That is true for x86 and x86_64 because of the kernel stack + * alignment on these platforms (for details, see 'cc_stack_align' in + * arch/x86/Makefile). Take care of that when you port STACKLEAK to + * new platforms. + */ + unsigned long sp = (unsigned long)&sp; + + /* + * Having CONFIG_STACKLEAK_TRACK_MIN_SIZE larger than + * STACKLEAK_SEARCH_DEPTH makes the poison search in + * stackleak_erase() unreliable. Let's prevent that. + */ + BUILD_BUG_ON(CONFIG_STACKLEAK_TRACK_MIN_SIZE > STACKLEAK_SEARCH_DEPTH); + + if (sp < current->lowest_stack && + sp >= (unsigned long)task_stack_page(current) + + sizeof(unsigned long)) { + current->lowest_stack = sp; + } +} +EXPORT_SYMBOL(stackleak_track_stack); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index cc02050fd0c4..5fc724e4e454 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -66,7 +66,6 @@ #include <linux/kexec.h> #include <linux/bpf.h> #include <linux/mount.h> -#include <linux/pipe_fs_i.h> #include <linux/uaccess.h> #include <asm/processor.h> @@ -91,7 +90,9 @@ #ifdef CONFIG_CHR_DEV_SG #include <scsi/sg.h> #endif - +#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE +#include <linux/stackleak.h> +#endif #ifdef CONFIG_LOCKUP_DETECTOR #include <linux/nmi.h> #endif @@ -1233,6 +1234,17 @@ static struct ctl_table kern_table[] = { .extra2 = &one, }, #endif +#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE + { + .procname = "stack_erasing", + .data = NULL, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = stack_erasing_sysctl, + .extra1 = &zero, + .extra2 = &one, + }, +#endif { } }; diff --git a/kernel/time/time.c b/kernel/time/time.c index e3a7f7fd3abc..ad204cf6d001 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c @@ -842,7 +842,7 @@ int get_timespec64(struct timespec64 *ts, ts->tv_sec = kts.tv_sec; /* Zero out the padding for 32 bit systems or in compat mode */ - if (IS_ENABLED(CONFIG_64BIT_TIME) && (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall())) + if (IS_ENABLED(CONFIG_64BIT_TIME) && in_compat_syscall()) kts.tv_nsec &= 0xFFFFFFFFUL; ts->tv_nsec = kts.tv_nsec; diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index fac0ddf8a8e2..2868d85f1fb1 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -764,9 +764,9 @@ blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio) if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP)) return NULL; - if (!bio->bi_blkg) + if (!bio->bi_css) return NULL; - return cgroup_get_kernfs_id(bio_blkcg(bio)->css.cgroup); + return cgroup_get_kernfs_id(bio->bi_css->cgroup); } #else static union kernfs_node_id * diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index b0875b327f5c..c3fd849d4a8f 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c @@ -115,7 +115,7 @@ static int module_trace_bprintk_format_notify(struct notifier_block *self, * section, then we need to read the link list pointers. 
The trick is * we pass the address of the string to the seq function just like * we do for the kernel core formats. To get back the structure that - * holds the format, we simply use containerof() and then go to the + * holds the format, we simply use container_of() and then go to the * next format in the list. */ static const char ** diff --git a/lib/Kconfig b/lib/Kconfig index d1573a16aa92..a9965f4af4dd 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -624,6 +624,3 @@ config GENERIC_LIB_CMPDI2 config GENERIC_LIB_UCMPDI2 bool - -config GENERIC_LIB_UMODDI3 - bool diff --git a/lib/Makefile b/lib/Makefile index 988949c4fd3a..db06d1237898 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -274,4 +274,3 @@ obj-$(CONFIG_GENERIC_LIB_LSHRDI3) += lshrdi3.o obj-$(CONFIG_GENERIC_LIB_MULDI3) += muldi3.o obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o -obj-$(CONFIG_GENERIC_LIB_UMODDI3) += umoddi3.o udivmoddi4.o diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 8be175df3075..7ebccb5c1637 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -83,6 +83,7 @@ const struct kvec *kvec; \ struct kvec v; \ iterate_kvec(i, n, v, kvec, skip, (K)) \ + } else if (unlikely(i->type & ITER_DISCARD)) { \ } else { \ const struct iovec *iov; \ struct iovec v; \ @@ -114,6 +115,8 @@ } \ i->nr_segs -= kvec - i->kvec; \ i->kvec = kvec; \ + } else if (unlikely(i->type & ITER_DISCARD)) { \ + skip += n; \ } else { \ const struct iovec *iov; \ struct iovec v; \ @@ -428,17 +431,19 @@ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes) } EXPORT_SYMBOL(iov_iter_fault_in_readable); -void iov_iter_init(struct iov_iter *i, int direction, +void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov, unsigned long nr_segs, size_t count) { + WARN_ON(direction & ~(READ | WRITE)); + direction &= READ | WRITE; + /* It will get better. Eventually... 
*/ if (uaccess_kernel()) { - direction |= ITER_KVEC; - i->type = direction; + i->type = ITER_KVEC | direction; i->kvec = (struct kvec *)iov; } else { - i->type = direction; + i->type = ITER_IOVEC | direction; i->iov = iov; } i->nr_segs = nr_segs; @@ -558,7 +563,7 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes, size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) { const char *from = addr; - if (unlikely(i->type & ITER_PIPE)) + if (unlikely(iov_iter_is_pipe(i))) return copy_pipe_to_iter(addr, bytes, i); if (iter_is_iovec(i)) might_fault(); @@ -658,7 +663,7 @@ size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i) const char *from = addr; unsigned long rem, curr_addr, s_addr = (unsigned long) addr; - if (unlikely(i->type & ITER_PIPE)) + if (unlikely(iov_iter_is_pipe(i))) return copy_pipe_to_iter_mcsafe(addr, bytes, i); if (iter_is_iovec(i)) might_fault(); @@ -692,7 +697,7 @@ EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe); size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) { char *to = addr; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i))) { WARN_ON(1); return 0; } @@ -712,7 +717,7 @@ EXPORT_SYMBOL(_copy_from_iter); bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) { char *to = addr; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i))) { WARN_ON(1); return false; } @@ -739,7 +744,7 @@ EXPORT_SYMBOL(_copy_from_iter_full); size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) { char *to = addr; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i))) { WARN_ON(1); return 0; } @@ -773,7 +778,7 @@ EXPORT_SYMBOL(_copy_from_iter_nocache); size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) { char *to = addr; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i))) { WARN_ON(1); return 0; } @@ -794,7 +799,7 @@ EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache); bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i) { char *to = addr; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i))) { WARN_ON(1); return false; } @@ -836,7 +841,9 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, size_t wanted = copy_to_iter(kaddr + offset, bytes, i); kunmap_atomic(kaddr); return wanted; - } else if (likely(!(i->type & ITER_PIPE))) + } else if (unlikely(iov_iter_is_discard(i))) + return bytes; + else if (likely(!iov_iter_is_pipe(i))) return copy_page_to_iter_iovec(page, offset, bytes, i); else return copy_page_to_iter_pipe(page, offset, bytes, i); @@ -848,7 +855,7 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, { if (unlikely(!page_copy_sane(page, offset, bytes))) return 0; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { WARN_ON(1); return 0; } @@ -888,7 +895,7 @@ static size_t pipe_zero(size_t bytes, struct iov_iter *i) size_t iov_iter_zero(size_t bytes, struct iov_iter *i) { - if (unlikely(i->type & ITER_PIPE)) + if (unlikely(iov_iter_is_pipe(i))) return pipe_zero(bytes, i); iterate_and_advance(i, bytes, v, clear_user(v.iov_base, v.iov_len), @@ -908,7 +915,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page, kunmap_atomic(kaddr); return 0; } - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { kunmap_atomic(kaddr); WARN_ON(1); return 0; @@ -972,10 +979,14 @@ static void 
pipe_advance(struct iov_iter *i, size_t size) void iov_iter_advance(struct iov_iter *i, size_t size) { - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i))) { pipe_advance(i, size); return; } + if (unlikely(iov_iter_is_discard(i))) { + i->count -= size; + return; + } iterate_and_advance(i, size, v, 0, 0, 0) } EXPORT_SYMBOL(iov_iter_advance); @@ -987,7 +998,7 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll) if (WARN_ON(unroll > MAX_RW_COUNT)) return; i->count += unroll; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i))) { struct pipe_inode_info *pipe = i->pipe; int idx = i->idx; size_t off = i->iov_offset; @@ -1011,12 +1022,14 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll) pipe_truncate(i); return; } + if (unlikely(iov_iter_is_discard(i))) + return; if (unroll <= i->iov_offset) { i->iov_offset -= unroll; return; } unroll -= i->iov_offset; - if (i->type & ITER_BVEC) { + if (iov_iter_is_bvec(i)) { const struct bio_vec *bvec = i->bvec; while (1) { size_t n = (--bvec)->bv_len; @@ -1049,23 +1062,25 @@ EXPORT_SYMBOL(iov_iter_revert); */ size_t iov_iter_single_seg_count(const struct iov_iter *i) { - if (unlikely(i->type & ITER_PIPE)) + if (unlikely(iov_iter_is_pipe(i))) return i->count; // it is a silly place, anyway if (i->nr_segs == 1) return i->count; - else if (i->type & ITER_BVEC) + if (unlikely(iov_iter_is_discard(i))) + return i->count; + else if (iov_iter_is_bvec(i)) return min(i->count, i->bvec->bv_len - i->iov_offset); else return min(i->count, i->iov->iov_len - i->iov_offset); } EXPORT_SYMBOL(iov_iter_single_seg_count); -void iov_iter_kvec(struct iov_iter *i, int direction, +void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec, unsigned long nr_segs, size_t count) { - BUG_ON(!(direction & ITER_KVEC)); - i->type = direction; + WARN_ON(direction & ~(READ | WRITE)); + i->type = ITER_KVEC | (direction & (READ | WRITE)); i->kvec = kvec; i->nr_segs = nr_segs; i->iov_offset = 0; @@ -1073,12 +1088,12 @@ void iov_iter_kvec(struct iov_iter *i, int direction, } EXPORT_SYMBOL(iov_iter_kvec); -void iov_iter_bvec(struct iov_iter *i, int direction, +void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec, unsigned long nr_segs, size_t count) { - BUG_ON(!(direction & ITER_BVEC)); - i->type = direction; + WARN_ON(direction & ~(READ | WRITE)); + i->type = ITER_BVEC | (direction & (READ | WRITE)); i->bvec = bvec; i->nr_segs = nr_segs; i->iov_offset = 0; @@ -1086,13 +1101,13 @@ void iov_iter_bvec(struct iov_iter *i, int direction, } EXPORT_SYMBOL(iov_iter_bvec); -void iov_iter_pipe(struct iov_iter *i, int direction, +void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe, size_t count) { - BUG_ON(direction != ITER_PIPE); + BUG_ON(direction != READ); WARN_ON(pipe->nrbufs == pipe->buffers); - i->type = direction; + i->type = ITER_PIPE | READ; i->pipe = pipe; i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1); i->iov_offset = 0; @@ -1101,12 +1116,30 @@ void iov_iter_pipe(struct iov_iter *i, int direction, } EXPORT_SYMBOL(iov_iter_pipe); +/** + * iov_iter_discard - Initialise an I/O iterator that discards data + * @i: The iterator to initialise. + * @direction: The direction of the transfer. + * @count: The size of the I/O buffer in bytes. + * + * Set up an I/O iterator that just discards everything that's written to it. + * It's only available as a READ iterator. 
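+ *
+ * A minimal usage sketch (illustrative, not taken from this patch): to
+ * drain and drop @count bytes, build a discard iterator and hand it to
+ * whichever copy helper produces the data; the helper reports the bytes
+ * as consumed, but nothing is stored anywhere:
+ *
+ *	struct iov_iter iter;
+ *
+ *	iov_iter_discard(&iter, READ, count);
+ *	copy_to_iter(buf, count, &iter);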
+ */ +void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count) +{ + BUG_ON(direction != READ); + i->type = ITER_DISCARD | READ; + i->count = count; + i->iov_offset = 0; +} +EXPORT_SYMBOL(iov_iter_discard); + unsigned long iov_iter_alignment(const struct iov_iter *i) { unsigned long res = 0; size_t size = i->count; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i))) { if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx])) return size | i->iov_offset; return size; @@ -1125,7 +1158,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i) unsigned long res = 0; size_t size = i->count; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { WARN_ON(1); return ~0U; } @@ -1193,8 +1226,11 @@ ssize_t iov_iter_get_pages(struct iov_iter *i, if (maxsize > i->count) maxsize = i->count; - if (unlikely(i->type & ITER_PIPE)) + if (unlikely(iov_iter_is_pipe(i))) return pipe_get_pages(i, pages, maxsize, maxpages, start); + if (unlikely(iov_iter_is_discard(i))) + return -EFAULT; + iterate_all_kinds(i, maxsize, v, ({ unsigned long addr = (unsigned long)v.iov_base; size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1)); @@ -1205,7 +1241,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i, len = maxpages * PAGE_SIZE; addr &= ~(PAGE_SIZE - 1); n = DIV_ROUND_UP(len, PAGE_SIZE); - res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages); + res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, pages); if (unlikely(res < 0)) return res; return (res == n ? len : res * PAGE_SIZE) - *start; @@ -1270,8 +1306,11 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, if (maxsize > i->count) maxsize = i->count; - if (unlikely(i->type & ITER_PIPE)) + if (unlikely(iov_iter_is_pipe(i))) return pipe_get_pages_alloc(i, pages, maxsize, start); + if (unlikely(iov_iter_is_discard(i))) + return -EFAULT; + iterate_all_kinds(i, maxsize, v, ({ unsigned long addr = (unsigned long)v.iov_base; size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1)); @@ -1283,7 +1322,7 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, p = get_pages_array(n); if (!p) return -ENOMEM; - res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p); + res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, p); if (unlikely(res < 0)) { kvfree(p); return res; @@ -1313,7 +1352,7 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, __wsum sum, next; size_t off = 0; sum = *csum; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { WARN_ON(1); return 0; } @@ -1355,7 +1394,7 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, __wsum sum, next; size_t off = 0; sum = *csum; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { WARN_ON(1); return false; } @@ -1400,7 +1439,7 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, __wsum sum, next; size_t off = 0; sum = *csum; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { WARN_ON(1); /* for now */ return 0; } @@ -1442,8 +1481,10 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages) if (!size) return 0; + if (unlikely(iov_iter_is_discard(i))) + return 0; - if (unlikely(i->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(i))) { struct pipe_inode_info *pipe = i->pipe; size_t off; int idx; @@ -1481,11 +1522,13 @@ 
EXPORT_SYMBOL(iov_iter_npages); const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) { *new = *old; - if (unlikely(new->type & ITER_PIPE)) { + if (unlikely(iov_iter_is_pipe(new))) { WARN_ON(1); return NULL; } - if (new->type & ITER_BVEC) + if (unlikely(iov_iter_is_discard(new))) + return NULL; + if (iov_iter_is_bvec(new)) return new->bvec = kmemdup(new->bvec, new->nr_segs * sizeof(struct bio_vec), flags); diff --git a/lib/udivmoddi4.c b/lib/udivmoddi4.c deleted file mode 100644 index c08bc8a5f1cf..000000000000 --- a/lib/udivmoddi4.c +++ /dev/null @@ -1,310 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see the file COPYING, or write - * to the Free Software Foundation, Inc. - */ - -#include <linux/libgcc.h> - -#define count_leading_zeros(COUNT, X) ((COUNT) = __builtin_clz(X)) - -#define W_TYPE_SIZE 32 - -#define __ll_B ((unsigned long) 1 << (W_TYPE_SIZE / 2)) -#define __ll_lowpart(t) ((unsigned long) (t) & (__ll_B - 1)) -#define __ll_highpart(t) ((unsigned long) (t) >> (W_TYPE_SIZE / 2)) - -/* If we still don't have umul_ppmm, define it using plain C. */ -#if !defined(umul_ppmm) -#define umul_ppmm(w1, w0, u, v) \ - do { \ - unsigned long __x0, __x1, __x2, __x3; \ - unsigned short __ul, __vl, __uh, __vh; \ - \ - __ul = __ll_lowpart(u); \ - __uh = __ll_highpart(u); \ - __vl = __ll_lowpart(v); \ - __vh = __ll_highpart(v); \ - \ - __x0 = (unsigned long) __ul * __vl; \ - __x1 = (unsigned long) __ul * __vh; \ - __x2 = (unsigned long) __uh * __vl; \ - __x3 = (unsigned long) __uh * __vh; \ - \ - __x1 += __ll_highpart(__x0); \ - __x1 += __x2; \ - if (__x1 < __x2) \ - __x3 += __ll_B; \ - \ - (w1) = __x3 + __ll_highpart(__x1); \ - (w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0);\ - } while (0) -#endif - -#if !defined(sub_ddmmss) -#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ - do { \ - unsigned long __x; \ - __x = (al) - (bl); \ - (sh) = (ah) - (bh) - (__x > (al)); \ - (sl) = __x; \ - } while (0) -#endif - -/* Define this unconditionally, so it can be used for debugging. */ -#define __udiv_qrnnd_c(q, r, n1, n0, d) \ - do { \ - unsigned long __d1, __d0, __q1, __q0; \ - unsigned long __r1, __r0, __m; \ - __d1 = __ll_highpart(d); \ - __d0 = __ll_lowpart(d); \ - \ - __r1 = (n1) % __d1; \ - __q1 = (n1) / __d1; \ - __m = (unsigned long) __q1 * __d0; \ - __r1 = __r1 * __ll_B | __ll_highpart(n0); \ - if (__r1 < __m) { \ - __q1--, __r1 += (d); \ - if (__r1 >= (d)) \ - if (__r1 < __m) \ - __q1--, __r1 += (d); \ - } \ - __r1 -= __m; \ - \ - __r0 = __r1 % __d1; \ - __q0 = __r1 / __d1; \ - __m = (unsigned long) __q0 * __d0; \ - __r0 = __r0 * __ll_B | __ll_lowpart(n0); \ - if (__r0 < __m) { \ - __q0--, __r0 += (d); \ - if (__r0 >= (d)) \ - if (__r0 < __m) \ - __q0--, __r0 += (d); \ - } \ - __r0 -= __m; \ - \ - (q) = (unsigned long) __q1 * __ll_B | __q0; \ - (r) = __r0; \ - } while (0) - -/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c. 
*/ -#if !defined(udiv_qrnnd) -#define UDIV_NEEDS_NORMALIZATION 1 -#define udiv_qrnnd __udiv_qrnnd_c -#endif - -unsigned long long __udivmoddi4(unsigned long long u, unsigned long long v, - unsigned long long *rp) -{ - const DWunion nn = {.ll = u }; - const DWunion dd = {.ll = v }; - DWunion rr, ww; - unsigned long d0, d1, n0, n1, n2; - unsigned long q0 = 0, q1 = 0; - unsigned long b, bm; - - d0 = dd.s.low; - d1 = dd.s.high; - n0 = nn.s.low; - n1 = nn.s.high; - -#if !UDIV_NEEDS_NORMALIZATION - - if (d1 == 0) { - if (d0 > n1) { - /* 0q = nn / 0D */ - - udiv_qrnnd(q0, n0, n1, n0, d0); - q1 = 0; - - /* Remainder in n0. */ - } else { - /* qq = NN / 0d */ - - if (d0 == 0) - /* Divide intentionally by zero. */ - d0 = 1 / d0; - - udiv_qrnnd(q1, n1, 0, n1, d0); - udiv_qrnnd(q0, n0, n1, n0, d0); - - /* Remainder in n0. */ - } - - if (rp != 0) { - rr.s.low = n0; - rr.s.high = 0; - *rp = rr.ll; - } - -#else /* UDIV_NEEDS_NORMALIZATION */ - - if (d1 == 0) { - if (d0 > n1) { - /* 0q = nn / 0D */ - - count_leading_zeros(bm, d0); - - if (bm != 0) { - /* - * Normalize, i.e. make the most significant bit - * of the denominator set. - */ - - d0 = d0 << bm; - n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm)); - n0 = n0 << bm; - } - - udiv_qrnnd(q0, n0, n1, n0, d0); - q1 = 0; - - /* Remainder in n0 >> bm. */ - } else { - /* qq = NN / 0d */ - - if (d0 == 0) - /* Divide intentionally by zero. */ - d0 = 1 / d0; - - count_leading_zeros(bm, d0); - - if (bm == 0) { - /* - * From (n1 >= d0) /\ (the most significant bit - * of d0 is set), conclude (the most significant - * bit of n1 is set) /\ (theleading quotient - * digit q1 = 1). - * - * This special case is necessary, not an - * optimization. (Shifts counts of W_TYPE_SIZE - * are undefined.) - */ - - n1 -= d0; - q1 = 1; - } else { - /* Normalize. */ - - b = W_TYPE_SIZE - bm; - - d0 = d0 << bm; - n2 = n1 >> b; - n1 = (n1 << bm) | (n0 >> b); - n0 = n0 << bm; - - udiv_qrnnd(q1, n1, n2, n1, d0); - } - - /* n1 != d0... */ - - udiv_qrnnd(q0, n0, n1, n0, d0); - - /* Remainder in n0 >> bm. */ - } - - if (rp != 0) { - rr.s.low = n0 >> bm; - rr.s.high = 0; - *rp = rr.ll; - } - -#endif /* UDIV_NEEDS_NORMALIZATION */ - - } else { - if (d1 > n1) { - /* 00 = nn / DD */ - - q0 = 0; - q1 = 0; - - /* Remainder in n1n0. */ - if (rp != 0) { - rr.s.low = n0; - rr.s.high = n1; - *rp = rr.ll; - } - } else { - /* 0q = NN / dd */ - - count_leading_zeros(bm, d1); - if (bm == 0) { - /* - * From (n1 >= d1) /\ (the most significant bit - * of d1 is set), conclude (the most significant - * bit of n1 is set) /\ (the quotient digit q0 = - * 0 or 1). - * - * This special case is necessary, not an - * optimization. - */ - - /* - * The condition on the next line takes - * advantage of that n1 >= d1 (true due to - * program flow). - */ - if (n1 > d1 || n0 >= d0) { - q0 = 1; - sub_ddmmss(n1, n0, n1, n0, d1, d0); - } else { - q0 = 0; - } - - q1 = 0; - - if (rp != 0) { - rr.s.low = n0; - rr.s.high = n1; - *rp = rr.ll; - } - } else { - unsigned long m1, m0; - /* Normalize. */ - - b = W_TYPE_SIZE - bm; - - d1 = (d1 << bm) | (d0 >> b); - d0 = d0 << bm; - n2 = n1 >> b; - n1 = (n1 << bm) | (n0 >> b); - n0 = n0 << bm; - - udiv_qrnnd(q0, n1, n2, n1, d1); - umul_ppmm(m1, m0, q0, d0); - - if (m1 > n1 || (m1 == n1 && m0 > n0)) { - q0--; - sub_ddmmss(m1, m0, m1, m0, d1, d0); - } - - q1 = 0; - - /* Remainder in (n1n0 - m1m0) >> bm. 
*/ - if (rp != 0) { - sub_ddmmss(n1, n0, n1, n0, m1, m0); - rr.s.low = (n1 << b) | (n0 >> bm); - rr.s.high = n1 >> bm; - *rp = rr.ll; - } - } - } - } - - ww.s.low = q0; - ww.s.high = q1; - - return ww.ll; -} diff --git a/lib/umoddi3.c b/lib/umoddi3.c deleted file mode 100644 index d7bbf0f85197..000000000000 --- a/lib/umoddi3.c +++ /dev/null @@ -1,32 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see the file COPYING, or write - * to the Free Software Foundation, Inc. - */ - -#include <linux/module.h> -#include <linux/libgcc.h> - -extern unsigned long long __udivmoddi4(unsigned long long u, - unsigned long long v, - unsigned long long *rp); - -unsigned long long __umoddi3(unsigned long long u, unsigned long long v) -{ - unsigned long long w; - (void)__udivmoddi4(u, v, &w); - return w; -} -EXPORT_SYMBOL(__umoddi3); diff --git a/mm/filemap.c b/mm/filemap.c index 218d0b2ec82d..81adec8ee02c 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2049,7 +2049,7 @@ find_page: !mapping->a_ops->is_partially_uptodate) goto page_not_up_to_date; /* pipes can't handle partially uptodate pages */ - if (unlikely(iter->type & ITER_PIPE)) + if (unlikely(iov_iter_is_pipe(iter))) goto page_not_up_to_date; if (!trylock_page(page)) goto page_not_up_to_date; @@ -2825,6 +2825,42 @@ struct page *read_cache_page_gfp(struct address_space *mapping, EXPORT_SYMBOL(read_cache_page_gfp); /* + * Don't operate on ranges the page cache doesn't support, and don't exceed the + * LFS limits. If pos is under the limit it becomes a short access. If it + * exceeds the limit we return -EFBIG. + */ +static int generic_access_check_limits(struct file *file, loff_t pos, + loff_t *count) +{ + struct inode *inode = file->f_mapping->host; + loff_t max_size = inode->i_sb->s_maxbytes; + + if (!(file->f_flags & O_LARGEFILE)) + max_size = MAX_NON_LFS; + + if (unlikely(pos >= max_size)) + return -EFBIG; + *count = min(*count, max_size - pos); + return 0; +} + +static int generic_write_check_limits(struct file *file, loff_t pos, + loff_t *count) +{ + loff_t limit = rlimit(RLIMIT_FSIZE); + + if (limit != RLIM_INFINITY) { + if (pos >= limit) { + send_sig(SIGXFSZ, current, 0); + return -EFBIG; + } + *count = min(*count, limit - pos); + } + + return generic_access_check_limits(file, pos, count); +} + +/* * Performs necessary checks before doing a write * * Can adjust writing position or amount of bytes to write. 
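As a quick illustration of the two helpers introduced just above (a minimal sketch, not part of the patch; both functions are static to mm/filemap.c and the struct file pointer is assumed): generic_write_check_limits() first clamps the count against RLIMIT_FSIZE, then defers to generic_access_check_limits() for the s_maxbytes/MAX_NON_LFS clamp, so a write that starts below the limit becomes a short write instead of failing outright:

	/* sketch: assume RLIMIT_FSIZE is 1 MiB and s_maxbytes is much larger */
	loff_t count = 64 * 1024;
	int ret;

	ret = generic_write_check_limits(file, (1024 * 1024) - 4096, &count);
	/* ret == 0, count clamped from 64 KiB to 4 KiB (limit - pos) */

	count = 64 * 1024;
	ret = generic_write_check_limits(file, 1024 * 1024, &count);
	/* pos >= limit: SIGXFSZ is sent to current and ret == -EFBIG */

generic_remap_checks() further down reuses the same pair, applying the read-side limit to the source file and the write-side limit to the destination file of the clone.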
@@ -2835,8 +2871,8 @@ inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; - unsigned long limit = rlimit(RLIMIT_FSIZE); - loff_t pos; + loff_t count; + int ret; if (!iov_iter_count(from)) return 0; @@ -2845,43 +2881,99 @@ inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from) if (iocb->ki_flags & IOCB_APPEND) iocb->ki_pos = i_size_read(inode); - pos = iocb->ki_pos; - if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT)) return -EINVAL; - if (limit != RLIM_INFINITY) { - if (iocb->ki_pos >= limit) { - send_sig(SIGXFSZ, current, 0); - return -EFBIG; - } - iov_iter_truncate(from, limit - (unsigned long)pos); - } + count = iov_iter_count(from); + ret = generic_write_check_limits(file, iocb->ki_pos, &count); + if (ret) + return ret; + + iov_iter_truncate(from, count); + return iov_iter_count(from); +} +EXPORT_SYMBOL(generic_write_checks); + +/* + * Performs necessary checks before doing a clone. + * + * Can adjust amount of bytes to clone. + * Returns appropriate error code that caller should return or + * zero in case the clone should be allowed. + */ +int generic_remap_checks(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t *req_count, unsigned int remap_flags) +{ + struct inode *inode_in = file_in->f_mapping->host; + struct inode *inode_out = file_out->f_mapping->host; + uint64_t count = *req_count; + uint64_t bcount; + loff_t size_in, size_out; + loff_t bs = inode_out->i_sb->s_blocksize; + int ret; + + /* The start of both ranges must be aligned to an fs block. */ + if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs)) + return -EINVAL; + + /* Ensure offsets don't wrap. */ + if (pos_in + count < pos_in || pos_out + count < pos_out) + return -EINVAL; + + size_in = i_size_read(inode_in); + size_out = i_size_read(inode_out); + + /* Dedupe requires both ranges to be within EOF. */ + if ((remap_flags & REMAP_FILE_DEDUP) && + (pos_in >= size_in || pos_in + count > size_in || + pos_out >= size_out || pos_out + count > size_out)) + return -EINVAL; + + /* Ensure the infile range is within the infile. */ + if (pos_in >= size_in) + return -EINVAL; + count = min(count, size_in - (uint64_t)pos_in); + + ret = generic_access_check_limits(file_in, pos_in, &count); + if (ret) + return ret; + + ret = generic_write_check_limits(file_out, pos_out, &count); + if (ret) + return ret; /* - * LFS rule + * If the user wanted us to link to the infile's EOF, round up to the + * next block boundary for this check. + * + * Otherwise, make sure the count is also block-aligned, having + * already confirmed the starting offsets' block alignment. */ - if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS && - !(file->f_flags & O_LARGEFILE))) { - if (pos >= MAX_NON_LFS) - return -EFBIG; - iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos); + if (pos_in + count == size_in) { + bcount = ALIGN(size_in, bs) - pos_in; + } else { + if (!IS_ALIGNED(count, bs)) + count = ALIGN_DOWN(count, bs); + bcount = count; } + /* Don't allow overlapped cloning within the same file. */ + if (inode_in == inode_out && + pos_out + bcount > pos_in && + pos_out < pos_in + bcount) + return -EINVAL; + /* - * Are we about to exceed the fs block limit ? - * - * If we have written data it becomes a short write. If we have - * exceeded without writing data we send a signal and return EFBIG. - * Linus frestrict idea will clean these up nicely.. 
+ * We shortened the request but the caller can't deal with that, so + * bounce the request back to userspace. */ - if (unlikely(pos >= inode->i_sb->s_maxbytes)) - return -EFBIG; + if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN)) + return -EINVAL; - iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos); - return iov_iter_count(from); + *req_count = count; + return 0; } -EXPORT_SYMBOL(generic_write_checks); int pagecache_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 4e4ef8fa479d..55478ab3c83b 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -629,21 +629,40 @@ release: * available * never: never stall for any thp allocation */ -static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma) +static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr) { const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); + gfp_t this_node = 0; + +#ifdef CONFIG_NUMA + struct mempolicy *pol; + /* + * __GFP_THISNODE is used only when __GFP_DIRECT_RECLAIM is not + * specified, to express a general desire to stay on the current + * node for optimistic allocation attempts. If the defrag mode + * and/or madvise hint requires the direct reclaim then we prefer + * to fallback to other node rather than node reclaim because that + * can lead to excessive reclaim even though there is free memory + * on other nodes. We expect that NUMA preferences are specified + * by memory policies. + */ + pol = get_vma_policy(vma, addr); + if (pol->mode != MPOL_BIND) + this_node = __GFP_THISNODE; + mpol_cond_put(pol); +#endif if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY); if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) - return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM; + return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node; if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : - __GFP_KSWAPD_RECLAIM); + __GFP_KSWAPD_RECLAIM | this_node); if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : - 0); - return GFP_TRANSHUGE_LIGHT; + this_node); + return GFP_TRANSHUGE_LIGHT | this_node; } /* Caller must hold page table lock. 
*/ @@ -715,8 +734,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) pte_free(vma->vm_mm, pgtable); return ret; } - gfp = alloc_hugepage_direct_gfpmask(vma); - page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); + gfp = alloc_hugepage_direct_gfpmask(vma, haddr); + page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, haddr, numa_node_id()); if (unlikely(!page)) { count_vm_event(THP_FAULT_FALLBACK); return VM_FAULT_FALLBACK; @@ -1286,8 +1305,9 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd) alloc: if (transparent_hugepage_enabled(vma) && !transparent_hugepage_debug_cow()) { - huge_gfp = alloc_hugepage_direct_gfpmask(vma); - new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); + huge_gfp = alloc_hugepage_direct_gfpmask(vma, haddr); + new_page = alloc_pages_vma(huge_gfp, HPAGE_PMD_ORDER, vma, + haddr, numa_node_id()); } else new_page = NULL; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 54920cbc46bf..6e1469b80cb7 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2593,7 +2593,7 @@ int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) struct mem_cgroup *memcg; int ret = 0; - if (memcg_kmem_bypass()) + if (mem_cgroup_disabled() || memcg_kmem_bypass()) return 0; memcg = get_mem_cgroup_from_current(); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 61972da38d93..2b2b3ccbbfb5 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -586,6 +586,7 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn, for (i = 0; i < sections_to_remove; i++) { unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION; + cond_resched(); ret = __remove_section(zone, __pfn_to_section(pfn), map_offset, altmap); map_offset = 0; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index cfd26d7e61a1..5837a067124d 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1116,8 +1116,8 @@ static struct page *new_page(struct page *page, unsigned long start) } else if (PageTransHuge(page)) { struct page *thp; - thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, - HPAGE_PMD_ORDER); + thp = alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma, + address, numa_node_id()); if (!thp) return NULL; prep_transhuge_page(thp); @@ -1662,7 +1662,7 @@ struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, * freeing by another task. It is the caller's responsibility to free the * extra reference for shared policies. */ -static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, +struct mempolicy *get_vma_policy(struct vm_area_struct *vma, unsigned long addr) { struct mempolicy *pol = __get_vma_policy(vma, addr); @@ -2011,7 +2011,6 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, * @vma: Pointer to VMA or NULL if not available. * @addr: Virtual Address of the allocation. Must be inside the VMA. * @node: Which node to prefer for allocation (modulo policy). - * @hugepage: for hugepages try only the preferred node if possible * * This function allocates a page from the kernel page pool and applies * a NUMA policy associated with the VMA or the current process. 
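With the hugepage special case removed from alloc_pages_vma() here, a THP fault site expresses its node preference purely through the gfp mask; the converted calling pattern, as it appears in the huge_memory.c hunks earlier in this patch, is:

	gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
	page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, haddr, numa_node_id());

alloc_hugepage_direct_gfpmask() now adds __GFP_THISNODE only when the VMA's memory policy is not MPOL_BIND and only on the paths that do not request direct reclaim, which is what replaces the __GFP_THISNODE branch being deleted above.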
@@ -2022,7 +2021,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, */ struct page * alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, - unsigned long addr, int node, bool hugepage) + unsigned long addr, int node) { struct mempolicy *pol; struct page *page; @@ -2040,32 +2039,6 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, goto out; } - if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { - int hpage_node = node; - - /* - * For hugepage allocation and non-interleave policy which - * allows the current node (or other explicitly preferred - * node) we only try to allocate from the current/preferred - * node and don't fall back to other nodes, as the cost of - * remote accesses would likely offset THP benefits. - * - * If the policy is interleave, or does not allow the current - * node in its nodemask, we allocate the standard way. - */ - if (pol->mode == MPOL_PREFERRED && - !(pol->flags & MPOL_F_LOCAL)) - hpage_node = pol->v.preferred_node; - - nmask = policy_nodemask(gfp, pol); - if (!nmask || node_isset(hpage_node, *nmask)) { - mpol_cond_put(pol); - page = __alloc_pages_node(hpage_node, - gfp | __GFP_THISNODE, order); - goto out; - } - } - nmask = policy_nodemask(gfp, pol); preferred_nid = policy_node(gfp, pol, node); page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask); diff --git a/mm/page_io.c b/mm/page_io.c index a451ffa9491c..d4d1c89bcddd 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -294,7 +294,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc, }; struct iov_iter from; - iov_iter_bvec(&from, ITER_BVEC | WRITE, &bv, 1, PAGE_SIZE); + iov_iter_bvec(&from, WRITE, &bv, 1, PAGE_SIZE); init_sync_kiocb(&kiocb, swap_file); kiocb.ki_pos = page_file_offset(page); @@ -339,7 +339,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc, goto out; } bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc); - bio_associate_blkg_from_page(bio, page); + bio_associate_blkcg_from_page(bio, page); count_swpout_vm_event(page); set_page_writeback(page); unlock_page(page); diff --git a/mm/page_poison.c b/mm/page_poison.c index f7e2a676365a..f0c15e9017c0 100644 --- a/mm/page_poison.c +++ b/mm/page_poison.c @@ -17,6 +17,11 @@ static int __init early_page_poison_param(char *buf) } early_param("page_poison", early_page_poison_param); +/** + * page_poisoning_enabled - check if page poisoning is enabled + * + * Return true if page poisoning is enabled, or false if not. 
+ */ bool page_poisoning_enabled(void) { /* @@ -29,6 +34,7 @@ bool page_poisoning_enabled(void) (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && debug_pagealloc_enabled())); } +EXPORT_SYMBOL_GPL(page_poisoning_enabled); static void poison_page(struct page *page) { diff --git a/mm/percpu.c b/mm/percpu.c index a6b74c6fe0be..db86282fd024 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -2591,7 +2591,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size, BUG_ON(ai->nr_groups != 1); upa = ai->alloc_size/ai->unit_size; nr_g0_units = roundup(num_possible_cpus(), upa); - if (unlikely(WARN_ON(ai->groups[0].nr_units != nr_g0_units))) { + if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) { pcpu_free_alloc_info(ai); return -EINVAL; } diff --git a/mm/shmem.c b/mm/shmem.c index 56bf122e0bb4..ea26d7a0342d 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1435,7 +1435,7 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp, shmem_pseudo_vma_init(&pvma, info, hindex); page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, - HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true); + HPAGE_PMD_ORDER, &pvma, 0, numa_node_id()); shmem_pseudo_vma_destroy(&pvma); if (page) prep_transhuge_page(page); diff --git a/net/9p/client.c b/net/9p/client.c index 5f23e18eecc0..2c9a17b9b46b 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -2066,7 +2066,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset) struct kvec kv = {.iov_base = data, .iov_len = count}; struct iov_iter to; - iov_iter_kvec(&to, READ | ITER_KVEC, &kv, 1, count); + iov_iter_kvec(&to, READ, &kv, 1, count); p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n", fid->fid, (unsigned long long) offset, count); diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index eb596c2ed546..b1d39cabf125 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -329,7 +329,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan, if (!iov_iter_count(data)) return 0; - if (!(data->type & ITER_KVEC)) { + if (!iov_iter_is_kvec(data)) { int n; /* * We allow only p9_max_pages pinned. 
We wait for the diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index 4e2576fc0c59..828e87fe8027 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -467,7 +467,7 @@ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb, iv.iov_len = skb->len; memset(&msg, 0, sizeof(msg)); - iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iv, 1, skb->len); + iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, skb->len); err = l2cap_chan_send(chan, &msg, skb->len); if (err > 0) { diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c index 51c2cf2d8923..58fc6333d412 100644 --- a/net/bluetooth/a2mp.c +++ b/net/bluetooth/a2mp.c @@ -63,7 +63,7 @@ static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *dat memset(&msg, 0, sizeof(msg)); - iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iv, 1, total_len); + iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, total_len); l2cap_chan_send(chan, &msg, total_len); diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index a1c1b7e8a45c..c822e626761b 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -622,7 +622,7 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data) memset(&msg, 0, sizeof(msg)); - iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iv, 2, 1 + len); + iov_iter_kvec(&msg.msg_iter, WRITE, iv, 2, 1 + len); l2cap_chan_send(chan, &msg, 1 + len); diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 0a187196aeed..57fcc6b4bf6e 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -156,7 +156,6 @@ static bool con_flag_test_and_set(struct ceph_connection *con, /* Slab caches for frequently-allocated structures */ static struct kmem_cache *ceph_msg_cache; -static struct kmem_cache *ceph_msg_data_cache; /* static tag bytes (protocol control messages) */ static char tag_msg = CEPH_MSGR_TAG_MSG; @@ -235,23 +234,11 @@ static int ceph_msgr_slab_init(void) if (!ceph_msg_cache) return -ENOMEM; - BUG_ON(ceph_msg_data_cache); - ceph_msg_data_cache = KMEM_CACHE(ceph_msg_data, 0); - if (ceph_msg_data_cache) - return 0; - - kmem_cache_destroy(ceph_msg_cache); - ceph_msg_cache = NULL; - - return -ENOMEM; + return 0; } static void ceph_msgr_slab_exit(void) { - BUG_ON(!ceph_msg_data_cache); - kmem_cache_destroy(ceph_msg_data_cache); - ceph_msg_data_cache = NULL; - BUG_ON(!ceph_msg_cache); kmem_cache_destroy(ceph_msg_cache); ceph_msg_cache = NULL; @@ -526,7 +513,7 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len) if (!buf) msg.msg_flags |= MSG_TRUNC; - iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, len); + iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, len); r = sock_recvmsg(sock, &msg, msg.msg_flags); if (r == -EAGAIN) r = 0; @@ -545,7 +532,7 @@ static int ceph_tcp_recvpage(struct socket *sock, struct page *page, int r; BUG_ON(page_offset + length > PAGE_SIZE); - iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, &bvec, 1, length); + iov_iter_bvec(&msg.msg_iter, READ, &bvec, 1, length); r = sock_recvmsg(sock, &msg, msg.msg_flags); if (r == -EAGAIN) r = 0; @@ -607,7 +594,7 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page, else msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */ - iov_iter_bvec(&msg.msg_iter, WRITE | ITER_BVEC, &bvec, 1, size); + iov_iter_bvec(&msg.msg_iter, WRITE, &bvec, 1, size); ret = sock_sendmsg(sock, &msg); if (ret == -EAGAIN) ret = 0; @@ -1141,16 +1128,13 @@ static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor) static void ceph_msg_data_cursor_init(struct ceph_msg 
*msg, size_t length) { struct ceph_msg_data_cursor *cursor = &msg->cursor; - struct ceph_msg_data *data; BUG_ON(!length); BUG_ON(length > msg->data_length); - BUG_ON(list_empty(&msg->data)); + BUG_ON(!msg->num_data_items); - cursor->data_head = &msg->data; cursor->total_resid = length; - data = list_first_entry(&msg->data, struct ceph_msg_data, links); - cursor->data = data; + cursor->data = msg->data; __ceph_msg_data_cursor_init(cursor); } @@ -1231,8 +1215,7 @@ static void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, if (!cursor->resid && cursor->total_resid) { WARN_ON(!cursor->last_piece); - BUG_ON(list_is_last(&cursor->data->links, cursor->data_head)); - cursor->data = list_next_entry(cursor->data, links); + cursor->data++; __ceph_msg_data_cursor_init(cursor); new_piece = true; } @@ -1248,9 +1231,6 @@ static size_t sizeof_footer(struct ceph_connection *con) static void prepare_message_data(struct ceph_msg *msg, u32 data_len) { - BUG_ON(!msg); - BUG_ON(!data_len); - /* Initialize data cursor */ ceph_msg_data_cursor_init(msg, (size_t)data_len); @@ -1590,7 +1570,7 @@ static int write_partial_message_data(struct ceph_connection *con) dout("%s %p msg %p\n", __func__, con, msg); - if (list_empty(&msg->data)) + if (!msg->num_data_items) return -EINVAL; /* @@ -2347,8 +2327,7 @@ static int read_partial_msg_data(struct ceph_connection *con) u32 crc = 0; int ret; - BUG_ON(!msg); - if (list_empty(&msg->data)) + if (!msg->num_data_items) return -EIO; if (do_datacrc) @@ -3256,32 +3235,16 @@ bool ceph_con_keepalive_expired(struct ceph_connection *con, return false; } -static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type) +static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg) { - struct ceph_msg_data *data; - - if (WARN_ON(!ceph_msg_data_type_valid(type))) - return NULL; - - data = kmem_cache_zalloc(ceph_msg_data_cache, GFP_NOFS); - if (!data) - return NULL; - - data->type = type; - INIT_LIST_HEAD(&data->links); - - return data; + BUG_ON(msg->num_data_items >= msg->max_data_items); + return &msg->data[msg->num_data_items++]; } static void ceph_msg_data_destroy(struct ceph_msg_data *data) { - if (!data) - return; - - WARN_ON(!list_empty(&data->links)); if (data->type == CEPH_MSG_DATA_PAGELIST) ceph_pagelist_release(data->pagelist); - kmem_cache_free(ceph_msg_data_cache, data); } void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, @@ -3292,13 +3255,12 @@ void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, BUG_ON(!pages); BUG_ON(!length); - data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES); - BUG_ON(!data); + data = ceph_msg_data_add(msg); + data->type = CEPH_MSG_DATA_PAGES; data->pages = pages; data->length = length; data->alignment = alignment & ~PAGE_MASK; - list_add_tail(&data->links, &msg->data); msg->data_length += length; } EXPORT_SYMBOL(ceph_msg_data_add_pages); @@ -3311,11 +3273,11 @@ void ceph_msg_data_add_pagelist(struct ceph_msg *msg, BUG_ON(!pagelist); BUG_ON(!pagelist->length); - data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST); - BUG_ON(!data); + data = ceph_msg_data_add(msg); + data->type = CEPH_MSG_DATA_PAGELIST; + refcount_inc(&pagelist->refcnt); data->pagelist = pagelist; - list_add_tail(&data->links, &msg->data); msg->data_length += pagelist->length; } EXPORT_SYMBOL(ceph_msg_data_add_pagelist); @@ -3326,12 +3288,11 @@ void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos, { struct ceph_msg_data *data; - data = ceph_msg_data_create(CEPH_MSG_DATA_BIO); - 
BUG_ON(!data); + data = ceph_msg_data_add(msg); + data->type = CEPH_MSG_DATA_BIO; data->bio_pos = *bio_pos; data->bio_length = length; - list_add_tail(&data->links, &msg->data); msg->data_length += length; } EXPORT_SYMBOL(ceph_msg_data_add_bio); @@ -3342,11 +3303,10 @@ void ceph_msg_data_add_bvecs(struct ceph_msg *msg, { struct ceph_msg_data *data; - data = ceph_msg_data_create(CEPH_MSG_DATA_BVECS); - BUG_ON(!data); + data = ceph_msg_data_add(msg); + data->type = CEPH_MSG_DATA_BVECS; data->bvec_pos = *bvec_pos; - list_add_tail(&data->links, &msg->data); msg->data_length += bvec_pos->iter.bi_size; } EXPORT_SYMBOL(ceph_msg_data_add_bvecs); @@ -3355,8 +3315,8 @@ EXPORT_SYMBOL(ceph_msg_data_add_bvecs); * construct a new message with given type, size * the new msg has a ref count of 1. */ -struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, - bool can_fail) +struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items, + gfp_t flags, bool can_fail) { struct ceph_msg *m; @@ -3370,7 +3330,6 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, INIT_LIST_HEAD(&m->list_head); kref_init(&m->kref); - INIT_LIST_HEAD(&m->data); /* front */ if (front_len) { @@ -3385,6 +3344,15 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, } m->front_alloc_len = m->front.iov_len = front_len; + if (max_data_items) { + m->data = kmalloc_array(max_data_items, sizeof(*m->data), + flags); + if (!m->data) + goto out2; + + m->max_data_items = max_data_items; + } + dout("ceph_msg_new %p front %d\n", m, front_len); return m; @@ -3401,6 +3369,13 @@ out: } return NULL; } +EXPORT_SYMBOL(ceph_msg_new2); + +struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, + bool can_fail) +{ + return ceph_msg_new2(type, front_len, 0, flags, can_fail); +} EXPORT_SYMBOL(ceph_msg_new); /* @@ -3496,13 +3471,14 @@ static void ceph_msg_free(struct ceph_msg *m) { dout("%s %p\n", __func__, m); kvfree(m->front.iov_base); + kfree(m->data); kmem_cache_free(ceph_msg_cache, m); } static void ceph_msg_release(struct kref *kref) { struct ceph_msg *m = container_of(kref, struct ceph_msg, kref); - struct ceph_msg_data *data, *next; + int i; dout("%s %p\n", __func__, m); WARN_ON(!list_empty(&m->list_head)); @@ -3515,11 +3491,8 @@ static void ceph_msg_release(struct kref *kref) m->middle = NULL; } - list_for_each_entry_safe(data, next, &m->data, links) { - list_del_init(&data->links); - ceph_msg_data_destroy(data); - } - m->data_length = 0; + for (i = 0; i < m->num_data_items; i++) + ceph_msg_data_destroy(&m->data[i]); if (m->pool) ceph_msgpool_put(m->pool, m); diff --git a/net/ceph/msgpool.c b/net/ceph/msgpool.c index 72571535883f..e3ecb80cd182 100644 --- a/net/ceph/msgpool.c +++ b/net/ceph/msgpool.c @@ -14,7 +14,8 @@ static void *msgpool_alloc(gfp_t gfp_mask, void *arg) struct ceph_msgpool *pool = arg; struct ceph_msg *msg; - msg = ceph_msg_new(pool->type, pool->front_len, gfp_mask, true); + msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items, + gfp_mask, true); if (!msg) { dout("msgpool_alloc %s failed\n", pool->name); } else { @@ -35,11 +36,13 @@ static void msgpool_free(void *element, void *arg) } int ceph_msgpool_init(struct ceph_msgpool *pool, int type, - int front_len, int size, bool blocking, const char *name) + int front_len, int max_data_items, int size, + const char *name) { dout("msgpool %s init\n", name); pool->type = type; pool->front_len = front_len; + pool->max_data_items = max_data_items; pool->pool = mempool_create(size, msgpool_alloc, 
msgpool_free, pool); if (!pool->pool) return -ENOMEM; @@ -53,18 +56,21 @@ void ceph_msgpool_destroy(struct ceph_msgpool *pool) mempool_destroy(pool->pool); } -struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, - int front_len) +struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len, + int max_data_items) { struct ceph_msg *msg; - if (front_len > pool->front_len) { - dout("msgpool_get %s need front %d, pool size is %d\n", - pool->name, front_len, pool->front_len); - WARN_ON(1); + if (front_len > pool->front_len || + max_data_items > pool->max_data_items) { + pr_warn_ratelimited("%s need %d/%d, pool %s has %d/%d\n", + __func__, front_len, max_data_items, pool->name, + pool->front_len, pool->max_data_items); + WARN_ON_ONCE(1); /* try to alloc a fresh message */ - return ceph_msg_new(pool->type, front_len, GFP_NOFS, false); + return ceph_msg_new2(pool->type, front_len, max_data_items, + GFP_NOFS, false); } msg = mempool_alloc(pool->pool, GFP_NOFS); @@ -80,6 +86,9 @@ void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg) msg->front.iov_len = pool->front_len; msg->hdr.front_len = cpu_to_le32(pool->front_len); + msg->data_length = 0; + msg->num_data_items = 0; + kref_init(&msg->kref); /* retake single ref */ mempool_free(msg, pool->pool); } diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 60934bd8796c..d23a9f81f3d7 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -126,6 +126,9 @@ static void ceph_osd_data_init(struct ceph_osd_data *osd_data) osd_data->type = CEPH_OSD_DATA_TYPE_NONE; } +/* + * Consumes @pages if @own_pages is true. + */ static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages) @@ -138,6 +141,9 @@ static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data, osd_data->own_pages = own_pages; } +/* + * Consumes a ref on @pagelist. + */ static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data, struct ceph_pagelist *pagelist) { @@ -362,6 +368,8 @@ static void ceph_osd_data_release(struct ceph_osd_data *osd_data) num_pages = calc_pages_for((u64)osd_data->alignment, (u64)osd_data->length); ceph_release_page_vector(osd_data->pages, num_pages); + } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) { + ceph_pagelist_release(osd_data->pagelist); } ceph_osd_data_init(osd_data); } @@ -402,6 +410,9 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req, case CEPH_OSD_OP_LIST_WATCHERS: ceph_osd_data_release(&op->list_watchers.response_data); break; + case CEPH_OSD_OP_COPY_FROM: + ceph_osd_data_release(&op->copy_from.osd_data); + break; default: break; } @@ -606,12 +617,15 @@ static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc) return 8 + 4 + 4 + 4 + (oloc->pool_ns ? 
oloc->pool_ns->len : 0); } -int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp) +static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp, + int num_request_data_items, + int num_reply_data_items) { struct ceph_osd_client *osdc = req->r_osdc; struct ceph_msg *msg; int msg_size; + WARN_ON(req->r_request || req->r_reply); WARN_ON(ceph_oid_empty(&req->r_base_oid)); WARN_ON(ceph_oloc_empty(&req->r_base_oloc)); @@ -633,9 +647,11 @@ int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp) msg_size += 4 + 8; /* retry_attempt, features */ if (req->r_mempool) - msg = ceph_msgpool_get(&osdc->msgpool_op, 0); + msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size, + num_request_data_items); else - msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true); + msg = ceph_msg_new2(CEPH_MSG_OSD_OP, msg_size, + num_request_data_items, gfp, true); if (!msg) return -ENOMEM; @@ -648,9 +664,11 @@ int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp) msg_size += req->r_num_ops * sizeof(struct ceph_osd_op); if (req->r_mempool) - msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0); + msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size, + num_reply_data_items); else - msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true); + msg = ceph_msg_new2(CEPH_MSG_OSD_OPREPLY, msg_size, + num_reply_data_items, gfp, true); if (!msg) return -ENOMEM; @@ -658,7 +676,6 @@ int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp) return 0; } -EXPORT_SYMBOL(ceph_osdc_alloc_messages); static bool osd_req_opcode_valid(u16 opcode) { @@ -671,6 +688,65 @@ __CEPH_FORALL_OSD_OPS(GENERATE_CASE) } } +static void get_num_data_items(struct ceph_osd_request *req, + int *num_request_data_items, + int *num_reply_data_items) +{ + struct ceph_osd_req_op *op; + + *num_request_data_items = 0; + *num_reply_data_items = 0; + + for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) { + switch (op->op) { + /* request */ + case CEPH_OSD_OP_WRITE: + case CEPH_OSD_OP_WRITEFULL: + case CEPH_OSD_OP_SETXATTR: + case CEPH_OSD_OP_CMPXATTR: + case CEPH_OSD_OP_NOTIFY_ACK: + case CEPH_OSD_OP_COPY_FROM: + *num_request_data_items += 1; + break; + + /* reply */ + case CEPH_OSD_OP_STAT: + case CEPH_OSD_OP_READ: + case CEPH_OSD_OP_LIST_WATCHERS: + *num_reply_data_items += 1; + break; + + /* both */ + case CEPH_OSD_OP_NOTIFY: + *num_request_data_items += 1; + *num_reply_data_items += 1; + break; + case CEPH_OSD_OP_CALL: + *num_request_data_items += 2; + *num_reply_data_items += 1; + break; + + default: + WARN_ON(!osd_req_opcode_valid(op->op)); + break; + } + } +} + +/* + * oid, oloc and OSD op opcode(s) must be filled in before this function + * is called. + */ +int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp) +{ + int num_request_data_items, num_reply_data_items; + + get_num_data_items(req, &num_request_data_items, &num_reply_data_items); + return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items, + num_reply_data_items); +} +EXPORT_SYMBOL(ceph_osdc_alloc_messages); + /* * This is an osd op init function for opcodes that have no data or * other information associated with them. 
It also serves as a @@ -767,22 +843,19 @@ void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req, EXPORT_SYMBOL(osd_req_op_extent_dup_last); int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, - u16 opcode, const char *class, const char *method) + const char *class, const char *method) { - struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, - opcode, 0); + struct ceph_osd_req_op *op; struct ceph_pagelist *pagelist; size_t payload_len = 0; size_t size; - BUG_ON(opcode != CEPH_OSD_OP_CALL); + op = _osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0); - pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS); + pagelist = ceph_pagelist_alloc(GFP_NOFS); if (!pagelist) return -ENOMEM; - ceph_pagelist_init(pagelist); - op->cls.class_name = class; size = strlen(class); BUG_ON(size > (size_t) U8_MAX); @@ -815,12 +888,10 @@ int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which, BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR); - pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS); + pagelist = ceph_pagelist_alloc(GFP_NOFS); if (!pagelist) return -ENOMEM; - ceph_pagelist_init(pagelist); - payload_len = strlen(name); op->xattr.name_len = payload_len; ceph_pagelist_append(pagelist, name, payload_len); @@ -900,12 +971,6 @@ static void ceph_osdc_msg_data_add(struct ceph_msg *msg, static u32 osd_req_encode_op(struct ceph_osd_op *dst, const struct ceph_osd_req_op *src) { - if (WARN_ON(!osd_req_opcode_valid(src->op))) { - pr_err("unrecognized osd opcode %d\n", src->op); - - return 0; - } - switch (src->op) { case CEPH_OSD_OP_STAT: break; @@ -955,6 +1020,14 @@ static u32 osd_req_encode_op(struct ceph_osd_op *dst, case CEPH_OSD_OP_CREATE: case CEPH_OSD_OP_DELETE: break; + case CEPH_OSD_OP_COPY_FROM: + dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid); + dst->copy_from.src_version = + cpu_to_le64(src->copy_from.src_version); + dst->copy_from.flags = src->copy_from.flags; + dst->copy_from.src_fadvise_flags = + cpu_to_le32(src->copy_from.src_fadvise_flags); + break; default: pr_err("unsupported osd opcode %s\n", ceph_osd_op_name(src->op)); @@ -1038,7 +1111,15 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, if (flags & CEPH_OSD_FLAG_WRITE) req->r_data_offset = off; - r = ceph_osdc_alloc_messages(req, GFP_NOFS); + if (num_ops > 1) + /* + * This is a special case for ceph_writepages_start(), but it + * also covers ceph_uninline_data(). If more multi-op request + * use cases emerge, we will need a separate helper. + */ + r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_ops, 0); + else + r = ceph_osdc_alloc_messages(req, GFP_NOFS); if (r) goto fail; @@ -1845,48 +1926,55 @@ static bool should_plug_request(struct ceph_osd_request *req) return true; } -static void setup_request_data(struct ceph_osd_request *req, - struct ceph_msg *msg) +/* + * Keep get_num_data_items() in sync with this function. 
+ */ +static void setup_request_data(struct ceph_osd_request *req) { - u32 data_len = 0; - int i; + struct ceph_msg *request_msg = req->r_request; + struct ceph_msg *reply_msg = req->r_reply; + struct ceph_osd_req_op *op; - if (!list_empty(&msg->data)) + if (req->r_request->num_data_items || req->r_reply->num_data_items) return; - WARN_ON(msg->data_length); - for (i = 0; i < req->r_num_ops; i++) { - struct ceph_osd_req_op *op = &req->r_ops[i]; - + WARN_ON(request_msg->data_length || reply_msg->data_length); + for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) { switch (op->op) { /* request */ case CEPH_OSD_OP_WRITE: case CEPH_OSD_OP_WRITEFULL: WARN_ON(op->indata_len != op->extent.length); - ceph_osdc_msg_data_add(msg, &op->extent.osd_data); + ceph_osdc_msg_data_add(request_msg, + &op->extent.osd_data); break; case CEPH_OSD_OP_SETXATTR: case CEPH_OSD_OP_CMPXATTR: WARN_ON(op->indata_len != op->xattr.name_len + op->xattr.value_len); - ceph_osdc_msg_data_add(msg, &op->xattr.osd_data); + ceph_osdc_msg_data_add(request_msg, + &op->xattr.osd_data); break; case CEPH_OSD_OP_NOTIFY_ACK: - ceph_osdc_msg_data_add(msg, + ceph_osdc_msg_data_add(request_msg, &op->notify_ack.request_data); break; + case CEPH_OSD_OP_COPY_FROM: + ceph_osdc_msg_data_add(request_msg, + &op->copy_from.osd_data); + break; /* reply */ case CEPH_OSD_OP_STAT: - ceph_osdc_msg_data_add(req->r_reply, + ceph_osdc_msg_data_add(reply_msg, &op->raw_data_in); break; case CEPH_OSD_OP_READ: - ceph_osdc_msg_data_add(req->r_reply, + ceph_osdc_msg_data_add(reply_msg, &op->extent.osd_data); break; case CEPH_OSD_OP_LIST_WATCHERS: - ceph_osdc_msg_data_add(req->r_reply, + ceph_osdc_msg_data_add(reply_msg, &op->list_watchers.response_data); break; @@ -1895,25 +1983,23 @@ static void setup_request_data(struct ceph_osd_request *req, WARN_ON(op->indata_len != op->cls.class_len + op->cls.method_len + op->cls.indata_len); - ceph_osdc_msg_data_add(msg, &op->cls.request_info); + ceph_osdc_msg_data_add(request_msg, + &op->cls.request_info); /* optional, can be NONE */ - ceph_osdc_msg_data_add(msg, &op->cls.request_data); + ceph_osdc_msg_data_add(request_msg, + &op->cls.request_data); /* optional, can be NONE */ - ceph_osdc_msg_data_add(req->r_reply, + ceph_osdc_msg_data_add(reply_msg, &op->cls.response_data); break; case CEPH_OSD_OP_NOTIFY: - ceph_osdc_msg_data_add(msg, + ceph_osdc_msg_data_add(request_msg, &op->notify.request_data); - ceph_osdc_msg_data_add(req->r_reply, + ceph_osdc_msg_data_add(reply_msg, &op->notify.response_data); break; } - - data_len += op->indata_len; } - - WARN_ON(data_len != msg->data_length); } static void encode_pgid(void **p, const struct ceph_pg *pgid) @@ -1961,7 +2047,7 @@ static void encode_request_partial(struct ceph_osd_request *req, req->r_data_offset || req->r_snapc); } - setup_request_data(req, msg); + setup_request_data(req); encode_spgid(&p, &req->r_t.spgid); /* actual spg */ ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */ @@ -3001,11 +3087,21 @@ static void linger_submit(struct ceph_osd_linger_request *lreq) struct ceph_osd_client *osdc = lreq->osdc; struct ceph_osd *osd; + down_write(&osdc->lock); + linger_register(lreq); + if (lreq->is_watch) { + lreq->reg_req->r_ops[0].watch.cookie = lreq->linger_id; + lreq->ping_req->r_ops[0].watch.cookie = lreq->linger_id; + } else { + lreq->reg_req->r_ops[0].notify.cookie = lreq->linger_id; + } + calc_target(osdc, &lreq->t, NULL, false); osd = lookup_create_osd(osdc, lreq->t.osd, true); link_linger(osd, lreq); send_linger(lreq); + up_write(&osdc->lock); } 
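To make the "keep get_num_data_items() in sync" requirement concrete, here is a minimal sketch (not code from this patch; the osd_req_op_extent_* call sites and error handling are assumed) of how the two sides of the accounting meet for a single-op read:

	/* sketch: one CEPH_OSD_OP_READ -> 0 request data items, 1 reply data item */
	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, off, len, 0, 0);
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);

	/* ceph_osdc_alloc_messages() -> get_num_data_items() sizes
	 * r_request->data[] and r_reply->data[] from the op list above */
	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);

	/* encode_request_partial() -> setup_request_data() must then attach
	 * exactly those items via ceph_osdc_msg_data_add(); one item too many
	 * hits the BUG_ON() in ceph_msg_data_add() */

If a new opcode starts carrying data in only one of the two switch statements, the reserved array is either too small (BUG_ON) or silently oversized, which is why the comment above insists they stay in sync.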
static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq) @@ -4318,9 +4414,7 @@ static void handle_watch_notify(struct ceph_osd_client *osdc, lreq->notify_id, notify_id); } else if (!completion_done(&lreq->notify_finish_wait)) { struct ceph_msg_data *data = - list_first_entry_or_null(&msg->data, - struct ceph_msg_data, - links); + msg->num_data_items ? &msg->data[0] : NULL; if (data) { if (lreq->preply_pages) { @@ -4476,6 +4570,23 @@ alloc_linger_request(struct ceph_osd_linger_request *lreq) ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid); ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc); + return req; +} + +static struct ceph_osd_request * +alloc_watch_request(struct ceph_osd_linger_request *lreq, u8 watch_opcode) +{ + struct ceph_osd_request *req; + + req = alloc_linger_request(lreq); + if (!req) + return NULL; + + /* + * Pass 0 for cookie because we don't know it yet, it will be + * filled in by linger_submit(). + */ + osd_req_op_watch_init(req, 0, 0, watch_opcode); if (ceph_osdc_alloc_messages(req, GFP_NOIO)) { ceph_osdc_put_request(req); @@ -4514,27 +4625,19 @@ ceph_osdc_watch(struct ceph_osd_client *osdc, lreq->t.flags = CEPH_OSD_FLAG_WRITE; ktime_get_real_ts64(&lreq->mtime); - lreq->reg_req = alloc_linger_request(lreq); + lreq->reg_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_WATCH); if (!lreq->reg_req) { ret = -ENOMEM; goto err_put_lreq; } - lreq->ping_req = alloc_linger_request(lreq); + lreq->ping_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_PING); if (!lreq->ping_req) { ret = -ENOMEM; goto err_put_lreq; } - down_write(&osdc->lock); - linger_register(lreq); /* before osd_req_op_* */ - osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id, - CEPH_OSD_WATCH_OP_WATCH); - osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id, - CEPH_OSD_WATCH_OP_PING); linger_submit(lreq); - up_write(&osdc->lock); - ret = linger_reg_commit_wait(lreq); if (ret) { linger_cancel(lreq); @@ -4599,11 +4702,10 @@ static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which, op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0); - pl = kmalloc(sizeof(*pl), GFP_NOIO); + pl = ceph_pagelist_alloc(GFP_NOIO); if (!pl) return -ENOMEM; - ceph_pagelist_init(pl); ret = ceph_pagelist_encode_64(pl, notify_id); ret |= ceph_pagelist_encode_64(pl, cookie); if (payload) { @@ -4641,12 +4743,12 @@ int ceph_osdc_notify_ack(struct ceph_osd_client *osdc, ceph_oloc_copy(&req->r_base_oloc, oloc); req->r_flags = CEPH_OSD_FLAG_READ; - ret = ceph_osdc_alloc_messages(req, GFP_NOIO); + ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload, + payload_len); if (ret) goto out_put_req; - ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload, - payload_len); + ret = ceph_osdc_alloc_messages(req, GFP_NOIO); if (ret) goto out_put_req; @@ -4670,11 +4772,10 @@ static int osd_req_op_notify_init(struct ceph_osd_request *req, int which, op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0); op->notify.cookie = cookie; - pl = kmalloc(sizeof(*pl), GFP_NOIO); + pl = ceph_pagelist_alloc(GFP_NOIO); if (!pl) return -ENOMEM; - ceph_pagelist_init(pl); ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */ ret |= ceph_pagelist_encode_32(pl, timeout); ret |= ceph_pagelist_encode_32(pl, payload_len); @@ -4733,29 +4834,30 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc, goto out_put_lreq; } + /* + * Pass 0 for cookie because we don't know it yet, it will be + * filled in by linger_submit(). 
+ */ + ret = osd_req_op_notify_init(lreq->reg_req, 0, 0, 1, timeout, + payload, payload_len); + if (ret) + goto out_put_lreq; + /* for notify_id */ pages = ceph_alloc_page_vector(1, GFP_NOIO); if (IS_ERR(pages)) { ret = PTR_ERR(pages); goto out_put_lreq; } - - down_write(&osdc->lock); - linger_register(lreq); /* before osd_req_op_* */ - ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1, - timeout, payload, payload_len); - if (ret) { - linger_unregister(lreq); - up_write(&osdc->lock); - ceph_release_page_vector(pages, 1); - goto out_put_lreq; - } ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify, response_data), pages, PAGE_SIZE, 0, false, true); - linger_submit(lreq); - up_write(&osdc->lock); + ret = ceph_osdc_alloc_messages(lreq->reg_req, GFP_NOIO); + if (ret) + goto out_put_lreq; + + linger_submit(lreq); ret = linger_reg_commit_wait(lreq); if (!ret) ret = linger_notify_finish_wait(lreq); @@ -4881,10 +4983,6 @@ int ceph_osdc_list_watchers(struct ceph_osd_client *osdc, ceph_oloc_copy(&req->r_base_oloc, oloc); req->r_flags = CEPH_OSD_FLAG_READ; - ret = ceph_osdc_alloc_messages(req, GFP_NOIO); - if (ret) - goto out_put_req; - pages = ceph_alloc_page_vector(1, GFP_NOIO); if (IS_ERR(pages)) { ret = PTR_ERR(pages); @@ -4896,6 +4994,10 @@ int ceph_osdc_list_watchers(struct ceph_osd_client *osdc, response_data), pages, PAGE_SIZE, 0, false, true); + ret = ceph_osdc_alloc_messages(req, GFP_NOIO); + if (ret) + goto out_put_req; + ceph_osdc_start_request(osdc, req, false); ret = ceph_osdc_wait_request(osdc, req); if (ret >= 0) { @@ -4958,11 +5060,7 @@ int ceph_osdc_call(struct ceph_osd_client *osdc, ceph_oloc_copy(&req->r_base_oloc, oloc); req->r_flags = flags; - ret = ceph_osdc_alloc_messages(req, GFP_NOIO); - if (ret) - goto out_put_req; - - ret = osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, class, method); + ret = osd_req_op_cls_init(req, 0, class, method); if (ret) goto out_put_req; @@ -4973,6 +5071,10 @@ int ceph_osdc_call(struct ceph_osd_client *osdc, osd_req_op_cls_response_data_pages(req, 0, &resp_page, *resp_len, 0, false, false); + ret = ceph_osdc_alloc_messages(req, GFP_NOIO); + if (ret) + goto out_put_req; + ceph_osdc_start_request(osdc, req, false); ret = ceph_osdc_wait_request(osdc, req); if (ret >= 0) { @@ -5021,11 +5123,12 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) goto out_map; err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP, - PAGE_SIZE, 10, true, "osd_op"); + PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, "osd_op"); if (err < 0) goto out_mempool; err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY, - PAGE_SIZE, 10, true, "osd_op_reply"); + PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, + "osd_op_reply"); if (err < 0) goto out_msgpool; @@ -5168,6 +5271,80 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, } EXPORT_SYMBOL(ceph_osdc_writepages); +static int osd_req_op_copy_from_init(struct ceph_osd_request *req, + u64 src_snapid, u64 src_version, + struct ceph_object_id *src_oid, + struct ceph_object_locator *src_oloc, + u32 src_fadvise_flags, + u32 dst_fadvise_flags, + u8 copy_from_flags) +{ + struct ceph_osd_req_op *op; + struct page **pages; + void *p, *end; + + pages = ceph_alloc_page_vector(1, GFP_KERNEL); + if (IS_ERR(pages)) + return PTR_ERR(pages); + + op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM, dst_fadvise_flags); + op->copy_from.snapid = src_snapid; + op->copy_from.src_version = src_version; + op->copy_from.flags = copy_from_flags; + op->copy_from.src_fadvise_flags 
= src_fadvise_flags; + + p = page_address(pages[0]); + end = p + PAGE_SIZE; + ceph_encode_string(&p, end, src_oid->name, src_oid->name_len); + encode_oloc(&p, end, src_oloc); + op->indata_len = PAGE_SIZE - (end - p); + + ceph_osd_data_pages_init(&op->copy_from.osd_data, pages, + op->indata_len, 0, false, true); + return 0; +} + +int ceph_osdc_copy_from(struct ceph_osd_client *osdc, + u64 src_snapid, u64 src_version, + struct ceph_object_id *src_oid, + struct ceph_object_locator *src_oloc, + u32 src_fadvise_flags, + struct ceph_object_id *dst_oid, + struct ceph_object_locator *dst_oloc, + u32 dst_fadvise_flags, + u8 copy_from_flags) +{ + struct ceph_osd_request *req; + int ret; + + req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL); + if (!req) + return -ENOMEM; + + req->r_flags = CEPH_OSD_FLAG_WRITE; + + ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc); + ceph_oid_copy(&req->r_t.base_oid, dst_oid); + + ret = osd_req_op_copy_from_init(req, src_snapid, src_version, src_oid, + src_oloc, src_fadvise_flags, + dst_fadvise_flags, copy_from_flags); + if (ret) + goto out; + + ret = ceph_osdc_alloc_messages(req, GFP_KERNEL); + if (ret) + goto out; + + ceph_osdc_start_request(osdc, req, false); + ret = ceph_osdc_wait_request(osdc, req); + +out: + ceph_osdc_put_request(req); + return ret; +} +EXPORT_SYMBOL(ceph_osdc_copy_from); + int __init ceph_osdc_setup(void) { size_t size = sizeof(struct ceph_osd_request) + @@ -5295,7 +5472,7 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr) u32 front_len = le32_to_cpu(hdr->front_len); u32 data_len = le32_to_cpu(hdr->data_len); - m = ceph_msg_new(type, front_len, GFP_NOIO, false); + m = ceph_msg_new2(type, front_len, 1, GFP_NOIO, false); if (!m) return NULL; diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c index 2ea0564771d2..65e34f78b05d 100644 --- a/net/ceph/pagelist.c +++ b/net/ceph/pagelist.c @@ -6,6 +6,26 @@ #include <linux/highmem.h> #include <linux/ceph/pagelist.h> +struct ceph_pagelist *ceph_pagelist_alloc(gfp_t gfp_flags) +{ + struct ceph_pagelist *pl; + + pl = kmalloc(sizeof(*pl), gfp_flags); + if (!pl) + return NULL; + + INIT_LIST_HEAD(&pl->head); + pl->mapped_tail = NULL; + pl->length = 0; + pl->room = 0; + INIT_LIST_HEAD(&pl->free_list); + pl->num_pages_free = 0; + refcount_set(&pl->refcnt, 1); + + return pl; +} +EXPORT_SYMBOL(ceph_pagelist_alloc); + static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl) { if (pl->mapped_tail) { diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index f679c7a7d761..e01274bd5e3e 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -3600,6 +3600,11 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, return -EINVAL; } + if (dev->type != ARPHRD_ETHER) { + NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices"); + return -EINVAL; + } + addr = nla_data(tb[NDA_LLADDR]); err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); @@ -3704,6 +3709,11 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, return -EINVAL; } + if (dev->type != ARPHRD_ETHER) { + NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices"); + return -EINVAL; + } + addr = nla_data(tb[NDA_LLADDR]); err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack); diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 4da39446da2d..765b2b32c4a4 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -111,13 +111,10 @@ #ifdef CONFIG_IP_MULTICAST /* Parameter names and values are taken from igmp-v2-06 draft */ -#define IGMP_V1_ROUTER_PRESENT_TIMEOUT 
(400*HZ) -#define IGMP_V2_ROUTER_PRESENT_TIMEOUT (400*HZ) #define IGMP_V2_UNSOLICITED_REPORT_INTERVAL (10*HZ) #define IGMP_V3_UNSOLICITED_REPORT_INTERVAL (1*HZ) +#define IGMP_QUERY_INTERVAL (125*HZ) #define IGMP_QUERY_RESPONSE_INTERVAL (10*HZ) -#define IGMP_QUERY_ROBUSTNESS_VARIABLE 2 - #define IGMP_INITIAL_REPORT_DELAY (1) @@ -935,13 +932,15 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, max_delay = IGMP_QUERY_RESPONSE_INTERVAL; in_dev->mr_v1_seen = jiffies + - IGMP_V1_ROUTER_PRESENT_TIMEOUT; + (in_dev->mr_qrv * in_dev->mr_qi) + + in_dev->mr_qri; group = 0; } else { /* v2 router present */ max_delay = ih->code*(HZ/IGMP_TIMER_SCALE); in_dev->mr_v2_seen = jiffies + - IGMP_V2_ROUTER_PRESENT_TIMEOUT; + (in_dev->mr_qrv * in_dev->mr_qi) + + in_dev->mr_qri; } /* cancel the interface change timer */ in_dev->mr_ifc_count = 0; @@ -981,8 +980,21 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, if (!max_delay) max_delay = 1; /* can't mod w/ 0 */ in_dev->mr_maxdelay = max_delay; - if (ih3->qrv) - in_dev->mr_qrv = ih3->qrv; + + /* RFC3376, 4.1.6. QRV and 4.1.7. QQIC, when the most recently + * received value was zero, use the default or statically + * configured value. + */ + in_dev->mr_qrv = ih3->qrv ?: net->ipv4.sysctl_igmp_qrv; + in_dev->mr_qi = IGMPV3_QQIC(ih3->qqic)*HZ ?: IGMP_QUERY_INTERVAL; + + /* RFC3376, 8.3. Query Response Interval: + * The number of seconds represented by the [Query Response + * Interval] must be less than the [Query Interval]. + */ + if (in_dev->mr_qri >= in_dev->mr_qi) + in_dev->mr_qri = (in_dev->mr_qi/HZ - 1)*HZ; + if (!group) { /* general query */ if (ih3->nsrcs) return true; /* no sources allowed */ @@ -1723,18 +1735,30 @@ void ip_mc_down(struct in_device *in_dev) ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS); } -void ip_mc_init_dev(struct in_device *in_dev) -{ #ifdef CONFIG_IP_MULTICAST +static void ip_mc_reset(struct in_device *in_dev) +{ struct net *net = dev_net(in_dev->dev); + + in_dev->mr_qi = IGMP_QUERY_INTERVAL; + in_dev->mr_qri = IGMP_QUERY_RESPONSE_INTERVAL; + in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv; +} +#else +static void ip_mc_reset(struct in_device *in_dev) +{ +} #endif + +void ip_mc_init_dev(struct in_device *in_dev) +{ ASSERT_RTNL(); #ifdef CONFIG_IP_MULTICAST timer_setup(&in_dev->mr_gq_timer, igmp_gq_timer_expire, 0); timer_setup(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire, 0); - in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv; #endif + ip_mc_reset(in_dev); spin_lock_init(&in_dev->mc_tomb_lock); } @@ -1744,15 +1768,10 @@ void ip_mc_init_dev(struct in_device *in_dev) void ip_mc_up(struct in_device *in_dev) { struct ip_mc_list *pmc; -#ifdef CONFIG_IP_MULTICAST - struct net *net = dev_net(in_dev->dev); -#endif ASSERT_RTNL(); -#ifdef CONFIG_IP_MULTICAST - in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv; -#endif + ip_mc_reset(in_dev); ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); for_each_pmc_rtnl(in_dev, pmc) { diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index b7918d4caa30..3b45fe530f91 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -145,6 +145,7 @@ msg_bytes_ready: ret = err; goto out; } + copied = -EAGAIN; } ret = copied; out: diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index d4020c5e831d..2526be6b3d90 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -1616,7 +1616,7 @@ ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen) EnterFunction(7); /* Receive a packet */ - iov_iter_kvec(&msg.msg_iter, READ | 
ITER_KVEC, &iov, 1, buflen); + iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, buflen); len = sock_recvmsg(sock, &msg, MSG_DONTWAIT); if (len < 0) return len; diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index a70097ecf33c..865ecef68196 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -3030,7 +3030,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, * is already present */ if (mac_proto != MAC_PROTO_NONE) return -EINVAL; - mac_proto = MAC_PROTO_NONE; + mac_proto = MAC_PROTO_ETHERNET; break; case OVS_ACTION_ATTR_POP_ETH: @@ -3038,7 +3038,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, return -EINVAL; if (vlan_tci & htons(VLAN_TAG_PRESENT)) return -EINVAL; - mac_proto = MAC_PROTO_ETHERNET; + mac_proto = MAC_PROTO_NONE; break; case OVS_ACTION_ATTR_PUSH_NSH: diff --git a/net/sctp/associola.c b/net/sctp/associola.c index a827a1f562bf..6a28b96e779e 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -499,8 +499,9 @@ void sctp_assoc_set_primary(struct sctp_association *asoc, void sctp_assoc_rm_peer(struct sctp_association *asoc, struct sctp_transport *peer) { - struct list_head *pos; - struct sctp_transport *transport; + struct sctp_transport *transport; + struct list_head *pos; + struct sctp_chunk *ch; pr_debug("%s: association:%p addr:%pISpc\n", __func__, asoc, &peer->ipaddr.sa); @@ -564,7 +565,6 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc, */ if (!list_empty(&peer->transmitted)) { struct sctp_transport *active = asoc->peer.active_path; - struct sctp_chunk *ch; /* Reset the transport of each chunk on this list */ list_for_each_entry(ch, &peer->transmitted, @@ -586,6 +586,10 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc, sctp_transport_hold(active); } + list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) + if (ch->transport == peer) + ch->transport = NULL; + asoc->peer.transport_count--; sctp_transport_free(peer); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index fc0386e8ff23..739f3e50120d 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -7083,14 +7083,15 @@ static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len, } policy = params.sprstat_policy; - if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL))) + if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)) || + ((policy & SCTP_PR_SCTP_ALL) && (policy & SCTP_PR_SCTP_MASK))) goto out; asoc = sctp_id2assoc(sk, params.sprstat_assoc_id); if (!asoc) goto out; - if (policy & SCTP_PR_SCTP_ALL) { + if (policy == SCTP_PR_SCTP_ALL) { params.sprstat_abandoned_unsent = 0; params.sprstat_abandoned_sent = 0; for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) { @@ -7142,7 +7143,8 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len, } policy = params.sprstat_policy; - if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL))) + if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)) || + ((policy & SCTP_PR_SCTP_ALL) && (policy & SCTP_PR_SCTP_MASK))) goto out; asoc = sctp_id2assoc(sk, params.sprstat_assoc_id); diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 52241d679cc9..89c3a8c7859a 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -286,7 +286,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, */ krflags = MSG_PEEK | MSG_WAITALL; smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME; - iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, + 
iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, sizeof(struct smc_clc_msg_hdr)); len = sock_recvmsg(smc->clcsock, &msg, krflags); if (signal_pending(current)) { @@ -325,7 +325,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, /* receive the complete CLC message */ memset(&msg, 0, sizeof(struct msghdr)); - iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, datlen); + iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, datlen); krflags = MSG_WAITALL; len = sock_recvmsg(smc->clcsock, &msg, krflags); if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) { diff --git a/net/socket.c b/net/socket.c index 99c96851469f..593826e11a53 100644 --- a/net/socket.c +++ b/net/socket.c @@ -635,7 +635,7 @@ EXPORT_SYMBOL(sock_sendmsg); int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t size) { - iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, vec, num, size); + iov_iter_kvec(&msg->msg_iter, WRITE, vec, num, size); return sock_sendmsg(sock, msg); } EXPORT_SYMBOL(kernel_sendmsg); @@ -648,7 +648,7 @@ int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg, if (!sock->ops->sendmsg_locked) return sock_no_sendmsg_locked(sk, msg, size); - iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, vec, num, size); + iov_iter_kvec(&msg->msg_iter, WRITE, vec, num, size); return sock->ops->sendmsg_locked(sk, msg, msg_data_left(msg)); } @@ -823,7 +823,7 @@ int kernel_recvmsg(struct socket *sock, struct msghdr *msg, mm_segment_t oldfs = get_fs(); int result; - iov_iter_kvec(&msg->msg_iter, READ | ITER_KVEC, vec, num, size); + iov_iter_kvec(&msg->msg_iter, READ, vec, num, size); set_fs(KERNEL_DS); result = sock_recvmsg(sock, msg, flags); set_fs(oldfs); diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 7f0424dfa8f6..eab71fc7af3e 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -274,6 +274,7 @@ out_err: static int gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx) { + u32 seq_send; int tmp; p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); @@ -315,9 +316,10 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx) p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime)); if (IS_ERR(p)) goto out_err; - p = simple_get_bytes(p, end, &ctx->seq_send, sizeof(ctx->seq_send)); + p = simple_get_bytes(p, end, &seq_send, sizeof(seq_send)); if (IS_ERR(p)) goto out_err; + atomic_set(&ctx->seq_send, seq_send); p = simple_get_netobj(p, end, &ctx->mech_used); if (IS_ERR(p)) goto out_err; @@ -607,6 +609,7 @@ static int gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx, gfp_t gfp_mask) { + u64 seq_send64; int keylen; p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags)); @@ -617,14 +620,15 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx, p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime)); if (IS_ERR(p)) goto out_err; - p = simple_get_bytes(p, end, &ctx->seq_send64, sizeof(ctx->seq_send64)); + p = simple_get_bytes(p, end, &seq_send64, sizeof(seq_send64)); if (IS_ERR(p)) goto out_err; + atomic64_set(&ctx->seq_send64, seq_send64); /* set seq_send for use by "older" enctypes */ - ctx->seq_send = ctx->seq_send64; - if (ctx->seq_send64 != ctx->seq_send) { - dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__, - (unsigned long)ctx->seq_send64, ctx->seq_send); + atomic_set(&ctx->seq_send, seq_send64); + if (seq_send64 != atomic_read(&ctx->seq_send)) { 
+ dprintk("%s: seq_send64 %llx, seq_send %x overflow?\n", __func__, + seq_send64, atomic_read(&ctx->seq_send)); p = ERR_PTR(-EINVAL); goto out_err; } diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index b4adeb06660b..48fe4a591b54 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -123,30 +123,6 @@ setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token) return krb5_hdr; } -u32 -gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx) -{ - u32 old, seq_send = READ_ONCE(ctx->seq_send); - - do { - old = seq_send; - seq_send = cmpxchg(&ctx->seq_send, old, old + 1); - } while (old != seq_send); - return seq_send; -} - -u64 -gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx) -{ - u64 old, seq_send = READ_ONCE(ctx->seq_send); - - do { - old = seq_send; - seq_send = cmpxchg64(&ctx->seq_send64, old, old + 1); - } while (old != seq_send); - return seq_send; -} - static u32 gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text, struct xdr_netobj *token) @@ -177,7 +153,7 @@ gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text, memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len); - seq_send = gss_seq_send_fetch_and_inc(ctx); + seq_send = atomic_fetch_inc(&ctx->seq_send); if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff, seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)) @@ -205,7 +181,7 @@ gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text, /* Set up the sequence number. Now 64-bits in clear * text and w/o direction indicator */ - seq_send_be64 = cpu_to_be64(gss_seq_send64_fetch_and_inc(ctx)); + seq_send_be64 = cpu_to_be64(atomic64_fetch_inc(&ctx->seq_send64)); memcpy(krb5_hdr + 8, (char *) &seq_send_be64, 8); if (ctx->initiate) { diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index 962fa84e6db1..5cdde6cb703a 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -228,7 +228,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len); - seq_send = gss_seq_send_fetch_and_inc(kctx); + seq_send = atomic_fetch_inc(&kctx->seq_send); /* XXX would probably be more efficient to compute checksum * and encrypt at the same time: */ @@ -475,7 +475,7 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset, *be16ptr++ = 0; be64ptr = (__be64 *)be16ptr; - *be64ptr = cpu_to_be64(gss_seq_send64_fetch_and_inc(kctx)); + *be64ptr = cpu_to_be64(atomic64_fetch_inc(&kctx->seq_send64)); err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages); if (err) diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 3b525accaa68..986f3ed7d1a2 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -336,7 +336,7 @@ static ssize_t svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, rqstp->rq_xprt_hlen = 0; clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); - iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, iov, nr, buflen); + iov_iter_kvec(&msg.msg_iter, READ, iov, nr, buflen); if (base != 0) { iov_iter_advance(&msg.msg_iter, base); buflen -= base; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 1b51e04d3566..ae77c71c1f64 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -361,7 +361,7 @@ static ssize_t xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags, struct kvec *kvec, size_t count, size_t seek) { - iov_iter_kvec(&msg->msg_iter, READ | ITER_KVEC, kvec, 1, count); + iov_iter_kvec(&msg->msg_iter, READ, kvec, 1, count); 
return xs_sock_recvmsg(sock, msg, flags, seek); } @@ -370,7 +370,7 @@ xs_read_bvec(struct socket *sock, struct msghdr *msg, int flags, struct bio_vec *bvec, unsigned long nr, size_t count, size_t seek) { - iov_iter_bvec(&msg->msg_iter, READ | ITER_BVEC, bvec, nr, count); + iov_iter_bvec(&msg->msg_iter, READ, bvec, nr, count); return xs_sock_recvmsg(sock, msg, flags, seek); } diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index 4bdea0057171..efb16f69bd2c 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c @@ -394,7 +394,7 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con) iov.iov_base = &s; iov.iov_len = sizeof(s); msg.msg_name = NULL; - iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, iov.iov_len); + iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, iov.iov_len); ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT); if (ret == -EWOULDBLOCK) return -EWOULDBLOCK; diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 276edbc04f38..d753e362d2d9 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -489,7 +489,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page, iov.iov_base = kaddr + offset; iov.iov_len = size; - iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, &iov, 1, size); + iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size); rc = tls_push_data(sk, &msg_iter, size, flags, TLS_RECORD_TYPE_DATA); kunmap(page); @@ -538,7 +538,7 @@ static int tls_device_push_pending_record(struct sock *sk, int flags) { struct iov_iter msg_iter; - iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, NULL, 0, 0); + iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0); return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA); } diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 5cd88ba8acd1..7b1af8b59cd2 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -799,7 +799,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) struct crypto_tfm *tfm = crypto_aead_tfm(ctx->aead_send); bool async_capable = tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC; unsigned char record_type = TLS_RECORD_TYPE_DATA; - bool is_kvec = msg->msg_iter.type & ITER_KVEC; + bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); bool eor = !(msg->msg_flags & MSG_MORE); size_t try_to_copy, copied = 0; struct sk_msg *msg_pl, *msg_en; @@ -1457,7 +1457,7 @@ int tls_sw_recvmsg(struct sock *sk, bool cmsg = false; int target, err = 0; long timeo; - bool is_kvec = msg->msg_iter.type & ITER_KVEC; + bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); int num_async = 0; flags |= nonblock; diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig index 4a9ee2d83158..140270a13d54 100644 --- a/net/xfrm/Kconfig +++ b/net/xfrm/Kconfig @@ -8,7 +8,6 @@ config XFRM config XFRM_OFFLOAD bool - depends on XFRM config XFRM_ALGO tristate diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index b669262682c9..dc4a9f1fb941 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -2077,10 +2077,8 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen struct xfrm_mgr *km; struct xfrm_policy *pol = NULL; -#ifdef CONFIG_COMPAT if (in_compat_syscall()) return -EOPNOTSUPP; -#endif if (!optval && !optlen) { xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index ca7a207b81a9..c9a84e22f5d5 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -2621,10 +2621,8 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, const struct xfrm_link *link; int type, err; -#ifdef CONFIG_COMPAT if (in_compat_syscall()) return 
-EOPNOTSUPP; -#endif type = nlh->nlmsg_type; if (type > XFRM_MSG_MAX) diff --git a/samples/vfio-mdev/mbochs.c b/samples/vfio-mdev/mbochs.c index 2535c3677c7b..ca7960adf5a3 100644 --- a/samples/vfio-mdev/mbochs.c +++ b/samples/vfio-mdev/mbochs.c @@ -71,11 +71,19 @@ #define MBOCHS_NAME "mbochs" #define MBOCHS_CLASS_NAME "mbochs" +#define MBOCHS_EDID_REGION_INDEX VFIO_PCI_NUM_REGIONS +#define MBOCHS_NUM_REGIONS (MBOCHS_EDID_REGION_INDEX+1) + #define MBOCHS_CONFIG_SPACE_SIZE 0xff #define MBOCHS_MMIO_BAR_OFFSET PAGE_SIZE #define MBOCHS_MMIO_BAR_SIZE PAGE_SIZE -#define MBOCHS_MEMORY_BAR_OFFSET (MBOCHS_MMIO_BAR_OFFSET + \ +#define MBOCHS_EDID_OFFSET (MBOCHS_MMIO_BAR_OFFSET + \ MBOCHS_MMIO_BAR_SIZE) +#define MBOCHS_EDID_SIZE PAGE_SIZE +#define MBOCHS_MEMORY_BAR_OFFSET (MBOCHS_EDID_OFFSET + \ + MBOCHS_EDID_SIZE) + +#define MBOCHS_EDID_BLOB_OFFSET (MBOCHS_EDID_SIZE/2) #define STORE_LE16(addr, val) (*(u16 *)addr = val) #define STORE_LE32(addr, val) (*(u32 *)addr = val) @@ -95,16 +103,24 @@ MODULE_PARM_DESC(mem, "megabytes available to " MBOCHS_NAME " devices"); static const struct mbochs_type { const char *name; u32 mbytes; + u32 max_x; + u32 max_y; } mbochs_types[] = { { .name = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_1, .mbytes = 4, + .max_x = 800, + .max_y = 600, }, { .name = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_2, .mbytes = 16, + .max_x = 1920, + .max_y = 1440, }, { .name = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_3, .mbytes = 64, + .max_x = 0, + .max_y = 0, }, }; @@ -115,6 +131,11 @@ static struct cdev mbochs_cdev; static struct device mbochs_dev; static int mbochs_used_mbytes; +struct vfio_region_info_ext { + struct vfio_region_info base; + struct vfio_region_info_cap_type type; +}; + struct mbochs_mode { u32 drm_format; u32 bytepp; @@ -144,13 +165,14 @@ struct mdev_state { u32 memory_bar_mask; struct mutex ops_lock; struct mdev_device *mdev; - struct vfio_device_info dev_info; const struct mbochs_type *type; u16 vbe[VBE_DISPI_INDEX_COUNT]; u64 memsize; struct page **pages; pgoff_t pagecount; + struct vfio_region_gfx_edid edid_regs; + u8 edid_blob[0x400]; struct list_head dmabufs; u32 active_id; @@ -342,10 +364,20 @@ static void handle_mmio_read(struct mdev_state *mdev_state, u16 offset, char *buf, u32 count) { struct device *dev = mdev_dev(mdev_state->mdev); + struct vfio_region_gfx_edid *edid; u16 reg16 = 0; int index; switch (offset) { + case 0x000 ... 0x3ff: /* edid block */ + edid = &mdev_state->edid_regs; + if (edid->link_state != VFIO_DEVICE_GFX_LINK_STATE_UP || + offset >= edid->edid_size) { + memset(buf, 0, count); + break; + } + memcpy(buf, mdev_state->edid_blob + offset, count); + break; case 0x500 ... 
0x515: /* bochs dispi interface */ if (count != 2) goto unhandled; @@ -365,6 +397,44 @@ unhandled: } } +static void handle_edid_regs(struct mdev_state *mdev_state, u16 offset, + char *buf, u32 count, bool is_write) +{ + char *regs = (void *)&mdev_state->edid_regs; + + if (offset + count > sizeof(mdev_state->edid_regs)) + return; + if (count != 4) + return; + if (offset % 4) + return; + + if (is_write) { + switch (offset) { + case offsetof(struct vfio_region_gfx_edid, link_state): + case offsetof(struct vfio_region_gfx_edid, edid_size): + memcpy(regs + offset, buf, count); + break; + default: + /* read-only regs */ + break; + } + } else { + memcpy(buf, regs + offset, count); + } +} + +static void handle_edid_blob(struct mdev_state *mdev_state, u16 offset, + char *buf, u32 count, bool is_write) +{ + if (offset + count > mdev_state->edid_regs.edid_max_size) + return; + if (is_write) + memcpy(mdev_state->edid_blob + offset, buf, count); + else + memcpy(buf, mdev_state->edid_blob + offset, count); +} + static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count, loff_t pos, bool is_write) { @@ -384,13 +454,25 @@ static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count, memcpy(buf, (mdev_state->vconfig + pos), count); } else if (pos >= MBOCHS_MMIO_BAR_OFFSET && - pos + count <= MBOCHS_MEMORY_BAR_OFFSET) { + pos + count <= (MBOCHS_MMIO_BAR_OFFSET + + MBOCHS_MMIO_BAR_SIZE)) { pos -= MBOCHS_MMIO_BAR_OFFSET; if (is_write) handle_mmio_write(mdev_state, pos, buf, count); else handle_mmio_read(mdev_state, pos, buf, count); + } else if (pos >= MBOCHS_EDID_OFFSET && + pos + count <= (MBOCHS_EDID_OFFSET + + MBOCHS_EDID_SIZE)) { + pos -= MBOCHS_EDID_OFFSET; + if (pos < MBOCHS_EDID_BLOB_OFFSET) { + handle_edid_regs(mdev_state, pos, buf, count, is_write); + } else { + pos -= MBOCHS_EDID_BLOB_OFFSET; + handle_edid_blob(mdev_state, pos, buf, count, is_write); + } + } else if (pos >= MBOCHS_MEMORY_BAR_OFFSET && pos + count <= MBOCHS_MEMORY_BAR_OFFSET + mdev_state->memsize) { @@ -471,6 +553,10 @@ static int mbochs_create(struct kobject *kobj, struct mdev_device *mdev) mdev_state->next_id = 1; mdev_state->type = type; + mdev_state->edid_regs.max_xres = type->max_x; + mdev_state->edid_regs.max_yres = type->max_y; + mdev_state->edid_regs.edid_offset = MBOCHS_EDID_BLOB_OFFSET; + mdev_state->edid_regs.edid_max_size = sizeof(mdev_state->edid_blob); mbochs_create_config_space(mdev_state); mbochs_reset(mdev); @@ -932,16 +1018,16 @@ static int mbochs_dmabuf_export(struct mbochs_dmabuf *dmabuf) } static int mbochs_get_region_info(struct mdev_device *mdev, - struct vfio_region_info *region_info, - u16 *cap_type_id, void **cap_type) + struct vfio_region_info_ext *ext) { + struct vfio_region_info *region_info = &ext->base; struct mdev_state *mdev_state; mdev_state = mdev_get_drvdata(mdev); if (!mdev_state) return -EINVAL; - if (region_info->index >= VFIO_PCI_NUM_REGIONS) + if (region_info->index >= MBOCHS_NUM_REGIONS) return -EINVAL; switch (region_info->index) { @@ -964,6 +1050,20 @@ static int mbochs_get_region_info(struct mdev_device *mdev, region_info->flags = (VFIO_REGION_INFO_FLAG_READ | VFIO_REGION_INFO_FLAG_WRITE); break; + case MBOCHS_EDID_REGION_INDEX: + ext->base.argsz = sizeof(*ext); + ext->base.offset = MBOCHS_EDID_OFFSET; + ext->base.size = MBOCHS_EDID_SIZE; + ext->base.flags = (VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE | + VFIO_REGION_INFO_FLAG_CAPS); + ext->base.cap_offset = offsetof(typeof(*ext), type); + ext->type.header.id = VFIO_REGION_INFO_CAP_TYPE; + 
ext->type.header.version = 1; + ext->type.header.next = 0; + ext->type.type = VFIO_REGION_TYPE_GFX; + ext->type.subtype = VFIO_REGION_SUBTYPE_GFX_EDID; + break; default: region_info->size = 0; region_info->offset = 0; @@ -984,7 +1084,7 @@ static int mbochs_get_device_info(struct mdev_device *mdev, struct vfio_device_info *dev_info) { dev_info->flags = VFIO_DEVICE_FLAGS_PCI; - dev_info->num_regions = VFIO_PCI_NUM_REGIONS; + dev_info->num_regions = MBOCHS_NUM_REGIONS; dev_info->num_irqs = VFIO_PCI_NUM_IRQS; return 0; } @@ -1084,7 +1184,7 @@ static long mbochs_ioctl(struct mdev_device *mdev, unsigned int cmd, unsigned long arg) { int ret = 0; - unsigned long minsz; + unsigned long minsz, outsz; struct mdev_state *mdev_state; mdev_state = mdev_get_drvdata(mdev); @@ -1106,8 +1206,6 @@ static long mbochs_ioctl(struct mdev_device *mdev, unsigned int cmd, if (ret) return ret; - memcpy(&mdev_state->dev_info, &info, sizeof(info)); - if (copy_to_user((void __user *)arg, &info, minsz)) return -EFAULT; @@ -1115,24 +1213,24 @@ static long mbochs_ioctl(struct mdev_device *mdev, unsigned int cmd, } case VFIO_DEVICE_GET_REGION_INFO: { - struct vfio_region_info info; - u16 cap_type_id = 0; - void *cap_type = NULL; + struct vfio_region_info_ext info; - minsz = offsetofend(struct vfio_region_info, offset); + minsz = offsetofend(typeof(info), base.offset); if (copy_from_user(&info, (void __user *)arg, minsz)) return -EFAULT; - if (info.argsz < minsz) + outsz = info.base.argsz; + if (outsz < minsz) + return -EINVAL; + if (outsz > sizeof(info)) return -EINVAL; - ret = mbochs_get_region_info(mdev, &info, &cap_type_id, - &cap_type); + ret = mbochs_get_region_info(mdev, &info); if (ret) return ret; - if (copy_to_user((void __user *)arg, &info, minsz)) + if (copy_to_user((void __user *)arg, &info, outsz)) return -EFAULT; return 0; @@ -1148,7 +1246,7 @@ static long mbochs_ioctl(struct mdev_device *mdev, unsigned int cmd, return -EFAULT; if ((info.argsz < minsz) || - (info.index >= mdev_state->dev_info.num_irqs)) + (info.index >= VFIO_PCI_NUM_IRQS)) return -EINVAL; ret = mbochs_get_irq_info(mdev, &info); diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index ca21a35fa244..bb015551c2d9 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -140,17 +140,9 @@ cc-option-yn = $(call try-run,\ cc-disable-warning = $(call try-run,\ $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1))) -# cc-name -# Expands to either gcc or clang -cc-name = $(shell $(CC) -v 2>&1 | grep -q "clang version" && echo clang || echo gcc) - # cc-version cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC)) -# cc-fullversion -cc-fullversion = $(shell $(CONFIG_SHELL) \ - $(srctree)/scripts/gcc-version.sh -p $(CC)) - # cc-ifversion # Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4)) diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn index 24b2fb1d1297..768306add591 100644 --- a/scripts/Makefile.extrawarn +++ b/scripts/Makefile.extrawarn @@ -29,6 +29,7 @@ warning-1 += $(call cc-option, -Wmissing-include-dirs) warning-1 += $(call cc-option, -Wunused-but-set-variable) warning-1 += $(call cc-option, -Wunused-const-variable) warning-1 += $(call cc-option, -Wpacked-not-aligned) +warning-1 += $(call cc-option, -Wstringop-truncation) warning-1 += $(call cc-disable-warning, missing-field-initializers) warning-1 += $(call cc-disable-warning, 
sign-compare) @@ -64,7 +65,7 @@ endif KBUILD_CFLAGS += $(warning) else -ifeq ($(cc-name),clang) +ifdef CONFIG_CC_IS_CLANG KBUILD_CFLAGS += $(call cc-disable-warning, initializer-overrides) KBUILD_CFLAGS += $(call cc-disable-warning, unused-value) KBUILD_CFLAGS += $(call cc-disable-warning, format) diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins index 0a482f341576..46c5c6809806 100644 --- a/scripts/Makefile.gcc-plugins +++ b/scripts/Makefile.gcc-plugins @@ -26,6 +26,16 @@ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_RANDSTRUCT) \ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_RANDSTRUCT_PERFORMANCE) \ += -fplugin-arg-randomize_layout_plugin-performance-mode +gcc-plugin-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak_plugin.so +gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \ + += -DSTACKLEAK_PLUGIN +gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \ + += -fplugin-arg-stackleak_plugin-track-min-size=$(CONFIG_STACKLEAK_TRACK_MIN_SIZE) +ifdef CONFIG_GCC_PLUGIN_STACKLEAK + DISABLE_STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-disable +endif +export DISABLE_STACKLEAK_PLUGIN + # All the plugin CFLAGS are collected here in case a build target needs to # filter them out of the KBUILD_CFLAGS. GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y)) diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig index cb0c889e13aa..0d5c799688f0 100644 --- a/scripts/gcc-plugins/Kconfig +++ b/scripts/gcc-plugins/Kconfig @@ -139,4 +139,55 @@ config GCC_PLUGIN_RANDSTRUCT_PERFORMANCE in structures. This reduces the performance hit of RANDSTRUCT at the cost of weakened randomization. +config GCC_PLUGIN_STACKLEAK + bool "Erase the kernel stack before returning from syscalls" + depends on GCC_PLUGINS + depends on HAVE_ARCH_STACKLEAK + help + This option makes the kernel erase the kernel stack before + returning from system calls. That reduces the information which + kernel stack leak bugs can reveal and blocks some uninitialized + stack variable attacks. + + The tradeoff is the performance impact: on a single CPU system kernel + compilation sees a 1% slowdown, other systems and workloads may vary + and you are advised to test this feature on your expected workload + before deploying it. + + This plugin was ported from grsecurity/PaX. More information at: + * https://grsecurity.net/ + * https://pax.grsecurity.net/ + +config STACKLEAK_TRACK_MIN_SIZE + int "Minimum stack frame size of functions tracked by STACKLEAK" + default 100 + range 0 4096 + depends on GCC_PLUGIN_STACKLEAK + help + The STACKLEAK gcc plugin instruments the kernel code for tracking + the lowest border of the kernel stack (and for some other purposes). + It inserts the stackleak_track_stack() call for the functions with + a stack frame size greater than or equal to this parameter. + If unsure, leave the default value 100. + +config STACKLEAK_METRICS + bool "Show STACKLEAK metrics in the /proc file system" + depends on GCC_PLUGIN_STACKLEAK + depends on PROC_FS + help + If this is set, STACKLEAK metrics for every task are available in + the /proc file system. In particular, /proc/<pid>/stack_depth + shows the maximum kernel stack consumption for the current and + previous syscalls. Although this information is not precise, it + can be useful for estimating the STACKLEAK performance impact for + your workloads. 
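For context on the STACKLEAK options above, a minimal sketch of what the instrumentation is conceptually equivalent to for a function whose stack frame is at least CONFIG_STACKLEAK_TRACK_MIN_SIZE bytes (or that calls alloca()). The function name, its body and the explicit call are illustrative assumptions only; in reality the call is emitted automatically by the GIMPLE pass in stackleak_plugin.c further down, and stackleak_track_stack() is the tracking hook provided by the kernel so that the touched stack region can be erased on syscall exit.

void stackleak_track_stack(void);	/* kernel-provided tracking hook */

void example_large_frame(unsigned char *out, unsigned int n)
{
	unsigned char scratch[256];	/* frame >= track-min-size bytes */
	unsigned int i;

	stackleak_track_stack();	/* plugin-inserted: records the lowest
					 * stack pointer reached by this call
					 * chain for later erasing */

	for (i = 0; i < n && i < sizeof(scratch); i++)
		scratch[i] = out[i] ^ 0x5a;
	for (i = 0; i < n && i < sizeof(scratch); i++)
		out[i] = scratch[i];
}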
+ +config STACKLEAK_RUNTIME_DISABLE + bool "Allow runtime disabling of kernel stack erasing" + depends on GCC_PLUGIN_STACKLEAK + help + This option provides 'stack_erasing' sysctl, which can be used in + runtime to control kernel stack erasing for kernels built with + CONFIG_GCC_PLUGIN_STACKLEAK. + endif diff --git a/scripts/gcc-plugins/stackleak_plugin.c b/scripts/gcc-plugins/stackleak_plugin.c new file mode 100644 index 000000000000..2f48da98b5d4 --- /dev/null +++ b/scripts/gcc-plugins/stackleak_plugin.c @@ -0,0 +1,427 @@ +/* + * Copyright 2011-2017 by the PaX Team <pageexec@freemail.hu> + * Modified by Alexander Popov <alex.popov@linux.com> + * Licensed under the GPL v2 + * + * Note: the choice of the license means that the compilation process is + * NOT 'eligible' as defined by gcc's library exception to the GPL v3, + * but for the kernel it doesn't matter since it doesn't link against + * any of the gcc libraries + * + * This gcc plugin is needed for tracking the lowest border of the kernel stack. + * It instruments the kernel code inserting stackleak_track_stack() calls: + * - after alloca(); + * - for the functions with a stack frame size greater than or equal + * to the "track-min-size" plugin parameter. + * + * This plugin is ported from grsecurity/PaX. For more information see: + * https://grsecurity.net/ + * https://pax.grsecurity.net/ + * + * Debugging: + * - use fprintf() to stderr, debug_generic_expr(), debug_gimple_stmt(), + * print_rtl() and print_simple_rtl(); + * - add "-fdump-tree-all -fdump-rtl-all" to the plugin CFLAGS in + * Makefile.gcc-plugins to see the verbose dumps of the gcc passes; + * - use gcc -E to understand the preprocessing shenanigans; + * - use gcc with enabled CFG/GIMPLE/SSA verification (--enable-checking). + */ + +#include "gcc-common.h" + +__visible int plugin_is_GPL_compatible; + +static int track_frame_size = -1; +static const char track_function[] = "stackleak_track_stack"; + +/* + * Mark these global variables (roots) for gcc garbage collector since + * they point to the garbage-collected memory. + */ +static GTY(()) tree track_function_decl; + +static struct plugin_info stackleak_plugin_info = { + .version = "201707101337", + .help = "track-min-size=nn\ttrack stack for functions with a stack frame size >= nn bytes\n" + "disable\t\tdo not activate the plugin\n" +}; + +static void stackleak_add_track_stack(gimple_stmt_iterator *gsi, bool after) +{ + gimple stmt; + gcall *stackleak_track_stack; + cgraph_node_ptr node; + int frequency; + basic_block bb; + + /* Insert call to void stackleak_track_stack(void) */ + stmt = gimple_build_call(track_function_decl, 0); + stackleak_track_stack = as_a_gcall(stmt); + if (after) { + gsi_insert_after(gsi, stackleak_track_stack, + GSI_CONTINUE_LINKING); + } else { + gsi_insert_before(gsi, stackleak_track_stack, GSI_SAME_STMT); + } + + /* Update the cgraph */ + bb = gimple_bb(stackleak_track_stack); + node = cgraph_get_create_node(track_function_decl); + gcc_assert(node); + frequency = compute_call_stmt_bb_frequency(current_function_decl, bb); + cgraph_create_edge(cgraph_get_node(current_function_decl), node, + stackleak_track_stack, bb->count, frequency); +} + +static bool is_alloca(gimple stmt) +{ + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA)) + return true; + +#if BUILDING_GCC_VERSION >= 4007 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN)) + return true; +#endif + + return false; +} + +/* + * Work with the GIMPLE representation of the code. 
Insert the + * stackleak_track_stack() call after alloca() and into the beginning + * of the function if it is not instrumented. + */ +static unsigned int stackleak_instrument_execute(void) +{ + basic_block bb, entry_bb; + bool prologue_instrumented = false, is_leaf = true; + gimple_stmt_iterator gsi; + + /* + * ENTRY_BLOCK_PTR is a basic block which represents possible entry + * point of a function. This block does not contain any code and + * has a CFG edge to its successor. + */ + gcc_assert(single_succ_p(ENTRY_BLOCK_PTR_FOR_FN(cfun))); + entry_bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun)); + + /* + * Loop through the GIMPLE statements in each of cfun basic blocks. + * cfun is a global variable which represents the function that is + * currently processed. + */ + FOR_EACH_BB_FN(bb, cfun) { + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { + gimple stmt; + + stmt = gsi_stmt(gsi); + + /* Leaf function is a function which makes no calls */ + if (is_gimple_call(stmt)) + is_leaf = false; + + if (!is_alloca(stmt)) + continue; + + /* Insert stackleak_track_stack() call after alloca() */ + stackleak_add_track_stack(&gsi, true); + if (bb == entry_bb) + prologue_instrumented = true; + } + } + + if (prologue_instrumented) + return 0; + + /* + * Special cases to skip the instrumentation. + * + * Taking the address of static inline functions materializes them, + * but we mustn't instrument some of them as the resulting stack + * alignment required by the function call ABI will break other + * assumptions regarding the expected (but not otherwise enforced) + * register clobbering ABI. + * + * Case in point: native_save_fl on amd64 when optimized for size + * clobbers rdx if it were instrumented here. + * + * TODO: any more special cases? + */ + if (is_leaf && + !TREE_PUBLIC(current_function_decl) && + DECL_DECLARED_INLINE_P(current_function_decl)) { + return 0; + } + + if (is_leaf && + !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), + "_paravirt_", 10)) { + return 0; + } + + /* Insert stackleak_track_stack() call at the function beginning */ + bb = entry_bb; + if (!single_pred_p(bb)) { + /* gcc_assert(bb_loop_depth(bb) || + (bb->flags & BB_IRREDUCIBLE_LOOP)); */ + split_edge(single_succ_edge(ENTRY_BLOCK_PTR_FOR_FN(cfun))); + gcc_assert(single_succ_p(ENTRY_BLOCK_PTR_FOR_FN(cfun))); + bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun)); + } + gsi = gsi_after_labels(bb); + stackleak_add_track_stack(&gsi, false); + + return 0; +} + +static bool large_stack_frame(void) +{ +#if BUILDING_GCC_VERSION >= 8000 + return maybe_ge(get_frame_size(), track_frame_size); +#else + return (get_frame_size() >= track_frame_size); +#endif +} + +/* + * Work with the RTL representation of the code. + * Remove the unneeded stackleak_track_stack() calls from the functions + * which don't call alloca() and don't have a large enough stack frame size. + */ +static unsigned int stackleak_cleanup_execute(void) +{ + rtx_insn *insn, *next; + + if (cfun->calls_alloca) + return 0; + + if (large_stack_frame()) + return 0; + + /* + * Find stackleak_track_stack() calls. Loop through the chain of insns, + * which is an RTL representation of the code for a function. 
+ * + * The example of a matching insn: + * (call_insn 8 4 10 2 (call (mem (symbol_ref ("stackleak_track_stack") + * [flags 0x41] <function_decl 0x7f7cd3302a80 stackleak_track_stack>) + * [0 stackleak_track_stack S1 A8]) (0)) 675 {*call} (expr_list + * (symbol_ref ("stackleak_track_stack") [flags 0x41] <function_decl + * 0x7f7cd3302a80 stackleak_track_stack>) (expr_list (0) (nil))) (nil)) + */ + for (insn = get_insns(); insn; insn = next) { + rtx body; + + next = NEXT_INSN(insn); + + /* Check the expression code of the insn */ + if (!CALL_P(insn)) + continue; + + /* + * Check the expression code of the insn body, which is an RTL + * Expression (RTX) describing the side effect performed by + * that insn. + */ + body = PATTERN(insn); + + if (GET_CODE(body) == PARALLEL) + body = XVECEXP(body, 0, 0); + + if (GET_CODE(body) != CALL) + continue; + + /* + * Check the first operand of the call expression. It should + * be a mem RTX describing the needed subroutine with a + * symbol_ref RTX. + */ + body = XEXP(body, 0); + if (GET_CODE(body) != MEM) + continue; + + body = XEXP(body, 0); + if (GET_CODE(body) != SYMBOL_REF) + continue; + + if (SYMBOL_REF_DECL(body) != track_function_decl) + continue; + + /* Delete the stackleak_track_stack() call */ + delete_insn_and_edges(insn); +#if BUILDING_GCC_VERSION >= 4007 && BUILDING_GCC_VERSION < 8000 + if (GET_CODE(next) == NOTE && + NOTE_KIND(next) == NOTE_INSN_CALL_ARG_LOCATION) { + insn = next; + next = NEXT_INSN(insn); + delete_insn_and_edges(insn); + } +#endif + } + + return 0; +} + +static bool stackleak_gate(void) +{ + tree section; + + section = lookup_attribute("section", + DECL_ATTRIBUTES(current_function_decl)); + if (section && TREE_VALUE(section)) { + section = TREE_VALUE(TREE_VALUE(section)); + + if (!strncmp(TREE_STRING_POINTER(section), ".init.text", 10)) + return false; + if (!strncmp(TREE_STRING_POINTER(section), ".devinit.text", 13)) + return false; + if (!strncmp(TREE_STRING_POINTER(section), ".cpuinit.text", 13)) + return false; + if (!strncmp(TREE_STRING_POINTER(section), ".meminit.text", 13)) + return false; + } + + return track_frame_size >= 0; +} + +/* Build the function declaration for stackleak_track_stack() */ +static void stackleak_start_unit(void *gcc_data __unused, + void *user_data __unused) +{ + tree fntype; + + /* void stackleak_track_stack(void) */ + fntype = build_function_type_list(void_type_node, NULL_TREE); + track_function_decl = build_fn_decl(track_function, fntype); + DECL_ASSEMBLER_NAME(track_function_decl); /* for LTO */ + TREE_PUBLIC(track_function_decl) = 1; + TREE_USED(track_function_decl) = 1; + DECL_EXTERNAL(track_function_decl) = 1; + DECL_ARTIFICIAL(track_function_decl) = 1; + DECL_PRESERVE_P(track_function_decl) = 1; +} + +/* + * Pass gate function is a predicate function that gets executed before the + * corresponding pass. If the return value is 'true' the pass gets executed, + * otherwise, it is skipped. 
+ */ +static bool stackleak_instrument_gate(void) +{ + return stackleak_gate(); +} + +#define PASS_NAME stackleak_instrument +#define PROPERTIES_REQUIRED PROP_gimple_leh | PROP_cfg +#define TODO_FLAGS_START TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts +#define TODO_FLAGS_FINISH TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func \ + | TODO_update_ssa | TODO_rebuild_cgraph_edges +#include "gcc-generate-gimple-pass.h" + +static bool stackleak_cleanup_gate(void) +{ + return stackleak_gate(); +} + +#define PASS_NAME stackleak_cleanup +#define TODO_FLAGS_FINISH TODO_dump_func +#include "gcc-generate-rtl-pass.h" + +/* + * Every gcc plugin exports a plugin_init() function that is called right + * after the plugin is loaded. This function is responsible for registering + * the plugin callbacks and doing other required initialization. + */ +__visible int plugin_init(struct plugin_name_args *plugin_info, + struct plugin_gcc_version *version) +{ + const char * const plugin_name = plugin_info->base_name; + const int argc = plugin_info->argc; + const struct plugin_argument * const argv = plugin_info->argv; + int i = 0; + + /* Extra GGC root tables describing our GTY-ed data */ + static const struct ggc_root_tab gt_ggc_r_gt_stackleak[] = { + { + .base = &track_function_decl, + .nelt = 1, + .stride = sizeof(track_function_decl), + .cb = >_ggc_mx_tree_node, + .pchw = >_pch_nx_tree_node + }, + LAST_GGC_ROOT_TAB + }; + + /* + * The stackleak_instrument pass should be executed before the + * "optimized" pass, which is the control flow graph cleanup that is + * performed just before expanding gcc trees to the RTL. In former + * versions of the plugin this new pass was inserted before the + * "tree_profile" pass, which is currently called "profile". + */ + PASS_INFO(stackleak_instrument, "optimized", 1, + PASS_POS_INSERT_BEFORE); + + /* + * The stackleak_cleanup pass should be executed after the + * "reload" pass, when the stack frame size is final. + */ + PASS_INFO(stackleak_cleanup, "reload", 1, PASS_POS_INSERT_AFTER); + + if (!plugin_default_version_check(version, &gcc_version)) { + error(G_("incompatible gcc/plugin versions")); + return 1; + } + + /* Parse the plugin arguments */ + for (i = 0; i < argc; i++) { + if (!strcmp(argv[i].key, "disable")) + return 0; + + if (!strcmp(argv[i].key, "track-min-size")) { + if (!argv[i].value) { + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), + plugin_name, argv[i].key); + return 1; + } + + track_frame_size = atoi(argv[i].value); + if (track_frame_size < 0) { + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), + plugin_name, argv[i].key, argv[i].value); + return 1; + } + } else { + error(G_("unknown option '-fplugin-arg-%s-%s'"), + plugin_name, argv[i].key); + return 1; + } + } + + /* Give the information about the plugin */ + register_callback(plugin_name, PLUGIN_INFO, NULL, + &stackleak_plugin_info); + + /* Register to be called before processing a translation unit */ + register_callback(plugin_name, PLUGIN_START_UNIT, + &stackleak_start_unit, NULL); + + /* Register an extra GCC garbage collector (GGC) root table */ + register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, + (void *)>_ggc_r_gt_stackleak); + + /* + * Hook into the Pass Manager to register new gcc passes. + * + * The stack frame size info is available only at the last RTL pass, + * when it's too late to insert complex code like a function call. 
+ * So we register two gcc passes to instrument every function at first + * and remove the unneeded instrumentation later. + */ + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, + &stackleak_instrument_pass_info); + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, + &stackleak_cleanup_pass_info); + + return 0; +} diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile index 67ed9f6ccdf8..63b609243d03 100644 --- a/scripts/kconfig/Makefile +++ b/scripts/kconfig/Makefile @@ -68,21 +68,7 @@ PHONY += $(simple-targets) $(simple-targets): $(obj)/conf $< $(silent) --$@ $(Kconfig) -PHONY += oldnoconfig silentoldconfig savedefconfig defconfig - -# oldnoconfig is an alias of olddefconfig, because people already are dependent -# on its behavior (sets new symbols to their default value but not 'n') with the -# counter-intuitive name. -oldnoconfig: olddefconfig - @echo " WARNING: \"oldnoconfig\" target will be removed after Linux 4.19" - @echo " Please use \"olddefconfig\" instead, which is an alias." - -# We do not expect manual invokcation of "silentoldcofig" (or "syncconfig"). -silentoldconfig: syncconfig - @echo " WARNING: \"silentoldconfig\" has been renamed to \"syncconfig\"" - @echo " and is now an internal implementation detail." - @echo " What you want is probably \"oldconfig\"." - @echo " \"silentoldconfig\" will be removed after Linux 4.19" +PHONY += savedefconfig defconfig savedefconfig: $(obj)/conf $< $(silent) --$@=defconfig $(Kconfig) diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c index 7b2b37260669..98e0c7a34699 100644 --- a/scripts/kconfig/conf.c +++ b/scripts/kconfig/conf.c @@ -460,12 +460,6 @@ static struct option long_opts[] = { {"randconfig", no_argument, NULL, randconfig}, {"listnewconfig", no_argument, NULL, listnewconfig}, {"olddefconfig", no_argument, NULL, olddefconfig}, - /* - * oldnoconfig is an alias of olddefconfig, because people already - * are dependent on its behavior(sets new symbols to their default - * value but not 'n') with the counter-intuitive name. - */ - {"oldnoconfig", no_argument, NULL, olddefconfig}, {NULL, 0, NULL, 0} }; @@ -480,7 +474,6 @@ static void conf_usage(const char *progname) printf(" --syncconfig Similar to oldconfig but generates configuration in\n" " include/{generated/,config/}\n"); printf(" --olddefconfig Same as oldconfig but sets new symbols to their default value\n"); - printf(" --oldnoconfig An alias of olddefconfig\n"); printf(" --defconfig <file> New config with default defined in <file>\n"); printf(" --savedefconfig <file> Save the minimal current configuration to <file>\n"); printf(" --allnoconfig New config where all options are answered with no\n"); diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh index 67d131447631..da66e7742282 100755 --- a/scripts/kconfig/merge_config.sh +++ b/scripts/kconfig/merge_config.sh @@ -33,12 +33,15 @@ usage() { echo " -n use allnoconfig instead of alldefconfig" echo " -r list redundant entries when merging fragments" echo " -O dir to put generated output files. Consider setting \$KCONFIG_CONFIG instead." + echo + echo "Used prefix: '$CONFIG_PREFIX'. You can redefine it with \$CONFIG_ environment variable." } RUNMAKE=true ALLTARGET=alldefconfig WARNREDUN=false OUTPUT=. +CONFIG_PREFIX=${CONFIG_-CONFIG_} while true; do case $1 in @@ -99,7 +102,8 @@ if [ ! 
-r "$INITFILE" ]; then fi MERGE_LIST=$* -SED_CONFIG_EXP="s/^\(# \)\{0,1\}\(CONFIG_[a-zA-Z0-9_]*\)[= ].*/\2/p" +SED_CONFIG_EXP="s/^\(# \)\{0,1\}\(${CONFIG_PREFIX}[a-zA-Z0-9_]*\)[= ].*/\2/p" + TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX) echo "Using $INITFILE as base" diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c index e09fe4d7307c..8963203319ea 100644 --- a/security/apparmor/apparmorfs.c +++ b/security/apparmor/apparmorfs.c @@ -1742,7 +1742,7 @@ static int ns_rmdir_op(struct inode *dir, struct dentry *dentry) if (error) return error; - parent = aa_get_ns(dir->i_private); + parent = aa_get_ns(dir->i_private); /* rmdir calls the generic securityfs functions to remove files * from the apparmor dir. It is up to the apparmor ns locking * to avoid races. diff --git a/security/apparmor/file.c b/security/apparmor/file.c index 4285943f7260..d0afed9ebd0e 100644 --- a/security/apparmor/file.c +++ b/security/apparmor/file.c @@ -496,7 +496,7 @@ static void update_file_ctx(struct aa_file_ctx *fctx, struct aa_label *label, /* update caching of label on file_ctx */ spin_lock(&fctx->lock); old = rcu_dereference_protected(fctx->label, - spin_is_locked(&fctx->lock)); + lockdep_is_held(&fctx->lock)); l = aa_label_merge(old, label, GFP_ATOMIC); if (l) { if (l != old) { diff --git a/security/apparmor/include/cred.h b/security/apparmor/include/cred.h index e287b7d0d4be..265ae6641a06 100644 --- a/security/apparmor/include/cred.h +++ b/security/apparmor/include/cred.h @@ -151,6 +151,8 @@ static inline struct aa_label *begin_current_label_crit_section(void) { struct aa_label *label = aa_current_raw_label(); + might_sleep(); + if (label_is_stale(label)) { label = aa_get_newest_label(label); if (aa_replace_current_label(label) == 0) diff --git a/security/apparmor/include/net.h b/security/apparmor/include/net.h index ec7228e857a9..7334ac966d01 100644 --- a/security/apparmor/include/net.h +++ b/security/apparmor/include/net.h @@ -83,6 +83,13 @@ struct aa_sk_ctx { __e; \ }) +struct aa_secmark { + u8 audit; + u8 deny; + u32 secid; + char *label; +}; + extern struct aa_sfs_entry aa_sfs_entry_network[]; void audit_net_cb(struct audit_buffer *ab, void *va); @@ -103,4 +110,7 @@ int aa_sk_perm(const char *op, u32 request, struct sock *sk); int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request, struct socket *sock); +int apparmor_secmark_check(struct aa_label *label, char *op, u32 request, + u32 secid, struct sock *sk); + #endif /* __AA_NET_H */ diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h index ab64c6b5db5a..8e6707c837be 100644 --- a/security/apparmor/include/policy.h +++ b/security/apparmor/include/policy.h @@ -155,6 +155,9 @@ struct aa_profile { struct aa_rlimit rlimits; + int secmark_count; + struct aa_secmark *secmark; + struct aa_loaddata *rawdata; unsigned char *hash; char *dirname; diff --git a/security/apparmor/include/secid.h b/security/apparmor/include/secid.h index dee6fa3b6081..fa2062711b63 100644 --- a/security/apparmor/include/secid.h +++ b/security/apparmor/include/secid.h @@ -22,6 +22,9 @@ struct aa_label; /* secid value that will not be allocated */ #define AA_SECID_INVALID 0 +/* secid value that matches any other secid */ +#define AA_SECID_WILDCARD 1 + struct aa_label *aa_secid_to_label(u32 secid); int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid); diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c index 
974affe50531..76491e7f4177 100644 --- a/security/apparmor/lib.c +++ b/security/apparmor/lib.c @@ -90,10 +90,12 @@ const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name, const char *end = fqname + n; const char *name = skipn_spaces(fqname, n); - if (!name) - return NULL; *ns_name = NULL; *ns_len = 0; + + if (!name) + return NULL; + if (name[0] == ':') { char *split = strnchr(&name[1], end - &name[1], ':'); *ns_name = skipn_spaces(&name[1], end - &name[1]); diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index aa35939443c4..42446a216f3b 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -23,6 +23,8 @@ #include <linux/sysctl.h> #include <linux/audit.h> #include <linux/user_namespace.h> +#include <linux/netfilter_ipv4.h> +#include <linux/netfilter_ipv6.h> #include <net/sock.h> #include "include/apparmor.h" @@ -114,13 +116,13 @@ static int apparmor_ptrace_access_check(struct task_struct *child, struct aa_label *tracer, *tracee; int error; - tracer = begin_current_label_crit_section(); + tracer = __begin_current_label_crit_section(); tracee = aa_get_task_label(child); error = aa_may_ptrace(tracer, tracee, (mode & PTRACE_MODE_READ) ? AA_PTRACE_READ : AA_PTRACE_TRACE); aa_put_label(tracee); - end_current_label_crit_section(tracer); + __end_current_label_crit_section(tracer); return error; } @@ -130,11 +132,11 @@ static int apparmor_ptrace_traceme(struct task_struct *parent) struct aa_label *tracer, *tracee; int error; - tracee = begin_current_label_crit_section(); + tracee = __begin_current_label_crit_section(); tracer = aa_get_task_label(parent); error = aa_may_ptrace(tracer, tracee, AA_PTRACE_TRACE); aa_put_label(tracer); - end_current_label_crit_section(tracee); + __end_current_label_crit_section(tracee); return error; } @@ -1020,6 +1022,7 @@ static int apparmor_socket_shutdown(struct socket *sock, int how) return aa_sock_perm(OP_SHUTDOWN, AA_MAY_SHUTDOWN, sock); } +#ifdef CONFIG_NETWORK_SECMARK /** * apparmor_socket_sock_recv_skb - check perms before associating skb to sk * @@ -1030,8 +1033,15 @@ static int apparmor_socket_shutdown(struct socket *sock, int how) */ static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) { - return 0; + struct aa_sk_ctx *ctx = SK_CTX(sk); + + if (!skb->secmark) + return 0; + + return apparmor_secmark_check(ctx->label, OP_RECVMSG, AA_MAY_RECEIVE, + skb->secmark, sk); } +#endif static struct aa_label *sk_peer_label(struct sock *sk) @@ -1126,6 +1136,20 @@ static void apparmor_sock_graft(struct sock *sk, struct socket *parent) ctx->label = aa_get_current_label(); } +#ifdef CONFIG_NETWORK_SECMARK +static int apparmor_inet_conn_request(struct sock *sk, struct sk_buff *skb, + struct request_sock *req) +{ + struct aa_sk_ctx *ctx = SK_CTX(sk); + + if (!skb->secmark) + return 0; + + return apparmor_secmark_check(ctx->label, OP_CONNECT, AA_MAY_CONNECT, + skb->secmark, sk); +} +#endif + static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = { LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check), LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme), @@ -1177,12 +1201,17 @@ static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = { LSM_HOOK_INIT(socket_getsockopt, apparmor_socket_getsockopt), LSM_HOOK_INIT(socket_setsockopt, apparmor_socket_setsockopt), LSM_HOOK_INIT(socket_shutdown, apparmor_socket_shutdown), +#ifdef CONFIG_NETWORK_SECMARK LSM_HOOK_INIT(socket_sock_rcv_skb, apparmor_socket_sock_rcv_skb), +#endif 
LSM_HOOK_INIT(socket_getpeersec_stream, apparmor_socket_getpeersec_stream), LSM_HOOK_INIT(socket_getpeersec_dgram, apparmor_socket_getpeersec_dgram), LSM_HOOK_INIT(sock_graft, apparmor_sock_graft), +#ifdef CONFIG_NETWORK_SECMARK + LSM_HOOK_INIT(inet_conn_request, apparmor_inet_conn_request), +#endif LSM_HOOK_INIT(cred_alloc_blank, apparmor_cred_alloc_blank), LSM_HOOK_INIT(cred_free, apparmor_cred_free), @@ -1538,6 +1567,97 @@ static inline int apparmor_init_sysctl(void) } #endif /* CONFIG_SYSCTL */ +#if defined(CONFIG_NETFILTER) && defined(CONFIG_NETWORK_SECMARK) +static unsigned int apparmor_ip_postroute(void *priv, + struct sk_buff *skb, + const struct nf_hook_state *state) +{ + struct aa_sk_ctx *ctx; + struct sock *sk; + + if (!skb->secmark) + return NF_ACCEPT; + + sk = skb_to_full_sk(skb); + if (sk == NULL) + return NF_ACCEPT; + + ctx = SK_CTX(sk); + if (!apparmor_secmark_check(ctx->label, OP_SENDMSG, AA_MAY_SEND, + skb->secmark, sk)) + return NF_ACCEPT; + + return NF_DROP_ERR(-ECONNREFUSED); + +} + +static unsigned int apparmor_ipv4_postroute(void *priv, + struct sk_buff *skb, + const struct nf_hook_state *state) +{ + return apparmor_ip_postroute(priv, skb, state); +} + +static unsigned int apparmor_ipv6_postroute(void *priv, + struct sk_buff *skb, + const struct nf_hook_state *state) +{ + return apparmor_ip_postroute(priv, skb, state); +} + +static const struct nf_hook_ops apparmor_nf_ops[] = { + { + .hook = apparmor_ipv4_postroute, + .pf = NFPROTO_IPV4, + .hooknum = NF_INET_POST_ROUTING, + .priority = NF_IP_PRI_SELINUX_FIRST, + }, +#if IS_ENABLED(CONFIG_IPV6) + { + .hook = apparmor_ipv6_postroute, + .pf = NFPROTO_IPV6, + .hooknum = NF_INET_POST_ROUTING, + .priority = NF_IP6_PRI_SELINUX_FIRST, + }, +#endif +}; + +static int __net_init apparmor_nf_register(struct net *net) +{ + int ret; + + ret = nf_register_net_hooks(net, apparmor_nf_ops, + ARRAY_SIZE(apparmor_nf_ops)); + return ret; +} + +static void __net_exit apparmor_nf_unregister(struct net *net) +{ + nf_unregister_net_hooks(net, apparmor_nf_ops, + ARRAY_SIZE(apparmor_nf_ops)); +} + +static struct pernet_operations apparmor_net_ops = { + .init = apparmor_nf_register, + .exit = apparmor_nf_unregister, +}; + +static int __init apparmor_nf_ip_init(void) +{ + int err; + + if (!apparmor_enabled) + return 0; + + err = register_pernet_subsys(&apparmor_net_ops); + if (err) + panic("Apparmor: register_pernet_subsys: error %d\n", err); + + return 0; +} +__initcall(apparmor_nf_ip_init); +#endif + static int __init apparmor_init(void) { int error; diff --git a/security/apparmor/net.c b/security/apparmor/net.c index bb24cfa0a164..c07fde444792 100644 --- a/security/apparmor/net.c +++ b/security/apparmor/net.c @@ -18,6 +18,7 @@ #include "include/label.h" #include "include/net.h" #include "include/policy.h" +#include "include/secid.h" #include "net_names.h" @@ -146,17 +147,20 @@ int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family, static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request, struct sock *sk) { - struct aa_profile *profile; - DEFINE_AUDIT_SK(sa, op, sk); + int error = 0; AA_BUG(!label); AA_BUG(!sk); - if (unconfined(label)) - return 0; + if (!unconfined(label)) { + struct aa_profile *profile; + DEFINE_AUDIT_SK(sa, op, sk); - return fn_for_each_confined(label, profile, - aa_profile_af_sk_perm(profile, &sa, request, sk)); + error = fn_for_each_confined(label, profile, + aa_profile_af_sk_perm(profile, &sa, request, sk)); + } + + return error; } int aa_sk_perm(const char *op, u32 
request, struct sock *sk) @@ -185,3 +189,70 @@ int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request, return aa_label_sk_perm(label, op, request, sock->sk); } + +#ifdef CONFIG_NETWORK_SECMARK +static int apparmor_secmark_init(struct aa_secmark *secmark) +{ + struct aa_label *label; + + if (secmark->label[0] == '*') { + secmark->secid = AA_SECID_WILDCARD; + return 0; + } + + label = aa_label_strn_parse(&root_ns->unconfined->label, + secmark->label, strlen(secmark->label), + GFP_ATOMIC, false, false); + + if (IS_ERR(label)) + return PTR_ERR(label); + + secmark->secid = label->secid; + + return 0; +} + +static int aa_secmark_perm(struct aa_profile *profile, u32 request, u32 secid, + struct common_audit_data *sa, struct sock *sk) +{ + int i, ret; + struct aa_perms perms = { }; + + if (profile->secmark_count == 0) + return 0; + + for (i = 0; i < profile->secmark_count; i++) { + if (!profile->secmark[i].secid) { + ret = apparmor_secmark_init(&profile->secmark[i]); + if (ret) + return ret; + } + + if (profile->secmark[i].secid == secid || + profile->secmark[i].secid == AA_SECID_WILDCARD) { + if (profile->secmark[i].deny) + perms.deny = ALL_PERMS_MASK; + else + perms.allow = ALL_PERMS_MASK; + + if (profile->secmark[i].audit) + perms.audit = ALL_PERMS_MASK; + } + } + + aa_apply_modes_to_perms(profile, &perms); + + return aa_check_perms(profile, &perms, request, sa, audit_net_cb); +} + +int apparmor_secmark_check(struct aa_label *label, char *op, u32 request, + u32 secid, struct sock *sk) +{ + struct aa_profile *profile; + DEFINE_AUDIT_SK(sa, op, sk); + + return fn_for_each_confined(label, profile, + aa_secmark_perm(profile, request, secid, + &sa, sk)); +} +#endif diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index 1590e2de4e84..df9c5890a878 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c @@ -231,6 +231,9 @@ void aa_free_profile(struct aa_profile *profile) for (i = 0; i < profile->xattr_count; i++) kzfree(profile->xattrs[i]); kzfree(profile->xattrs); + for (i = 0; i < profile->secmark_count; i++) + kzfree(profile->secmark[i].label); + kzfree(profile->secmark); kzfree(profile->dirname); aa_put_dfa(profile->xmatch); aa_put_dfa(profile->policy.dfa); diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c index 21cb384d712a..379682e2a8d5 100644 --- a/security/apparmor/policy_unpack.c +++ b/security/apparmor/policy_unpack.c @@ -292,6 +292,19 @@ fail: return 0; } +static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name) +{ + if (unpack_nameX(e, AA_U8, name)) { + if (!inbounds(e, sizeof(u8))) + return 0; + if (data) + *data = get_unaligned((u8 *)e->pos); + e->pos += sizeof(u8); + return 1; + } + return 0; +} + static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name) { if (unpack_nameX(e, AA_U32, name)) { @@ -529,6 +542,49 @@ fail: return 0; } +static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile) +{ + void *pos = e->pos; + int i, size; + + if (unpack_nameX(e, AA_STRUCT, "secmark")) { + size = unpack_array(e, NULL); + + profile->secmark = kcalloc(size, sizeof(struct aa_secmark), + GFP_KERNEL); + if (!profile->secmark) + goto fail; + + profile->secmark_count = size; + + for (i = 0; i < size; i++) { + if (!unpack_u8(e, &profile->secmark[i].audit, NULL)) + goto fail; + if (!unpack_u8(e, &profile->secmark[i].deny, NULL)) + goto fail; + if (!unpack_strdup(e, &profile->secmark[i].label, NULL)) + goto fail; + } + if (!unpack_nameX(e, AA_ARRAYEND, NULL)) + goto fail; + if 
(!unpack_nameX(e, AA_STRUCTEND, NULL)) + goto fail; + } + + return 1; + +fail: + if (profile->secmark) { + for (i = 0; i < size; i++) + kfree(profile->secmark[i].label); + kfree(profile->secmark); + profile->secmark_count = 0; + } + + e->pos = pos; + return 0; +} + static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile) { void *pos = e->pos; @@ -727,6 +783,11 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) goto fail; } + if (!unpack_secmark(e, profile)) { + info = "failed to unpack profile secmark rules"; + goto fail; + } + if (unpack_nameX(e, AA_STRUCT, "policydb")) { /* generic policy dfa - optional and may be NULL */ info = "failed to unpack policydb"; diff --git a/security/apparmor/secid.c b/security/apparmor/secid.c index 4ccec1bcf6f5..05373d9a3d6a 100644 --- a/security/apparmor/secid.c +++ b/security/apparmor/secid.c @@ -32,8 +32,7 @@ * secids - do not pin labels with a refcount. They rely on the label * properly updating/freeing them */ - -#define AA_FIRST_SECID 1 +#define AA_FIRST_SECID 2 static DEFINE_IDR(aa_secids); static DEFINE_SPINLOCK(secid_lock); diff --git a/security/keys/Makefile b/security/keys/Makefile index ef1581b337a3..9cef54064f60 100644 --- a/security/keys/Makefile +++ b/security/keys/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_SYSCTL) += sysctl.o obj-$(CONFIG_PERSISTENT_KEYRINGS) += persistent.o obj-$(CONFIG_KEY_DH_OPERATIONS) += dh.o +obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += keyctl_pkey.o # # Key types diff --git a/security/keys/compat.c b/security/keys/compat.c index e87c89c0177c..9482df601dc3 100644 --- a/security/keys/compat.c +++ b/security/keys/compat.c @@ -141,6 +141,24 @@ COMPAT_SYSCALL_DEFINE5(keyctl, u32, option, return keyctl_restrict_keyring(arg2, compat_ptr(arg3), compat_ptr(arg4)); + case KEYCTL_PKEY_QUERY: + if (arg3 != 0) + return -EINVAL; + return keyctl_pkey_query(arg2, + compat_ptr(arg4), + compat_ptr(arg5)); + + case KEYCTL_PKEY_ENCRYPT: + case KEYCTL_PKEY_DECRYPT: + case KEYCTL_PKEY_SIGN: + return keyctl_pkey_e_d_s(option, + compat_ptr(arg2), compat_ptr(arg3), + compat_ptr(arg4), compat_ptr(arg5)); + + case KEYCTL_PKEY_VERIFY: + return keyctl_pkey_verify(compat_ptr(arg2), compat_ptr(arg3), + compat_ptr(arg4), compat_ptr(arg5)); + default: return -EOPNOTSUPP; } diff --git a/security/keys/internal.h b/security/keys/internal.h index 9f8208dc0e55..74cb0ff42fed 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h @@ -298,6 +298,45 @@ static inline long compat_keyctl_dh_compute( #endif #endif +#ifdef CONFIG_ASYMMETRIC_KEY_TYPE +extern long keyctl_pkey_query(key_serial_t, + const char __user *, + struct keyctl_pkey_query __user *); + +extern long keyctl_pkey_verify(const struct keyctl_pkey_params __user *, + const char __user *, + const void __user *, const void __user *); + +extern long keyctl_pkey_e_d_s(int, + const struct keyctl_pkey_params __user *, + const char __user *, + const void __user *, void __user *); +#else +static inline long keyctl_pkey_query(key_serial_t id, + const char __user *_info, + struct keyctl_pkey_query __user *_res) +{ + return -EOPNOTSUPP; +} + +static inline long keyctl_pkey_verify(const struct keyctl_pkey_params __user *params, + const char __user *_info, + const void __user *_in, + const void __user *_in2) +{ + return -EOPNOTSUPP; +} + +static inline long keyctl_pkey_e_d_s(int op, + const struct keyctl_pkey_params __user *params, + const char __user *_info, + const void __user *_in, + void __user *_out) +{ + return -EOPNOTSUPP; +} 
+#endif + /* * Debugging key validation */ diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index 1ffe60bb2845..18619690ce77 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -1747,6 +1747,30 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3, (const char __user *) arg3, (const char __user *) arg4); + case KEYCTL_PKEY_QUERY: + if (arg3 != 0) + return -EINVAL; + return keyctl_pkey_query((key_serial_t)arg2, + (const char __user *)arg4, + (struct keyctl_pkey_query *)arg5); + + case KEYCTL_PKEY_ENCRYPT: + case KEYCTL_PKEY_DECRYPT: + case KEYCTL_PKEY_SIGN: + return keyctl_pkey_e_d_s( + option, + (const struct keyctl_pkey_params __user *)arg2, + (const char __user *)arg3, + (const void __user *)arg4, + (void __user *)arg5); + + case KEYCTL_PKEY_VERIFY: + return keyctl_pkey_verify( + (const struct keyctl_pkey_params __user *)arg2, + (const char __user *)arg3, + (const void __user *)arg4, + (const void __user *)arg5); + default: return -EOPNOTSUPP; } diff --git a/security/keys/keyctl_pkey.c b/security/keys/keyctl_pkey.c new file mode 100644 index 000000000000..783978842f13 --- /dev/null +++ b/security/keys/keyctl_pkey.c @@ -0,0 +1,323 @@ +/* Public-key operation keyctls + * + * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/key.h> +#include <linux/keyctl.h> +#include <linux/parser.h> +#include <linux/uaccess.h> +#include <keys/user-type.h> +#include "internal.h" + +static void keyctl_pkey_params_free(struct kernel_pkey_params *params) +{ + kfree(params->info); + key_put(params->key); +} + +enum { + Opt_err = -1, + Opt_enc, /* "enc=<encoding>" eg. "enc=oaep" */ + Opt_hash, /* "hash=<digest-name>" eg. "hash=sha1" */ +}; + +static const match_table_t param_keys = { + { Opt_enc, "enc=%s" }, + { Opt_hash, "hash=%s" }, + { Opt_err, NULL } +}; + +/* + * Parse the information string which consists of key=val pairs. + */ +static int keyctl_pkey_params_parse(struct kernel_pkey_params *params) +{ + unsigned long token_mask = 0; + substring_t args[MAX_OPT_ARGS]; + char *c = params->info, *p, *q; + int token; + + while ((p = strsep(&c, " \t"))) { + if (*p == '\0' || *p == ' ' || *p == '\t') + continue; + token = match_token(p, param_keys, args); + if (__test_and_set_bit(token, &token_mask)) + return -EINVAL; + q = args[0].from; + if (!q[0]) + return -EINVAL; + + switch (token) { + case Opt_enc: + params->encoding = q; + break; + + case Opt_hash: + params->hash_algo = q; + break; + + default: + return -EINVAL; + } + } + + return 0; +} + +/* + * Interpret parameters. Callers must always call the free function + * on params, even if an error is returned. 
+ */ +static int keyctl_pkey_params_get(key_serial_t id, + const char __user *_info, + struct kernel_pkey_params *params) +{ + key_ref_t key_ref; + void *p; + int ret; + + memset(params, 0, sizeof(*params)); + params->encoding = "raw"; + + p = strndup_user(_info, PAGE_SIZE); + if (IS_ERR(p)) + return PTR_ERR(p); + params->info = p; + + ret = keyctl_pkey_params_parse(params); + if (ret < 0) + return ret; + + key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH); + if (IS_ERR(key_ref)) + return PTR_ERR(key_ref); + params->key = key_ref_to_ptr(key_ref); + + if (!params->key->type->asym_query) + return -EOPNOTSUPP; + + return 0; +} + +/* + * Get parameters from userspace. Callers must always call the free function + * on params, even if an error is returned. + */ +static int keyctl_pkey_params_get_2(const struct keyctl_pkey_params __user *_params, + const char __user *_info, + int op, + struct kernel_pkey_params *params) +{ + struct keyctl_pkey_params uparams; + struct kernel_pkey_query info; + int ret; + + memset(params, 0, sizeof(*params)); + params->encoding = "raw"; + + if (copy_from_user(&uparams, _params, sizeof(uparams)) != 0) + return -EFAULT; + + ret = keyctl_pkey_params_get(uparams.key_id, _info, params); + if (ret < 0) + return ret; + + ret = params->key->type->asym_query(params, &info); + if (ret < 0) + return ret; + + switch (op) { + case KEYCTL_PKEY_ENCRYPT: + case KEYCTL_PKEY_DECRYPT: + if (uparams.in_len > info.max_enc_size || + uparams.out_len > info.max_dec_size) + return -EINVAL; + break; + case KEYCTL_PKEY_SIGN: + case KEYCTL_PKEY_VERIFY: + if (uparams.in_len > info.max_sig_size || + uparams.out_len > info.max_data_size) + return -EINVAL; + break; + default: + BUG(); + } + + params->in_len = uparams.in_len; + params->out_len = uparams.out_len; + return 0; +} + +/* + * Query information about an asymmetric key. + */ +long keyctl_pkey_query(key_serial_t id, + const char __user *_info, + struct keyctl_pkey_query __user *_res) +{ + struct kernel_pkey_params params; + struct kernel_pkey_query res; + long ret; + + memset(¶ms, 0, sizeof(params)); + + ret = keyctl_pkey_params_get(id, _info, ¶ms); + if (ret < 0) + goto error; + + ret = params.key->type->asym_query(¶ms, &res); + if (ret < 0) + goto error; + + ret = -EFAULT; + if (copy_to_user(_res, &res, sizeof(res)) == 0 && + clear_user(_res->__spare, sizeof(_res->__spare)) == 0) + ret = 0; + +error: + keyctl_pkey_params_free(¶ms); + return ret; +} + +/* + * Encrypt/decrypt/sign + * + * Encrypt data, decrypt data or sign data using a public key. + * + * _info is a string of supplementary information in key=val format. For + * instance, it might contain: + * + * "enc=pkcs1 hash=sha256" + * + * where enc= specifies the encoding and hash= selects the OID to go in that + * particular encoding if required. If enc= isn't supplied, it's assumed that + * the caller is supplying raw values. + * + * If successful, the amount of data written into the output buffer is + * returned. 
+ */ +long keyctl_pkey_e_d_s(int op, + const struct keyctl_pkey_params __user *_params, + const char __user *_info, + const void __user *_in, + void __user *_out) +{ + struct kernel_pkey_params params; + void *in, *out; + long ret; + + ret = keyctl_pkey_params_get_2(_params, _info, op, ¶ms); + if (ret < 0) + goto error_params; + + ret = -EOPNOTSUPP; + if (!params.key->type->asym_eds_op) + goto error_params; + + switch (op) { + case KEYCTL_PKEY_ENCRYPT: + params.op = kernel_pkey_encrypt; + break; + case KEYCTL_PKEY_DECRYPT: + params.op = kernel_pkey_decrypt; + break; + case KEYCTL_PKEY_SIGN: + params.op = kernel_pkey_sign; + break; + default: + BUG(); + } + + in = memdup_user(_in, params.in_len); + if (IS_ERR(in)) { + ret = PTR_ERR(in); + goto error_params; + } + + ret = -ENOMEM; + out = kmalloc(params.out_len, GFP_KERNEL); + if (!out) + goto error_in; + + ret = params.key->type->asym_eds_op(¶ms, in, out); + if (ret < 0) + goto error_out; + + if (copy_to_user(_out, out, ret) != 0) + ret = -EFAULT; + +error_out: + kfree(out); +error_in: + kfree(in); +error_params: + keyctl_pkey_params_free(¶ms); + return ret; +} + +/* + * Verify a signature. + * + * Verify a public key signature using the given key, or if not given, search + * for a matching key. + * + * _info is a string of supplementary information in key=val format. For + * instance, it might contain: + * + * "enc=pkcs1 hash=sha256" + * + * where enc= specifies the signature blob encoding and hash= selects the OID + * to go in that particular encoding. If enc= isn't supplied, it's assumed + * that the caller is supplying raw values. + * + * If successful, 0 is returned. + */ +long keyctl_pkey_verify(const struct keyctl_pkey_params __user *_params, + const char __user *_info, + const void __user *_in, + const void __user *_in2) +{ + struct kernel_pkey_params params; + void *in, *in2; + long ret; + + ret = keyctl_pkey_params_get_2(_params, _info, KEYCTL_PKEY_VERIFY, + ¶ms); + if (ret < 0) + goto error_params; + + ret = -EOPNOTSUPP; + if (!params.key->type->asym_verify_signature) + goto error_params; + + in = memdup_user(_in, params.in_len); + if (IS_ERR(in)) { + ret = PTR_ERR(in); + goto error_params; + } + + in2 = memdup_user(_in2, params.in2_len); + if (IS_ERR(in2)) { + ret = PTR_ERR(in2); + goto error_in; + } + + params.op = kernel_pkey_verify; + ret = params.key->type->asym_verify_signature(¶ms, in, in2); + + kfree(in2); +error_in: + kfree(in); +error_params: + keyctl_pkey_params_free(¶ms); + return ret; +} diff --git a/security/keys/trusted.c b/security/keys/trusted.c index b69d3b1777c2..ff6789365a12 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c @@ -30,7 +30,7 @@ #include <linux/tpm.h> #include <linux/tpm_command.h> -#include "trusted.h" +#include <keys/trusted.h> static const char hmac_alg[] = "hmac(sha1)"; static const char hash_alg[] = "sha1"; @@ -121,7 +121,7 @@ out: /* * calculate authorization info fields to send to TPM */ -static int TSS_authhmac(unsigned char *digest, const unsigned char *key, +int TSS_authhmac(unsigned char *digest, const unsigned char *key, unsigned int keylen, unsigned char *h1, unsigned char *h2, unsigned char h3, ...) 
{ @@ -168,11 +168,12 @@ out: kzfree(sdesc); return ret; } +EXPORT_SYMBOL_GPL(TSS_authhmac); /* * verify the AUTH1_COMMAND (Seal) result from TPM */ -static int TSS_checkhmac1(unsigned char *buffer, +int TSS_checkhmac1(unsigned char *buffer, const uint32_t command, const unsigned char *ononce, const unsigned char *key, @@ -249,6 +250,7 @@ out: kzfree(sdesc); return ret; } +EXPORT_SYMBOL_GPL(TSS_checkhmac1); /* * verify the AUTH2_COMMAND (unseal) result from TPM @@ -355,7 +357,7 @@ out: * For key specific tpm requests, we will generate and send our * own TPM command packets using the drivers send function. */ -static int trusted_tpm_send(unsigned char *cmd, size_t buflen) +int trusted_tpm_send(unsigned char *cmd, size_t buflen) { int rc; @@ -367,6 +369,7 @@ static int trusted_tpm_send(unsigned char *cmd, size_t buflen) rc = -EPERM; return rc; } +EXPORT_SYMBOL_GPL(trusted_tpm_send); /* * Lock a trusted key, by extending a selected PCR. @@ -425,7 +428,7 @@ static int osap(struct tpm_buf *tb, struct osapsess *s, /* * Create an object independent authorisation protocol (oiap) session */ -static int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce) +int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce) { int ret; @@ -442,6 +445,7 @@ static int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce) TPM_NONCE_SIZE); return 0; } +EXPORT_SYMBOL_GPL(oiap); struct tpm_digests { unsigned char encauth[SHA1_DIGEST_SIZE]; diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c index fcd965f1d69e..9be76c808fcc 100644 --- a/sound/firewire/amdtp-stream.c +++ b/sound/firewire/amdtp-stream.c @@ -146,53 +146,22 @@ static int apply_constraint_to_size(struct snd_pcm_hw_params *params, struct snd_interval *s = hw_param_interval(params, rule->var); const struct snd_interval *r = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE); - struct snd_interval t = { - .min = s->min, .max = s->max, .integer = 1, - }; + struct snd_interval t = {0}; + unsigned int step = 0; int i; for (i = 0; i < CIP_SFC_COUNT; ++i) { - unsigned int rate = amdtp_rate_table[i]; - unsigned int step = amdtp_syt_intervals[i]; - - if (!snd_interval_test(r, rate)) - continue; - - t.min = roundup(t.min, step); - t.max = rounddown(t.max, step); + if (snd_interval_test(r, amdtp_rate_table[i])) + step = max(step, amdtp_syt_intervals[i]); } - if (snd_interval_checkempty(&t)) - return -EINVAL; + t.min = roundup(s->min, step); + t.max = rounddown(s->max, step); + t.integer = 1; return snd_interval_refine(s, &t); } -static int apply_constraint_to_rate(struct snd_pcm_hw_params *params, - struct snd_pcm_hw_rule *rule) -{ - struct snd_interval *r = - hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); - const struct snd_interval *s = hw_param_interval_c(params, rule->deps[0]); - struct snd_interval t = { - .min = UINT_MAX, .max = 0, .integer = 1, - }; - int i; - - for (i = 0; i < CIP_SFC_COUNT; ++i) { - unsigned int step = amdtp_syt_intervals[i]; - unsigned int rate = amdtp_rate_table[i]; - - if (s->min % step || s->max % step) - continue; - - t.min = min(t.min, rate); - t.max = max(t.max, rate); - } - - return snd_interval_refine(r, &t); -} - /** * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream * @s: the AMDTP stream, which must be initialized. 
@@ -250,24 +219,16 @@ int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s, */ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, apply_constraint_to_size, NULL, + SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1); if (err < 0) goto end; - err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, - apply_constraint_to_rate, NULL, - SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1); - if (err < 0) - goto end; err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, apply_constraint_to_size, NULL, + SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1); if (err < 0) goto end; - err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, - apply_constraint_to_rate, NULL, - SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1); - if (err < 0) - goto end; end: return err; } diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c index 0f6dbcffe711..ed50b222d36e 100644 --- a/sound/firewire/dice/dice.c +++ b/sound/firewire/dice/dice.c @@ -240,8 +240,8 @@ static void dice_remove(struct fw_unit *unit) cancel_delayed_work_sync(&dice->dwork); if (dice->registered) { - /* No need to wait for releasing card object in this context. */ - snd_card_free_when_closed(dice->card); + // Block till all of ALSA character devices are released. + snd_card_free(dice->card); } mutex_destroy(&dice->mutex); diff --git a/sound/pci/ca0106/ca0106.h b/sound/pci/ca0106/ca0106.h index 04402c14cb23..9847b669cf3c 100644 --- a/sound/pci/ca0106/ca0106.h +++ b/sound/pci/ca0106/ca0106.h @@ -582,7 +582,7 @@ #define SPI_PL_BIT_R_R (2<<7) /* right channel = right */ #define SPI_PL_BIT_R_C (3<<7) /* right channel = (L+R)/2 */ #define SPI_IZD_REG 2 -#define SPI_IZD_BIT (1<<4) /* infinite zero detect */ +#define SPI_IZD_BIT (0<<4) /* infinite zero detect */ #define SPI_FMT_REG 3 #define SPI_FMT_BIT_RJ (0<<0) /* right justified mode */ diff --git a/tools/arch/arm64/include/uapi/asm/unistd.h b/tools/arch/arm64/include/uapi/asm/unistd.h index 5072cbd15c82..dae1584cf017 100644 --- a/tools/arch/arm64/include/uapi/asm/unistd.h +++ b/tools/arch/arm64/include/uapi/asm/unistd.h @@ -16,5 +16,6 @@ */ #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_NEW_STAT #include <asm-generic/unistd.h> diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h index 1b32b56a03d3..8c876c166ef2 100644 --- a/tools/arch/powerpc/include/uapi/asm/kvm.h +++ b/tools/arch/powerpc/include/uapi/asm/kvm.h @@ -634,6 +634,7 @@ struct kvm_ppc_cpu_char { #define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe) #define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf) +#define KVM_REG_PPC_PTCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc0) /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h index 9a50f02b9894..16511d97e8dc 100644 --- a/tools/arch/s390/include/uapi/asm/kvm.h +++ b/tools/arch/s390/include/uapi/asm/kvm.h @@ -160,6 +160,8 @@ struct kvm_s390_vm_cpu_subfunc { #define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1 #define KVM_S390_VM_CRYPTO_DISABLE_AES_KW 2 #define KVM_S390_VM_CRYPTO_DISABLE_DEA_KW 3 +#define KVM_S390_VM_CRYPTO_ENABLE_APIE 4 +#define KVM_S390_VM_CRYPTO_DISABLE_APIE 5 /* kvm attributes for migration mode */ #define KVM_S390_VM_MIGRATION_STOP 0 diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h index 8a6eff9c27f3..dabfcf7c3941 100644 --- a/tools/arch/x86/include/uapi/asm/kvm.h +++ 
b/tools/arch/x86/include/uapi/asm/kvm.h @@ -300,10 +300,7 @@ struct kvm_vcpu_events { __u8 injected; __u8 nr; __u8 has_error_code; - union { - __u8 pad; - __u8 pending; - }; + __u8 pending; __u32 error_code; } exception; struct { @@ -387,6 +384,7 @@ struct kvm_sync_regs { #define KVM_STATE_NESTED_GUEST_MODE 0x00000001 #define KVM_STATE_NESTED_RUN_PENDING 0x00000002 +#define KVM_STATE_NESTED_EVMCS 0x00000004 #define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001 #define KVM_STATE_NESTED_SMM_VMXON 0x00000002 diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h index df4bedb9b01c..538546edbfbd 100644 --- a/tools/include/uapi/asm-generic/unistd.h +++ b/tools/include/uapi/asm-generic/unistd.h @@ -242,10 +242,12 @@ __SYSCALL(__NR_tee, sys_tee) /* fs/stat.c */ #define __NR_readlinkat 78 __SYSCALL(__NR_readlinkat, sys_readlinkat) +#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64) #define __NR3264_fstatat 79 __SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat) #define __NR3264_fstat 80 __SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat) +#endif /* fs/sync.c */ #define __NR_sync 81 diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h new file mode 100644 index 000000000000..a441ea1bfe6d --- /dev/null +++ b/tools/include/uapi/linux/fs.h @@ -0,0 +1,393 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_LINUX_FS_H +#define _UAPI_LINUX_FS_H + +/* + * This file has definitions for some important file table structures + * and constants and structures used by various generic file system + * ioctl's. Please do not make any changes in this file before + * sending patches for review to linux-fsdevel@vger.kernel.org and + * linux-api@vger.kernel.org. + */ + +#include <linux/limits.h> +#include <linux/ioctl.h> +#include <linux/types.h> + +/* + * It's silly to have NR_OPEN bigger than NR_FILE, but you can change + * the file limit at runtime and only root can increase the per-process + * nr_file rlimit, so it's safe to set up a ridiculously high absolute + * upper limit on files-per-process. + * + * Some programs (notably those using select()) may have to be + * recompiled to take full advantage of the new limits.. 
+ */ + +/* Fixed constants first: */ +#undef NR_OPEN +#define INR_OPEN_CUR 1024 /* Initial setting for nfile rlimits */ +#define INR_OPEN_MAX 4096 /* Hard limit for nfile rlimits */ + +#define BLOCK_SIZE_BITS 10 +#define BLOCK_SIZE (1<<BLOCK_SIZE_BITS) + +#define SEEK_SET 0 /* seek relative to beginning of file */ +#define SEEK_CUR 1 /* seek relative to current file position */ +#define SEEK_END 2 /* seek relative to end of file */ +#define SEEK_DATA 3 /* seek to the next data */ +#define SEEK_HOLE 4 /* seek to the next hole */ +#define SEEK_MAX SEEK_HOLE + +#define RENAME_NOREPLACE (1 << 0) /* Don't overwrite target */ +#define RENAME_EXCHANGE (1 << 1) /* Exchange source and dest */ +#define RENAME_WHITEOUT (1 << 2) /* Whiteout source */ + +struct file_clone_range { + __s64 src_fd; + __u64 src_offset; + __u64 src_length; + __u64 dest_offset; +}; + +struct fstrim_range { + __u64 start; + __u64 len; + __u64 minlen; +}; + +/* extent-same (dedupe) ioctls; these MUST match the btrfs ioctl definitions */ +#define FILE_DEDUPE_RANGE_SAME 0 +#define FILE_DEDUPE_RANGE_DIFFERS 1 + +/* from struct btrfs_ioctl_file_extent_same_info */ +struct file_dedupe_range_info { + __s64 dest_fd; /* in - destination file */ + __u64 dest_offset; /* in - start of extent in destination */ + __u64 bytes_deduped; /* out - total # of bytes we were able + * to dedupe from this file. */ + /* status of this dedupe operation: + * < 0 for error + * == FILE_DEDUPE_RANGE_SAME if dedupe succeeds + * == FILE_DEDUPE_RANGE_DIFFERS if data differs + */ + __s32 status; /* out - see above description */ + __u32 reserved; /* must be zero */ +}; + +/* from struct btrfs_ioctl_file_extent_same_args */ +struct file_dedupe_range { + __u64 src_offset; /* in - start of extent in source */ + __u64 src_length; /* in - length of extent */ + __u16 dest_count; /* in - total elements in info array */ + __u16 reserved1; /* must be zero */ + __u32 reserved2; /* must be zero */ + struct file_dedupe_range_info info[0]; +}; + +/* And dynamically-tunable limits and defaults: */ +struct files_stat_struct { + unsigned long nr_files; /* read only */ + unsigned long nr_free_files; /* read only */ + unsigned long max_files; /* tunable */ +}; + +struct inodes_stat_t { + long nr_inodes; + long nr_unused; + long dummy[5]; /* padding for sysctl ABI compatibility */ +}; + + +#define NR_FILE 8192 /* this can well be larger on a larger system */ + + +/* + * These are the fs-independent mount-flags: up to 32 flags are supported + */ +#define MS_RDONLY 1 /* Mount read-only */ +#define MS_NOSUID 2 /* Ignore suid and sgid bits */ +#define MS_NODEV 4 /* Disallow access to device special files */ +#define MS_NOEXEC 8 /* Disallow program execution */ +#define MS_SYNCHRONOUS 16 /* Writes are synced at once */ +#define MS_REMOUNT 32 /* Alter flags of a mounted FS */ +#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */ +#define MS_DIRSYNC 128 /* Directory modifications are synchronous */ +#define MS_NOATIME 1024 /* Do not update access times. */ +#define MS_NODIRATIME 2048 /* Do not update directory access times */ +#define MS_BIND 4096 +#define MS_MOVE 8192 +#define MS_REC 16384 +#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence. + MS_VERBOSE is deprecated. 
*/ +#define MS_SILENT 32768 +#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */ +#define MS_UNBINDABLE (1<<17) /* change to unbindable */ +#define MS_PRIVATE (1<<18) /* change to private */ +#define MS_SLAVE (1<<19) /* change to slave */ +#define MS_SHARED (1<<20) /* change to shared */ +#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */ +#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ +#define MS_I_VERSION (1<<23) /* Update inode I_version field */ +#define MS_STRICTATIME (1<<24) /* Always perform atime updates */ +#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ + +/* These sb flags are internal to the kernel */ +#define MS_SUBMOUNT (1<<26) +#define MS_NOREMOTELOCK (1<<27) +#define MS_NOSEC (1<<28) +#define MS_BORN (1<<29) +#define MS_ACTIVE (1<<30) +#define MS_NOUSER (1<<31) + +/* + * Superblock flags that can be altered by MS_REMOUNT + */ +#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\ + MS_LAZYTIME) + +/* + * Old magic mount flag and mask + */ +#define MS_MGC_VAL 0xC0ED0000 +#define MS_MGC_MSK 0xffff0000 + +/* + * Structure for FS_IOC_FSGETXATTR[A] and FS_IOC_FSSETXATTR. + */ +struct fsxattr { + __u32 fsx_xflags; /* xflags field value (get/set) */ + __u32 fsx_extsize; /* extsize field value (get/set)*/ + __u32 fsx_nextents; /* nextents field value (get) */ + __u32 fsx_projid; /* project identifier (get/set) */ + __u32 fsx_cowextsize; /* CoW extsize field value (get/set)*/ + unsigned char fsx_pad[8]; +}; + +/* + * Flags for the fsx_xflags field + */ +#define FS_XFLAG_REALTIME 0x00000001 /* data in realtime volume */ +#define FS_XFLAG_PREALLOC 0x00000002 /* preallocated file extents */ +#define FS_XFLAG_IMMUTABLE 0x00000008 /* file cannot be modified */ +#define FS_XFLAG_APPEND 0x00000010 /* all writes append */ +#define FS_XFLAG_SYNC 0x00000020 /* all writes synchronous */ +#define FS_XFLAG_NOATIME 0x00000040 /* do not update access time */ +#define FS_XFLAG_NODUMP 0x00000080 /* do not include in backups */ +#define FS_XFLAG_RTINHERIT 0x00000100 /* create with rt bit set */ +#define FS_XFLAG_PROJINHERIT 0x00000200 /* create with parents projid */ +#define FS_XFLAG_NOSYMLINKS 0x00000400 /* disallow symlink creation */ +#define FS_XFLAG_EXTSIZE 0x00000800 /* extent size allocator hint */ +#define FS_XFLAG_EXTSZINHERIT 0x00001000 /* inherit inode extent size */ +#define FS_XFLAG_NODEFRAG 0x00002000 /* do not defragment */ +#define FS_XFLAG_FILESTREAM 0x00004000 /* use filestream allocator */ +#define FS_XFLAG_DAX 0x00008000 /* use DAX for IO */ +#define FS_XFLAG_COWEXTSIZE 0x00010000 /* CoW extent size allocator hint */ +#define FS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */ + +/* the read-only stuff doesn't really belong here, but any other place is + probably as bad and I don't want to create yet another include file. 
*/ + +#define BLKROSET _IO(0x12,93) /* set device read-only (0 = read-write) */ +#define BLKROGET _IO(0x12,94) /* get read-only status (0 = read_write) */ +#define BLKRRPART _IO(0x12,95) /* re-read partition table */ +#define BLKGETSIZE _IO(0x12,96) /* return device size /512 (long *arg) */ +#define BLKFLSBUF _IO(0x12,97) /* flush buffer cache */ +#define BLKRASET _IO(0x12,98) /* set read ahead for block device */ +#define BLKRAGET _IO(0x12,99) /* get current read ahead setting */ +#define BLKFRASET _IO(0x12,100)/* set filesystem (mm/filemap.c) read-ahead */ +#define BLKFRAGET _IO(0x12,101)/* get filesystem (mm/filemap.c) read-ahead */ +#define BLKSECTSET _IO(0x12,102)/* set max sectors per request (ll_rw_blk.c) */ +#define BLKSECTGET _IO(0x12,103)/* get max sectors per request (ll_rw_blk.c) */ +#define BLKSSZGET _IO(0x12,104)/* get block device sector size */ +#if 0 +#define BLKPG _IO(0x12,105)/* See blkpg.h */ + +/* Some people are morons. Do not use sizeof! */ + +#define BLKELVGET _IOR(0x12,106,size_t)/* elevator get */ +#define BLKELVSET _IOW(0x12,107,size_t)/* elevator set */ +/* This was here just to show that the number is taken - + probably all these _IO(0x12,*) ioctls should be moved to blkpg.h. */ +#endif +/* A jump here: 108-111 have been used for various private purposes. */ +#define BLKBSZGET _IOR(0x12,112,size_t) +#define BLKBSZSET _IOW(0x12,113,size_t) +#define BLKGETSIZE64 _IOR(0x12,114,size_t) /* return device size in bytes (u64 *arg) */ +#define BLKTRACESETUP _IOWR(0x12,115,struct blk_user_trace_setup) +#define BLKTRACESTART _IO(0x12,116) +#define BLKTRACESTOP _IO(0x12,117) +#define BLKTRACETEARDOWN _IO(0x12,118) +#define BLKDISCARD _IO(0x12,119) +#define BLKIOMIN _IO(0x12,120) +#define BLKIOOPT _IO(0x12,121) +#define BLKALIGNOFF _IO(0x12,122) +#define BLKPBSZGET _IO(0x12,123) +#define BLKDISCARDZEROES _IO(0x12,124) +#define BLKSECDISCARD _IO(0x12,125) +#define BLKROTATIONAL _IO(0x12,126) +#define BLKZEROOUT _IO(0x12,127) +/* + * A jump here: 130-131 are reserved for zoned block devices + * (see uapi/linux/blkzoned.h) + */ + +#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ +#define FIBMAP _IO(0x00,1) /* bmap access */ +#define FIGETBSZ _IO(0x00,2) /* get the block size used for bmap */ +#define FIFREEZE _IOWR('X', 119, int) /* Freeze */ +#define FITHAW _IOWR('X', 120, int) /* Thaw */ +#define FITRIM _IOWR('X', 121, struct fstrim_range) /* Trim */ +#define FICLONE _IOW(0x94, 9, int) +#define FICLONERANGE _IOW(0x94, 13, struct file_clone_range) +#define FIDEDUPERANGE _IOWR(0x94, 54, struct file_dedupe_range) + +#define FSLABEL_MAX 256 /* Max chars for the interface; each fs may differ */ + +#define FS_IOC_GETFLAGS _IOR('f', 1, long) +#define FS_IOC_SETFLAGS _IOW('f', 2, long) +#define FS_IOC_GETVERSION _IOR('v', 1, long) +#define FS_IOC_SETVERSION _IOW('v', 2, long) +#define FS_IOC_FIEMAP _IOWR('f', 11, struct fiemap) +#define FS_IOC32_GETFLAGS _IOR('f', 1, int) +#define FS_IOC32_SETFLAGS _IOW('f', 2, int) +#define FS_IOC32_GETVERSION _IOR('v', 1, int) +#define FS_IOC32_SETVERSION _IOW('v', 2, int) +#define FS_IOC_FSGETXATTR _IOR('X', 31, struct fsxattr) +#define FS_IOC_FSSETXATTR _IOW('X', 32, struct fsxattr) +#define FS_IOC_GETFSLABEL _IOR(0x94, 49, char[FSLABEL_MAX]) +#define FS_IOC_SETFSLABEL _IOW(0x94, 50, char[FSLABEL_MAX]) + +/* + * File system encryption support + */ +/* Policy provided via an ioctl on the topmost directory */ +#define FS_KEY_DESCRIPTOR_SIZE 8 + +#define FS_POLICY_FLAGS_PAD_4 0x00 +#define FS_POLICY_FLAGS_PAD_8 0x01 +#define 
FS_POLICY_FLAGS_PAD_16 0x02 +#define FS_POLICY_FLAGS_PAD_32 0x03 +#define FS_POLICY_FLAGS_PAD_MASK 0x03 +#define FS_POLICY_FLAGS_VALID 0x03 + +/* Encryption algorithms */ +#define FS_ENCRYPTION_MODE_INVALID 0 +#define FS_ENCRYPTION_MODE_AES_256_XTS 1 +#define FS_ENCRYPTION_MODE_AES_256_GCM 2 +#define FS_ENCRYPTION_MODE_AES_256_CBC 3 +#define FS_ENCRYPTION_MODE_AES_256_CTS 4 +#define FS_ENCRYPTION_MODE_AES_128_CBC 5 +#define FS_ENCRYPTION_MODE_AES_128_CTS 6 +#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */ +#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */ + +struct fscrypt_policy { + __u8 version; + __u8 contents_encryption_mode; + __u8 filenames_encryption_mode; + __u8 flags; + __u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE]; +}; + +#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy) +#define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16]) +#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy) + +/* Parameters for passing an encryption key into the kernel keyring */ +#define FS_KEY_DESC_PREFIX "fscrypt:" +#define FS_KEY_DESC_PREFIX_SIZE 8 + +/* Structure that userspace passes to the kernel keyring */ +#define FS_MAX_KEY_SIZE 64 + +struct fscrypt_key { + __u32 mode; + __u8 raw[FS_MAX_KEY_SIZE]; + __u32 size; +}; + +/* + * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS) + * + * Note: for historical reasons, these flags were originally used and + * defined for use by ext2/ext3, and then other file systems started + * using these flags so they wouldn't need to write their own version + * of chattr/lsattr (which was shipped as part of e2fsprogs). You + * should think twice before trying to use these flags in new + * contexts, or trying to assign these flags, since they are used both + * as the UAPI and the on-disk encoding for ext2/3/4. Also, we are + * almost out of 32-bit flags. :-) + * + * We have recently hoisted FS_IOC_FSGETXATTR / FS_IOC_FSSETXATTR from + * XFS to the generic FS level interface. This uses a structure that + * has padding and hence has more room to grow, so it may be more + * appropriate for many new use cases. + * + * Please do not change these flags or interfaces before checking with + * linux-fsdevel@vger.kernel.org and linux-api@vger.kernel.org. + */ +#define FS_SECRM_FL 0x00000001 /* Secure deletion */ +#define FS_UNRM_FL 0x00000002 /* Undelete */ +#define FS_COMPR_FL 0x00000004 /* Compress file */ +#define FS_SYNC_FL 0x00000008 /* Synchronous updates */ +#define FS_IMMUTABLE_FL 0x00000010 /* Immutable file */ +#define FS_APPEND_FL 0x00000020 /* writes to file may only append */ +#define FS_NODUMP_FL 0x00000040 /* do not dump file */ +#define FS_NOATIME_FL 0x00000080 /* do not update atime */ +/* Reserved for compression usage... 
*/ +#define FS_DIRTY_FL 0x00000100 +#define FS_COMPRBLK_FL 0x00000200 /* One or more compressed clusters */ +#define FS_NOCOMP_FL 0x00000400 /* Don't compress */ +/* End compression flags --- maybe not all used */ +#define FS_ENCRYPT_FL 0x00000800 /* Encrypted file */ +#define FS_BTREE_FL 0x00001000 /* btree format dir */ +#define FS_INDEX_FL 0x00001000 /* hash-indexed directory */ +#define FS_IMAGIC_FL 0x00002000 /* AFS directory */ +#define FS_JOURNAL_DATA_FL 0x00004000 /* Reserved for ext3 */ +#define FS_NOTAIL_FL 0x00008000 /* file tail should not be merged */ +#define FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ +#define FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ +#define FS_HUGE_FILE_FL 0x00040000 /* Reserved for ext4 */ +#define FS_EXTENT_FL 0x00080000 /* Extents */ +#define FS_EA_INODE_FL 0x00200000 /* Inode used for large EA */ +#define FS_EOFBLOCKS_FL 0x00400000 /* Reserved for ext4 */ +#define FS_NOCOW_FL 0x00800000 /* Do not cow file */ +#define FS_INLINE_DATA_FL 0x10000000 /* Reserved for ext4 */ +#define FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ +#define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ + +#define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ +#define FS_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */ + + +#define SYNC_FILE_RANGE_WAIT_BEFORE 1 +#define SYNC_FILE_RANGE_WRITE 2 +#define SYNC_FILE_RANGE_WAIT_AFTER 4 + +/* + * Flags for preadv2/pwritev2: + */ + +typedef int __bitwise __kernel_rwf_t; + +/* high priority request, poll if possible */ +#define RWF_HIPRI ((__force __kernel_rwf_t)0x00000001) + +/* per-IO O_DSYNC */ +#define RWF_DSYNC ((__force __kernel_rwf_t)0x00000002) + +/* per-IO O_SYNC */ +#define RWF_SYNC ((__force __kernel_rwf_t)0x00000004) + +/* per-IO, return -EAGAIN if operation would block */ +#define RWF_NOWAIT ((__force __kernel_rwf_t)0x00000008) + +/* per-IO O_APPEND */ +#define RWF_APPEND ((__force __kernel_rwf_t)0x00000010) + +/* mask of flags supported by the kernel */ +#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\ + RWF_APPEND) + +#endif /* _UAPI_LINUX_FS_H */ diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index 58faab897201..1debfa42cba1 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -287,6 +287,7 @@ enum { IFLA_BR_MCAST_STATS_ENABLED, IFLA_BR_MCAST_IGMP_VERSION, IFLA_BR_MCAST_MLD_VERSION, + IFLA_BR_VLAN_STATS_PER_PORT, __IFLA_BR_MAX, }; diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 2875ce85b322..2b7a652c9fa4 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -420,13 +420,19 @@ struct kvm_run { struct kvm_coalesced_mmio_zone { __u64 addr; __u32 size; - __u32 pad; + union { + __u32 pad; + __u32 pio; + }; }; struct kvm_coalesced_mmio { __u64 phys_addr; __u32 len; - __u32 pad; + union { + __u32 pad; + __u32 pio; + }; __u8 data[8]; }; @@ -752,6 +758,15 @@ struct kvm_ppc_resize_hpt { #define KVM_S390_SIE_PAGE_OFFSET 1 /* + * On arm64, machine type can be used to request the physical + * address size for the VM. Bits[7-0] are reserved for the guest + * PA size shift (i.e, log2(PA_Size)). For backward compatibility, + * value 0 implies the default IPA size, 40bits. 
+ */ +#define KVM_VM_TYPE_ARM_IPA_SIZE_MASK 0xffULL +#define KVM_VM_TYPE_ARM_IPA_SIZE(x) \ + ((x) & KVM_VM_TYPE_ARM_IPA_SIZE_MASK) +/* * ioctls for /dev/kvm fds: */ #define KVM_GET_API_VERSION _IO(KVMIO, 0x00) @@ -958,6 +973,8 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_HYPERV_SEND_IPI 161 #define KVM_CAP_COALESCED_PIO 162 #define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163 +#define KVM_CAP_EXCEPTION_PAYLOAD 164 +#define KVM_CAP_ARM_VM_IPA_SIZE 165 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/tools/include/uapi/linux/mman.h b/tools/include/uapi/linux/mman.h index bfd5938fede6..d0f515d53299 100644 --- a/tools/include/uapi/linux/mman.h +++ b/tools/include/uapi/linux/mman.h @@ -28,7 +28,9 @@ #define MAP_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB #define MAP_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB #define MAP_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB +#define MAP_HUGE_32MB HUGETLB_FLAG_ENCODE_32MB #define MAP_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB +#define MAP_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB #define MAP_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB #define MAP_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB #define MAP_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB diff --git a/tools/include/uapi/linux/netlink.h b/tools/include/uapi/linux/netlink.h index 776bc92e9118..486ed1f0c0bc 100644 --- a/tools/include/uapi/linux/netlink.h +++ b/tools/include/uapi/linux/netlink.h @@ -155,6 +155,7 @@ enum nlmsgerr_attrs { #define NETLINK_LIST_MEMBERSHIPS 9 #define NETLINK_CAP_ACK 10 #define NETLINK_EXT_ACK 11 +#define NETLINK_DUMP_STRICT_CHK 12 struct nl_pktinfo { __u32 group; diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index f35eb72739c0..9de8780ac8d9 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -646,10 +646,12 @@ struct perf_event_mmap_page { * * PERF_RECORD_MISC_MMAP_DATA - PERF_RECORD_MMAP* events * PERF_RECORD_MISC_COMM_EXEC - PERF_RECORD_COMM event + * PERF_RECORD_MISC_FORK_EXEC - PERF_RECORD_FORK event (perf internal) * PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events */ #define PERF_RECORD_MISC_MMAP_DATA (1 << 13) #define PERF_RECORD_MISC_COMM_EXEC (1 << 13) +#define PERF_RECORD_MISC_FORK_EXEC (1 << 13) #define PERF_RECORD_MISC_SWITCH_OUT (1 << 13) /* * These PERF_RECORD_MISC_* flags below are safely reused diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h index ed0a120d4f08..404d4b9ffe76 100644 --- a/tools/include/uapi/sound/asound.h +++ b/tools/include/uapi/sound/asound.h @@ -752,7 +752,7 @@ struct snd_timer_info { #define SNDRV_TIMER_PSFLG_EARLY_EVENT (1<<2) /* write early event to the poll queue */ struct snd_timer_params { - unsigned int flags; /* flags - SNDRV_MIXER_PSFLG_* */ + unsigned int flags; /* flags - SNDRV_TIMER_PSFLG_* */ unsigned int ticks; /* requested resolution in ticks */ unsigned int queue_size; /* total size of queue (32-1024) */ unsigned int reserved0; /* reserved, was: failure locations */ diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index b607be7236d3..d6e62e90e8d4 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2084,19 +2084,19 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog, prog->expected_attach_type = type; } -#define BPF_PROG_SEC_IMPL(string, ptype, eatype, atype) \ - { string, sizeof(string) - 1, ptype, eatype, atype } +#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \ + { string, sizeof(string) - 1, ptype, eatype, is_attachable, atype } /* Programs that can NOT be attached. 
*/ -#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, -EINVAL) +#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0) /* Programs that can be attached. */ #define BPF_APROG_SEC(string, ptype, atype) \ - BPF_PROG_SEC_IMPL(string, ptype, 0, atype) + BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype) /* Programs that must specify expected attach type at load time. */ #define BPF_EAPROG_SEC(string, ptype, eatype) \ - BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype) + BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype) /* Programs that can be attached but attach type can't be identified by section * name. Kept for backward compatibility. @@ -2108,6 +2108,7 @@ static const struct { size_t len; enum bpf_prog_type prog_type; enum bpf_attach_type expected_attach_type; + int is_attachable; enum bpf_attach_type attach_type; } section_names[] = { BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER), @@ -2198,7 +2199,7 @@ int libbpf_attach_type_by_name(const char *name, for (i = 0; i < ARRAY_SIZE(section_names); i++) { if (strncmp(name, section_names[i].sec, section_names[i].len)) continue; - if (section_names[i].attach_type == -EINVAL) + if (!section_names[i].is_attachable) return -EINVAL; *attach_type = section_names[i].attach_type; return 0; diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c index cb7154eccbdc..dbb9efbf718a 100644 --- a/tools/lib/subcmd/parse-options.c +++ b/tools/lib/subcmd/parse-options.c @@ -116,6 +116,7 @@ static int get_value(struct parse_opt_ctx_t *p, case OPTION_INTEGER: case OPTION_UINTEGER: case OPTION_LONG: + case OPTION_ULONG: case OPTION_U64: default: break; @@ -166,6 +167,7 @@ static int get_value(struct parse_opt_ctx_t *p, case OPTION_INTEGER: case OPTION_UINTEGER: case OPTION_LONG: + case OPTION_ULONG: case OPTION_U64: default: break; @@ -295,6 +297,22 @@ static int get_value(struct parse_opt_ctx_t *p, return opterror(opt, "expects a numerical value", flags); return 0; + case OPTION_ULONG: + if (unset) { + *(unsigned long *)opt->value = 0; + return 0; + } + if (opt->flags & PARSE_OPT_OPTARG && !p->opt) { + *(unsigned long *)opt->value = opt->defval; + return 0; + } + if (get_arg(p, opt, flags, &arg)) + return -1; + *(unsigned long *)opt->value = strtoul(arg, (char **)&s, 10); + if (*s) + return opterror(opt, "expects a numerical value", flags); + return 0; + case OPTION_U64: if (unset) { *(u64 *)opt->value = 0; @@ -703,6 +721,7 @@ static void print_option_help(const struct option *opts, int full) case OPTION_ARGUMENT: break; case OPTION_LONG: + case OPTION_ULONG: case OPTION_U64: case OPTION_INTEGER: case OPTION_UINTEGER: diff --git a/tools/lib/subcmd/parse-options.h b/tools/lib/subcmd/parse-options.h index 92fdbe1519f6..6ca2a8bfe716 100644 --- a/tools/lib/subcmd/parse-options.h +++ b/tools/lib/subcmd/parse-options.h @@ -25,6 +25,7 @@ enum parse_opt_type { OPTION_STRING, OPTION_INTEGER, OPTION_LONG, + OPTION_ULONG, OPTION_CALLBACK, OPTION_U64, OPTION_UINTEGER, @@ -133,6 +134,7 @@ struct option { #define OPT_INTEGER(s, l, v, h) { .type = OPTION_INTEGER, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h) } #define OPT_UINTEGER(s, l, v, h) { .type = OPTION_UINTEGER, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned int *), .help = (h) } #define OPT_LONG(s, l, v, h) { .type = OPTION_LONG, .short_name = (s), .long_name = (l), .value = check_vtype(v, long *), .help = (h) } +#define OPT_ULONG(s, l, v, h) { .type = OPTION_ULONG, .short_name = (s), .long_name 
= (l), .value = check_vtype(v, unsigned long *), .help = (h) } #define OPT_U64(s, l, v, h) { .type = OPTION_U64, .short_name = (s), .long_name = (l), .value = check_vtype(v, u64 *), .help = (h) } #define OPT_STRING(s, l, v, a, h) { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), .argh = (a), .help = (h) } #define OPT_STRING_OPTARG(s, l, v, a, h, d) \ diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 2928939b98ec..0414a0d52262 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -836,7 +836,7 @@ static int add_switch_table(struct objtool_file *file, struct instruction *insn, struct symbol *pfunc = insn->func->pfunc; unsigned int prev_offset = 0; - list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) { + list_for_each_entry_from(rela, &table->rela_sec->rela_list, list) { if (rela == next_table) break; @@ -926,6 +926,7 @@ static struct rela *find_switch_table(struct objtool_file *file, { struct rela *text_rela, *rodata_rela; struct instruction *orig_insn = insn; + struct section *rodata_sec; unsigned long table_offset; /* @@ -953,10 +954,13 @@ static struct rela *find_switch_table(struct objtool_file *file, /* look for a relocation which references .rodata */ text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len); - if (!text_rela || text_rela->sym != file->rodata->sym) + if (!text_rela || text_rela->sym->type != STT_SECTION || + !text_rela->sym->sec->rodata) continue; table_offset = text_rela->addend; + rodata_sec = text_rela->sym->sec; + if (text_rela->type == R_X86_64_PC32) table_offset += 4; @@ -964,10 +968,10 @@ static struct rela *find_switch_table(struct objtool_file *file, * Make sure the .rodata address isn't associated with a * symbol. gcc jump tables are anonymous data. */ - if (find_symbol_containing(file->rodata, table_offset)) + if (find_symbol_containing(rodata_sec, table_offset)) continue; - rodata_rela = find_rela_by_dest(file->rodata, table_offset); + rodata_rela = find_rela_by_dest(rodata_sec, table_offset); if (rodata_rela) { /* * Use of RIP-relative switch jumps is quite rare, and @@ -1052,7 +1056,7 @@ static int add_switch_table_alts(struct objtool_file *file) struct symbol *func; int ret; - if (!file->rodata || !file->rodata->rela) + if (!file->rodata) return 0; for_each_sec(file, sec) { @@ -1198,10 +1202,33 @@ static int read_retpoline_hints(struct objtool_file *file) return 0; } +static void mark_rodata(struct objtool_file *file) +{ + struct section *sec; + bool found = false; + + /* + * This searches for the .rodata section or multiple .rodata.func_name + * sections if -fdata-sections is being used. The .str.1.1 and .str.1.8 + * rodata sections are ignored as they don't contain jump tables. 
+ */ + for_each_sec(file, sec) { + if (!strncmp(sec->name, ".rodata", 7) && + !strstr(sec->name, ".str1.")) { + sec->rodata = true; + found = true; + } + } + + file->rodata = found; +} + static int decode_sections(struct objtool_file *file) { int ret; + mark_rodata(file); + ret = decode_instructions(file); if (ret) return ret; @@ -2171,7 +2198,6 @@ int check(const char *_objname, bool orc) INIT_LIST_HEAD(&file.insn_list); hash_init(file.insn_hash); file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard"); - file.rodata = find_section_by_name(file.elf, ".rodata"); file.c_file = find_section_by_name(file.elf, ".comment"); file.ignore_unreachables = no_unreachable; file.hints = false; diff --git a/tools/objtool/check.h b/tools/objtool/check.h index 95700a2bcb7c..e6e8a655b556 100644 --- a/tools/objtool/check.h +++ b/tools/objtool/check.h @@ -60,8 +60,8 @@ struct objtool_file { struct elf *elf; struct list_head insn_list; DECLARE_HASHTABLE(insn_hash, 16); - struct section *rodata, *whitelist; - bool ignore_unreachables, c_file, hints; + struct section *whitelist; + bool ignore_unreachables, c_file, hints, rodata; }; int check(const char *objname, bool orc); diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 7ec85d567598..6dbb9fae0f9d 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -301,7 +301,7 @@ static int read_symbols(struct elf *elf) if (sym->type != STT_FUNC) continue; sym->pfunc = sym->cfunc = sym; - coldstr = strstr(sym->name, ".cold."); + coldstr = strstr(sym->name, ".cold"); if (!coldstr) continue; @@ -379,6 +379,7 @@ static int read_relas(struct elf *elf) rela->offset = rela->rela.r_offset; symndx = GELF_R_SYM(rela->rela.r_info); rela->sym = find_symbol_by_index(elf, symndx); + rela->rela_sec = sec; if (!rela->sym) { WARN("can't find rela entry symbol %d for %s", symndx, sec->name); diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h index de5cd2ddded9..bc97ed86b9cd 100644 --- a/tools/objtool/elf.h +++ b/tools/objtool/elf.h @@ -48,7 +48,7 @@ struct section { char *name; int idx; unsigned int len; - bool changed, text; + bool changed, text, rodata; }; struct symbol { @@ -68,6 +68,7 @@ struct rela { struct list_head list; struct hlist_node hash; GElf_Rela rela; + struct section *rela_sec; struct symbol *sym; unsigned int type; unsigned long offset; diff --git a/tools/perf/Documentation/build-xed.txt b/tools/perf/Documentation/build-xed.txt new file mode 100644 index 000000000000..6222c1e7231f --- /dev/null +++ b/tools/perf/Documentation/build-xed.txt @@ -0,0 +1,19 @@ + +For --xed the xed tool is needed. Here is how to install it: + + $ git clone https://github.com/intelxed/mbuild.git mbuild + $ git clone https://github.com/intelxed/xed + $ cd xed + $ ./mfile.py --share + $ ./mfile.py examples + $ sudo ./mfile.py --prefix=/usr/local install + $ sudo ldconfig + $ sudo cp obj/examples/xed /usr/local/bin + +Basic xed testing: + + $ xed | head -3 + ERROR: required argument(s) were missing + Copyright (C) 2017, Intel Corporation. All rights reserved. + XED version: [v10.0-328-g7d62c8c49b7b] + $ diff --git a/tools/perf/Documentation/intel-pt.txt b/tools/perf/Documentation/intel-pt.txt index 76971d2e4164..115eaacc455f 100644 --- a/tools/perf/Documentation/intel-pt.txt +++ b/tools/perf/Documentation/intel-pt.txt @@ -106,7 +106,7 @@ in transaction, respectively. While it is possible to create scripts to analyze the data, an alternative approach is available to export the data to a sqlite or postgresql database. 
Refer to script export-to-sqlite.py or export-to-postgresql.py for more details, -and to script call-graph-from-sql.py for an example of using the database. +and to script exported-sql-viewer.py for an example of using the database. There is also script intel-pt-events.py which provides an example of how to unpack the raw data for power events and PTWRITE. diff --git a/tools/perf/Documentation/itrace.txt b/tools/perf/Documentation/itrace.txt index a3abe04c779d..c2182cbabde3 100644 --- a/tools/perf/Documentation/itrace.txt +++ b/tools/perf/Documentation/itrace.txt @@ -11,10 +11,11 @@ l synthesize last branch entries (use with i or x) s skip initial number of events - The default is all events i.e. the same as --itrace=ibxwpe + The default is all events i.e. the same as --itrace=ibxwpe, + except for perf script where it is --itrace=ce - In addition, the period (default 100000) for instructions events - can be specified in units of: + In addition, the period (default 100000, except for perf script where it is 1) + for instructions events can be specified in units of: i instructions t ticks diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt index afdafe2110a1..a2b37ce48094 100644 --- a/tools/perf/Documentation/perf-script.txt +++ b/tools/perf/Documentation/perf-script.txt @@ -383,6 +383,24 @@ include::itrace.txt[] will be printed. Each entry has function name and file/line. Enabled by default, disable with --no-inline. +--insn-trace:: + Show instruction stream for intel_pt traces. Combine with --xed to + show disassembly. + +--xed:: + Run xed disassembler on output. Requires installing the xed disassembler. + +--call-trace:: + Show call stream for intel_pt traces. The CPUs are interleaved, but + can be filtered with -C. + +--call-ret-trace:: + Show call and return stream for intel_pt traces. + +--graph-function:: + For itrace, only show the specified functions and their callees. + Multiple functions can be separated by commas. + SEE ALSO -------- linkperf:perf-record[1], linkperf:perf-script-perl[1], diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt index 114fda12aa49..808b664343c9 100644 --- a/tools/perf/Documentation/perf-top.txt +++ b/tools/perf/Documentation/perf-top.txt @@ -242,6 +242,16 @@ Default is to monitor all CPUS. --hierarchy:: Enable hierarchy output. +--overwrite:: + Enable this to use just the most recent records, which helps on high core count + machines such as Knights Landing/Mill, but right now it is disabled by default as + the pausing used in this technique leads to loss of metadata events such + as PERF_RECORD_MMAP, which makes 'perf top' unable to resolve samples, leading + to lots of unknown samples appearing on the UI. Enable this if you are on such + machines and profiling a workload that doesn't create short-lived threads and/or + doesn't use many executable mmap operations. Work is being planned to solve + this situation; until then, this will remain disabled by default. + --force:: Don't do ownership validation. diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt index 115db9e06ecd..e113450503d2 100644 --- a/tools/perf/Documentation/perf-trace.txt +++ b/tools/perf/Documentation/perf-trace.txt @@ -171,6 +171,11 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs. --kernel-syscall-graph:: Show the kernel callchains on the syscall exit path. +--max-events=N:: + Stop after processing N events.
Note that strace-like events are considered + only at exit time or when a syscall is interrupted, i.e. in those cases this + option is equivalent to the number of lines printed. + --max-stack:: Set the stack depth limit when parsing the callchain, anything beyond the specified depth will be ignored. Note that at this point @@ -238,6 +243,68 @@ Trace syscalls, major and minor pagefaults: As you can see, there was major pagefault in python process, from CRYPTO_push_info_ routine which faulted somewhere in libcrypto.so. +Trace the first 4 open, openat or open_by_handle_at syscalls (in the future more syscalls may match here): + + $ perf trace -e open* --max-events 4 + [root@jouet perf]# trace -e open* --max-events 4 + 2272.992 ( 0.037 ms): gnome-shell/1370 openat(dfd: CWD, filename: /proc/self/stat) = 31 + 2277.481 ( 0.139 ms): gnome-shell/3039 openat(dfd: CWD, filename: /proc/self/stat) = 65 + 3026.398 ( 0.076 ms): gnome-shell/3039 openat(dfd: CWD, filename: /proc/self/stat) = 65 + 4294.665 ( 0.015 ms): sed/15879 openat(dfd: CWD, filename: /etc/ld.so.cache, flags: CLOEXEC) = 3 + $ + +Trace the first minor page fault when running a workload: + + # perf trace -F min --max-stack=7 --max-events 1 sleep 1 + 0.000 ( 0.000 ms): sleep/18006 minfault [__clear_user+0x1a] => 0x5626efa56080 (?k) + __clear_user ([kernel.kallsyms]) + load_elf_binary ([kernel.kallsyms]) + search_binary_handler ([kernel.kallsyms]) + __do_execve_file.isra.33 ([kernel.kallsyms]) + __x64_sys_execve ([kernel.kallsyms]) + do_syscall_64 ([kernel.kallsyms]) + entry_SYSCALL_64 ([kernel.kallsyms]) + # + +Trace the next min page page fault to take place on the first CPU: + + # perf trace -F min --call-graph=dwarf --max-events 1 --cpu 0 + 0.000 ( 0.000 ms): Web Content/17136 minfault [js::gc::Chunk::fetchNextDecommittedArena+0x4b] => 0x7fbe6181b000 (?.) 
+ js::gc::FreeSpan::initAsEmpty (inlined) + js::gc::Arena::setAsNotAllocated (inlined) + js::gc::Chunk::fetchNextDecommittedArena (/usr/lib64/firefox/libxul.so) + js::gc::Chunk::allocateArena (/usr/lib64/firefox/libxul.so) + js::gc::GCRuntime::allocateArena (/usr/lib64/firefox/libxul.so) + js::gc::ArenaLists::allocateFromArena (/usr/lib64/firefox/libxul.so) + js::gc::GCRuntime::tryNewTenuredThing<JSString, (js::AllowGC)1> (inlined) + js::AllocateString<JSString, (js::AllowGC)1> (/usr/lib64/firefox/libxul.so) + js::Allocate<JSThinInlineString, (js::AllowGC)1> (inlined) + JSThinInlineString::new_<(js::AllowGC)1> (inlined) + AllocateInlineString<(js::AllowGC)1, unsigned char> (inlined) + js::ConcatStrings<(js::AllowGC)1> (/usr/lib64/firefox/libxul.so) + [0x18b26e6bc2bd] (/tmp/perf-17136.map) + # + +Trace the next two sched:sched_switch events, four block:*_plug events, the +next block:*_unplug and the next three net:*dev_queue events, this last one +with a backtrace of at most 16 entries, system wide: + + # perf trace -e sched:*switch/nr=2/,block:*_plug/nr=4/,block:*_unplug/nr=1/,net:*dev_queue/nr=3,max-stack=16/ + 0.000 :0/0 sched:sched_switch:swapper/2:0 [120] S ==> rcu_sched:10 [120] + 0.015 rcu_sched/10 sched:sched_switch:rcu_sched:10 [120] R ==> swapper/2:0 [120] + 254.198 irq/50-iwlwifi/680 net:net_dev_queue:dev=wlp3s0 skbaddr=0xffff93498051f600 len=66 + __dev_queue_xmit ([kernel.kallsyms]) + 273.977 :0/0 net:net_dev_queue:dev=wlp3s0 skbaddr=0xffff93498051f600 len=78 + __dev_queue_xmit ([kernel.kallsyms]) + 274.007 :0/0 net:net_dev_queue:dev=wlp3s0 skbaddr=0xffff93498051ff00 len=78 + __dev_queue_xmit ([kernel.kallsyms]) + 2930.140 kworker/u16:58/2722 block:block_plug:[kworker/u16:58] + 2930.162 kworker/u16:58/2722 block:block_unplug:[kworker/u16:58] 1 + 4466.094 jbd2/dm-2-8/748 block:block_plug:[jbd2/dm-2-8] + 8050.123 kworker/u16:30/2694 block:block_plug:[kworker/u16:30] + 8050.271 kworker/u16:30/2694 block:block_plug:[kworker/u16:30] + # + SEE ALSO -------- linkperf:perf-record[1], linkperf:perf-script[1] diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 2f3bf025e305..3ccb4f0bf088 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -1,4 +1,5 @@ include ../scripts/Makefile.include +include ../scripts/Makefile.arch # The default target of this Makefile is... 
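# Note: ../scripts/Makefile.arch is included above so that $(ARCH) is defined for
# the arch_asm_uapi_dir variable further down, which the new mmap_flags table
# generator uses.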
all: @@ -385,6 +386,8 @@ export INSTALL SHELL_PATH SHELL = $(SHELL_PATH) linux_uapi_dir := $(srctree)/tools/include/uapi/linux +asm_generic_uapi_dir := $(srctree)/tools/include/uapi/asm-generic +arch_asm_uapi_dir := $(srctree)/tools/arch/$(ARCH)/include/uapi/asm/ beauty_outdir := $(OUTPUT)trace/beauty/generated beauty_ioctl_outdir := $(beauty_outdir)/ioctl @@ -460,6 +463,18 @@ madvise_behavior_tbl := $(srctree)/tools/perf/trace/beauty/madvise_behavior.sh $(madvise_behavior_array): $(madvise_hdr_dir)/mman-common.h $(madvise_behavior_tbl) $(Q)$(SHELL) '$(madvise_behavior_tbl)' $(madvise_hdr_dir) > $@ +mmap_flags_array := $(beauty_outdir)/mmap_flags_array.c +mmap_flags_tbl := $(srctree)/tools/perf/trace/beauty/mmap_flags.sh + +$(mmap_flags_array): $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(arch_asm_uapi_dir)/mman.h $(mmap_flags_tbl) + $(Q)$(SHELL) '$(mmap_flags_tbl)' $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@ + +mount_flags_array := $(beauty_outdir)/mount_flags_array.c +mount_flags_tbl := $(srctree)/tools/perf/trace/beauty/mount_flags.sh + +$(mount_flags_array): $(linux_uapi_dir)/fs.h $(mount_flags_tbl) + $(Q)$(SHELL) '$(mount_flags_tbl)' $(linux_uapi_dir) > $@ + prctl_option_array := $(beauty_outdir)/prctl_option_array.c prctl_hdr_dir := $(srctree)/tools/include/uapi/linux/ prctl_option_tbl := $(srctree)/tools/perf/trace/beauty/prctl_option.sh @@ -577,6 +592,8 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc $(socket_ipproto_array) \ $(vhost_virtio_ioctl_array) \ $(madvise_behavior_array) \ + $(mmap_flags_array) \ + $(mount_flags_array) \ $(perf_ioctl_array) \ $(prctl_option_array) \ $(arch_errno_name_array) @@ -863,6 +880,8 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea $(OUTPUT)tests/llvm-src-{base,kbuild,prologue,relocation}.c \ $(OUTPUT)pmu-events/pmu-events.c \ $(OUTPUT)$(madvise_behavior_array) \ + $(OUTPUT)$(mmap_flags_array) \ + $(OUTPUT)$(mount_flags_array) \ $(OUTPUT)$(drm_ioctl_array) \ $(OUTPUT)$(pkey_alloc_access_rights_array) \ $(OUTPUT)$(sndrv_ctl_ioctl_array) \ diff --git a/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl b/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl index 2dbb8cade048..c88fd32563eb 100755 --- a/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl +++ b/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl @@ -23,7 +23,7 @@ create_table_from_c() { local sc nr last_sc - create_table_exe=`mktemp /tmp/create-table-XXXXXX` + create_table_exe=`mktemp ${TMPDIR:-/tmp}/create-table-XXXXXX` { diff --git a/tools/perf/arch/sparc/Makefile b/tools/perf/arch/sparc/Makefile index 7fbca175099e..275dea7ff59a 100644 --- a/tools/perf/arch/sparc/Makefile +++ b/tools/perf/arch/sparc/Makefile @@ -1,3 +1,5 @@ ifndef NO_DWARF PERF_HAVE_DWARF_REGS := 1 endif + +PERF_HAVE_JITDUMP := 1 diff --git a/tools/perf/arch/sparc/annotate/instructions.c b/tools/perf/arch/sparc/annotate/instructions.c new file mode 100644 index 000000000000..2614c010c235 --- /dev/null +++ b/tools/perf/arch/sparc/annotate/instructions.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0 + +static int is_branch_cond(const char *cond) +{ + if (cond[0] == '\0') + return 1; + + if (cond[0] == 'a' && cond[1] == '\0') + return 1; + + if (cond[0] == 'c' && + (cond[1] == 'c' || cond[1] == 's') && + cond[2] == '\0') + return 1; + + if (cond[0] == 'e' && + (cond[1] == '\0' || + (cond[1] == 'q' && cond[2] == '\0'))) + return 1; + + if (cond[0] == 'g' && + (cond[1] == '\0' || + (cond[1] == 't' && cond[2] == '\0') || 
+ (cond[1] == 'e' && cond[2] == '\0') || + (cond[1] == 'e' && cond[2] == 'u' && cond[3] == '\0'))) + return 1; + + if (cond[0] == 'l' && + (cond[1] == '\0' || + (cond[1] == 't' && cond[2] == '\0') || + (cond[1] == 'u' && cond[2] == '\0') || + (cond[1] == 'e' && cond[2] == '\0') || + (cond[1] == 'e' && cond[2] == 'u' && cond[3] == '\0'))) + return 1; + + if (cond[0] == 'n' && + (cond[1] == '\0' || + (cond[1] == 'e' && cond[2] == '\0') || + (cond[1] == 'z' && cond[2] == '\0') || + (cond[1] == 'e' && cond[2] == 'g' && cond[3] == '\0'))) + return 1; + + if (cond[0] == 'b' && + cond[1] == 'p' && + cond[2] == 'o' && + cond[3] == 's' && + cond[4] == '\0') + return 1; + + if (cond[0] == 'v' && + (cond[1] == 'c' || cond[1] == 's') && + cond[2] == '\0') + return 1; + + if (cond[0] == 'b' && + cond[1] == 'z' && + cond[2] == '\0') + return 1; + + return 0; +} + +static int is_branch_reg_cond(const char *cond) +{ + if ((cond[0] == 'n' || cond[0] == 'l') && + cond[1] == 'z' && + cond[2] == '\0') + return 1; + + if (cond[0] == 'z' && + cond[1] == '\0') + return 1; + + if ((cond[0] == 'g' || cond[0] == 'l') && + cond[1] == 'e' && + cond[2] == 'z' && + cond[3] == '\0') + return 1; + + if (cond[0] == 'g' && + cond[1] == 'z' && + cond[2] == '\0') + return 1; + + return 0; +} + +static int is_branch_float_cond(const char *cond) +{ + if (cond[0] == '\0') + return 1; + + if ((cond[0] == 'a' || cond[0] == 'e' || + cond[0] == 'z' || cond[0] == 'g' || + cond[0] == 'l' || cond[0] == 'n' || + cond[0] == 'o' || cond[0] == 'u') && + cond[1] == '\0') + return 1; + + if (((cond[0] == 'g' && cond[1] == 'e') || + (cond[0] == 'l' && (cond[1] == 'e' || + cond[1] == 'g')) || + (cond[0] == 'n' && (cond[1] == 'e' || + cond[1] == 'z')) || + (cond[0] == 'u' && (cond[1] == 'e' || + cond[1] == 'g' || + cond[1] == 'l'))) && + cond[2] == '\0') + return 1; + + if (cond[0] == 'u' && + (cond[1] == 'g' || cond[1] == 'l') && + cond[2] == 'e' && + cond[3] == '\0') + return 1; + + return 0; +} + +static struct ins_ops *sparc__associate_instruction_ops(struct arch *arch, const char *name) +{ + struct ins_ops *ops = NULL; + + if (!strcmp(name, "call") || + !strcmp(name, "jmp") || + !strcmp(name, "jmpl")) { + ops = &call_ops; + } else if (!strcmp(name, "ret") || + !strcmp(name, "retl") || + !strcmp(name, "return")) { + ops = &ret_ops; + } else if (!strcmp(name, "mov")) { + ops = &mov_ops; + } else { + if (name[0] == 'c' && + (name[1] == 'w' || name[1] == 'x')) + name += 2; + + if (name[0] == 'b') { + const char *cond = name + 1; + + if (cond[0] == 'r') { + if (is_branch_reg_cond(cond + 1)) + ops = &jump_ops; + } else if (is_branch_cond(cond)) { + ops = &jump_ops; + } + } else if (name[0] == 'f' && name[1] == 'b') { + if (is_branch_float_cond(name + 2)) + ops = &jump_ops; + } + } + + if (ops) + arch__associate_ins_ops(arch, name, ops); + + return ops; +} + +static int sparc__annotate_init(struct arch *arch, char *cpuid __maybe_unused) +{ + if (!arch->initialized) { + arch->initialized = true; + arch->associate_instruction_ops = sparc__associate_instruction_ops; + arch->objdump.comment_char = '#'; + } + + return 0; +} diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 0980dfe3396b..10cf889c6d75 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -592,6 +592,9 @@ static void record__init_features(struct record *rec) if (!rec->opts.full_auxtrace) perf_header__clear_feat(&session->header, HEADER_AUXTRACE); + if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns)) + 
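		/* Keep HEADER_CLOCKID only when a clockid was requested on the
		 * command line and its resolution could be read via
		 * clock_getres(); see get_clockid_res() further down. */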
perf_header__clear_feat(&session->header, HEADER_CLOCKID); + perf_header__clear_feat(&session->header, HEADER_STAT); } @@ -897,6 +900,9 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) record__init_features(rec); + if (rec->opts.use_clockid && rec->opts.clockid_res_ns) + session->header.env.clockid_res_ns = rec->opts.clockid_res_ns; + if (forks) { err = perf_evlist__prepare_workload(rec->evlist, &opts->target, argv, data->is_pipe, @@ -1337,6 +1343,19 @@ static const struct clockid_map clockids[] = { CLOCKID_END, }; +static int get_clockid_res(clockid_t clk_id, u64 *res_ns) +{ + struct timespec res; + + *res_ns = 0; + if (!clock_getres(clk_id, &res)) + *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC; + else + pr_warning("WARNING: Failed to determine specified clock resolution.\n"); + + return 0; +} + static int parse_clockid(const struct option *opt, const char *str, int unset) { struct record_opts *opts = (struct record_opts *)opt->value; @@ -1360,7 +1379,7 @@ static int parse_clockid(const struct option *opt, const char *str, int unset) /* if its a number, we're done */ if (sscanf(str, "%d", &opts->clockid) == 1) - return 0; + return get_clockid_res(opts->clockid, &opts->clockid_res_ns); /* allow a "CLOCK_" prefix to the name */ if (!strncasecmp(str, "CLOCK_", 6)) @@ -1369,7 +1388,8 @@ static int parse_clockid(const struct option *opt, const char *str, int unset) for (cm = clockids; cm->name; cm++) { if (!strcasecmp(str, cm->name)) { opts->clockid = cm->clockid; - return 0; + return get_clockid_res(opts->clockid, + &opts->clockid_res_ns); } } diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 4da5e32b9e03..b5bc85bd0bbe 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -44,6 +44,7 @@ #include <sys/stat.h> #include <fcntl.h> #include <unistd.h> +#include <subcmd/pager.h> #include "sane_ctype.h" @@ -912,7 +913,7 @@ static int grab_bb(u8 *buffer, u64 start, u64 end, static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en, struct perf_insn *x, u8 *inbuf, int len, - int insn, FILE *fp) + int insn, FILE *fp, int *total_cycles) { int printed = fprintf(fp, "\t%016" PRIx64 "\t%-30s\t#%s%s%s%s", ip, dump_insn(x, ip, inbuf, len, NULL), @@ -921,7 +922,8 @@ static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en, en->flags.in_tx ? " INTX" : "", en->flags.abort ? 
" ABORT" : ""); if (en->flags.cycles) { - printed += fprintf(fp, " %d cycles", en->flags.cycles); + *total_cycles += en->flags.cycles; + printed += fprintf(fp, " %d cycles [%d]", en->flags.cycles, *total_cycles); if (insn) printed += fprintf(fp, " %.2f IPC", (float)insn / en->flags.cycles); } @@ -978,6 +980,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample, u8 buffer[MAXBB]; unsigned off; struct symbol *lastsym = NULL; + int total_cycles = 0; if (!(br && br->nr)) return 0; @@ -998,7 +1001,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample, printed += ip__fprintf_sym(br->entries[nr - 1].from, thread, x.cpumode, x.cpu, &lastsym, attr, fp); printed += ip__fprintf_jump(br->entries[nr - 1].from, &br->entries[nr - 1], - &x, buffer, len, 0, fp); + &x, buffer, len, 0, fp, &total_cycles); } /* Print all blocks */ @@ -1026,7 +1029,8 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample, printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp); if (ip == end) { - printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, insn, fp); + printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, insn, fp, + &total_cycles); break; } else { printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", ip, @@ -1104,6 +1108,35 @@ out: return printed; } +static const char *resolve_branch_sym(struct perf_sample *sample, + struct perf_evsel *evsel, + struct thread *thread, + struct addr_location *al, + u64 *ip) +{ + struct addr_location addr_al; + struct perf_event_attr *attr = &evsel->attr; + const char *name = NULL; + + if (sample->flags & (PERF_IP_FLAG_CALL | PERF_IP_FLAG_TRACE_BEGIN)) { + if (sample_addr_correlates_sym(attr)) { + thread__resolve(thread, &addr_al, sample); + if (addr_al.sym) + name = addr_al.sym->name; + else + *ip = sample->addr; + } else { + *ip = sample->addr; + } + } else if (sample->flags & (PERF_IP_FLAG_RETURN | PERF_IP_FLAG_TRACE_END)) { + if (al->sym) + name = al->sym->name; + else + *ip = sample->ip; + } + return name; +} + static int perf_sample__fprintf_callindent(struct perf_sample *sample, struct perf_evsel *evsel, struct thread *thread, @@ -1111,7 +1144,6 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample, { struct perf_event_attr *attr = &evsel->attr; size_t depth = thread_stack__depth(thread); - struct addr_location addr_al; const char *name = NULL; static int spacing; int len = 0; @@ -1125,22 +1157,7 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample, if (thread->ts && sample->flags & PERF_IP_FLAG_RETURN) depth += 1; - if (sample->flags & (PERF_IP_FLAG_CALL | PERF_IP_FLAG_TRACE_BEGIN)) { - if (sample_addr_correlates_sym(attr)) { - thread__resolve(thread, &addr_al, sample); - if (addr_al.sym) - name = addr_al.sym->name; - else - ip = sample->addr; - } else { - ip = sample->addr; - } - } else if (sample->flags & (PERF_IP_FLAG_RETURN | PERF_IP_FLAG_TRACE_END)) { - if (al->sym) - name = al->sym->name; - else - ip = sample->ip; - } + name = resolve_branch_sym(sample, evsel, thread, al, &ip); if (PRINT_FIELD(DSO) && !(PRINT_FIELD(IP) || PRINT_FIELD(ADDR))) { dlen += fprintf(fp, "("); @@ -1646,6 +1663,47 @@ static void perf_sample__fprint_metric(struct perf_script *script, } } +static bool show_event(struct perf_sample *sample, + struct perf_evsel *evsel, + struct thread *thread, + struct addr_location *al) +{ + int depth = thread_stack__depth(thread); + + if (!symbol_conf.graph_function) + return true; + + if 
(thread->filter) { + if (depth <= thread->filter_entry_depth) { + thread->filter = false; + return false; + } + return true; + } else { + const char *s = symbol_conf.graph_function; + u64 ip; + const char *name = resolve_branch_sym(sample, evsel, thread, al, + &ip); + unsigned nlen; + + if (!name) + return false; + nlen = strlen(name); + while (*s) { + unsigned len = strcspn(s, ","); + if (nlen == len && !strncmp(name, s, len)) { + thread->filter = true; + thread->filter_entry_depth = depth; + return true; + } + s += len; + if (*s == ',') + s++; + } + return false; + } +} + static void process_event(struct perf_script *script, struct perf_sample *sample, struct perf_evsel *evsel, struct addr_location *al, @@ -1660,6 +1718,9 @@ static void process_event(struct perf_script *script, if (output[type].fields == 0) return; + if (!show_event(sample, evsel, thread, al)) + return; + ++es->samples; perf_sample__fprintf_start(sample, thread, evsel, @@ -1737,6 +1798,9 @@ static void process_event(struct perf_script *script, if (PRINT_FIELD(METRIC)) perf_sample__fprint_metric(script, thread, evsel, sample, fp); + + if (verbose) + fflush(fp); } static struct scripting_ops *scripting_ops; @@ -3100,6 +3164,44 @@ static int perf_script__process_auxtrace_info(struct perf_session *session, #define perf_script__process_auxtrace_info 0 #endif +static int parse_insn_trace(const struct option *opt __maybe_unused, + const char *str __maybe_unused, + int unset __maybe_unused) +{ + parse_output_fields(NULL, "+insn,-event,-period", 0); + itrace_parse_synth_opts(opt, "i0ns", 0); + nanosecs = true; + return 0; +} + +static int parse_xed(const struct option *opt __maybe_unused, + const char *str __maybe_unused, + int unset __maybe_unused) +{ + force_pager("xed -F insn: -A -64 | less"); + return 0; +} + +static int parse_call_trace(const struct option *opt __maybe_unused, + const char *str __maybe_unused, + int unset __maybe_unused) +{ + parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent", 0); + itrace_parse_synth_opts(opt, "cewp", 0); + nanosecs = true; + return 0; +} + +static int parse_callret_trace(const struct option *opt __maybe_unused, + const char *str __maybe_unused, + int unset __maybe_unused) +{ + parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent,+flags", 0); + itrace_parse_synth_opts(opt, "crewp", 0); + nanosecs = true; + return 0; +} + int cmd_script(int argc, const char **argv) { bool show_full_info = false; @@ -3109,7 +3211,10 @@ int cmd_script(int argc, const char **argv) char *rec_script_path = NULL; char *rep_script_path = NULL; struct perf_session *session; - struct itrace_synth_opts itrace_synth_opts = { .set = false, }; + struct itrace_synth_opts itrace_synth_opts = { + .set = false, + .default_no_sample = true, + }; char *script_path = NULL; const char **__argv; int i, j, err = 0; @@ -3184,6 +3289,16 @@ int cmd_script(int argc, const char **argv) "system-wide collection from all CPUs"), OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", "only consider these symbols"), + OPT_CALLBACK_OPTARG(0, "insn-trace", &itrace_synth_opts, NULL, NULL, + "Decode instructions from itrace", parse_insn_trace), + OPT_CALLBACK_OPTARG(0, "xed", NULL, NULL, NULL, + "Run xed disassembler on output", parse_xed), + OPT_CALLBACK_OPTARG(0, "call-trace", &itrace_synth_opts, NULL, NULL, + "Decode calls from from itrace", parse_call_trace), + OPT_CALLBACK_OPTARG(0, "call-ret-trace", &itrace_synth_opts, NULL, NULL, + "Decode calls and returns from itrace", parse_callret_trace), 
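	/*
	 * Note: the callbacks above are thin wrappers that preset output
	 * fields and itrace options, e.g. parse_insn_trace() selects
	 * "+insn,-event,-period" with itrace "i0ns", and parse_call_trace()
	 * selects callindent output with itrace "cewp".  The --graph-function
	 * string added next is matched against symbol names in show_event()
	 * above, so only the named functions and their callees are printed.
	 * Illustrative usage: "perf script --insn-trace --xed" or
	 * "perf script --call-trace -C 0".
	 */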
+ OPT_STRING(0, "graph-function", &symbol_conf.graph_function, "symbol[,symbol...]", + "Only print symbols and callees with --call-trace/--call-ret-trace"), OPT_STRING(0, "stop-bt", &symbol_conf.bt_stop_list_str, "symbol[,symbol...]", "Stop display of callgraph at these symbols"), OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"), @@ -3417,8 +3532,10 @@ int cmd_script(int argc, const char **argv) exit(-1); } - if (!script_name) + if (!script_name) { setup_pager(); + use_browser = 0; + } session = perf_session__new(&data, false, &script.tool); if (session == NULL) @@ -3439,7 +3556,8 @@ int cmd_script(int argc, const char **argv) script.session = session; script__setup_sample_type(&script); - if (output[PERF_TYPE_HARDWARE].fields & PERF_OUTPUT_CALLINDENT) + if ((output[PERF_TYPE_HARDWARE].fields & PERF_OUTPUT_CALLINDENT) || + symbol_conf.graph_function) itrace_synth_opts.thread_stack = true; session->itrace_synth_opts = &itrace_synth_opts; diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index b86aba1c8028..d1028d7755bb 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -409,6 +409,28 @@ static struct perf_evsel *perf_evsel__reset_weak_group(struct perf_evsel *evsel) return leader; } +static bool is_target_alive(struct target *_target, + struct thread_map *threads) +{ + struct stat st; + int i; + + if (!target__has_task(_target)) + return true; + + for (i = 0; i < threads->nr; i++) { + char path[PATH_MAX]; + + scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(), + threads->map[i].pid); + + if (!stat(path, &st)) + return true; + } + + return false; +} + static int __run_perf_stat(int argc, const char **argv, int run_idx) { int interval = stat_config.interval; @@ -579,6 +601,8 @@ try_again: enable_counters(); while (!done) { nanosleep(&ts, NULL); + if (!is_target_alive(&target, evsel_list->threads)) + break; if (timeout) break; if (interval) { diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index d21d8751e749..b2838de13de0 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -1134,11 +1134,6 @@ static int __cmd_top(struct perf_top *top) if (!target__none(&opts->target)) perf_evlist__enable(top->evlist); - /* Wait for a minimal set of events before starting the snapshot */ - perf_evlist__poll(top->evlist, 100); - - perf_top__mmap_read(top); - ret = -1; if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui : display_thread), top)) { @@ -1156,6 +1151,11 @@ static int __cmd_top(struct perf_top *top) } } + /* Wait for a minimal set of events before starting the snapshot */ + perf_evlist__poll(top->evlist, 100); + + perf_top__mmap_read(top); + while (!done) { u64 hits = top->samples; @@ -1257,7 +1257,14 @@ int cmd_top(int argc, const char **argv) .uses_mmap = true, }, .proc_map_timeout = 500, - .overwrite = 1, + /* + * FIXME: This will lose PERF_RECORD_MMAP and other metadata + * when we pause, fix that and reenable. Probably using a + * separate evlist with a dummy event, i.e. a non-overwrite + * ring buffer just for metadata events, while PERF_RECORD_SAMPLE + * stays in overwrite mode. 
-acme + * */ + .overwrite = 0, }, .max_stack = sysctl__max_stack(), .annotation_opts = annotation__default_options, @@ -1372,6 +1379,8 @@ int cmd_top(int argc, const char **argv) "Show raw trace event output (do not use print fmt or plugins)"), OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy, "Show entries in a hierarchy"), + OPT_BOOLEAN(0, "overwrite", &top.record_opts.overwrite, + "Use a backward ring buffer, default: no"), OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"), OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize, "number of thread to run event synthesize"), diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 90289f31dd87..dc8a6c4986ce 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -89,6 +89,8 @@ struct trace { u64 base_time; FILE *output; unsigned long nr_events; + unsigned long nr_events_printed; + unsigned long max_events; struct strlist *ev_qualifier; struct { size_t nr; @@ -612,6 +614,7 @@ static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size, struct syscall_arg_fmt { size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg); + unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val); void *parm; const char *name; bool show_zero; @@ -723,6 +726,10 @@ static struct syscall_fmt { .arg = { [0] = { .scnprintf = SCA_HEX, /* addr */ }, [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */ }, }, }, + { .name = "mount", + .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ }, + [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */ + .mask_val = SCAMV_MOUNT_FLAGS, /* flags */ }, }, }, { .name = "mprotect", .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ }, [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, }, }, @@ -832,7 +839,8 @@ static struct syscall_fmt { .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, { .name = "tkill", .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, - { .name = "umount2", .alias = "umount", }, + { .name = "umount2", .alias = "umount", + .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, }, { .name = "uname", .alias = "newuname", }, { .name = "unlinkat", .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, }, @@ -856,6 +864,18 @@ static struct syscall_fmt *syscall_fmt__find(const char *name) return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp); } +static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias) +{ + int i, nmemb = ARRAY_SIZE(syscall_fmts); + + for (i = 0; i < nmemb; ++i) { + if (syscall_fmts[i].alias && strcmp(syscall_fmts[i].alias, alias) == 0) + return &syscall_fmts[i]; + } + + return NULL; +} + /* * is_exit: is this "exit" or "exit_group"? * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter. @@ -1485,6 +1505,19 @@ static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size, return scnprintf(bf, size, "arg%d: ", arg->idx); } +/* + * Check if the value is in fact zero, i.e. 
mask whatever needs masking, such + * as mount 'flags' argument that needs ignoring some magic flag, see comment + * in tools/perf/trace/beauty/mount_flags.c + */ +static unsigned long syscall__mask_val(struct syscall *sc, struct syscall_arg *arg, unsigned long val) +{ + if (sc->arg_fmt && sc->arg_fmt[arg->idx].mask_val) + return sc->arg_fmt[arg->idx].mask_val(arg, val); + + return val; +} + static size_t syscall__scnprintf_val(struct syscall *sc, char *bf, size_t size, struct syscall_arg *arg, unsigned long val) { @@ -1533,6 +1566,11 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size, continue; val = syscall_arg__val(&arg, arg.idx); + /* + * Some syscall args need some mask, most don't and + * return val untouched. + */ + val = syscall__mask_val(sc, &arg, val); /* * Suppress this argument if its value is zero and @@ -1664,6 +1702,8 @@ static int trace__printf_interrupted_entry(struct trace *trace) printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str); ttrace->entry_pending = false; + ++trace->nr_events_printed; + return printed; } @@ -1810,12 +1850,14 @@ static int trace__resolve_callchain(struct trace *trace, struct perf_evsel *evse int max_stack = evsel->attr.sample_max_stack ? evsel->attr.sample_max_stack : trace->max_stack; + int err; - if (machine__resolve(trace->host, &al, sample) < 0 || - thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack)) + if (machine__resolve(trace->host, &al, sample) < 0) return -1; - return 0; + err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack); + addr_location__put(&al); + return err; } static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample) @@ -1940,6 +1982,13 @@ errno_print: { fputc('\n', trace->output); + /* + * We only consider an 'event' for the sake of --max-events a non-filtered + * sys_enter + sys_exit and other tracepoint events. + */ + if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX) + interrupted = true; + if (callchain_ret > 0) trace__fprintf_callchain(trace, sample); else if (callchain_ret < 0) @@ -2072,14 +2121,25 @@ static void bpf_output__fprintf(struct trace *trace, { binary__fprintf(sample->raw_data, sample->raw_size, 8, bpf_output__printer, NULL, trace->output); + ++trace->nr_events_printed; } static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel, union perf_event *event __maybe_unused, struct perf_sample *sample) { - struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); + struct thread *thread; int callchain_ret = 0; + /* + * Check if we called perf_evsel__disable(evsel) due to, for instance, + * this event's max_events having been hit and this is an entry coming + * from the ring buffer that we should discard, since the max events + * have already been considered/printed. 
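	 * The disable itself happens further down in this function, once
	 * evsel->nr_events_printed reaches evsel->max_events, at which point
	 * the evsel is also closed.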
+ */ + if (evsel->disabled) + return 0; + + thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); if (sample->callchain) { callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor); @@ -2127,6 +2187,12 @@ static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel, event_format__fprintf(evsel->tp_format, sample->cpu, sample->raw_data, sample->raw_size, trace->output); + ++trace->nr_events_printed; + + if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) { + perf_evsel__disable(evsel); + perf_evsel__close(evsel); + } } } @@ -2137,8 +2203,8 @@ newline: trace__fprintf_callchain(trace, sample); else if (callchain_ret < 0) pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel)); - thread__put(thread); out: + thread__put(thread); return 0; } @@ -2225,6 +2291,8 @@ static int trace__pgfault(struct trace *trace, trace__fprintf_callchain(trace, sample); else if (callchain_ret < 0) pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel)); + + ++trace->nr_events_printed; out: err = 0; out_put: @@ -2402,6 +2470,9 @@ static void trace__handle_event(struct trace *trace, union perf_event *event, st tracepoint_handler handler = evsel->handler; handler(trace, evsel, event, sample); } + + if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX) + interrupted = true; } static int trace__add_syscall_newtp(struct trace *trace) @@ -2706,7 +2777,7 @@ next_event: int timeout = done ? 100 : -1; if (!draining && perf_evlist__poll(evlist, timeout) > 0) { - if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0) + if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0) draining = true; goto again; @@ -3138,6 +3209,7 @@ static int trace__parse_events_option(const struct option *opt, const char *str, int len = strlen(str) + 1, err = -1, list, idx; char *strace_groups_dir = system_path(STRACE_GROUPS_DIR); char group_name[PATH_MAX]; + struct syscall_fmt *fmt; if (strace_groups_dir == NULL) return -1; @@ -3155,12 +3227,19 @@ static int trace__parse_events_option(const struct option *opt, const char *str, if (syscalltbl__id(trace->sctbl, s) >= 0 || syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) { list = 1; + goto do_concat; + } + + fmt = syscall_fmt__find_by_alias(s); + if (fmt != NULL) { + list = 1; + s = fmt->name; } else { path__join(group_name, sizeof(group_name), strace_groups_dir, s); if (access(group_name, R_OK) == 0) list = 1; } - +do_concat: if (lists[list]) { sprintf(lists[list] + strlen(lists[list]), ",%s", s); } else { @@ -3249,6 +3328,7 @@ int cmd_trace(int argc, const char **argv) .trace_syscalls = false, .kernel_syscallchains = false, .max_stack = UINT_MAX, + .max_events = ULONG_MAX, }; const char *output_name = NULL; const struct option trace_options[] = { @@ -3301,6 +3381,8 @@ int cmd_trace(int argc, const char **argv) &record_parse_callchain_opt), OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains, "Show the kernel callchains on the syscall exit path"), + OPT_ULONG(0, "max-events", &trace.max_events, + "Set the maximum number of events to print, exit after that is reached. 
"), OPT_UINTEGER(0, "min-stack", &trace.min_stack, "Set the minimum stack depth when parsing the callchain, " "anything below the specified depth will be ignored."), diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh index c72cc73a6b09..9531f7bd7d9b 100755 --- a/tools/perf/check-headers.sh +++ b/tools/perf/check-headers.sh @@ -5,6 +5,7 @@ HEADERS=' include/uapi/drm/drm.h include/uapi/drm/i915_drm.h include/uapi/linux/fcntl.h +include/uapi/linux/fs.h include/uapi/linux/kcmp.h include/uapi/linux/kvm.h include/uapi/linux/in.h diff --git a/tools/perf/perf.h b/tools/perf/perf.h index 21bf7f5a3cf5..0ed4a34c74c4 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -81,6 +81,7 @@ struct record_opts { unsigned initial_delay; bool use_clockid; clockid_t clockid; + u64 clockid_res_ns; unsigned int proc_map_timeout; }; diff --git a/tools/perf/scripts/python/call-graph-from-sql.py b/tools/perf/scripts/python/call-graph-from-sql.py deleted file mode 100644 index b494a67a1c67..000000000000 --- a/tools/perf/scripts/python/call-graph-from-sql.py +++ /dev/null @@ -1,339 +0,0 @@ -#!/usr/bin/python2 -# call-graph-from-sql.py: create call-graph from sql database -# Copyright (c) 2014-2017, Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. - -# To use this script you will need to have exported data using either the -# export-to-sqlite.py or the export-to-postgresql.py script. Refer to those -# scripts for details. -# -# Following on from the example in the export scripts, a -# call-graph can be displayed for the pt_example database like this: -# -# python tools/perf/scripts/python/call-graph-from-sql.py pt_example -# -# Note that for PostgreSQL, this script supports connecting to remote databases -# by setting hostname, port, username, password, and dbname e.g. -# -# python tools/perf/scripts/python/call-graph-from-sql.py "hostname=myhost username=myuser password=mypassword dbname=pt_example" -# -# The result is a GUI window with a tree representing a context-sensitive -# call-graph. 
Expanding a couple of levels of the tree and adjusting column -# widths to suit will display something like: -# -# Call Graph: pt_example -# Call Path Object Count Time(ns) Time(%) Branch Count Branch Count(%) -# v- ls -# v- 2638:2638 -# v- _start ld-2.19.so 1 10074071 100.0 211135 100.0 -# |- unknown unknown 1 13198 0.1 1 0.0 -# >- _dl_start ld-2.19.so 1 1400980 13.9 19637 9.3 -# >- _d_linit_internal ld-2.19.so 1 448152 4.4 11094 5.3 -# v-__libc_start_main@plt ls 1 8211741 81.5 180397 85.4 -# >- _dl_fixup ld-2.19.so 1 7607 0.1 108 0.1 -# >- __cxa_atexit libc-2.19.so 1 11737 0.1 10 0.0 -# >- __libc_csu_init ls 1 10354 0.1 10 0.0 -# |- _setjmp libc-2.19.so 1 0 0.0 4 0.0 -# v- main ls 1 8182043 99.6 180254 99.9 -# -# Points to note: -# The top level is a command name (comm) -# The next level is a thread (pid:tid) -# Subsequent levels are functions -# 'Count' is the number of calls -# 'Time' is the elapsed time until the function returns -# Percentages are relative to the level above -# 'Branch Count' is the total number of branches for that function and all -# functions that it calls - -import sys -from PySide.QtCore import * -from PySide.QtGui import * -from PySide.QtSql import * -from decimal import * - -class TreeItem(): - - def __init__(self, db, row, parent_item): - self.db = db - self.row = row - self.parent_item = parent_item - self.query_done = False; - self.child_count = 0 - self.child_items = [] - self.data = ["", "", "", "", "", "", ""] - self.comm_id = 0 - self.thread_id = 0 - self.call_path_id = 1 - self.branch_count = 0 - self.time = 0 - if not parent_item: - self.setUpRoot() - - def setUpRoot(self): - self.query_done = True - query = QSqlQuery(self.db) - ret = query.exec_('SELECT id, comm FROM comms') - if not ret: - raise Exception("Query failed: " + query.lastError().text()) - while query.next(): - if not query.value(0): - continue - child_item = TreeItem(self.db, self.child_count, self) - self.child_items.append(child_item) - self.child_count += 1 - child_item.setUpLevel1(query.value(0), query.value(1)) - - def setUpLevel1(self, comm_id, comm): - self.query_done = True; - self.comm_id = comm_id - self.data[0] = comm - self.child_items = [] - self.child_count = 0 - query = QSqlQuery(self.db) - ret = query.exec_('SELECT thread_id, ( SELECT pid FROM threads WHERE id = thread_id ), ( SELECT tid FROM threads WHERE id = thread_id ) FROM comm_threads WHERE comm_id = ' + str(comm_id)) - if not ret: - raise Exception("Query failed: " + query.lastError().text()) - while query.next(): - child_item = TreeItem(self.db, self.child_count, self) - self.child_items.append(child_item) - self.child_count += 1 - child_item.setUpLevel2(comm_id, query.value(0), query.value(1), query.value(2)) - - def setUpLevel2(self, comm_id, thread_id, pid, tid): - self.comm_id = comm_id - self.thread_id = thread_id - self.data[0] = str(pid) + ":" + str(tid) - - def getChildItem(self, row): - return self.child_items[row] - - def getParentItem(self): - return self.parent_item - - def getRow(self): - return self.row - - def timePercent(self, b): - if not self.time: - return "0.0" - x = (b * Decimal(100)) / self.time - return str(x.quantize(Decimal('.1'), rounding=ROUND_HALF_UP)) - - def branchPercent(self, b): - if not self.branch_count: - return "0.0" - x = (b * Decimal(100)) / self.branch_count - return str(x.quantize(Decimal('.1'), rounding=ROUND_HALF_UP)) - - def addChild(self, call_path_id, name, dso, count, time, branch_count): - child_item = TreeItem(self.db, self.child_count, self) - child_item.comm_id = 
self.comm_id - child_item.thread_id = self.thread_id - child_item.call_path_id = call_path_id - child_item.branch_count = branch_count - child_item.time = time - child_item.data[0] = name - if dso == "[kernel.kallsyms]": - dso = "[kernel]" - child_item.data[1] = dso - child_item.data[2] = str(count) - child_item.data[3] = str(time) - child_item.data[4] = self.timePercent(time) - child_item.data[5] = str(branch_count) - child_item.data[6] = self.branchPercent(branch_count) - self.child_items.append(child_item) - self.child_count += 1 - - def selectCalls(self): - self.query_done = True; - query = QSqlQuery(self.db) - ret = query.exec_('SELECT id, call_path_id, branch_count, call_time, return_time, ' - '( SELECT name FROM symbols WHERE id = ( SELECT symbol_id FROM call_paths WHERE id = call_path_id ) ), ' - '( SELECT short_name FROM dsos WHERE id = ( SELECT dso_id FROM symbols WHERE id = ( SELECT symbol_id FROM call_paths WHERE id = call_path_id ) ) ), ' - '( SELECT ip FROM call_paths where id = call_path_id ) ' - 'FROM calls WHERE parent_call_path_id = ' + str(self.call_path_id) + ' AND comm_id = ' + str(self.comm_id) + ' AND thread_id = ' + str(self.thread_id) + - ' ORDER BY call_path_id') - if not ret: - raise Exception("Query failed: " + query.lastError().text()) - last_call_path_id = 0 - name = "" - dso = "" - count = 0 - branch_count = 0 - total_branch_count = 0 - time = 0 - total_time = 0 - while query.next(): - if query.value(1) == last_call_path_id: - count += 1 - branch_count += query.value(2) - time += query.value(4) - query.value(3) - else: - if count: - self.addChild(last_call_path_id, name, dso, count, time, branch_count) - last_call_path_id = query.value(1) - name = query.value(5) - dso = query.value(6) - count = 1 - total_branch_count += branch_count - total_time += time - branch_count = query.value(2) - time = query.value(4) - query.value(3) - if count: - self.addChild(last_call_path_id, name, dso, count, time, branch_count) - total_branch_count += branch_count - total_time += time - # Top level does not have time or branch count, so fix that here - if total_branch_count > self.branch_count: - self.branch_count = total_branch_count - if self.branch_count: - for child_item in self.child_items: - child_item.data[6] = self.branchPercent(child_item.branch_count) - if total_time > self.time: - self.time = total_time - if self.time: - for child_item in self.child_items: - child_item.data[4] = self.timePercent(child_item.time) - - def childCount(self): - if not self.query_done: - self.selectCalls() - return self.child_count - - def columnCount(self): - return 7 - - def columnHeader(self, column): - headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "] - return headers[column] - - def getData(self, column): - return self.data[column] - -class TreeModel(QAbstractItemModel): - - def __init__(self, db, parent=None): - super(TreeModel, self).__init__(parent) - self.db = db - self.root = TreeItem(db, 0, None) - - def columnCount(self, parent): - return self.root.columnCount() - - def rowCount(self, parent): - if parent.isValid(): - parent_item = parent.internalPointer() - else: - parent_item = self.root - return parent_item.childCount() - - def headerData(self, section, orientation, role): - if role == Qt.TextAlignmentRole: - if section > 1: - return Qt.AlignRight - if role != Qt.DisplayRole: - return None - if orientation != Qt.Horizontal: - return None - return self.root.columnHeader(section) - - def parent(self, child): - 
child_item = child.internalPointer() - if child_item is self.root: - return QModelIndex() - parent_item = child_item.getParentItem() - return self.createIndex(parent_item.getRow(), 0, parent_item) - - def index(self, row, column, parent): - if parent.isValid(): - parent_item = parent.internalPointer() - else: - parent_item = self.root - child_item = parent_item.getChildItem(row) - return self.createIndex(row, column, child_item) - - def data(self, index, role): - if role == Qt.TextAlignmentRole: - if index.column() > 1: - return Qt.AlignRight - if role != Qt.DisplayRole: - return None - index_item = index.internalPointer() - return index_item.getData(index.column()) - -class MainWindow(QMainWindow): - - def __init__(self, db, dbname, parent=None): - super(MainWindow, self).__init__(parent) - - self.setObjectName("MainWindow") - self.setWindowTitle("Call Graph: " + dbname) - self.move(100, 100) - self.resize(800, 600) - style = self.style() - icon = style.standardIcon(QStyle.SP_MessageBoxInformation) - self.setWindowIcon(icon); - - self.model = TreeModel(db) - - self.view = QTreeView() - self.view.setModel(self.model) - - self.setCentralWidget(self.view) - -if __name__ == '__main__': - if (len(sys.argv) < 2): - print >> sys.stderr, "Usage is: call-graph-from-sql.py <database name>" - raise Exception("Too few arguments") - - dbname = sys.argv[1] - - is_sqlite3 = False - try: - f = open(dbname) - if f.read(15) == "SQLite format 3": - is_sqlite3 = True - f.close() - except: - pass - - if is_sqlite3: - db = QSqlDatabase.addDatabase('QSQLITE') - else: - db = QSqlDatabase.addDatabase('QPSQL') - opts = dbname.split() - for opt in opts: - if '=' in opt: - opt = opt.split('=') - if opt[0] == 'hostname': - db.setHostName(opt[1]) - elif opt[0] == 'port': - db.setPort(int(opt[1])) - elif opt[0] == 'username': - db.setUserName(opt[1]) - elif opt[0] == 'password': - db.setPassword(opt[1]) - elif opt[0] == 'dbname': - dbname = opt[1] - else: - dbname = opt - - db.setDatabaseName(dbname) - if not db.open(): - raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text()) - - app = QApplication(sys.argv) - window = MainWindow(db, dbname) - window.show() - err = app.exec_() - db.close() - sys.exit(err) diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py index e46f51b17513..0564dd7377f2 100644 --- a/tools/perf/scripts/python/export-to-postgresql.py +++ b/tools/perf/scripts/python/export-to-postgresql.py @@ -59,7 +59,7 @@ import datetime # pt_example=# \q # # An example of using the database is provided by the script -# call-graph-from-sql.py. Refer to that script for details. +# exported-sql-viewer.py. Refer to that script for details. # # Tables: # diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py index e4bb82c8aba9..245caf2643ed 100644 --- a/tools/perf/scripts/python/export-to-sqlite.py +++ b/tools/perf/scripts/python/export-to-sqlite.py @@ -40,7 +40,7 @@ import datetime # sqlite> .quit # # An example of using the database is provided by the script -# call-graph-from-sql.py. Refer to that script for details. +# exported-sql-viewer.py. Refer to that script for details. # # The database structure is practically the same as created by the script # export-to-postgresql.py. Refer to that script for details. 
A notable diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py new file mode 100755 index 000000000000..24cb0bd56afa --- /dev/null +++ b/tools/perf/scripts/python/exported-sql-viewer.py @@ -0,0 +1,2128 @@ +#!/usr/bin/python2 +# SPDX-License-Identifier: GPL-2.0 +# exported-sql-viewer.py: view data from sql database +# Copyright (c) 2014-2018, Intel Corporation. + +# To use this script you will need to have exported data using either the +# export-to-sqlite.py or the export-to-postgresql.py script. Refer to those +# scripts for details. +# +# Following on from the example in the export scripts, a +# call-graph can be displayed for the pt_example database like this: +# +# python tools/perf/scripts/python/exported-sql-viewer.py pt_example +# +# Note that for PostgreSQL, this script supports connecting to remote databases +# by setting hostname, port, username, password, and dbname e.g. +# +# python tools/perf/scripts/python/exported-sql-viewer.py "hostname=myhost username=myuser password=mypassword dbname=pt_example" +# +# The result is a GUI window with a tree representing a context-sensitive +# call-graph. Expanding a couple of levels of the tree and adjusting column +# widths to suit will display something like: +# +# Call Graph: pt_example +# Call Path Object Count Time(ns) Time(%) Branch Count Branch Count(%) +# v- ls +# v- 2638:2638 +# v- _start ld-2.19.so 1 10074071 100.0 211135 100.0 +# |- unknown unknown 1 13198 0.1 1 0.0 +# >- _dl_start ld-2.19.so 1 1400980 13.9 19637 9.3 +# >- _d_linit_internal ld-2.19.so 1 448152 4.4 11094 5.3 +# v-__libc_start_main@plt ls 1 8211741 81.5 180397 85.4 +# >- _dl_fixup ld-2.19.so 1 7607 0.1 108 0.1 +# >- __cxa_atexit libc-2.19.so 1 11737 0.1 10 0.0 +# >- __libc_csu_init ls 1 10354 0.1 10 0.0 +# |- _setjmp libc-2.19.so 1 0 0.0 4 0.0 +# v- main ls 1 8182043 99.6 180254 99.9 +# +# Points to note: +# The top level is a command name (comm) +# The next level is a thread (pid:tid) +# Subsequent levels are functions +# 'Count' is the number of calls +# 'Time' is the elapsed time until the function returns +# Percentages are relative to the level above +# 'Branch Count' is the total number of branches for that function and all +# functions that it calls + +# There is also a "All branches" report, which displays branches and +# possibly disassembly. However, presently, the only supported disassembler is +# Intel XED, and additionally the object code must be present in perf build ID +# cache. To use Intel XED, libxed.so must be present. 
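# (The same build steps are also documented in tools/perf/Documentation/build-xed.txt,
# added earlier in this set of changes.)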
To build and install +# libxed.so: +# git clone https://github.com/intelxed/mbuild.git mbuild +# git clone https://github.com/intelxed/xed +# cd xed +# ./mfile.py --share +# sudo ./mfile.py --prefix=/usr/local install +# sudo ldconfig +# +# Example report: +# +# Time CPU Command PID TID Branch Type In Tx Branch +# 8107675239590 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so) +# 7fab593ea260 48 89 e7 mov %rsp, %rdi +# 8107675239899 2 ls 22011 22011 hardware interrupt No 7fab593ea260 _start (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel]) +# 8107675241900 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so) +# 7fab593ea260 48 89 e7 mov %rsp, %rdi +# 7fab593ea263 e8 c8 06 00 00 callq 0x7fab593ea930 +# 8107675241900 2 ls 22011 22011 call No 7fab593ea263 _start+0x3 (ld-2.19.so) -> 7fab593ea930 _dl_start (ld-2.19.so) +# 7fab593ea930 55 pushq %rbp +# 7fab593ea931 48 89 e5 mov %rsp, %rbp +# 7fab593ea934 41 57 pushq %r15 +# 7fab593ea936 41 56 pushq %r14 +# 7fab593ea938 41 55 pushq %r13 +# 7fab593ea93a 41 54 pushq %r12 +# 7fab593ea93c 53 pushq %rbx +# 7fab593ea93d 48 89 fb mov %rdi, %rbx +# 7fab593ea940 48 83 ec 68 sub $0x68, %rsp +# 7fab593ea944 0f 31 rdtsc +# 7fab593ea946 48 c1 e2 20 shl $0x20, %rdx +# 7fab593ea94a 89 c0 mov %eax, %eax +# 7fab593ea94c 48 09 c2 or %rax, %rdx +# 7fab593ea94f 48 8b 05 1a 15 22 00 movq 0x22151a(%rip), %rax +# 8107675242232 2 ls 22011 22011 hardware interrupt No 7fab593ea94f _dl_start+0x1f (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel]) +# 8107675242900 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea94f _dl_start+0x1f (ld-2.19.so) +# 7fab593ea94f 48 8b 05 1a 15 22 00 movq 0x22151a(%rip), %rax +# 7fab593ea956 48 89 15 3b 13 22 00 movq %rdx, 0x22133b(%rip) +# 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel]) + +import sys +import weakref +import threading +import string +import cPickle +import re +import os +from PySide.QtCore import * +from PySide.QtGui import * +from PySide.QtSql import * +from decimal import * +from ctypes import * +from multiprocessing import Process, Array, Value, Event + +# Data formatting helpers + +def tohex(ip): + if ip < 0: + ip += 1 << 64 + return "%x" % ip + +def offstr(offset): + if offset: + return "+0x%x" % offset + return "" + +def dsoname(name): + if name == "[kernel.kallsyms]": + return "[kernel]" + return name + +# Percent to one decimal place + +def PercentToOneDP(n, d): + if not d: + return "0.0" + x = (n * Decimal(100)) / d + return str(x.quantize(Decimal(".1"), rounding=ROUND_HALF_UP)) + +# Helper for queries that must not fail + +def QueryExec(query, stmt): + ret = query.exec_(stmt) + if not ret: + raise Exception("Query failed: " + query.lastError().text()) + +# Background thread + +class Thread(QThread): + + done = Signal(object) + + def __init__(self, task, param=None, parent=None): + super(Thread, self).__init__(parent) + self.task = task + self.param = param + + def run(self): + while True: + if self.param is None: + done, result = self.task() + else: + done, result = self.task(self.param) + self.done.emit(result) + if done: + break + +# Tree data model + +class TreeModel(QAbstractItemModel): + + def __init__(self, root, parent=None): + super(TreeModel, self).__init__(parent) + self.root = root + self.last_row_read = 
0 + + def Item(self, parent): + if parent.isValid(): + return parent.internalPointer() + else: + return self.root + + def rowCount(self, parent): + result = self.Item(parent).childCount() + if result < 0: + result = 0 + self.dataChanged.emit(parent, parent) + return result + + def hasChildren(self, parent): + return self.Item(parent).hasChildren() + + def headerData(self, section, orientation, role): + if role == Qt.TextAlignmentRole: + return self.columnAlignment(section) + if role != Qt.DisplayRole: + return None + if orientation != Qt.Horizontal: + return None + return self.columnHeader(section) + + def parent(self, child): + child_item = child.internalPointer() + if child_item is self.root: + return QModelIndex() + parent_item = child_item.getParentItem() + return self.createIndex(parent_item.getRow(), 0, parent_item) + + def index(self, row, column, parent): + child_item = self.Item(parent).getChildItem(row) + return self.createIndex(row, column, child_item) + + def DisplayData(self, item, index): + return item.getData(index.column()) + + def FetchIfNeeded(self, row): + if row > self.last_row_read: + self.last_row_read = row + if row + 10 >= self.root.child_count: + self.fetcher.Fetch(glb_chunk_sz) + + def columnAlignment(self, column): + return Qt.AlignLeft + + def columnFont(self, column): + return None + + def data(self, index, role): + if role == Qt.TextAlignmentRole: + return self.columnAlignment(index.column()) + if role == Qt.FontRole: + return self.columnFont(index.column()) + if role != Qt.DisplayRole: + return None + item = index.internalPointer() + return self.DisplayData(item, index) + +# Table data model + +class TableModel(QAbstractTableModel): + + def __init__(self, parent=None): + super(TableModel, self).__init__(parent) + self.child_count = 0 + self.child_items = [] + self.last_row_read = 0 + + def Item(self, parent): + if parent.isValid(): + return parent.internalPointer() + else: + return self + + def rowCount(self, parent): + return self.child_count + + def headerData(self, section, orientation, role): + if role == Qt.TextAlignmentRole: + return self.columnAlignment(section) + if role != Qt.DisplayRole: + return None + if orientation != Qt.Horizontal: + return None + return self.columnHeader(section) + + def index(self, row, column, parent): + return self.createIndex(row, column, self.child_items[row]) + + def DisplayData(self, item, index): + return item.getData(index.column()) + + def FetchIfNeeded(self, row): + if row > self.last_row_read: + self.last_row_read = row + if row + 10 >= self.child_count: + self.fetcher.Fetch(glb_chunk_sz) + + def columnAlignment(self, column): + return Qt.AlignLeft + + def columnFont(self, column): + return None + + def data(self, index, role): + if role == Qt.TextAlignmentRole: + return self.columnAlignment(index.column()) + if role == Qt.FontRole: + return self.columnFont(index.column()) + if role != Qt.DisplayRole: + return None + item = index.internalPointer() + return self.DisplayData(item, index) + +# Model cache + +model_cache = weakref.WeakValueDictionary() +model_cache_lock = threading.Lock() + +def LookupCreateModel(model_name, create_fn): + model_cache_lock.acquire() + try: + model = model_cache[model_name] + except: + model = None + if model is None: + model = create_fn() + model_cache[model_name] = model + model_cache_lock.release() + return model + +# Find bar + +class FindBar(): + + def __init__(self, parent, finder, is_reg_expr=False): + self.finder = finder + self.context = [] + self.last_value = None + 
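		# last_value/last_pattern remember the previous search so that
		# NextPrev() can tell a repeated find from a changed value or a
		# toggled pattern checkbox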
self.last_pattern = None + + label = QLabel("Find:") + label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) + + self.textbox = QComboBox() + self.textbox.setEditable(True) + self.textbox.currentIndexChanged.connect(self.ValueChanged) + + self.progress = QProgressBar() + self.progress.setRange(0, 0) + self.progress.hide() + + if is_reg_expr: + self.pattern = QCheckBox("Regular Expression") + else: + self.pattern = QCheckBox("Pattern") + self.pattern.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) + + self.next_button = QToolButton() + self.next_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowDown)) + self.next_button.released.connect(lambda: self.NextPrev(1)) + + self.prev_button = QToolButton() + self.prev_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowUp)) + self.prev_button.released.connect(lambda: self.NextPrev(-1)) + + self.close_button = QToolButton() + self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton)) + self.close_button.released.connect(self.Deactivate) + + self.hbox = QHBoxLayout() + self.hbox.setContentsMargins(0, 0, 0, 0) + + self.hbox.addWidget(label) + self.hbox.addWidget(self.textbox) + self.hbox.addWidget(self.progress) + self.hbox.addWidget(self.pattern) + self.hbox.addWidget(self.next_button) + self.hbox.addWidget(self.prev_button) + self.hbox.addWidget(self.close_button) + + self.bar = QWidget() + self.bar.setLayout(self.hbox); + self.bar.hide() + + def Widget(self): + return self.bar + + def Activate(self): + self.bar.show() + self.textbox.setFocus() + + def Deactivate(self): + self.bar.hide() + + def Busy(self): + self.textbox.setEnabled(False) + self.pattern.hide() + self.next_button.hide() + self.prev_button.hide() + self.progress.show() + + def Idle(self): + self.textbox.setEnabled(True) + self.progress.hide() + self.pattern.show() + self.next_button.show() + self.prev_button.show() + + def Find(self, direction): + value = self.textbox.currentText() + pattern = self.pattern.isChecked() + self.last_value = value + self.last_pattern = pattern + self.finder.Find(value, direction, pattern, self.context) + + def ValueChanged(self): + value = self.textbox.currentText() + pattern = self.pattern.isChecked() + index = self.textbox.currentIndex() + data = self.textbox.itemData(index) + # Store the pattern in the combo box to keep it with the text value + if data == None: + self.textbox.setItemData(index, pattern) + else: + self.pattern.setChecked(data) + self.Find(0) + + def NextPrev(self, direction): + value = self.textbox.currentText() + pattern = self.pattern.isChecked() + if value != self.last_value: + index = self.textbox.findText(value) + # Allow for a button press before the value has been added to the combo box + if index < 0: + index = self.textbox.count() + self.textbox.addItem(value, pattern) + self.textbox.setCurrentIndex(index) + return + else: + self.textbox.setItemData(index, pattern) + elif pattern != self.last_pattern: + # Keep the pattern recorded in the combo box up to date + index = self.textbox.currentIndex() + self.textbox.setItemData(index, pattern) + self.Find(direction) + + def NotFound(self): + QMessageBox.information(self.bar, "Find", "'" + self.textbox.currentText() + "' not found") + +# Context-sensitive call graph data model item base + +class CallGraphLevelItemBase(object): + + def __init__(self, glb, row, parent_item): + self.glb = glb + self.row = row + self.parent_item = parent_item + self.query_done = False; + self.child_count = 0 + self.child_items = [] + + def 
getChildItem(self, row): + return self.child_items[row] + + def getParentItem(self): + return self.parent_item + + def getRow(self): + return self.row + + def childCount(self): + if not self.query_done: + self.Select() + if not self.child_count: + return -1 + return self.child_count + + def hasChildren(self): + if not self.query_done: + return True + return self.child_count > 0 + + def getData(self, column): + return self.data[column] + +# Context-sensitive call graph data model level 2+ item base + +class CallGraphLevelTwoPlusItemBase(CallGraphLevelItemBase): + + def __init__(self, glb, row, comm_id, thread_id, call_path_id, time, branch_count, parent_item): + super(CallGraphLevelTwoPlusItemBase, self).__init__(glb, row, parent_item) + self.comm_id = comm_id + self.thread_id = thread_id + self.call_path_id = call_path_id + self.branch_count = branch_count + self.time = time + + def Select(self): + self.query_done = True; + query = QSqlQuery(self.glb.db) + QueryExec(query, "SELECT call_path_id, name, short_name, COUNT(calls.id), SUM(return_time - call_time), SUM(branch_count)" + " FROM calls" + " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" + " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" + " INNER JOIN dsos ON symbols.dso_id = dsos.id" + " WHERE parent_call_path_id = " + str(self.call_path_id) + + " AND comm_id = " + str(self.comm_id) + + " AND thread_id = " + str(self.thread_id) + + " GROUP BY call_path_id, name, short_name" + " ORDER BY call_path_id") + while query.next(): + child_item = CallGraphLevelThreeItem(self.glb, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), int(query.value(5)), self) + self.child_items.append(child_item) + self.child_count += 1 + +# Context-sensitive call graph data model level three item + +class CallGraphLevelThreeItem(CallGraphLevelTwoPlusItemBase): + + def __init__(self, glb, row, comm_id, thread_id, call_path_id, name, dso, count, time, branch_count, parent_item): + super(CallGraphLevelThreeItem, self).__init__(glb, row, comm_id, thread_id, call_path_id, time, branch_count, parent_item) + dso = dsoname(dso) + self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ] + self.dbid = call_path_id + +# Context-sensitive call graph data model level two item + +class CallGraphLevelTwoItem(CallGraphLevelTwoPlusItemBase): + + def __init__(self, glb, row, comm_id, thread_id, pid, tid, parent_item): + super(CallGraphLevelTwoItem, self).__init__(glb, row, comm_id, thread_id, 1, 0, 0, parent_item) + self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""] + self.dbid = thread_id + + def Select(self): + super(CallGraphLevelTwoItem, self).Select() + for child_item in self.child_items: + self.time += child_item.time + self.branch_count += child_item.branch_count + for child_item in self.child_items: + child_item.data[4] = PercentToOneDP(child_item.time, self.time) + child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count) + +# Context-sensitive call graph data model level one item + +class CallGraphLevelOneItem(CallGraphLevelItemBase): + + def __init__(self, glb, row, comm_id, comm, parent_item): + super(CallGraphLevelOneItem, self).__init__(glb, row, parent_item) + self.data = [comm, "", "", "", "", "", ""] + self.dbid = comm_id + + def Select(self): + self.query_done = True; + query = QSqlQuery(self.glb.db) + 
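+	# A level one (comm) item gets one child per thread that ran the comm: the query below
+	# returns each such thread's database id together with its pid and tid.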
QueryExec(query, "SELECT thread_id, pid, tid" + " FROM comm_threads" + " INNER JOIN threads ON thread_id = threads.id" + " WHERE comm_id = " + str(self.dbid)) + while query.next(): + child_item = CallGraphLevelTwoItem(self.glb, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self) + self.child_items.append(child_item) + self.child_count += 1 + +# Context-sensitive call graph data model root item + +class CallGraphRootItem(CallGraphLevelItemBase): + + def __init__(self, glb): + super(CallGraphRootItem, self).__init__(glb, 0, None) + self.dbid = 0 + self.query_done = True; + query = QSqlQuery(glb.db) + QueryExec(query, "SELECT id, comm FROM comms") + while query.next(): + if not query.value(0): + continue + child_item = CallGraphLevelOneItem(glb, self.child_count, query.value(0), query.value(1), self) + self.child_items.append(child_item) + self.child_count += 1 + +# Context-sensitive call graph data model + +class CallGraphModel(TreeModel): + + def __init__(self, glb, parent=None): + super(CallGraphModel, self).__init__(CallGraphRootItem(glb), parent) + self.glb = glb + + def columnCount(self, parent=None): + return 7 + + def columnHeader(self, column): + headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "] + return headers[column] + + def columnAlignment(self, column): + alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ] + return alignment[column] + + def FindSelect(self, value, pattern, query): + if pattern: + # postgresql and sqlite pattern patching differences: + # postgresql LIKE is case sensitive but sqlite LIKE is not + # postgresql LIKE allows % and _ to be escaped with \ but sqlite LIKE does not + # postgresql supports ILIKE which is case insensitive + # sqlite supports GLOB (text only) which uses * and ? and is case sensitive + if not self.glb.dbref.is_sqlite3: + # Escape % and _ + s = value.replace("%", "\%") + s = s.replace("_", "\_") + # Translate * and ? into SQL LIKE pattern characters % and _ + trans = string.maketrans("*?", "%_") + match = " LIKE '" + str(s).translate(trans) + "'" + else: + match = " GLOB '" + str(value) + "'" + else: + match = " = '" + str(value) + "'" + QueryExec(query, "SELECT call_path_id, comm_id, thread_id" + " FROM calls" + " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" + " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" + " WHERE symbols.name" + match + + " GROUP BY comm_id, thread_id, call_path_id" + " ORDER BY comm_id, thread_id, call_path_id") + + def FindPath(self, query): + # Turn the query result into a list of ids that the tree view can walk + # to open the tree at the right place. 
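+	# The select columns are (call_path_id, comm_id, thread_id); the result is assembled as
+	# ids == [comm_id, thread_id, call path ids from just below the root down to the match],
+	# the same order in which CallGraphWindow.DisplayFound() matches them against item dbids.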
+ ids = [] + parent_id = query.value(0) + while parent_id: + ids.insert(0, parent_id) + q2 = QSqlQuery(self.glb.db) + QueryExec(q2, "SELECT parent_id" + " FROM call_paths" + " WHERE id = " + str(parent_id)) + if not q2.next(): + break + parent_id = q2.value(0) + # The call path root is not used + if ids[0] == 1: + del ids[0] + ids.insert(0, query.value(2)) + ids.insert(0, query.value(1)) + return ids + + def Found(self, query, found): + if found: + return self.FindPath(query) + return [] + + def FindValue(self, value, pattern, query, last_value, last_pattern): + if last_value == value and pattern == last_pattern: + found = query.first() + else: + self.FindSelect(value, pattern, query) + found = query.next() + return self.Found(query, found) + + def FindNext(self, query): + found = query.next() + if not found: + found = query.first() + return self.Found(query, found) + + def FindPrev(self, query): + found = query.previous() + if not found: + found = query.last() + return self.Found(query, found) + + def FindThread(self, c): + if c.direction == 0 or c.value != c.last_value or c.pattern != c.last_pattern: + ids = self.FindValue(c.value, c.pattern, c.query, c.last_value, c.last_pattern) + elif c.direction > 0: + ids = self.FindNext(c.query) + else: + ids = self.FindPrev(c.query) + return (True, ids) + + def Find(self, value, direction, pattern, context, callback): + class Context(): + def __init__(self, *x): + self.value, self.direction, self.pattern, self.query, self.last_value, self.last_pattern = x + def Update(self, *x): + self.value, self.direction, self.pattern, self.last_value, self.last_pattern = x + (self.value, self.pattern) + if len(context): + context[0].Update(value, direction, pattern) + else: + context.append(Context(value, direction, pattern, QSqlQuery(self.glb.db), None, None)) + # Use a thread so the UI is not blocked during the SELECT + thread = Thread(self.FindThread, context[0]) + thread.done.connect(lambda ids, t=thread, c=callback: self.FindDone(t, c, ids), Qt.QueuedConnection) + thread.start() + + def FindDone(self, thread, callback, ids): + callback(ids) + +# Vertical widget layout + +class VBox(): + + def __init__(self, w1, w2, w3=None): + self.vbox = QWidget() + self.vbox.setLayout(QVBoxLayout()); + + self.vbox.layout().setContentsMargins(0, 0, 0, 0) + + self.vbox.layout().addWidget(w1) + self.vbox.layout().addWidget(w2) + if w3: + self.vbox.layout().addWidget(w3) + + def Widget(self): + return self.vbox + +# Context-sensitive call graph window + +class CallGraphWindow(QMdiSubWindow): + + def __init__(self, glb, parent=None): + super(CallGraphWindow, self).__init__(parent) + + self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x)) + + self.view = QTreeView() + self.view.setModel(self.model) + + for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)): + self.view.setColumnWidth(c, w) + + self.find_bar = FindBar(self, self) + + self.vbox = VBox(self.view, self.find_bar.Widget()) + + self.setWidget(self.vbox.Widget()) + + AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph") + + def DisplayFound(self, ids): + if not len(ids): + return False + parent = QModelIndex() + for dbid in ids: + found = False + n = self.model.rowCount(parent) + for row in xrange(n): + child = self.model.index(row, 0, parent) + if child.internalPointer().dbid == dbid: + found = True + self.view.setCurrentIndex(child) + parent = child + break + if not found: + break + return found + + def Find(self, value, direction, 
pattern, context): + self.view.setFocus() + self.find_bar.Busy() + self.model.Find(value, direction, pattern, context, self.FindDone) + + def FindDone(self, ids): + found = True + if not self.DisplayFound(ids): + found = False + self.find_bar.Idle() + if not found: + self.find_bar.NotFound() + +# Child data item finder + +class ChildDataItemFinder(): + + def __init__(self, root): + self.root = root + self.value, self.direction, self.pattern, self.last_value, self.last_pattern = (None,) * 5 + self.rows = [] + self.pos = 0 + + def FindSelect(self): + self.rows = [] + if self.pattern: + pattern = re.compile(self.value) + for child in self.root.child_items: + for column_data in child.data: + if re.search(pattern, str(column_data)) is not None: + self.rows.append(child.row) + break + else: + for child in self.root.child_items: + for column_data in child.data: + if self.value in str(column_data): + self.rows.append(child.row) + break + + def FindValue(self): + self.pos = 0 + if self.last_value != self.value or self.pattern != self.last_pattern: + self.FindSelect() + if not len(self.rows): + return -1 + return self.rows[self.pos] + + def FindThread(self): + if self.direction == 0 or self.value != self.last_value or self.pattern != self.last_pattern: + row = self.FindValue() + elif len(self.rows): + if self.direction > 0: + self.pos += 1 + if self.pos >= len(self.rows): + self.pos = 0 + else: + self.pos -= 1 + if self.pos < 0: + self.pos = len(self.rows) - 1 + row = self.rows[self.pos] + else: + row = -1 + return (True, row) + + def Find(self, value, direction, pattern, context, callback): + self.value, self.direction, self.pattern, self.last_value, self.last_pattern = (value, direction,pattern, self.value, self.pattern) + # Use a thread so the UI is not blocked + thread = Thread(self.FindThread) + thread.done.connect(lambda row, t=thread, c=callback: self.FindDone(t, c, row), Qt.QueuedConnection) + thread.start() + + def FindDone(self, thread, callback, row): + callback(row) + +# Number of database records to fetch in one go + +glb_chunk_sz = 10000 + +# size of pickled integer big enough for record size + +glb_nsz = 8 + +# Background process for SQL data fetcher + +class SQLFetcherProcess(): + + def __init__(self, dbref, sql, buffer, head, tail, fetch_count, fetching_done, process_target, wait_event, fetched_event, prep): + # Need a unique connection name + conn_name = "SQLFetcher" + str(os.getpid()) + self.db, dbname = dbref.Open(conn_name) + self.sql = sql + self.buffer = buffer + self.head = head + self.tail = tail + self.fetch_count = fetch_count + self.fetching_done = fetching_done + self.process_target = process_target + self.wait_event = wait_event + self.fetched_event = fetched_event + self.prep = prep + self.query = QSqlQuery(self.db) + self.query_limit = 0 if "$$last_id$$" in sql else 2 + self.last_id = -1 + self.fetched = 0 + self.more = True + self.local_head = self.head.value + self.local_tail = self.tail.value + + def Select(self): + if self.query_limit: + if self.query_limit == 1: + return + self.query_limit -= 1 + stmt = self.sql.replace("$$last_id$$", str(self.last_id)) + QueryExec(self.query, stmt) + + def Next(self): + if not self.query.next(): + self.Select() + if not self.query.next(): + return None + self.last_id = self.query.value(0) + return self.prep(self.query) + + def WaitForTarget(self): + while True: + self.wait_event.clear() + target = self.process_target.value + if target > self.fetched or target < 0: + break + self.wait_event.wait() + return target + + def 
HasSpace(self, sz): + if self.local_tail <= self.local_head: + space = len(self.buffer) - self.local_head + if space > sz: + return True + if space >= glb_nsz: + # Use 0 (or space < glb_nsz) to mean there is no more at the top of the buffer + nd = cPickle.dumps(0, cPickle.HIGHEST_PROTOCOL) + self.buffer[self.local_head : self.local_head + len(nd)] = nd + self.local_head = 0 + if self.local_tail - self.local_head > sz: + return True + return False + + def WaitForSpace(self, sz): + if self.HasSpace(sz): + return + while True: + self.wait_event.clear() + self.local_tail = self.tail.value + if self.HasSpace(sz): + return + self.wait_event.wait() + + def AddToBuffer(self, obj): + d = cPickle.dumps(obj, cPickle.HIGHEST_PROTOCOL) + n = len(d) + nd = cPickle.dumps(n, cPickle.HIGHEST_PROTOCOL) + sz = n + glb_nsz + self.WaitForSpace(sz) + pos = self.local_head + self.buffer[pos : pos + len(nd)] = nd + self.buffer[pos + glb_nsz : pos + sz] = d + self.local_head += sz + + def FetchBatch(self, batch_size): + fetched = 0 + while batch_size > fetched: + obj = self.Next() + if obj is None: + self.more = False + break + self.AddToBuffer(obj) + fetched += 1 + if fetched: + self.fetched += fetched + with self.fetch_count.get_lock(): + self.fetch_count.value += fetched + self.head.value = self.local_head + self.fetched_event.set() + + def Run(self): + while self.more: + target = self.WaitForTarget() + if target < 0: + break + batch_size = min(glb_chunk_sz, target - self.fetched) + self.FetchBatch(batch_size) + self.fetching_done.value = True + self.fetched_event.set() + +def SQLFetcherFn(*x): + process = SQLFetcherProcess(*x) + process.Run() + +# SQL data fetcher + +class SQLFetcher(QObject): + + done = Signal(object) + + def __init__(self, glb, sql, prep, process_data, parent=None): + super(SQLFetcher, self).__init__(parent) + self.process_data = process_data + self.more = True + self.target = 0 + self.last_target = 0 + self.fetched = 0 + self.buffer_size = 16 * 1024 * 1024 + self.buffer = Array(c_char, self.buffer_size, lock=False) + self.head = Value(c_longlong) + self.tail = Value(c_longlong) + self.local_tail = 0 + self.fetch_count = Value(c_longlong) + self.fetching_done = Value(c_bool) + self.last_count = 0 + self.process_target = Value(c_longlong) + self.wait_event = Event() + self.fetched_event = Event() + glb.AddInstanceToShutdownOnExit(self) + self.process = Process(target=SQLFetcherFn, args=(glb.dbref, sql, self.buffer, self.head, self.tail, self.fetch_count, self.fetching_done, self.process_target, self.wait_event, self.fetched_event, prep)) + self.process.start() + self.thread = Thread(self.Thread) + self.thread.done.connect(self.ProcessData, Qt.QueuedConnection) + self.thread.start() + + def Shutdown(self): + # Tell the thread and process to exit + self.process_target.value = -1 + self.wait_event.set() + self.more = False + self.fetching_done.value = True + self.fetched_event.set() + + def Thread(self): + if not self.more: + return True, 0 + while True: + self.fetched_event.clear() + fetch_count = self.fetch_count.value + if fetch_count != self.last_count: + break + if self.fetching_done.value: + self.more = False + return True, 0 + self.fetched_event.wait() + count = fetch_count - self.last_count + self.last_count = fetch_count + self.fetched += count + return False, count + + def Fetch(self, nr): + if not self.more: + # -1 inidcates there are no more + return -1 + result = self.fetched + extra = result + nr - self.target + if extra > 0: + self.target += extra + # process_target < 0 indicates 
shutting down + if self.process_target.value >= 0: + self.process_target.value = self.target + self.wait_event.set() + return result + + def RemoveFromBuffer(self): + pos = self.local_tail + if len(self.buffer) - pos < glb_nsz: + pos = 0 + n = cPickle.loads(self.buffer[pos : pos + glb_nsz]) + if n == 0: + pos = 0 + n = cPickle.loads(self.buffer[0 : glb_nsz]) + pos += glb_nsz + obj = cPickle.loads(self.buffer[pos : pos + n]) + self.local_tail = pos + n + return obj + + def ProcessData(self, count): + for i in xrange(count): + obj = self.RemoveFromBuffer() + self.process_data(obj) + self.tail.value = self.local_tail + self.wait_event.set() + self.done.emit(count) + +# Fetch more records bar + +class FetchMoreRecordsBar(): + + def __init__(self, model, parent): + self.model = model + + self.label = QLabel("Number of records (x " + "{:,}".format(glb_chunk_sz) + ") to fetch:") + self.label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) + + self.fetch_count = QSpinBox() + self.fetch_count.setRange(1, 1000000) + self.fetch_count.setValue(10) + self.fetch_count.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) + + self.fetch = QPushButton("Go!") + self.fetch.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) + self.fetch.released.connect(self.FetchMoreRecords) + + self.progress = QProgressBar() + self.progress.setRange(0, 100) + self.progress.hide() + + self.done_label = QLabel("All records fetched") + self.done_label.hide() + + self.spacer = QLabel("") + + self.close_button = QToolButton() + self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton)) + self.close_button.released.connect(self.Deactivate) + + self.hbox = QHBoxLayout() + self.hbox.setContentsMargins(0, 0, 0, 0) + + self.hbox.addWidget(self.label) + self.hbox.addWidget(self.fetch_count) + self.hbox.addWidget(self.fetch) + self.hbox.addWidget(self.spacer) + self.hbox.addWidget(self.progress) + self.hbox.addWidget(self.done_label) + self.hbox.addWidget(self.close_button) + + self.bar = QWidget() + self.bar.setLayout(self.hbox); + self.bar.show() + + self.in_progress = False + self.model.progress.connect(self.Progress) + + self.done = False + + if not model.HasMoreRecords(): + self.Done() + + def Widget(self): + return self.bar + + def Activate(self): + self.bar.show() + self.fetch.setFocus() + + def Deactivate(self): + self.bar.hide() + + def Enable(self, enable): + self.fetch.setEnabled(enable) + self.fetch_count.setEnabled(enable) + + def Busy(self): + self.Enable(False) + self.fetch.hide() + self.spacer.hide() + self.progress.show() + + def Idle(self): + self.in_progress = False + self.Enable(True) + self.progress.hide() + self.fetch.show() + self.spacer.show() + + def Target(self): + return self.fetch_count.value() * glb_chunk_sz + + def Done(self): + self.done = True + self.Idle() + self.label.hide() + self.fetch_count.hide() + self.fetch.hide() + self.spacer.hide() + self.done_label.show() + + def Progress(self, count): + if self.in_progress: + if count: + percent = ((count - self.start) * 100) / self.Target() + if percent >= 100: + self.Idle() + else: + self.progress.setValue(percent) + if not count: + # Count value of zero means no more records + self.Done() + + def FetchMoreRecords(self): + if self.done: + return + self.progress.setValue(0) + self.Busy() + self.in_progress = True + self.start = self.model.FetchMoreRecords(self.Target()) + +# Brance data model level two item + +class BranchLevelTwoItem(): + + def __init__(self, row, text, parent_item): + self.row = row + self.parent_item = 
parent_item + self.data = [""] * 8 + self.data[7] = text + self.level = 2 + + def getParentItem(self): + return self.parent_item + + def getRow(self): + return self.row + + def childCount(self): + return 0 + + def hasChildren(self): + return False + + def getData(self, column): + return self.data[column] + +# Brance data model level one item + +class BranchLevelOneItem(): + + def __init__(self, glb, row, data, parent_item): + self.glb = glb + self.row = row + self.parent_item = parent_item + self.child_count = 0 + self.child_items = [] + self.data = data[1:] + self.dbid = data[0] + self.level = 1 + self.query_done = False + + def getChildItem(self, row): + return self.child_items[row] + + def getParentItem(self): + return self.parent_item + + def getRow(self): + return self.row + + def Select(self): + self.query_done = True + + if not self.glb.have_disassembler: + return + + query = QSqlQuery(self.glb.db) + + QueryExec(query, "SELECT cpu, to_dso_id, to_symbol_id, to_sym_offset, short_name, long_name, build_id, sym_start, to_ip" + " FROM samples" + " INNER JOIN dsos ON samples.to_dso_id = dsos.id" + " INNER JOIN symbols ON samples.to_symbol_id = symbols.id" + " WHERE samples.id = " + str(self.dbid)) + if not query.next(): + return + cpu = query.value(0) + dso = query.value(1) + sym = query.value(2) + if dso == 0 or sym == 0: + return + off = query.value(3) + short_name = query.value(4) + long_name = query.value(5) + build_id = query.value(6) + sym_start = query.value(7) + ip = query.value(8) + + QueryExec(query, "SELECT samples.dso_id, symbol_id, sym_offset, sym_start" + " FROM samples" + " INNER JOIN symbols ON samples.symbol_id = symbols.id" + " WHERE samples.id > " + str(self.dbid) + " AND cpu = " + str(cpu) + + " ORDER BY samples.id" + " LIMIT 1") + if not query.next(): + return + if query.value(0) != dso: + # Cannot disassemble from one dso to another + return + bsym = query.value(1) + boff = query.value(2) + bsym_start = query.value(3) + if bsym == 0: + return + tot = bsym_start + boff + 1 - sym_start - off + if tot <= 0 or tot > 16384: + return + + inst = self.glb.disassembler.Instruction() + f = self.glb.FileFromNamesAndBuildId(short_name, long_name, build_id) + if not f: + return + mode = 0 if Is64Bit(f) else 1 + self.glb.disassembler.SetMode(inst, mode) + + buf_sz = tot + 16 + buf = create_string_buffer(tot + 16) + f.seek(sym_start + off) + buf.value = f.read(buf_sz) + buf_ptr = addressof(buf) + i = 0 + while tot > 0: + cnt, text = self.glb.disassembler.DisassembleOne(inst, buf_ptr, buf_sz, ip) + if cnt: + byte_str = tohex(ip).rjust(16) + for k in xrange(cnt): + byte_str += " %02x" % ord(buf[i]) + i += 1 + while k < 15: + byte_str += " " + k += 1 + self.child_items.append(BranchLevelTwoItem(0, byte_str + " " + text, self)) + self.child_count += 1 + else: + return + buf_ptr += cnt + tot -= cnt + buf_sz -= cnt + ip += cnt + + def childCount(self): + if not self.query_done: + self.Select() + if not self.child_count: + return -1 + return self.child_count + + def hasChildren(self): + if not self.query_done: + return True + return self.child_count > 0 + + def getData(self, column): + return self.data[column] + +# Brance data model root item + +class BranchRootItem(): + + def __init__(self): + self.child_count = 0 + self.child_items = [] + self.level = 0 + + def getChildItem(self, row): + return self.child_items[row] + + def getParentItem(self): + return None + + def getRow(self): + return 0 + + def childCount(self): + return self.child_count + + def hasChildren(self): + return 
self.child_count > 0 + + def getData(self, column): + return "" + +# Branch data preparation + +def BranchDataPrep(query): + data = [] + for i in xrange(0, 8): + data.append(query.value(i)) + data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) + + " (" + dsoname(query.value(11)) + ")" + " -> " + + tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) + + " (" + dsoname(query.value(15)) + ")") + return data + +# Branch data model + +class BranchModel(TreeModel): + + progress = Signal(object) + + def __init__(self, glb, event_id, where_clause, parent=None): + super(BranchModel, self).__init__(BranchRootItem(), parent) + self.glb = glb + self.event_id = event_id + self.more = True + self.populated = 0 + sql = ("SELECT samples.id, time, cpu, comm, pid, tid, branch_types.name," + " CASE WHEN in_tx = '0' THEN 'No' ELSE 'Yes' END," + " ip, symbols.name, sym_offset, dsos.short_name," + " to_ip, to_symbols.name, to_sym_offset, to_dsos.short_name" + " FROM samples" + " INNER JOIN comms ON comm_id = comms.id" + " INNER JOIN threads ON thread_id = threads.id" + " INNER JOIN branch_types ON branch_type = branch_types.id" + " INNER JOIN symbols ON symbol_id = symbols.id" + " INNER JOIN symbols to_symbols ON to_symbol_id = to_symbols.id" + " INNER JOIN dsos ON samples.dso_id = dsos.id" + " INNER JOIN dsos AS to_dsos ON samples.to_dso_id = to_dsos.id" + " WHERE samples.id > $$last_id$$" + where_clause + + " AND evsel_id = " + str(self.event_id) + + " ORDER BY samples.id" + " LIMIT " + str(glb_chunk_sz)) + self.fetcher = SQLFetcher(glb, sql, BranchDataPrep, self.AddSample) + self.fetcher.done.connect(self.Update) + self.fetcher.Fetch(glb_chunk_sz) + + def columnCount(self, parent=None): + return 8 + + def columnHeader(self, column): + return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Branch")[column] + + def columnFont(self, column): + if column != 7: + return None + return QFont("Monospace") + + def DisplayData(self, item, index): + if item.level == 1: + self.FetchIfNeeded(item.row) + return item.getData(index.column()) + + def AddSample(self, data): + child = BranchLevelOneItem(self.glb, self.populated, data, self.root) + self.root.child_items.append(child) + self.populated += 1 + + def Update(self, fetched): + if not fetched: + self.more = False + self.progress.emit(0) + child_count = self.root.child_count + count = self.populated - child_count + if count > 0: + parent = QModelIndex() + self.beginInsertRows(parent, child_count, child_count + count - 1) + self.insertRows(child_count, count, parent) + self.root.child_count += count + self.endInsertRows() + self.progress.emit(self.root.child_count) + + def FetchMoreRecords(self, count): + current = self.root.child_count + if self.more: + self.fetcher.Fetch(count) + else: + self.progress.emit(0) + return current + + def HasMoreRecords(self): + return self.more + +# Branch window + +class BranchWindow(QMdiSubWindow): + + def __init__(self, glb, event_id, name, where_clause, parent=None): + super(BranchWindow, self).__init__(parent) + + model_name = "Branch Events " + str(event_id) + if len(where_clause): + model_name = where_clause + " " + model_name + + self.model = LookupCreateModel(model_name, lambda: BranchModel(glb, event_id, where_clause)) + + self.view = QTreeView() + self.view.setUniformRowHeights(True) + self.view.setModel(self.model) + + self.ResizeColumnsToContents() + + self.find_bar = FindBar(self, self, True) + + self.finder = ChildDataItemFinder(self.model.root) 
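+		# This find bar was created with is_reg_expr=True, so when the "Regular Expression" box
+		# is checked the finder applies re.search() with the entered text to every column of every
+		# row already fetched into the model; records not yet fetched are not searched.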
+ + self.fetch_bar = FetchMoreRecordsBar(self.model, self) + + self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget()) + + self.setWidget(self.vbox.Widget()) + + AddSubWindow(glb.mainwindow.mdi_area, self, name + " Branch Events") + + def ResizeColumnToContents(self, column, n): + # Using the view's resizeColumnToContents() here is extrememly slow + # so implement a crude alternative + mm = "MM" if column else "MMMM" + font = self.view.font() + metrics = QFontMetrics(font) + max = 0 + for row in xrange(n): + val = self.model.root.child_items[row].data[column] + len = metrics.width(str(val) + mm) + max = len if len > max else max + val = self.model.columnHeader(column) + len = metrics.width(str(val) + mm) + max = len if len > max else max + self.view.setColumnWidth(column, max) + + def ResizeColumnsToContents(self): + n = min(self.model.root.child_count, 100) + if n < 1: + # No data yet, so connect a signal to notify when there is + self.model.rowsInserted.connect(self.UpdateColumnWidths) + return + columns = self.model.columnCount() + for i in xrange(columns): + self.ResizeColumnToContents(i, n) + + def UpdateColumnWidths(self, *x): + # This only needs to be done once, so disconnect the signal now + self.model.rowsInserted.disconnect(self.UpdateColumnWidths) + self.ResizeColumnsToContents() + + def Find(self, value, direction, pattern, context): + self.view.setFocus() + self.find_bar.Busy() + self.finder.Find(value, direction, pattern, context, self.FindDone) + + def FindDone(self, row): + self.find_bar.Idle() + if row >= 0: + self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex())) + else: + self.find_bar.NotFound() + +# Event list + +def GetEventList(db): + events = [] + query = QSqlQuery(db) + QueryExec(query, "SELECT name FROM selected_events WHERE id > 0 ORDER BY id") + while query.next(): + events.append(query.value(0)) + return events + +# SQL data preparation + +def SQLTableDataPrep(query, count): + data = [] + for i in xrange(count): + data.append(query.value(i)) + return data + +# SQL table data model item + +class SQLTableItem(): + + def __init__(self, row, data): + self.row = row + self.data = data + + def getData(self, column): + return self.data[column] + +# SQL table data model + +class SQLTableModel(TableModel): + + progress = Signal(object) + + def __init__(self, glb, sql, column_count, parent=None): + super(SQLTableModel, self).__init__(parent) + self.glb = glb + self.more = True + self.populated = 0 + self.fetcher = SQLFetcher(glb, sql, lambda x, y=column_count: SQLTableDataPrep(x, y), self.AddSample) + self.fetcher.done.connect(self.Update) + self.fetcher.Fetch(glb_chunk_sz) + + def DisplayData(self, item, index): + self.FetchIfNeeded(item.row) + return item.getData(index.column()) + + def AddSample(self, data): + child = SQLTableItem(self.populated, data) + self.child_items.append(child) + self.populated += 1 + + def Update(self, fetched): + if not fetched: + self.more = False + self.progress.emit(0) + child_count = self.child_count + count = self.populated - child_count + if count > 0: + parent = QModelIndex() + self.beginInsertRows(parent, child_count, child_count + count - 1) + self.insertRows(child_count, count, parent) + self.child_count += count + self.endInsertRows() + self.progress.emit(self.child_count) + + def FetchMoreRecords(self, count): + current = self.child_count + if self.more: + self.fetcher.Fetch(count) + else: + self.progress.emit(0) + return current + + def HasMoreRecords(self): + return self.more + +# SQL automatic 
table data model + +class SQLAutoTableModel(SQLTableModel): + + def __init__(self, glb, table_name, parent=None): + sql = "SELECT * FROM " + table_name + " WHERE id > $$last_id$$ ORDER BY id LIMIT " + str(glb_chunk_sz) + if table_name == "comm_threads_view": + # For now, comm_threads_view has no id column + sql = "SELECT * FROM " + table_name + " WHERE comm_id > $$last_id$$ ORDER BY comm_id LIMIT " + str(glb_chunk_sz) + self.column_headers = [] + query = QSqlQuery(glb.db) + if glb.dbref.is_sqlite3: + QueryExec(query, "PRAGMA table_info(" + table_name + ")") + while query.next(): + self.column_headers.append(query.value(1)) + if table_name == "sqlite_master": + sql = "SELECT * FROM " + table_name + else: + if table_name[:19] == "information_schema.": + sql = "SELECT * FROM " + table_name + select_table_name = table_name[19:] + schema = "information_schema" + else: + select_table_name = table_name + schema = "public" + QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'") + while query.next(): + self.column_headers.append(query.value(0)) + super(SQLAutoTableModel, self).__init__(glb, sql, len(self.column_headers), parent) + + def columnCount(self, parent=None): + return len(self.column_headers) + + def columnHeader(self, column): + return self.column_headers[column] + +# Base class for custom ResizeColumnsToContents + +class ResizeColumnsToContentsBase(QObject): + + def __init__(self, parent=None): + super(ResizeColumnsToContentsBase, self).__init__(parent) + + def ResizeColumnToContents(self, column, n): + # Using the view's resizeColumnToContents() here is extrememly slow + # so implement a crude alternative + font = self.view.font() + metrics = QFontMetrics(font) + max = 0 + for row in xrange(n): + val = self.data_model.child_items[row].data[column] + len = metrics.width(str(val) + "MM") + max = len if len > max else max + val = self.data_model.columnHeader(column) + len = metrics.width(str(val) + "MM") + max = len if len > max else max + self.view.setColumnWidth(column, max) + + def ResizeColumnsToContents(self): + n = min(self.data_model.child_count, 100) + if n < 1: + # No data yet, so connect a signal to notify when there is + self.data_model.rowsInserted.connect(self.UpdateColumnWidths) + return + columns = self.data_model.columnCount() + for i in xrange(columns): + self.ResizeColumnToContents(i, n) + + def UpdateColumnWidths(self, *x): + # This only needs to be done once, so disconnect the signal now + self.data_model.rowsInserted.disconnect(self.UpdateColumnWidths) + self.ResizeColumnsToContents() + +# Table window + +class TableWindow(QMdiSubWindow, ResizeColumnsToContentsBase): + + def __init__(self, glb, table_name, parent=None): + super(TableWindow, self).__init__(parent) + + self.data_model = LookupCreateModel(table_name + " Table", lambda: SQLAutoTableModel(glb, table_name)) + + self.model = QSortFilterProxyModel() + self.model.setSourceModel(self.data_model) + + self.view = QTableView() + self.view.setModel(self.model) + self.view.setEditTriggers(QAbstractItemView.NoEditTriggers) + self.view.verticalHeader().setVisible(False) + self.view.sortByColumn(-1, Qt.AscendingOrder) + self.view.setSortingEnabled(True) + + self.ResizeColumnsToContents() + + self.find_bar = FindBar(self, self, True) + + self.finder = ChildDataItemFinder(self.data_model) + + self.fetch_bar = FetchMoreRecordsBar(self.data_model, self) + + self.vbox = VBox(self.view, self.find_bar.Widget(), 
self.fetch_bar.Widget()) + + self.setWidget(self.vbox.Widget()) + + AddSubWindow(glb.mainwindow.mdi_area, self, table_name + " Table") + + def Find(self, value, direction, pattern, context): + self.view.setFocus() + self.find_bar.Busy() + self.finder.Find(value, direction, pattern, context, self.FindDone) + + def FindDone(self, row): + self.find_bar.Idle() + if row >= 0: + self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex())) + else: + self.find_bar.NotFound() + +# Table list + +def GetTableList(glb): + tables = [] + query = QSqlQuery(glb.db) + if glb.dbref.is_sqlite3: + QueryExec(query, "SELECT name FROM sqlite_master WHERE type IN ( 'table' , 'view' ) ORDER BY name") + else: + QueryExec(query, "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' AND table_type IN ( 'BASE TABLE' , 'VIEW' ) ORDER BY table_name") + while query.next(): + tables.append(query.value(0)) + if glb.dbref.is_sqlite3: + tables.append("sqlite_master") + else: + tables.append("information_schema.tables") + tables.append("information_schema.views") + tables.append("information_schema.columns") + return tables + +# Action Definition + +def CreateAction(label, tip, callback, parent=None, shortcut=None): + action = QAction(label, parent) + if shortcut != None: + action.setShortcuts(shortcut) + action.setStatusTip(tip) + action.triggered.connect(callback) + return action + +# Typical application actions + +def CreateExitAction(app, parent=None): + return CreateAction("&Quit", "Exit the application", app.closeAllWindows, parent, QKeySequence.Quit) + +# Typical MDI actions + +def CreateCloseActiveWindowAction(mdi_area): + return CreateAction("Cl&ose", "Close the active window", mdi_area.closeActiveSubWindow, mdi_area) + +def CreateCloseAllWindowsAction(mdi_area): + return CreateAction("Close &All", "Close all the windows", mdi_area.closeAllSubWindows, mdi_area) + +def CreateTileWindowsAction(mdi_area): + return CreateAction("&Tile", "Tile the windows", mdi_area.tileSubWindows, mdi_area) + +def CreateCascadeWindowsAction(mdi_area): + return CreateAction("&Cascade", "Cascade the windows", mdi_area.cascadeSubWindows, mdi_area) + +def CreateNextWindowAction(mdi_area): + return CreateAction("Ne&xt", "Move the focus to the next window", mdi_area.activateNextSubWindow, mdi_area, QKeySequence.NextChild) + +def CreatePreviousWindowAction(mdi_area): + return CreateAction("Pre&vious", "Move the focus to the previous window", mdi_area.activatePreviousSubWindow, mdi_area, QKeySequence.PreviousChild) + +# Typical MDI window menu + +class WindowMenu(): + + def __init__(self, mdi_area, menu): + self.mdi_area = mdi_area + self.window_menu = menu.addMenu("&Windows") + self.close_active_window = CreateCloseActiveWindowAction(mdi_area) + self.close_all_windows = CreateCloseAllWindowsAction(mdi_area) + self.tile_windows = CreateTileWindowsAction(mdi_area) + self.cascade_windows = CreateCascadeWindowsAction(mdi_area) + self.next_window = CreateNextWindowAction(mdi_area) + self.previous_window = CreatePreviousWindowAction(mdi_area) + self.window_menu.aboutToShow.connect(self.Update) + + def Update(self): + self.window_menu.clear() + sub_window_count = len(self.mdi_area.subWindowList()) + have_sub_windows = sub_window_count != 0 + self.close_active_window.setEnabled(have_sub_windows) + self.close_all_windows.setEnabled(have_sub_windows) + self.tile_windows.setEnabled(have_sub_windows) + self.cascade_windows.setEnabled(have_sub_windows) + self.next_window.setEnabled(have_sub_windows) + 
self.previous_window.setEnabled(have_sub_windows) + self.window_menu.addAction(self.close_active_window) + self.window_menu.addAction(self.close_all_windows) + self.window_menu.addSeparator() + self.window_menu.addAction(self.tile_windows) + self.window_menu.addAction(self.cascade_windows) + self.window_menu.addSeparator() + self.window_menu.addAction(self.next_window) + self.window_menu.addAction(self.previous_window) + if sub_window_count == 0: + return + self.window_menu.addSeparator() + nr = 1 + for sub_window in self.mdi_area.subWindowList(): + label = str(nr) + " " + sub_window.name + if nr < 10: + label = "&" + label + action = self.window_menu.addAction(label) + action.setCheckable(True) + action.setChecked(sub_window == self.mdi_area.activeSubWindow()) + action.triggered.connect(lambda x=nr: self.setActiveSubWindow(x)) + self.window_menu.addAction(action) + nr += 1 + + def setActiveSubWindow(self, nr): + self.mdi_area.setActiveSubWindow(self.mdi_area.subWindowList()[nr - 1]) + +# Font resize + +def ResizeFont(widget, diff): + font = widget.font() + sz = font.pointSize() + font.setPointSize(sz + diff) + widget.setFont(font) + +def ShrinkFont(widget): + ResizeFont(widget, -1) + +def EnlargeFont(widget): + ResizeFont(widget, 1) + +# Unique name for sub-windows + +def NumberedWindowName(name, nr): + if nr > 1: + name += " <" + str(nr) + ">" + return name + +def UniqueSubWindowName(mdi_area, name): + nr = 1 + while True: + unique_name = NumberedWindowName(name, nr) + ok = True + for sub_window in mdi_area.subWindowList(): + if sub_window.name == unique_name: + ok = False + break + if ok: + return unique_name + nr += 1 + +# Add a sub-window + +def AddSubWindow(mdi_area, sub_window, name): + unique_name = UniqueSubWindowName(mdi_area, name) + sub_window.setMinimumSize(200, 100) + sub_window.resize(800, 600) + sub_window.setWindowTitle(unique_name) + sub_window.setAttribute(Qt.WA_DeleteOnClose) + sub_window.setWindowIcon(sub_window.style().standardIcon(QStyle.SP_FileIcon)) + sub_window.name = unique_name + mdi_area.addSubWindow(sub_window) + sub_window.show() + +# Main window + +class MainWindow(QMainWindow): + + def __init__(self, glb, parent=None): + super(MainWindow, self).__init__(parent) + + self.glb = glb + + self.setWindowTitle("Exported SQL Viewer: " + glb.dbname) + self.setWindowIcon(self.style().standardIcon(QStyle.SP_ComputerIcon)) + self.setMinimumSize(200, 100) + + self.mdi_area = QMdiArea() + self.mdi_area.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded) + self.mdi_area.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded) + + self.setCentralWidget(self.mdi_area) + + menu = self.menuBar() + + file_menu = menu.addMenu("&File") + file_menu.addAction(CreateExitAction(glb.app, self)) + + edit_menu = menu.addMenu("&Edit") + edit_menu.addAction(CreateAction("&Find...", "Find items", self.Find, self, QKeySequence.Find)) + edit_menu.addAction(CreateAction("Fetch &more records...", "Fetch more records", self.FetchMoreRecords, self, [QKeySequence(Qt.Key_F8)])) + edit_menu.addAction(CreateAction("&Shrink Font", "Make text smaller", self.ShrinkFont, self, [QKeySequence("Ctrl+-")])) + edit_menu.addAction(CreateAction("&Enlarge Font", "Make text bigger", self.EnlargeFont, self, [QKeySequence("Ctrl++")])) + + reports_menu = menu.addMenu("&Reports") + reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self)) + + self.EventMenu(GetEventList(glb.db), reports_menu) + + 
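+		# The Reports menu above gets one "All branches" action per branches event, each opening
+		# a BranchWindow; the Tables menu added below gets one action per database table or view,
+		# each opening a sortable TableWindow.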
self.TableMenu(GetTableList(glb), menu) + + self.window_menu = WindowMenu(self.mdi_area, menu) + + def Find(self): + win = self.mdi_area.activeSubWindow() + if win: + try: + win.find_bar.Activate() + except: + pass + + def FetchMoreRecords(self): + win = self.mdi_area.activeSubWindow() + if win: + try: + win.fetch_bar.Activate() + except: + pass + + def ShrinkFont(self): + win = self.mdi_area.activeSubWindow() + ShrinkFont(win.view) + + def EnlargeFont(self): + win = self.mdi_area.activeSubWindow() + EnlargeFont(win.view) + + def EventMenu(self, events, reports_menu): + branches_events = 0 + for event in events: + event = event.split(":")[0] + if event == "branches": + branches_events += 1 + dbid = 0 + for event in events: + dbid += 1 + event = event.split(":")[0] + if event == "branches": + label = "All branches" if branches_events == 1 else "All branches " + "(id=" + dbid + ")" + reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewBranchView(x), self)) + + def TableMenu(self, tables, menu): + table_menu = menu.addMenu("&Tables") + for table in tables: + table_menu.addAction(CreateAction(table, "Create a new window containing a table view", lambda t=table: self.NewTableView(t), self)) + + def NewCallGraph(self): + CallGraphWindow(self.glb, self) + + def NewBranchView(self, event_id): + BranchWindow(self.glb, event_id, "", "", self) + + def NewTableView(self, table_name): + TableWindow(self.glb, table_name, self) + +# XED Disassembler + +class xed_state_t(Structure): + + _fields_ = [ + ("mode", c_int), + ("width", c_int) + ] + +class XEDInstruction(): + + def __init__(self, libxed): + # Current xed_decoded_inst_t structure is 192 bytes. Use 512 to allow for future expansion + xedd_t = c_byte * 512 + self.xedd = xedd_t() + self.xedp = addressof(self.xedd) + libxed.xed_decoded_inst_zero(self.xedp) + self.state = xed_state_t() + self.statep = addressof(self.state) + # Buffer for disassembled instruction text + self.buffer = create_string_buffer(256) + self.bufferp = addressof(self.buffer) + +class LibXED(): + + def __init__(self): + self.libxed = CDLL("libxed.so") + + self.xed_tables_init = self.libxed.xed_tables_init + self.xed_tables_init.restype = None + self.xed_tables_init.argtypes = [] + + self.xed_decoded_inst_zero = self.libxed.xed_decoded_inst_zero + self.xed_decoded_inst_zero.restype = None + self.xed_decoded_inst_zero.argtypes = [ c_void_p ] + + self.xed_operand_values_set_mode = self.libxed.xed_operand_values_set_mode + self.xed_operand_values_set_mode.restype = None + self.xed_operand_values_set_mode.argtypes = [ c_void_p, c_void_p ] + + self.xed_decoded_inst_zero_keep_mode = self.libxed.xed_decoded_inst_zero_keep_mode + self.xed_decoded_inst_zero_keep_mode.restype = None + self.xed_decoded_inst_zero_keep_mode.argtypes = [ c_void_p ] + + self.xed_decode = self.libxed.xed_decode + self.xed_decode.restype = c_int + self.xed_decode.argtypes = [ c_void_p, c_void_p, c_uint ] + + self.xed_format_context = self.libxed.xed_format_context + self.xed_format_context.restype = c_uint + self.xed_format_context.argtypes = [ c_int, c_void_p, c_void_p, c_int, c_ulonglong, c_void_p, c_void_p ] + + self.xed_tables_init() + + def Instruction(self): + return XEDInstruction(self) + + def SetMode(self, inst, mode): + if mode: + inst.state.mode = 4 # 32-bit + inst.state.width = 4 # 4 bytes + else: + inst.state.mode = 1 # 64-bit + inst.state.width = 8 # 8 bytes + self.xed_operand_values_set_mode(inst.xedp, inst.statep) + + def 
DisassembleOne(self, inst, bytes_ptr, bytes_cnt, ip): + self.xed_decoded_inst_zero_keep_mode(inst.xedp) + err = self.xed_decode(inst.xedp, bytes_ptr, bytes_cnt) + if err: + return 0, "" + # Use AT&T mode (2), alternative is Intel (3) + ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0) + if not ok: + return 0, "" + # Return instruction length and the disassembled instruction text + # For now, assume the length is in byte 166 + return inst.xedd[166], inst.buffer.value + +def TryOpen(file_name): + try: + return open(file_name, "rb") + except: + return None + +def Is64Bit(f): + result = sizeof(c_void_p) + # ELF support only + pos = f.tell() + f.seek(0) + header = f.read(7) + f.seek(pos) + magic = header[0:4] + eclass = ord(header[4]) + encoding = ord(header[5]) + version = ord(header[6]) + if magic == chr(127) + "ELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1: + result = True if eclass == 2 else False + return result + +# Global data + +class Glb(): + + def __init__(self, dbref, db, dbname): + self.dbref = dbref + self.db = db + self.dbname = dbname + self.home_dir = os.path.expanduser("~") + self.buildid_dir = os.getenv("PERF_BUILDID_DIR") + if self.buildid_dir: + self.buildid_dir += "/.build-id/" + else: + self.buildid_dir = self.home_dir + "/.debug/.build-id/" + self.app = None + self.mainwindow = None + self.instances_to_shutdown_on_exit = weakref.WeakSet() + try: + self.disassembler = LibXED() + self.have_disassembler = True + except: + self.have_disassembler = False + + def FileFromBuildId(self, build_id): + file_name = self.buildid_dir + build_id[0:2] + "/" + build_id[2:] + "/elf" + return TryOpen(file_name) + + def FileFromNamesAndBuildId(self, short_name, long_name, build_id): + # Assume current machine i.e. 
no support for virtualization + if short_name[0:7] == "[kernel" and os.path.basename(long_name) == "kcore": + file_name = os.getenv("PERF_KCORE") + f = TryOpen(file_name) if file_name else None + if f: + return f + # For now, no special handling if long_name is /proc/kcore + f = TryOpen(long_name) + if f: + return f + f = self.FileFromBuildId(build_id) + if f: + return f + return None + + def AddInstanceToShutdownOnExit(self, instance): + self.instances_to_shutdown_on_exit.add(instance) + + # Shutdown any background processes or threads + def ShutdownInstances(self): + for x in self.instances_to_shutdown_on_exit: + try: + x.Shutdown() + except: + pass + +# Database reference + +class DBRef(): + + def __init__(self, is_sqlite3, dbname): + self.is_sqlite3 = is_sqlite3 + self.dbname = dbname + + def Open(self, connection_name): + dbname = self.dbname + if self.is_sqlite3: + db = QSqlDatabase.addDatabase("QSQLITE", connection_name) + else: + db = QSqlDatabase.addDatabase("QPSQL", connection_name) + opts = dbname.split() + for opt in opts: + if "=" in opt: + opt = opt.split("=") + if opt[0] == "hostname": + db.setHostName(opt[1]) + elif opt[0] == "port": + db.setPort(int(opt[1])) + elif opt[0] == "username": + db.setUserName(opt[1]) + elif opt[0] == "password": + db.setPassword(opt[1]) + elif opt[0] == "dbname": + dbname = opt[1] + else: + dbname = opt + + db.setDatabaseName(dbname) + if not db.open(): + raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text()) + return db, dbname + +# Main + +def Main(): + if (len(sys.argv) < 2): + print >> sys.stderr, "Usage is: exported-sql-viewer.py <database name>" + raise Exception("Too few arguments") + + dbname = sys.argv[1] + + is_sqlite3 = False + try: + f = open(dbname) + if f.read(15) == "SQLite format 3": + is_sqlite3 = True + f.close() + except: + pass + + dbref = DBRef(is_sqlite3, dbname) + db, dbname = dbref.Open("main") + glb = Glb(dbref, db, dbname) + app = QApplication(sys.argv) + glb.app = app + mainwindow = MainWindow(glb) + glb.mainwindow = mainwindow + mainwindow.show() + err = app.exec_() + glb.ShutdownInstances() + db.close() + sys.exit(err) + +if __name__ == "__main__": + Main() diff --git a/tools/perf/trace/beauty/Build b/tools/perf/trace/beauty/Build index c3b0afd67760..304313073242 100644 --- a/tools/perf/trace/beauty/Build +++ b/tools/perf/trace/beauty/Build @@ -5,6 +5,7 @@ ifeq ($(SRCARCH),$(filter $(SRCARCH),x86)) libperf-y += ioctl.o endif libperf-y += kcmp.o +libperf-y += mount_flags.o libperf-y += pkey_alloc.o libperf-y += prctl.o libperf-y += sockaddr.o diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h index 2570152d3909..039c29039b2c 100644 --- a/tools/perf/trace/beauty/beauty.h +++ b/tools/perf/trace/beauty/beauty.h @@ -24,6 +24,7 @@ struct strarray { } size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, int val); +size_t strarray__scnprintf_flags(struct strarray *sa, char *bf, size_t size, unsigned long flags); struct trace; struct thread; @@ -122,6 +123,12 @@ size_t syscall_arg__scnprintf_kcmp_type(char *bf, size_t size, struct syscall_ar size_t syscall_arg__scnprintf_kcmp_idx(char *bf, size_t size, struct syscall_arg *arg); #define SCA_KCMP_IDX syscall_arg__scnprintf_kcmp_idx +unsigned long syscall_arg__mask_val_mount_flags(struct syscall_arg *arg, unsigned long flags); +#define SCAMV_MOUNT_FLAGS syscall_arg__mask_val_mount_flags + +size_t syscall_arg__scnprintf_mount_flags(char *bf, size_t size, struct syscall_arg 
*arg); +#define SCA_MOUNT_FLAGS syscall_arg__scnprintf_mount_flags + size_t syscall_arg__scnprintf_pkey_alloc_access_rights(char *bf, size_t size, struct syscall_arg *arg); #define SCA_PKEY_ALLOC_ACCESS_RIGHTS syscall_arg__scnprintf_pkey_alloc_access_rights diff --git a/tools/perf/trace/beauty/clone.c b/tools/perf/trace/beauty/clone.c index d64d049ab991..010406500c30 100644 --- a/tools/perf/trace/beauty/clone.c +++ b/tools/perf/trace/beauty/clone.c @@ -1,9 +1,8 @@ +// SPDX-License-Identifier: LGPL-2.1 /* * trace/beauty/cone.c * * Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> - * - * Released under the GPL v2. (and only v2, not any later version) */ #include "trace/beauty/beauty.h" diff --git a/tools/perf/trace/beauty/drm_ioctl.sh b/tools/perf/trace/beauty/drm_ioctl.sh index 9d3816815e60..9aa94fd523a9 100755 --- a/tools/perf/trace/beauty/drm_ioctl.sh +++ b/tools/perf/trace/beauty/drm_ioctl.sh @@ -1,4 +1,5 @@ #!/bin/sh +# SPDX-License-Identifier: LGPL-2.1 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/drm/ diff --git a/tools/perf/trace/beauty/eventfd.c b/tools/perf/trace/beauty/eventfd.c index 5d6a477a6400..db5b9b492113 100644 --- a/tools/perf/trace/beauty/eventfd.c +++ b/tools/perf/trace/beauty/eventfd.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: LGPL-2.1 #ifndef EFD_SEMAPHORE #define EFD_SEMAPHORE 1 #endif diff --git a/tools/perf/trace/beauty/fcntl.c b/tools/perf/trace/beauty/fcntl.c index 9e8900c13cb1..e6de31674e24 100644 --- a/tools/perf/trace/beauty/fcntl.c +++ b/tools/perf/trace/beauty/fcntl.c @@ -1,9 +1,8 @@ +// SPDX-License-Identifier: LGPL-2.1 /* * trace/beauty/fcntl.c * * Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> - * - * Released under the GPL v2. (and only v2, not any later version) */ #include "trace/beauty/beauty.h" diff --git a/tools/perf/trace/beauty/flock.c b/tools/perf/trace/beauty/flock.c index c4ff6ad30b06..cf02ae5f0ba6 100644 --- a/tools/perf/trace/beauty/flock.c +++ b/tools/perf/trace/beauty/flock.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: LGPL-2.1 #include "trace/beauty/beauty.h" #include <linux/kernel.h> diff --git a/tools/perf/trace/beauty/futex_op.c b/tools/perf/trace/beauty/futex_op.c index 61850fbc85ff..1136bde56406 100644 --- a/tools/perf/trace/beauty/futex_op.c +++ b/tools/perf/trace/beauty/futex_op.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: LGPL-2.1 #include <linux/futex.h> #ifndef FUTEX_WAIT_BITSET diff --git a/tools/perf/trace/beauty/futex_val3.c b/tools/perf/trace/beauty/futex_val3.c index 26f6b3253511..138b7d588a70 100644 --- a/tools/perf/trace/beauty/futex_val3.c +++ b/tools/perf/trace/beauty/futex_val3.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: LGPL-2.1 #include <linux/futex.h> #ifndef FUTEX_BITSET_MATCH_ANY diff --git a/tools/perf/trace/beauty/ioctl.c b/tools/perf/trace/beauty/ioctl.c index 1be3b4cf0827..5d2a7fd8d407 100644 --- a/tools/perf/trace/beauty/ioctl.c +++ b/tools/perf/trace/beauty/ioctl.c @@ -1,9 +1,8 @@ +// SPDX-License-Identifier: LGPL-2.1 /* * trace/beauty/ioctl.c * * Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> - * - * Released under the GPL v2. 
(and only v2, not any later version) */ #include "trace/beauty/beauty.h" diff --git a/tools/perf/trace/beauty/kcmp.c b/tools/perf/trace/beauty/kcmp.c index f62040eb9d5c..b276a274f203 100644 --- a/tools/perf/trace/beauty/kcmp.c +++ b/tools/perf/trace/beauty/kcmp.c @@ -1,9 +1,8 @@ +// SPDX-License-Identifier: LGPL-2.1 /* * trace/beauty/kcmp.c * * Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> - * - * Released under the GPL v2. (and only v2, not any later version) */ #include "trace/beauty/beauty.h" diff --git a/tools/perf/trace/beauty/kcmp_type.sh b/tools/perf/trace/beauty/kcmp_type.sh index a3c304caa336..df8b17486d57 100755 --- a/tools/perf/trace/beauty/kcmp_type.sh +++ b/tools/perf/trace/beauty/kcmp_type.sh @@ -1,4 +1,5 @@ #!/bin/sh +# SPDX-License-Identifier: LGPL-2.1 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ diff --git a/tools/perf/trace/beauty/kvm_ioctl.sh b/tools/perf/trace/beauty/kvm_ioctl.sh index c4699fd46bb6..4ce54f5bf756 100755 --- a/tools/perf/trace/beauty/kvm_ioctl.sh +++ b/tools/perf/trace/beauty/kvm_ioctl.sh @@ -1,4 +1,5 @@ #!/bin/sh +# SPDX-License-Identifier: LGPL-2.1 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ diff --git a/tools/perf/trace/beauty/madvise_behavior.sh b/tools/perf/trace/beauty/madvise_behavior.sh index 431639eb4d29..4527d290cdfc 100755 --- a/tools/perf/trace/beauty/madvise_behavior.sh +++ b/tools/perf/trace/beauty/madvise_behavior.sh @@ -1,4 +1,5 @@ #!/bin/sh +# SPDX-License-Identifier: LGPL-2.1 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/asm-generic/ diff --git a/tools/perf/trace/beauty/mmap.c b/tools/perf/trace/beauty/mmap.c index 9f68077b241b..c534bd96ef5c 100644 --- a/tools/perf/trace/beauty/mmap.c +++ b/tools/perf/trace/beauty/mmap.c @@ -1,5 +1,6 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: LGPL-2.1 #include <uapi/linux/mman.h> +#include <linux/log2.h> static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size, struct syscall_arg *arg) @@ -30,50 +31,23 @@ static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size, #define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot +static size_t mmap__scnprintf_flags(unsigned long flags, char *bf, size_t size) +{ +#include "trace/beauty/generated/mmap_flags_array.c" + static DEFINE_STRARRAY(mmap_flags); + + return strarray__scnprintf_flags(&strarray__mmap_flags, bf, size, flags); +} + static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size, struct syscall_arg *arg) { - int printed = 0, flags = arg->val; + unsigned long flags = arg->val; if (flags & MAP_ANONYMOUS) arg->mask |= (1 << 4) | (1 << 5); /* Mask 4th ('fd') and 5th ('offset') args, ignored */ -#define P_MMAP_FLAG(n) \ - if (flags & MAP_##n) { \ - printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \ - flags &= ~MAP_##n; \ - } - - P_MMAP_FLAG(SHARED); - P_MMAP_FLAG(PRIVATE); -#ifdef MAP_32BIT - P_MMAP_FLAG(32BIT); -#endif - P_MMAP_FLAG(ANONYMOUS); - P_MMAP_FLAG(DENYWRITE); - P_MMAP_FLAG(EXECUTABLE); - P_MMAP_FLAG(FILE); - P_MMAP_FLAG(FIXED); -#ifdef MAP_FIXED_NOREPLACE - P_MMAP_FLAG(FIXED_NOREPLACE); -#endif - P_MMAP_FLAG(GROWSDOWN); - P_MMAP_FLAG(HUGETLB); - P_MMAP_FLAG(LOCKED); - P_MMAP_FLAG(NONBLOCK); - P_MMAP_FLAG(NORESERVE); - P_MMAP_FLAG(POPULATE); - P_MMAP_FLAG(STACK); - P_MMAP_FLAG(UNINITIALIZED); -#ifdef MAP_SYNC - P_MMAP_FLAG(SYNC); -#endif -#undef P_MMAP_FLAG - - if (flags) - printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? 
"|" : "", flags); - - return printed; + return mmap__scnprintf_flags(flags, bf, size); } #define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags diff --git a/tools/perf/trace/beauty/mmap_flags.sh b/tools/perf/trace/beauty/mmap_flags.sh new file mode 100755 index 000000000000..22c3fdca8975 --- /dev/null +++ b/tools/perf/trace/beauty/mmap_flags.sh @@ -0,0 +1,32 @@ +#!/bin/sh +# SPDX-License-Identifier: LGPL-2.1 + +if [ $# -ne 2 ] ; then + [ $# -eq 1 ] && hostarch=$1 || hostarch=`uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/` + header_dir=tools/include/uapi/asm-generic + arch_header_dir=tools/arch/${hostarch}/include/uapi/asm +else + header_dir=$1 + arch_header_dir=$2 +fi + +arch_mman=${arch_header_dir}/mman.h + +# those in egrep -vw are flags, we want just the bits + +printf "static const char *mmap_flags[] = {\n" +regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MAP_([[:alnum:]_]+)[[:space:]]+(0x[[:xdigit:]]+)[[:space:]]*.*' +egrep -q $regex ${arch_mman} && \ +(egrep $regex ${arch_mman} | \ + sed -r "s/$regex/\2 \1/g" | \ + xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n") +egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.*' ${arch_mman} && +(egrep $regex ${header_dir}/mman-common.h | \ + egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \ + sed -r "s/$regex/\2 \1/g" | \ + xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n") +egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.h>.*' ${arch_mman} && +(egrep $regex ${header_dir}/mman.h | \ + sed -r "s/$regex/\2 \1/g" | \ + xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n") +printf "};\n" diff --git a/tools/perf/trace/beauty/mode_t.c b/tools/perf/trace/beauty/mode_t.c index d929ad7dd97b..6879d36d3004 100644 --- a/tools/perf/trace/beauty/mode_t.c +++ b/tools/perf/trace/beauty/mode_t.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: LGPL-2.1 #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> diff --git a/tools/perf/trace/beauty/mount_flags.c b/tools/perf/trace/beauty/mount_flags.c new file mode 100644 index 000000000000..712935c6620a --- /dev/null +++ b/tools/perf/trace/beauty/mount_flags.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: LGPL-2.1 +/* + * trace/beauty/mount_flags.c + * + * Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> + */ + +#include "trace/beauty/beauty.h" +#include <linux/compiler.h> +#include <linux/kernel.h> +#include <linux/log2.h> +#include <sys/mount.h> + +static size_t mount__scnprintf_flags(unsigned long flags, char *bf, size_t size) +{ +#include "trace/beauty/generated/mount_flags_array.c" + static DEFINE_STRARRAY(mount_flags); + + return strarray__scnprintf_flags(&strarray__mount_flags, bf, size, flags); +} + +unsigned long syscall_arg__mask_val_mount_flags(struct syscall_arg *arg __maybe_unused, unsigned long flags) +{ + // do_mount in fs/namespace.c: + /* + * Pre-0.97 versions of mount() didn't have a flags word. When the + * flags word was introduced its top half was required to have the + * magic value 0xC0ED, and this remained so until 2.4.0-test9. + * Therefore, if this magic number is present, it carries no + * information and must be discarded. 
+ */ + if ((flags & MS_MGC_MSK) == MS_MGC_VAL) + flags &= ~MS_MGC_MSK; + + return flags; +} + +size_t syscall_arg__scnprintf_mount_flags(char *bf, size_t size, struct syscall_arg *arg) +{ + unsigned long flags = arg->val; + + return mount__scnprintf_flags(flags, bf, size); +} diff --git a/tools/perf/trace/beauty/mount_flags.sh b/tools/perf/trace/beauty/mount_flags.sh new file mode 100755 index 000000000000..45547573a1db --- /dev/null +++ b/tools/perf/trace/beauty/mount_flags.sh @@ -0,0 +1,15 @@ +#!/bin/sh +# SPDX-License-Identifier: LGPL-2.1 + +[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ + +printf "static const char *mount_flags[] = {\n" +regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+([[:digit:]]+)[[:space:]]*.*' +egrep $regex ${header_dir}/fs.h | egrep -v '(MSK|VERBOSE|MGC_VAL)\>' | \ + sed -r "s/$regex/\2 \2 \1/g" | sort -n | \ + xargs printf "\t[%s ? (ilog2(%s) + 1) : 0] = \"%s\",\n" +regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+\(1<<([[:digit:]]+)\)[[:space:]]*.*' +egrep $regex ${header_dir}/fs.h | \ + sed -r "s/$regex/\2 \1/g" | \ + xargs printf "\t[%s + 1] = \"%s\",\n" +printf "};\n" diff --git a/tools/perf/trace/beauty/msg_flags.c b/tools/perf/trace/beauty/msg_flags.c index c064d6aae659..1b9d6306d274 100644 --- a/tools/perf/trace/beauty/msg_flags.c +++ b/tools/perf/trace/beauty/msg_flags.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: LGPL-2.1 #include <sys/types.h> #include <sys/socket.h> diff --git a/tools/perf/trace/beauty/open_flags.c b/tools/perf/trace/beauty/open_flags.c index 6aec6178a99d..cc673fec9184 100644 --- a/tools/perf/trace/beauty/open_flags.c +++ b/tools/perf/trace/beauty/open_flags.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: LGPL-2.1 #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> diff --git a/tools/perf/trace/beauty/perf_event_open.c b/tools/perf/trace/beauty/perf_event_open.c index 2bafd7c995ff..981185c1974b 100644 --- a/tools/perf/trace/beauty/perf_event_open.c +++ b/tools/perf/trace/beauty/perf_event_open.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: LGPL-2.1 #ifndef PERF_FLAG_FD_NO_GROUP # define PERF_FLAG_FD_NO_GROUP (1UL << 0) #endif diff --git a/tools/perf/trace/beauty/perf_ioctl.sh b/tools/perf/trace/beauty/perf_ioctl.sh index 6492c74df928..9aabd9743ef6 100755 --- a/tools/perf/trace/beauty/perf_ioctl.sh +++ b/tools/perf/trace/beauty/perf_ioctl.sh @@ -1,4 +1,5 @@ #!/bin/sh +# SPDX-License-Identifier: LGPL-2.1 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ diff --git a/tools/perf/trace/beauty/pid.c b/tools/perf/trace/beauty/pid.c index 0313df342830..1a6acc46807b 100644 --- a/tools/perf/trace/beauty/pid.c +++ b/tools/perf/trace/beauty/pid.c @@ -1,4 +1,5 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: LGPL-2.1 + size_t syscall_arg__scnprintf_pid(char *bf, size_t size, struct syscall_arg *arg) { int pid = arg->val; diff --git a/tools/perf/trace/beauty/pkey_alloc.c b/tools/perf/trace/beauty/pkey_alloc.c index 2ba784a3734a..1b8ed4cac815 100644 --- a/tools/perf/trace/beauty/pkey_alloc.c +++ b/tools/perf/trace/beauty/pkey_alloc.c @@ -1,40 +1,36 @@ +// SPDX-License-Identifier: LGPL-2.1 /* * trace/beauty/pkey_alloc.c * * Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> - * - * Released under the GPL v2. 
(and only v2, not any later version) */ #include "trace/beauty/beauty.h" #include <linux/kernel.h> #include <linux/log2.h> -static size_t pkey_alloc__scnprintf_access_rights(int access_rights, char *bf, size_t size) +size_t strarray__scnprintf_flags(struct strarray *sa, char *bf, size_t size, unsigned long flags) { int i, printed = 0; -#include "trace/beauty/generated/pkey_alloc_access_rights_array.c" - static DEFINE_STRARRAY(pkey_alloc_access_rights); - - if (access_rights == 0) { - const char *s = strarray__pkey_alloc_access_rights.entries[0]; + if (flags == 0) { + const char *s = sa->entries[0]; if (s) return scnprintf(bf, size, "%s", s); return scnprintf(bf, size, "%d", 0); } - for (i = 1; i < strarray__pkey_alloc_access_rights.nr_entries; ++i) { - int bit = 1 << (i - 1); + for (i = 1; i < sa->nr_entries; ++i) { + unsigned long bit = 1UL << (i - 1); - if (!(access_rights & bit)) + if (!(flags & bit)) continue; if (printed != 0) printed += scnprintf(bf + printed, size - printed, "|"); - if (strarray__pkey_alloc_access_rights.entries[i] != NULL) - printed += scnprintf(bf + printed, size - printed, "%s", strarray__pkey_alloc_access_rights.entries[i]); + if (sa->entries[i] != NULL) + printed += scnprintf(bf + printed, size - printed, "%s", sa->entries[i]); else printed += scnprintf(bf + printed, size - printed, "0x%#", bit); } @@ -42,6 +38,14 @@ static size_t pkey_alloc__scnprintf_access_rights(int access_rights, char *bf, s return printed; } +static size_t pkey_alloc__scnprintf_access_rights(int access_rights, char *bf, size_t size) +{ +#include "trace/beauty/generated/pkey_alloc_access_rights_array.c" + static DEFINE_STRARRAY(pkey_alloc_access_rights); + + return strarray__scnprintf_flags(&strarray__pkey_alloc_access_rights, bf, size, access_rights); +} + size_t syscall_arg__scnprintf_pkey_alloc_access_rights(char *bf, size_t size, struct syscall_arg *arg) { unsigned long cmd = arg->val; diff --git a/tools/perf/trace/beauty/pkey_alloc_access_rights.sh b/tools/perf/trace/beauty/pkey_alloc_access_rights.sh index e0a51aeb20b2..f8f1b560cf8a 100755 --- a/tools/perf/trace/beauty/pkey_alloc_access_rights.sh +++ b/tools/perf/trace/beauty/pkey_alloc_access_rights.sh @@ -1,4 +1,5 @@ #!/bin/sh +# SPDX-License-Identifier: LGPL-2.1 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/asm-generic/ diff --git a/tools/perf/trace/beauty/prctl.c b/tools/perf/trace/beauty/prctl.c index 246130dad6c4..be7a5d395975 100644 --- a/tools/perf/trace/beauty/prctl.c +++ b/tools/perf/trace/beauty/prctl.c @@ -1,9 +1,8 @@ +// SPDX-License-Identifier: LGPL-2.1 /* * trace/beauty/prctl.c * * Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> - * - * Released under the GPL v2. 
(and only v2, not any later version) */ #include "trace/beauty/beauty.h" diff --git a/tools/perf/trace/beauty/prctl_option.sh b/tools/perf/trace/beauty/prctl_option.sh index f24722146ebe..d32f8f1124af 100755 --- a/tools/perf/trace/beauty/prctl_option.sh +++ b/tools/perf/trace/beauty/prctl_option.sh @@ -1,4 +1,5 @@ #!/bin/sh +# SPDX-License-Identifier: LGPL-2.1 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ diff --git a/tools/perf/trace/beauty/sched_policy.c b/tools/perf/trace/beauty/sched_policy.c index ba5096ae76b6..48f2b5c9aa3e 100644 --- a/tools/perf/trace/beauty/sched_policy.c +++ b/tools/perf/trace/beauty/sched_policy.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: LGPL-2.1 #include <sched.h> /* diff --git a/tools/perf/trace/beauty/seccomp.c b/tools/perf/trace/beauty/seccomp.c index b7097fd5fed9..e36156b19c70 100644 --- a/tools/perf/trace/beauty/seccomp.c +++ b/tools/perf/trace/beauty/seccomp.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: LGPL-2.1 #ifndef SECCOMP_SET_MODE_STRICT #define SECCOMP_SET_MODE_STRICT 0 #endif diff --git a/tools/perf/trace/beauty/signum.c b/tools/perf/trace/beauty/signum.c index bde18a53f090..587fec545b8a 100644 --- a/tools/perf/trace/beauty/signum.c +++ b/tools/perf/trace/beauty/signum.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: LGPL-2.1 #include <signal.h> static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscall_arg *arg) diff --git a/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh b/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh index eb511bb5fbd3..e0803b957593 100755 --- a/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh +++ b/tools/perf/trace/beauty/sndrv_ctl_ioctl.sh @@ -1,4 +1,5 @@ #!/bin/sh +# SPDX-License-Identifier: LGPL-2.1 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/sound/ diff --git a/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh b/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh index 6818392968b2..7a464a7bf913 100755 --- a/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh +++ b/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh @@ -1,4 +1,5 @@ #!/bin/sh +# SPDX-License-Identifier: LGPL-2.1 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/sound/ diff --git a/tools/perf/trace/beauty/sockaddr.c b/tools/perf/trace/beauty/sockaddr.c index 71a79f72d9d9..9410ad230f10 100644 --- a/tools/perf/trace/beauty/sockaddr.c +++ b/tools/perf/trace/beauty/sockaddr.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: LGPL-2.1 // Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> #include "trace/beauty/beauty.h" diff --git a/tools/perf/trace/beauty/socket.c b/tools/perf/trace/beauty/socket.c index 65227269384b..d971a2596417 100644 --- a/tools/perf/trace/beauty/socket.c +++ b/tools/perf/trace/beauty/socket.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: LGPL-2.1 /* * trace/beauty/socket.c * diff --git a/tools/perf/trace/beauty/socket_ipproto.sh b/tools/perf/trace/beauty/socket_ipproto.sh index a3cc24633bec..de0f2f29017f 100755 --- a/tools/perf/trace/beauty/socket_ipproto.sh +++ b/tools/perf/trace/beauty/socket_ipproto.sh @@ -1,4 +1,5 @@ #!/bin/sh +# SPDX-License-Identifier: LGPL-2.1 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ diff --git a/tools/perf/trace/beauty/socket_type.c b/tools/perf/trace/beauty/socket_type.c index bca26aef4a77..a63a9a332aa0 100644 --- a/tools/perf/trace/beauty/socket_type.c 
+++ b/tools/perf/trace/beauty/socket_type.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: LGPL-2.1 #include <sys/types.h> #include <sys/socket.h> diff --git a/tools/perf/trace/beauty/statx.c b/tools/perf/trace/beauty/statx.c index 5643b692af4c..630f2760dd66 100644 --- a/tools/perf/trace/beauty/statx.c +++ b/tools/perf/trace/beauty/statx.c @@ -1,9 +1,8 @@ +// SPDX-License-Identifier: LGPL-2.1 /* * trace/beauty/statx.c * * Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> - * - * Released under the GPL v2. (and only v2, not any later version) */ #include "trace/beauty/beauty.h" diff --git a/tools/perf/trace/beauty/vhost_virtio_ioctl.sh b/tools/perf/trace/beauty/vhost_virtio_ioctl.sh index 0f6a5197d0be..439773daaf77 100755 --- a/tools/perf/trace/beauty/vhost_virtio_ioctl.sh +++ b/tools/perf/trace/beauty/vhost_virtio_ioctl.sh @@ -1,4 +1,5 @@ #!/bin/sh +# SPDX-License-Identifier: LGPL-2.1 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ diff --git a/tools/perf/trace/beauty/waitid_options.c b/tools/perf/trace/beauty/waitid_options.c index 8465281a093d..42ff58ad613b 100644 --- a/tools/perf/trace/beauty/waitid_options.c +++ b/tools/perf/trace/beauty/waitid_options.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: LGPL-2.1 #include <sys/types.h> #include <sys/wait.h> diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 28cd6a17491b..6936daf89ddd 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -139,6 +139,7 @@ static int arch__associate_ins_ops(struct arch* arch, const char *name, struct i #include "arch/x86/annotate/instructions.c" #include "arch/powerpc/annotate/instructions.c" #include "arch/s390/annotate/instructions.c" +#include "arch/sparc/annotate/instructions.c" static struct arch architectures[] = { { @@ -170,6 +171,13 @@ static struct arch architectures[] = { .comment_char = '#', }, }, + { + .name = "sparc", + .init = sparc__annotate_init, + .objdump = { + .comment_char = '#', + }, + }, }; static void ins__delete(struct ins_operands *ops) diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c index c4617bcfd521..72d5ba2479bf 100644 --- a/tools/perf/util/auxtrace.c +++ b/tools/perf/util/auxtrace.c @@ -962,16 +962,23 @@ s64 perf_event__process_auxtrace(struct perf_session *session, #define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ 64 #define PERF_ITRACE_MAX_LAST_BRANCH_SZ 1024 -void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts) +void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts, + bool no_sample) { - synth_opts->instructions = true; synth_opts->branches = true; synth_opts->transactions = true; synth_opts->ptwrites = true; synth_opts->pwr_events = true; synth_opts->errors = true; - synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE; - synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD; + if (no_sample) { + synth_opts->period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS; + synth_opts->period = 1; + synth_opts->calls = true; + } else { + synth_opts->instructions = true; + synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE; + synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD; + } synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ; synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ; synth_opts->initial_skip = 0; @@ -999,7 +1006,7 @@ int itrace_parse_synth_opts(const struct option *opt, const char *str, } if (!str) { - 
itrace_synth_opts__set_default(synth_opts); + itrace_synth_opts__set_default(synth_opts, false); return 0; } diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h index d88f6e9eb461..8e50f96d4b23 100644 --- a/tools/perf/util/auxtrace.h +++ b/tools/perf/util/auxtrace.h @@ -58,6 +58,7 @@ enum itrace_period_type { /** * struct itrace_synth_opts - AUX area tracing synthesis options. * @set: indicates whether or not options have been set + * @default_no_sample: Default to no sampling. * @inject: indicates the event (not just the sample) must be fully synthesized * because 'perf inject' will write it out * @instructions: whether to synthesize 'instructions' events @@ -82,6 +83,7 @@ enum itrace_period_type { */ struct itrace_synth_opts { bool set; + bool default_no_sample; bool inject; bool instructions; bool branches; @@ -528,7 +530,8 @@ int perf_event__process_auxtrace_error(struct perf_session *session, union perf_event *event); int itrace_parse_synth_opts(const struct option *opt, const char *str, int unset); -void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts); +void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts, + bool no_sample); size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp); void perf_session__auxtrace_error_inc(struct perf_session *session, diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index 2ae640257fdb..73430b73570d 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -244,6 +244,27 @@ static void cs_etm__free(struct perf_session *session) zfree(&aux); } +static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address) +{ + struct machine *machine; + + machine = etmq->etm->machine; + + if (address >= etmq->etm->kernel_start) { + if (machine__is_host(machine)) + return PERF_RECORD_MISC_KERNEL; + else + return PERF_RECORD_MISC_GUEST_KERNEL; + } else { + if (machine__is_host(machine)) + return PERF_RECORD_MISC_USER; + else if (perf_guest) + return PERF_RECORD_MISC_GUEST_USER; + else + return PERF_RECORD_MISC_HYPERVISOR; + } +} + static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address, size_t size, u8 *buffer) { @@ -258,10 +279,7 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address, return -1; machine = etmq->etm->machine; - if (address >= etmq->etm->kernel_start) - cpumode = PERF_RECORD_MISC_KERNEL; - else - cpumode = PERF_RECORD_MISC_USER; + cpumode = cs_etm__cpu_mode(etmq, address); thread = etmq->thread; if (!thread) { @@ -653,7 +671,7 @@ static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq, struct perf_sample sample = {.ip = 0,}; event->sample.header.type = PERF_RECORD_SAMPLE; - event->sample.header.misc = PERF_RECORD_MISC_USER; + event->sample.header.misc = cs_etm__cpu_mode(etmq, addr); event->sample.header.size = sizeof(struct perf_event_header); sample.ip = addr; @@ -665,7 +683,7 @@ static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq, sample.cpu = etmq->packet->cpu; sample.flags = 0; sample.insn_len = 1; - sample.cpumode = event->header.misc; + sample.cpumode = event->sample.header.misc; if (etm->synth_opts.last_branch) { cs_etm__copy_last_branch_rb(etmq); @@ -706,12 +724,15 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq) u64 nr; struct branch_entry entries; } dummy_bs; + u64 ip; + + ip = cs_etm__last_executed_instr(etmq->prev_packet); event->sample.header.type = PERF_RECORD_SAMPLE; - event->sample.header.misc = PERF_RECORD_MISC_USER; + event->sample.header.misc = 
cs_etm__cpu_mode(etmq, ip); event->sample.header.size = sizeof(struct perf_event_header); - sample.ip = cs_etm__last_executed_instr(etmq->prev_packet); + sample.ip = ip; sample.pid = etmq->pid; sample.tid = etmq->tid; sample.addr = cs_etm__first_executed_instr(etmq->packet); @@ -720,7 +741,7 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq) sample.period = 1; sample.cpu = etmq->packet->cpu; sample.flags = 0; - sample.cpumode = PERF_RECORD_MISC_USER; + sample.cpumode = event->sample.header.misc; /* * perf report cannot handle events without a branch stack @@ -1432,7 +1453,8 @@ int cs_etm__process_auxtrace_info(union perf_event *event, if (session->itrace_synth_opts && session->itrace_synth_opts->set) { etm->synth_opts = *session->itrace_synth_opts; } else { - itrace_synth_opts__set_default(&etm->synth_opts); + itrace_synth_opts__set_default(&etm->synth_opts, + session->itrace_synth_opts->default_no_sample); etm->synth_opts.callchain = false; } diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h index 1f3ccc368530..d01b8355f4ca 100644 --- a/tools/perf/util/env.h +++ b/tools/perf/util/env.h @@ -63,6 +63,7 @@ struct perf_env { struct numa_node *numa_nodes; struct memory_node *memory_nodes; unsigned long long memory_bsize; + u64 clockid_res_ns; }; extern struct perf_env perf_env; diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index bc646185f8d9..e9c108a6b1c3 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -308,6 +308,7 @@ static int perf_event__synthesize_fork(struct perf_tool *tool, event->fork.pid = tgid; event->fork.tid = pid; event->fork.header.type = PERF_RECORD_FORK; + event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC; event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size); diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index be440df29615..e88e6f9b1463 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -358,7 +358,7 @@ void perf_evlist__disable(struct perf_evlist *evlist) struct perf_evsel *pos; evlist__for_each_entry(evlist, pos) { - if (!perf_evsel__is_group_leader(pos) || !pos->fd) + if (pos->disabled || !perf_evsel__is_group_leader(pos) || !pos->fd) continue; perf_evsel__disable(pos); } diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 29d7b97f66fb..6d187059a373 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -232,6 +232,7 @@ void perf_evsel__init(struct perf_evsel *evsel, evsel->leader = evsel; evsel->unit = ""; evsel->scale = 1.0; + evsel->max_events = ULONG_MAX; evsel->evlist = NULL; evsel->bpf_fd = -1; INIT_LIST_HEAD(&evsel->node); @@ -793,6 +794,9 @@ static void apply_config_terms(struct perf_evsel *evsel, case PERF_EVSEL__CONFIG_TERM_MAX_STACK: max_stack = term->val.max_stack; break; + case PERF_EVSEL__CONFIG_TERM_MAX_EVENTS: + evsel->max_events = term->val.max_events; + break; case PERF_EVSEL__CONFIG_TERM_INHERIT: /* * attr->inherit should has already been set by @@ -1203,16 +1207,27 @@ int perf_evsel__append_addr_filter(struct perf_evsel *evsel, const char *filter) int perf_evsel__enable(struct perf_evsel *evsel) { - return perf_evsel__run_ioctl(evsel, - PERF_EVENT_IOC_ENABLE, - 0); + int err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, 0); + + if (!err) + evsel->disabled = false; + + return err; } int perf_evsel__disable(struct perf_evsel *evsel) { - return perf_evsel__run_ioctl(evsel, - PERF_EVENT_IOC_DISABLE, - 0); + int err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, 0); + /* + * We mark it 
disabled here so that tools that disable a event can + * ignore events after they disable it. I.e. the ring buffer may have + * already a few more events queued up before the kernel got the stop + * request. + */ + if (!err) + evsel->disabled = true; + + return err; } int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 4107c39f4a54..3147ca76c6fc 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -46,6 +46,7 @@ enum term_type { PERF_EVSEL__CONFIG_TERM_STACK_USER, PERF_EVSEL__CONFIG_TERM_INHERIT, PERF_EVSEL__CONFIG_TERM_MAX_STACK, + PERF_EVSEL__CONFIG_TERM_MAX_EVENTS, PERF_EVSEL__CONFIG_TERM_OVERWRITE, PERF_EVSEL__CONFIG_TERM_DRV_CFG, PERF_EVSEL__CONFIG_TERM_BRANCH, @@ -65,6 +66,7 @@ struct perf_evsel_config_term { bool inherit; bool overwrite; char *branch; + unsigned long max_events; } val; bool weak; }; @@ -99,6 +101,8 @@ struct perf_evsel { struct perf_counts *prev_raw_counts; int idx; u32 ids; + unsigned long max_events; + unsigned long nr_events_printed; char *name; double scale; const char *unit; @@ -119,6 +123,7 @@ struct perf_evsel { bool snapshot; bool supported; bool needs_swap; + bool disabled; bool no_aux_samples; bool immediate; bool system_wide; diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h index de322d51c7fe..b72440bf9a79 100644 --- a/tools/perf/util/genelf.h +++ b/tools/perf/util/genelf.h @@ -29,6 +29,12 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent #elif defined(__powerpc__) #define GEN_ELF_ARCH EM_PPC #define GEN_ELF_CLASS ELFCLASS32 +#elif defined(__sparc__) && defined(__arch64__) +#define GEN_ELF_ARCH EM_SPARCV9 +#define GEN_ELF_CLASS ELFCLASS64 +#elif defined(__sparc__) +#define GEN_ELF_ARCH EM_SPARC +#define GEN_ELF_CLASS ELFCLASS32 #else #error "unsupported architecture" #endif diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 1ec1d9bc2d63..4fd45be95a43 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1034,6 +1034,13 @@ static int write_auxtrace(struct feat_fd *ff, return err; } +static int write_clockid(struct feat_fd *ff, + struct perf_evlist *evlist __maybe_unused) +{ + return do_write(ff, &ff->ph->env.clockid_res_ns, + sizeof(ff->ph->env.clockid_res_ns)); +} + static int cpu_cache_level__sort(const void *a, const void *b) { struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a; @@ -1508,6 +1515,12 @@ static void print_cpu_topology(struct feat_fd *ff, FILE *fp) fprintf(fp, "# Core ID and Socket ID information is not available\n"); } +static void print_clockid(struct feat_fd *ff, FILE *fp) +{ + fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n", + ff->ph->env.clockid_res_ns * 1000); +} + static void free_event_desc(struct perf_evsel *events) { struct perf_evsel *evsel; @@ -2531,6 +2544,15 @@ out: return ret; } +static int process_clockid(struct feat_fd *ff, + void *data __maybe_unused) +{ + if (do_read_u64(ff, &ff->ph->env.clockid_res_ns)) + return -1; + + return 0; +} + struct feature_ops { int (*write)(struct feat_fd *ff, struct perf_evlist *evlist); void (*print)(struct feat_fd *ff, FILE *fp); @@ -2590,6 +2612,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { FEAT_OPN(CACHE, cache, true), FEAT_OPR(SAMPLE_TIME, sample_time, false), FEAT_OPR(MEM_TOPOLOGY, mem_topology, true), + FEAT_OPR(CLOCKID, clockid, false) }; struct header_print_data { diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 
e17903caa71d..0d553ddca0a3 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -38,6 +38,7 @@ enum { HEADER_CACHE, HEADER_SAMPLE_TIME, HEADER_MEM_TOPOLOGY, + HEADER_CLOCKID, HEADER_LAST_FEATURE, HEADER_FEAT_BITS = 256, }; diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c index 7f0c83b6332b..7b27d77306c2 100644 --- a/tools/perf/util/intel-bts.c +++ b/tools/perf/util/intel-bts.c @@ -269,6 +269,13 @@ static int intel_bts_do_fix_overlap(struct auxtrace_queue *queue, return 0; } +static inline u8 intel_bts_cpumode(struct intel_bts *bts, uint64_t ip) +{ + return machine__kernel_ip(bts->machine, ip) ? + PERF_RECORD_MISC_KERNEL : + PERF_RECORD_MISC_USER; +} + static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq, struct branch *branch) { @@ -281,12 +288,8 @@ static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq, bts->num_events++ <= bts->synth_opts.initial_skip) return 0; - event.sample.header.type = PERF_RECORD_SAMPLE; - event.sample.header.misc = PERF_RECORD_MISC_USER; - event.sample.header.size = sizeof(struct perf_event_header); - - sample.cpumode = PERF_RECORD_MISC_USER; sample.ip = le64_to_cpu(branch->from); + sample.cpumode = intel_bts_cpumode(bts, sample.ip); sample.pid = btsq->pid; sample.tid = btsq->tid; sample.addr = le64_to_cpu(branch->to); @@ -298,6 +301,10 @@ static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq, sample.insn_len = btsq->intel_pt_insn.length; memcpy(sample.insn, btsq->intel_pt_insn.buf, INTEL_PT_INSN_BUF_SZ); + event.sample.header.type = PERF_RECORD_SAMPLE; + event.sample.header.misc = sample.cpumode; + event.sample.header.size = sizeof(struct perf_event_header); + if (bts->synth_opts.inject) { event.sample.header.size = bts->branches_event_size; ret = perf_event__synthesize_sample(&event, @@ -910,7 +917,8 @@ int intel_bts_process_auxtrace_info(union perf_event *event, if (session->itrace_synth_opts && session->itrace_synth_opts->set) { bts->synth_opts = *session->itrace_synth_opts; } else { - itrace_synth_opts__set_default(&bts->synth_opts); + itrace_synth_opts__set_default(&bts->synth_opts, + session->itrace_synth_opts->default_no_sample); if (session->itrace_synth_opts) bts->synth_opts.thread_stack = session->itrace_synth_opts->thread_stack; diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index 48c1d415c6b0..86cc9a64e982 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -407,6 +407,13 @@ intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset) return auxtrace_cache__lookup(dso->auxtrace_cache, offset); } +static inline u8 intel_pt_cpumode(struct intel_pt *pt, uint64_t ip) +{ + return ip >= pt->kernel_start ? 
+ PERF_RECORD_MISC_KERNEL : + PERF_RECORD_MISC_USER; +} + static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn, uint64_t *insn_cnt_ptr, uint64_t *ip, uint64_t to_ip, uint64_t max_insn_cnt, @@ -429,10 +436,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn, if (to_ip && *ip == to_ip) goto out_no_cache; - if (*ip >= ptq->pt->kernel_start) - cpumode = PERF_RECORD_MISC_KERNEL; - else - cpumode = PERF_RECORD_MISC_USER; + cpumode = intel_pt_cpumode(ptq->pt, *ip); thread = ptq->thread; if (!thread) { @@ -759,7 +763,8 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt, if (pt->synth_opts.callchain) { size_t sz = sizeof(struct ip_callchain); - sz += pt->synth_opts.callchain_sz * sizeof(u64); + /* Add 1 to callchain_sz for callchain context */ + sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64); ptq->chain = zalloc(sz); if (!ptq->chain) goto out_free; @@ -1058,15 +1063,11 @@ static void intel_pt_prep_b_sample(struct intel_pt *pt, union perf_event *event, struct perf_sample *sample) { - event->sample.header.type = PERF_RECORD_SAMPLE; - event->sample.header.misc = PERF_RECORD_MISC_USER; - event->sample.header.size = sizeof(struct perf_event_header); - if (!pt->timeless_decoding) sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc); - sample->cpumode = PERF_RECORD_MISC_USER; sample->ip = ptq->state->from_ip; + sample->cpumode = intel_pt_cpumode(pt, sample->ip); sample->pid = ptq->pid; sample->tid = ptq->tid; sample->addr = ptq->state->to_ip; @@ -1075,6 +1076,10 @@ static void intel_pt_prep_b_sample(struct intel_pt *pt, sample->flags = ptq->flags; sample->insn_len = ptq->insn_len; memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ); + + event->sample.header.type = PERF_RECORD_SAMPLE; + event->sample.header.misc = sample->cpumode; + event->sample.header.size = sizeof(struct perf_event_header); } static int intel_pt_inject_event(union perf_event *event, @@ -1160,7 +1165,8 @@ static void intel_pt_prep_sample(struct intel_pt *pt, if (pt->synth_opts.callchain) { thread_stack__sample(ptq->thread, ptq->chain, - pt->synth_opts.callchain_sz, sample->ip); + pt->synth_opts.callchain_sz + 1, + sample->ip, pt->kernel_start); sample->callchain = ptq->chain; } @@ -2559,7 +2565,8 @@ int intel_pt_process_auxtrace_info(union perf_event *event, if (session->itrace_synth_opts && session->itrace_synth_opts->set) { pt->synth_opts = *session->itrace_synth_opts; } else { - itrace_synth_opts__set_default(&pt->synth_opts); + itrace_synth_opts__set_default(&pt->synth_opts, + session->itrace_synth_opts->default_no_sample); if (use_browser != -1) { pt->synth_opts.branches = false; pt->synth_opts.callchain = true; diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 111ae858cbcb..8f36ce813bc5 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -1708,6 +1708,7 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event struct thread *parent = machine__findnew_thread(machine, event->fork.ppid, event->fork.ptid); + bool do_maps_clone = true; int err = 0; if (dump_trace) @@ -1736,9 +1737,25 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event thread = machine__findnew_thread(machine, event->fork.pid, event->fork.tid); + /* + * When synthesizing FORK events, we are trying to create thread + * objects for the already running tasks on the machine. + * + * Normally, for a kernel FORK event, we want to clone the parent's + * maps because that is what the kernel just did. 
+ * + * But when synthesizing, this should not be done. If we do, we end up + * with overlapping maps as we process the sythesized MMAP2 events that + * get delivered shortly thereafter. + * + * Use the FORK event misc flags in an internal way to signal this + * situation, so we can elide the map clone when appropriate. + */ + if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC) + do_maps_clone = false; if (thread == NULL || parent == NULL || - thread__fork(thread, parent, sample->time) < 0) { + thread__fork(thread, parent, sample->time, do_maps_clone) < 0) { dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); err = -1; } @@ -2140,6 +2157,27 @@ static int resolve_lbr_callchain_sample(struct thread *thread, return 0; } +static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread, + struct callchain_cursor *cursor, + struct symbol **parent, + struct addr_location *root_al, + u8 *cpumode, int ent) +{ + int err = 0; + + while (--ent >= 0) { + u64 ip = chain->ips[ent]; + + if (ip >= PERF_CONTEXT_MAX) { + err = add_callchain_ip(thread, cursor, parent, + root_al, cpumode, ip, + false, NULL, NULL, 0); + break; + } + } + return err; +} + static int thread__resolve_callchain_sample(struct thread *thread, struct callchain_cursor *cursor, struct perf_evsel *evsel, @@ -2246,6 +2284,12 @@ static int thread__resolve_callchain_sample(struct thread *thread, } check_calls: + if (callchain_param.order != ORDER_CALLEE) { + err = find_prev_cpumode(chain, thread, cursor, parent, root_al, + &cpumode, chain->nr - first_call); + if (err) + return (err < 0) ? err : 0; + } for (i = first_call, nr_entries = 0; i < chain_nr && nr_entries < max_stack; i++) { u64 ip; @@ -2260,9 +2304,15 @@ check_calls: continue; #endif ip = chain->ips[j]; - if (ip < PERF_CONTEXT_MAX) ++nr_entries; + else if (callchain_param.order != ORDER_CALLEE) { + err = find_prev_cpumode(chain, thread, cursor, parent, + root_al, &cpumode, j); + if (err) + return (err < 0) ? 
err : 0; + continue; + } err = add_callchain_ip(thread, cursor, parent, root_al, &cpumode, ip, diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index f8cd3e7c9186..59be3466d64d 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -926,6 +926,7 @@ static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = { [PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit", [PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit", [PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack", + [PARSE_EVENTS__TERM_TYPE_MAX_EVENTS] = "nr", [PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite", [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite", [PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config", @@ -1037,6 +1038,9 @@ do { \ case PARSE_EVENTS__TERM_TYPE_MAX_STACK: CHECK_TYPE_VAL(NUM); break; + case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: + CHECK_TYPE_VAL(NUM); + break; default: err->str = strdup("unknown term"); err->idx = term->err_term; @@ -1084,6 +1088,7 @@ static int config_term_tracepoint(struct perf_event_attr *attr, case PARSE_EVENTS__TERM_TYPE_INHERIT: case PARSE_EVENTS__TERM_TYPE_NOINHERIT: case PARSE_EVENTS__TERM_TYPE_MAX_STACK: + case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: case PARSE_EVENTS__TERM_TYPE_OVERWRITE: case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE: return config_term_common(attr, term, err); @@ -1162,6 +1167,9 @@ do { \ case PARSE_EVENTS__TERM_TYPE_MAX_STACK: ADD_CONFIG_TERM(MAX_STACK, max_stack, term->val.num); break; + case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS: + ADD_CONFIG_TERM(MAX_EVENTS, max_events, term->val.num); + break; case PARSE_EVENTS__TERM_TYPE_OVERWRITE: ADD_CONFIG_TERM(OVERWRITE, overwrite, term->val.num ? 1 : 0); break; diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index 4473dac27aee..5ed035cbcbb7 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -71,6 +71,7 @@ enum { PARSE_EVENTS__TERM_TYPE_NOINHERIT, PARSE_EVENTS__TERM_TYPE_INHERIT, PARSE_EVENTS__TERM_TYPE_MAX_STACK, + PARSE_EVENTS__TERM_TYPE_MAX_EVENTS, PARSE_EVENTS__TERM_TYPE_NOOVERWRITE, PARSE_EVENTS__TERM_TYPE_OVERWRITE, PARSE_EVENTS__TERM_TYPE_DRV_CFG, diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l index 5f761f3ed0f3..7805c71aaae2 100644 --- a/tools/perf/util/parse-events.l +++ b/tools/perf/util/parse-events.l @@ -269,6 +269,7 @@ time { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_TIME); } call-graph { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CALLGRAPH); } stack-size { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_STACKSIZE); } max-stack { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_MAX_STACK); } +nr { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_MAX_EVENTS); } inherit { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_INHERIT); } no-inherit { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NOINHERIT); } overwrite { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_OVERWRITE); } diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 0281d5e2cd67..66a84d5846c8 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -324,7 +324,17 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss) plt_entry_size = 16; break; - default: /* FIXME: s390/alpha/mips/parisc/poperpc/sh/sparc/xtensa need to be checked */ + case EM_SPARC: + plt_header_size = 48; + plt_entry_size = 12; + break; + + case EM_SPARCV9: + plt_header_size = 128; + plt_entry_size = 32; + break; + + default: /* FIXME: s390/alpha/mips/parisc/poperpc/sh/xtensa need to 
be checked */ plt_header_size = shdr_plt.sh_entsize; plt_entry_size = shdr_plt.sh_entsize; break; diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 20f49779116b..d026d215bdc6 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -123,7 +123,8 @@ struct symbol_conf { const char *vmlinux_name, *kallsyms_name, *source_prefix, - *field_sep; + *field_sep, + *graph_function; const char *default_guest_vmlinux_name, *default_guest_kallsyms, *default_guest_modules; diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c index c091635bf7dc..61a4286a74dc 100644 --- a/tools/perf/util/thread-stack.c +++ b/tools/perf/util/thread-stack.c @@ -310,20 +310,46 @@ void thread_stack__free(struct thread *thread) } } +static inline u64 callchain_context(u64 ip, u64 kernel_start) +{ + return ip < kernel_start ? PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL; +} + void thread_stack__sample(struct thread *thread, struct ip_callchain *chain, - size_t sz, u64 ip) + size_t sz, u64 ip, u64 kernel_start) { - size_t i; + u64 context = callchain_context(ip, kernel_start); + u64 last_context; + size_t i, j; - if (!thread || !thread->ts) - chain->nr = 1; - else - chain->nr = min(sz, thread->ts->cnt + 1); + if (sz < 2) { + chain->nr = 0; + return; + } - chain->ips[0] = ip; + chain->ips[0] = context; + chain->ips[1] = ip; + + if (!thread || !thread->ts) { + chain->nr = 2; + return; + } + + last_context = context; + + for (i = 2, j = 1; i < sz && j <= thread->ts->cnt; i++, j++) { + ip = thread->ts->stack[thread->ts->cnt - j].ret_addr; + context = callchain_context(ip, kernel_start); + if (context != last_context) { + if (i >= sz - 1) + break; + chain->ips[i++] = context; + last_context = context; + } + chain->ips[i] = ip; + } - for (i = 1; i < chain->nr; i++) - chain->ips[i] = thread->ts->stack[thread->ts->cnt - i].ret_addr; + chain->nr = i; } struct call_return_processor * diff --git a/tools/perf/util/thread-stack.h b/tools/perf/util/thread-stack.h index b7e41c4ebfdd..f97c00a8c251 100644 --- a/tools/perf/util/thread-stack.h +++ b/tools/perf/util/thread-stack.h @@ -84,7 +84,7 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip, u64 to_ip, u16 insn_len, u64 trace_nr); void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr); void thread_stack__sample(struct thread *thread, struct ip_callchain *chain, - size_t sz, u64 ip); + size_t sz, u64 ip, u64 kernel_start); int thread_stack__flush(struct thread *thread); void thread_stack__free(struct thread *thread); size_t thread_stack__depth(struct thread *thread); diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 2048d393ece6..3d9ed7d0e281 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -330,7 +330,8 @@ static int thread__prepare_access(struct thread *thread) } static int thread__clone_map_groups(struct thread *thread, - struct thread *parent) + struct thread *parent, + bool do_maps_clone) { /* This is new thread, we share map groups for process. */ if (thread->pid_ == parent->pid_) @@ -341,15 +342,11 @@ static int thread__clone_map_groups(struct thread *thread, thread->pid_, thread->tid, parent->pid_, parent->tid); return 0; } - /* But this one is new process, copy maps. */ - if (map_groups__clone(thread, parent->mg) < 0) - return -ENOMEM; - - return 0; + return do_maps_clone ? 
map_groups__clone(thread, parent->mg) : 0; } -int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp) +int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone) { if (parent->comm_set) { const char *comm = thread__comm_str(parent); @@ -362,7 +359,7 @@ int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp) } thread->ppid = parent->tid; - return thread__clone_map_groups(thread, parent); + return thread__clone_map_groups(thread, parent, do_maps_clone); } void thread__find_cpumode_addr_location(struct thread *thread, u64 addr, diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 07606aa6998d..30e2b4c165fe 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -42,6 +42,8 @@ struct thread { void *addr_space; struct unwind_libunwind_ops *unwind_libunwind_ops; #endif + bool filter; + int filter_entry_depth; }; struct machine; @@ -87,7 +89,7 @@ struct comm *thread__comm(const struct thread *thread); struct comm *thread__exec_comm(const struct thread *thread); const char *thread__comm_str(const struct thread *thread); int thread__insert_map(struct thread *thread, struct map *map); -int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp); +int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone); size_t thread__fprintf(struct thread *thread, FILE *fp); struct thread *thread__main_thread(struct machine *machine, struct thread *thread); diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c index 6f318b15950e..5eff9bfc5758 100644 --- a/tools/perf/util/unwind-libdw.c +++ b/tools/perf/util/unwind-libdw.c @@ -45,13 +45,13 @@ static int __report_module(struct addr_location *al, u64 ip, Dwarf_Addr s; dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL); - if (s != al->map->start) + if (s != al->map->start - al->map->pgoff) mod = 0; } if (!mod) mod = dwfl_report_elf(ui->dwfl, dso->short_name, - (dso->symsrc_filename ? dso->symsrc_filename : dso->long_name), -1, al->map->start, + (dso->symsrc_filename ? dso->symsrc_filename : dso->long_name), -1, al->map->start - al->map->pgoff, false); return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 0 : -1; diff --git a/tools/testing/selftests/bpf/flow_dissector_load.c b/tools/testing/selftests/bpf/flow_dissector_load.c index d3273b5b3173..ae8180b11d5f 100644 --- a/tools/testing/selftests/bpf/flow_dissector_load.c +++ b/tools/testing/selftests/bpf/flow_dissector_load.c @@ -11,6 +11,8 @@ #include <bpf/bpf.h> #include <bpf/libbpf.h> +#include "bpf_rlimit.h" + const char *cfg_pin_path = "/sys/fs/bpf/flow_dissector"; const char *cfg_map_name = "jmp_table"; bool cfg_attach = true; diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id.sh b/tools/testing/selftests/bpf/test_skb_cgroup_id.sh index 42544a969abc..a9bc6f82abc1 100755 --- a/tools/testing/selftests/bpf/test_skb_cgroup_id.sh +++ b/tools/testing/selftests/bpf/test_skb_cgroup_id.sh @@ -10,7 +10,7 @@ wait_for_ip() echo -n "Wait for testing link-local IP to become available " for _i in $(seq ${MAX_PING_TRIES}); do echo -n "." 
- if ping -6 -q -c 1 -W 1 ff02::1%${TEST_IF} >/dev/null 2>&1; then + if $PING6 -c 1 -W 1 ff02::1%${TEST_IF} >/dev/null 2>&1; then echo " OK" return fi @@ -58,5 +58,6 @@ BPF_PROG_OBJ="${DIR}/test_skb_cgroup_id_kern.o" BPF_PROG_SECTION="cgroup_id_logger" BPF_PROG_ID=0 PROG="${DIR}/test_skb_cgroup_id_user" +type ping6 >/dev/null 2>&1 && PING6="ping6" || PING6="ping -6" main diff --git a/tools/testing/selftests/bpf/test_sock_addr.sh b/tools/testing/selftests/bpf/test_sock_addr.sh index 9832a875a828..3b9fdb8094aa 100755 --- a/tools/testing/selftests/bpf/test_sock_addr.sh +++ b/tools/testing/selftests/bpf/test_sock_addr.sh @@ -4,7 +4,8 @@ set -eu ping_once() { - ping -${1} -q -c 1 -W 1 ${2%%/*} >/dev/null 2>&1 + type ping${1} >/dev/null 2>&1 && PING="ping${1}" || PING="ping -${1}" + $PING -q -c 1 -W 1 ${2%%/*} >/dev/null 2>&1 } wait_for_ip() diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 36f3d3009d1a..6f61df62f690 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -76,7 +76,7 @@ struct bpf_test { int fixup_percpu_cgroup_storage[MAX_FIXUPS]; const char *errstr; const char *errstr_unpriv; - uint32_t retval; + uint32_t retval, retval_unpriv; enum { UNDEF, ACCEPT, @@ -3084,6 +3084,8 @@ static struct bpf_test tests[] = { .fixup_prog1 = { 2 }, .result = ACCEPT, .retval = 42, + /* Verifier rewrite for unpriv skips tail call here. */ + .retval_unpriv = 2, }, { "stack pointer arithmetic", @@ -6455,6 +6457,256 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, { + "map access: known scalar += value_ptr", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_MOV64_IMM(BPF_REG_1, 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .retval = 1, + }, + { + "map access: value_ptr += known scalar", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_MOV64_IMM(BPF_REG_1, 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .retval = 1, + }, + { + "map access: unknown scalar += value_ptr", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .retval = 1, + }, + { + "map access: value_ptr += unknown scalar", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, 
BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .retval = 1, + }, + { + "map access: value_ptr += value_ptr", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = REJECT, + .errstr = "R0 pointer += pointer prohibited", + }, + { + "map access: known scalar -= value_ptr", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_MOV64_IMM(BPF_REG_1, 4), + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = REJECT, + .errstr = "R1 tried to subtract pointer from scalar", + }, + { + "map access: value_ptr -= known scalar", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_MOV64_IMM(BPF_REG_1, 4), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = REJECT, + .errstr = "R0 min value is outside of the array range", + }, + { + "map access: value_ptr -= known scalar, 2", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_IMM(BPF_REG_1, 6), + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .retval = 1, + }, + { + "map access: unknown scalar -= value_ptr", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + 
BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = REJECT, + .errstr = "R1 tried to subtract pointer from scalar", + }, + { + "map access: value_ptr -= unknown scalar", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = REJECT, + .errstr = "R0 min value is negative", + }, + { + "map access: value_ptr -= unknown scalar, 2", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), + BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .retval = 1, + }, + { + "map access: value_ptr -= value_ptr", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = REJECT, + .errstr = "R0 invalid mem access 'inv'", + .errstr_unpriv = "R0 pointer -= pointer prohibited", + }, + { "map lookup helper access to map", .insns = { BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), @@ -13899,6 +14151,33 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_map_type prog_type, } } +static int set_admin(bool admin) +{ + cap_t caps; + const cap_value_t cap_val = CAP_SYS_ADMIN; + int ret = -1; + + caps = cap_get_proc(); + if (!caps) { + perror("cap_get_proc"); + return -1; + } + if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val, + admin ? CAP_SET : CAP_CLEAR)) { + perror("cap_set_flag"); + goto out; + } + if (cap_set_proc(caps)) { + perror("cap_set_proc"); + goto out; + } + ret = 0; +out: + if (cap_free(caps)) + perror("cap_free"); + return ret; +} + static void do_test_single(struct bpf_test *test, bool unpriv, int *passes, int *errors) { @@ -13907,6 +14186,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv, struct bpf_insn *prog = test->insns; int map_fds[MAX_NR_MAPS]; const char *expected_err; + uint32_t expected_val; uint32_t retval; int i, err; @@ -13926,6 +14206,8 @@ static void do_test_single(struct bpf_test *test, bool unpriv, test->result_unpriv : test->result; expected_err = unpriv && test->errstr_unpriv ? test->errstr_unpriv : test->errstr; + expected_val = unpriv && test->retval_unpriv ? 
+ test->retval_unpriv : test->retval; reject_from_alignment = fd_prog < 0 && (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) && @@ -13959,16 +14241,20 @@ static void do_test_single(struct bpf_test *test, bool unpriv, __u8 tmp[TEST_DATA_LEN << 2]; __u32 size_tmp = sizeof(tmp); + if (unpriv) + set_admin(true); err = bpf_prog_test_run(fd_prog, 1, test->data, sizeof(test->data), tmp, &size_tmp, &retval, NULL); + if (unpriv) + set_admin(false); if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) { printf("Unexpected bpf_prog_test_run error\n"); goto fail_log; } - if (!err && retval != test->retval && - test->retval != POINTER_VALUE) { - printf("FAIL retval %d != %d\n", retval, test->retval); + if (!err && retval != expected_val && + expected_val != POINTER_VALUE) { + printf("FAIL retval %d != %d\n", retval, expected_val); goto fail_log; } } @@ -14011,33 +14297,6 @@ static bool is_admin(void) return (sysadmin == CAP_SET); } -static int set_admin(bool admin) -{ - cap_t caps; - const cap_value_t cap_val = CAP_SYS_ADMIN; - int ret = -1; - - caps = cap_get_proc(); - if (!caps) { - perror("cap_get_proc"); - return -1; - } - if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val, - admin ? CAP_SET : CAP_CLEAR)) { - perror("cap_set_flag"); - goto out; - } - if (cap_set_proc(caps)) { - perror("cap_set_proc"); - goto out; - } - ret = 0; -out: - if (cap_free(caps)) - perror("cap_free"); - return ret; -} - static void get_unpriv_disabled() { char buf[2]; diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh index 0150bb2741eb..117f6f35d72f 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh @@ -25,24 +25,24 @@ # Thus we set MTU to 10K on all involved interfaces. Then both unicast and # multicast traffic uses 8K frames. 
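One detail worth spelling out from the test_verifier.c hunk above: the unprivileged variant of each test is loaded with CAP_SYS_ADMIN dropped, and the capability is raised only around bpf_prog_test_run(), which needs it. A minimal sketch of that raise/run/drop pattern, assuming libcap (link with -lcap); run_one_test() is a hypothetical stand-in for the real bpf_prog_test_run() call:

#include <stdbool.h>
#include <stdio.h>
#include <sys/capability.h>

/* Toggle CAP_SYS_ADMIN in the effective set; a condensed form of the
 * set_admin() helper shown in the hunk above. */
static int toggle_cap_sys_admin(bool on)
{
        const cap_value_t val = CAP_SYS_ADMIN;
        cap_t caps = cap_get_proc();
        int ret = -1;

        if (!caps) {
                perror("cap_get_proc");
                return -1;
        }
        if (!cap_set_flag(caps, CAP_EFFECTIVE, 1, &val,
                          on ? CAP_SET : CAP_CLEAR) &&
            !cap_set_proc(caps))
                ret = 0;
        cap_free(caps);
        return ret;
}

/* Hypothetical stand-in for running the already-loaded program. */
static int run_one_test(void)
{
        return 0;
}

int main(void)
{
        bool unpriv = true;     /* pretend this is an unprivileged test case */
        int err;

        if (unpriv)
                toggle_cap_sys_admin(true);     /* raise only for the run */
        err = run_one_test();
        if (unpriv)
                toggle_cap_sys_admin(false);    /* and drop it again */

        return err ? 1 : 0;
}

The expected return value is switched to retval_unpriv in the same way when it is set, because the unprivileged verifier rewrites can legitimately change the program's result, as in the tail-call test above that returns 2 instead of 42.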
# -# +-----------------------+ +----------------------------------+ -# | H1 | | H2 | -# | | | unicast --> + $h2.111 | -# | | | traffic | 192.0.2.129/28 | -# | multicast | | | e-qos-map 0:1 | -# | traffic | | | | -# | $h1 + <----- | | + $h2 | -# +-----|-----------------+ +--------------|-------------------+ -# | | -# +-----|-------------------------------------------------|-------------------+ -# | + $swp1 + $swp2 | -# | | >1Gbps | >1Gbps | -# | +---|----------------+ +----------|----------------+ | -# | | + $swp1.1 | | + $swp2.111 | | +# +---------------------------+ +----------------------------------+ +# | H1 | | H2 | +# | | | unicast --> + $h2.111 | +# | multicast | | traffic | 192.0.2.129/28 | +# | traffic | | | e-qos-map 0:1 | +# | $h1 + <----- | | | | +# | 192.0.2.65/28 | | | + $h2 | +# +---------------|-----------+ +--------------|-------------------+ +# | | +# +---------------|---------------------------------------|-------------------+ +# | $swp1 + + $swp2 | +# | >1Gbps | | >1Gbps | +# | +-------------|------+ +----------|----------------+ | +# | | $swp1.1 + | | + $swp2.111 | | # | | BR1 | SW | BR111 | | -# | | + $swp3.1 | | + $swp3.111 | | -# | +---|----------------+ +----------|----------------+ | -# | \_________________________________________________/ | +# | | $swp3.1 + | | + $swp3.111 | | +# | +-------------|------+ +----------|----------------+ | +# | \_______________________________________/ | # | | | # | + $swp3 | # | | 1Gbps bottleneck | @@ -51,6 +51,7 @@ # | # +--|-----------------+ # | + $h3 H3 | +# | | 192.0.2.66/28 | # | | | # | + $h3.111 | # | 192.0.2.130/28 | @@ -59,6 +60,7 @@ ALL_TESTS=" ping_ipv4 test_mc_aware + test_uc_aware " lib_dir=$(dirname $0)/../../../net/forwarding @@ -68,14 +70,14 @@ source $lib_dir/lib.sh h1_create() { - simple_if_init $h1 + simple_if_init $h1 192.0.2.65/28 mtu_set $h1 10000 } h1_destroy() { mtu_restore $h1 - simple_if_fini $h1 + simple_if_fini $h1 192.0.2.65/28 } h2_create() @@ -97,7 +99,7 @@ h2_destroy() h3_create() { - simple_if_init $h3 + simple_if_init $h3 192.0.2.66/28 mtu_set $h3 10000 vlan_create $h3 111 v$h3 192.0.2.130/28 @@ -108,7 +110,7 @@ h3_destroy() vlan_destroy $h3 111 mtu_restore $h3 - simple_if_fini $h3 + simple_if_fini $h3 192.0.2.66/28 } switch_create() @@ -251,7 +253,7 @@ measure_uc_rate() # average ingress rate to somewhat mitigate this. local min_ingress=2147483648 - mausezahn $h2.111 -p 8000 -A 192.0.2.129 -B 192.0.2.130 -c 0 \ + $MZ $h2.111 -p 8000 -A 192.0.2.129 -B 192.0.2.130 -c 0 \ -a own -b $h3mac -t udp -q & sleep 1 @@ -291,7 +293,7 @@ test_mc_aware() check_err $? 
"Could not get high enough UC-only ingress rate" local ucth1=${uc_rate[1]} - mausezahn $h1 -p 8000 -c 0 -a own -b bc -t udp -q & + $MZ $h1 -p 8000 -c 0 -a own -b bc -t udp -q & local d0=$(date +%s) local t0=$(ethtool_stats_get $h3 rx_octets_prio_0) @@ -311,7 +313,7 @@ test_mc_aware() ret = 100 * ($ucth1 - $ucth2) / $ucth1 if (ret > 0) { ret } else { 0 } ") - check_err $(bc <<< "$deg > 10") + check_err $(bc <<< "$deg > 25") local interval=$((d1 - d0)) local mc_ir=$(rate $u0 $u1 $interval) @@ -335,6 +337,51 @@ test_mc_aware() echo " egress UC throughput $(humanize ${uc_rate_2[1]})" echo " ingress MC throughput $(humanize $mc_ir)" echo " egress MC throughput $(humanize $mc_er)" + echo +} + +test_uc_aware() +{ + RET=0 + + $MZ $h2.111 -p 8000 -A 192.0.2.129 -B 192.0.2.130 -c 0 \ + -a own -b $h3mac -t udp -q & + + local d0=$(date +%s) + local t0=$(ethtool_stats_get $h3 rx_octets_prio_1) + local u0=$(ethtool_stats_get $swp2 rx_octets_prio_1) + sleep 1 + + local attempts=50 + local passes=0 + local i + + for ((i = 0; i < attempts; ++i)); do + if $ARPING -c 1 -I $h1 -b 192.0.2.66 -q -w 0.1; then + ((passes++)) + fi + + sleep 0.1 + done + + local d1=$(date +%s) + local t1=$(ethtool_stats_get $h3 rx_octets_prio_1) + local u1=$(ethtool_stats_get $swp2 rx_octets_prio_1) + + local interval=$((d1 - d0)) + local uc_ir=$(rate $u0 $u1 $interval) + local uc_er=$(rate $t0 $t1 $interval) + + ((attempts == passes)) + check_err $? + + # Suppress noise from killing mausezahn. + { kill %% && wait; } 2>/dev/null + + log_test "MC performace under UC overload" + echo " ingress UC throughput $(humanize ${uc_ir})" + echo " egress UC throughput $(humanize ${uc_er})" + echo " sent $attempts BC ARPs, got $passes responses" } trap cleanup EXIT diff --git a/tools/testing/selftests/powerpc/cache_shape/Makefile b/tools/testing/selftests/powerpc/cache_shape/Makefile index ede4d3dae750..689f6c8ebcd8 100644 --- a/tools/testing/selftests/powerpc/cache_shape/Makefile +++ b/tools/testing/selftests/powerpc/cache_shape/Makefile @@ -1,12 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 -TEST_PROGS := cache_shape - -all: $(TEST_PROGS) - -$(TEST_PROGS): ../harness.c ../utils.c +TEST_GEN_PROGS := cache_shape top_srcdir = ../../../../.. 
include ../../lib.mk -clean: - rm -f $(TEST_PROGS) *.o +$(TEST_GEN_PROGS): ../harness.c ../utils.c diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile index bd5dfa509272..23f4caf48ffc 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile +++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile @@ -5,6 +5,9 @@ noarg: # The EBB handler is 64-bit code and everything links against it CFLAGS += -m64 +# Toolchains may build PIE by default which breaks the assembly +LDFLAGS += -no-pie + TEST_GEN_PROGS := reg_access_test event_attributes_test cycles_test \ cycles_with_freeze_test pmc56_overflow_test \ ebb_vs_cpu_event_test cpu_event_vs_ebb_test \ diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile index 9b35ca8e8f13..8d3f006c98cc 100644 --- a/tools/testing/selftests/powerpc/ptrace/Makefile +++ b/tools/testing/selftests/powerpc/ptrace/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \ +TEST_GEN_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \ ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx \ ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \ perf-hwbreak ptrace-syscall @@ -7,14 +7,9 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \ top_srcdir = ../../../../.. include ../../lib.mk -all: $(TEST_PROGS) - CFLAGS += -m64 -I../../../../../usr/include -I../tm -mhtm -fno-pie -ptrace-pkey core-pkey: child.h -ptrace-pkey core-pkey: LDLIBS += -pthread - -$(TEST_PROGS): ../harness.c ../utils.c ../lib/reg.S ptrace.h +$(OUTPUT)/ptrace-pkey $(OUTPUT)/core-pkey: child.h +$(OUTPUT)/ptrace-pkey $(OUTPUT)/core-pkey: LDLIBS += -pthread -clean: - rm -f $(TEST_PROGS) *.o +$(TEST_GEN_PROGS): ../harness.c ../utils.c ../lib/reg.S ptrace.h diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c index 327fa943c7f3..dbdffa2e2c82 100644 --- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c @@ -67,8 +67,8 @@ trans: "3: ;" : [res] "=r" (result), [texasr] "=r" (texasr) : [gpr_1]"i"(GPR_1), [gpr_2]"i"(GPR_2), [gpr_4]"i"(GPR_4), - [sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "r" (&a), - [flt_2] "r" (&b), [flt_4] "r" (&d) + [sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "b" (&a), + [flt_4] "b" (&d) : "memory", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", diff --git a/tools/testing/selftests/powerpc/security/Makefile b/tools/testing/selftests/powerpc/security/Makefile index 44690f1bb26a..85861c46b445 100644 --- a/tools/testing/selftests/powerpc/security/Makefile +++ b/tools/testing/selftests/powerpc/security/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0+ TEST_GEN_PROGS := rfi_flush +top_srcdir = ../../../../.. 
CFLAGS += -I../../../../../usr/include diff --git a/tools/testing/selftests/powerpc/security/rfi_flush.c b/tools/testing/selftests/powerpc/security/rfi_flush.c index 564ed45bbf73..0a7d0afb26b8 100644 --- a/tools/testing/selftests/powerpc/security/rfi_flush.c +++ b/tools/testing/selftests/powerpc/security/rfi_flush.c @@ -49,6 +49,7 @@ int rfi_flush_test(void) struct perf_event_read v; __u64 l1d_misses_total = 0; unsigned long iterations = 100000, zero_size = 24 * 1024; + unsigned long l1d_misses_expected; int rfi_flush_org, rfi_flush; SKIP_IF(geteuid() != 0); @@ -71,6 +72,12 @@ int rfi_flush_test(void) iter = repetitions; + /* + * We expect to see l1d miss for each cacheline access when rfi_flush + * is set. Allow a small variation on this. + */ + l1d_misses_expected = iterations * (zero_size / CACHELINE_SIZE - 2); + again: FAIL_IF(perf_event_reset(fd)); @@ -78,10 +85,9 @@ again: FAIL_IF(read(fd, &v, sizeof(v)) != sizeof(v)); - /* Expect at least zero_size/CACHELINE_SIZE misses per iteration */ - if (v.l1d_misses >= (iterations * zero_size / CACHELINE_SIZE) && rfi_flush) + if (rfi_flush && v.l1d_misses >= l1d_misses_expected) passes++; - else if (v.l1d_misses < iterations && !rfi_flush) + else if (!rfi_flush && v.l1d_misses < (l1d_misses_expected / 2)) passes++; l1d_misses_total += v.l1d_misses; @@ -92,13 +98,15 @@ again: if (passes < repetitions) { printf("FAIL (L1D misses with rfi_flush=%d: %llu %c %lu) [%d/%d failures]\n", rfi_flush, l1d_misses_total, rfi_flush ? '<' : '>', - rfi_flush ? (repetitions * iterations * zero_size / CACHELINE_SIZE) : iterations, + rfi_flush ? repetitions * l1d_misses_expected : + repetitions * l1d_misses_expected / 2, repetitions - passes, repetitions); rc = 1; } else printf("PASS (L1D misses with rfi_flush=%d: %llu %c %lu) [%d/%d pass]\n", rfi_flush, l1d_misses_total, rfi_flush ? '>' : '<', - rfi_flush ? (repetitions * iterations * zero_size / CACHELINE_SIZE) : iterations, + rfi_flush ? repetitions * l1d_misses_expected : + repetitions * l1d_misses_expected / 2, passes, repetitions); if (rfi_flush == rfi_flush_org) { diff --git a/tools/testing/selftests/powerpc/signal/Makefile b/tools/testing/selftests/powerpc/signal/Makefile index 1fca25c6ace0..209a958dca12 100644 --- a/tools/testing/selftests/powerpc/signal/Makefile +++ b/tools/testing/selftests/powerpc/signal/Makefile @@ -1,15 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 -TEST_PROGS := signal signal_tm - -all: $(TEST_PROGS) - -$(TEST_PROGS): ../harness.c ../utils.c signal.S +TEST_GEN_PROGS := signal signal_tm CFLAGS += -maltivec -signal_tm: CFLAGS += -mhtm +$(OUTPUT)/signal_tm: CFLAGS += -mhtm top_srcdir = ../../../../.. include ../../lib.mk -clean: - rm -f $(TEST_PROGS) *.o +$(TEST_GEN_PROGS): ../harness.c ../utils.c signal.S diff --git a/tools/testing/selftests/powerpc/switch_endian/Makefile b/tools/testing/selftests/powerpc/switch_endian/Makefile index fcd2dcb8972b..bdc081afedb0 100644 --- a/tools/testing/selftests/powerpc/switch_endian/Makefile +++ b/tools/testing/selftests/powerpc/switch_endian/Makefile @@ -8,6 +8,7 @@ EXTRA_CLEAN = $(OUTPUT)/*.o $(OUTPUT)/check-reversed.S top_srcdir = ../../../../.. 
include ../../lib.mk +$(OUTPUT)/switch_endian_test: ASFLAGS += -I $(OUTPUT) $(OUTPUT)/switch_endian_test: $(OUTPUT)/check-reversed.S $(OUTPUT)/check-reversed.o: $(OUTPUT)/check.o diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c index 43c342845be0..ed62f4153d3e 100644 --- a/tools/testing/selftests/powerpc/utils.c +++ b/tools/testing/selftests/powerpc/utils.c @@ -25,7 +25,6 @@ #include "utils.h" static char auxv[4096]; -extern unsigned int dscr_insn[]; int read_auxv(char *buf, ssize_t buf_size) { @@ -247,7 +246,8 @@ static void sigill_handler(int signr, siginfo_t *info, void *unused) ucontext_t *ctx = (ucontext_t *)unused; unsigned long *pc = &UCONTEXT_NIA(ctx); - if (*pc == (unsigned long)&dscr_insn) { + /* mtspr 3,RS to check for move to DSCR below */ + if ((*((unsigned int *)*pc) & 0xfc1fffff) == 0x7c0303a6) { if (!warned++) printf("WARNING: Skipping over dscr setup. Consider running 'ppc64_cpu --dscr=1' manually.\n"); *pc += 4; @@ -271,5 +271,5 @@ void set_dscr(unsigned long val) init = 1; } - asm volatile("dscr_insn: mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR)); + asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR)); } |
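
Editor's note on the rfi_flush.c hunk above: the new pass criterion can be sanity-checked by hand. The following standalone sketch is not part of the patch; it only replays the arithmetic, using the test's own iterations = 100000 and zero_size = 24 * 1024, and assuming a CACHELINE_SIZE of 128 bytes (the constant is defined elsewhere in the test; 128 is an assumption here).

#include <stdio.h>

int main(void)
{
	/* values taken from rfi_flush_test(); cacheline size assumed 128 */
	unsigned long iterations = 100000, zero_size = 24 * 1024;
	unsigned long cacheline_size = 128;
	unsigned long l1d_misses_expected;

	/* one miss per cacheline touched, minus two cachelines of slack */
	l1d_misses_expected = iterations * (zero_size / cacheline_size - 2);

	printf("flush on : pass if misses >= %lu\n", l1d_misses_expected);
	printf("flush off: pass if misses <  %lu\n", l1d_misses_expected / 2);
	return 0;
}

With those numbers the flush-enabled threshold works out to 100000 * (192 - 2) = 19,000,000 misses per repetition, and the flush-disabled case passes only below half of that, which is what the rewritten if/else in the test checks.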
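Editor's note on the utils.c hunk above: the SIGILL handler now recognises the faulting instruction by its encoding instead of comparing against the removed dscr_insn label. The sketch below is not part of the patch; it is a standalone illustration, assuming the Power ISA XFX encoding of mtspr (primary opcode 31, XO 467, RS in bits 6-10, i.e. mask 0x03e00000), of why masking with 0xfc1fffff matches "mtspr 3,RS" for any source register. The helper name is_mtspr_dscr is hypothetical.

#include <stdio.h>

/*
 * 0x7c0303a6 is mtspr 3,r0 (SPR 3 is the user DSCR).  RS occupies ISA
 * bits 6-10, i.e. mask 0x03e00000, and 0xfc1fffff is simply ~0x03e00000,
 * so the comparison ignores which register feeds the mtspr.
 */
static int is_mtspr_dscr(unsigned int insn)
{
	return (insn & 0xfc1fffff) == 0x7c0303a6;
}

int main(void)
{
	unsigned int rs;

	for (rs = 0; rs < 32; rs++) {
		/* build mtspr 3,rN by plugging RS into ISA bits 6-10 */
		unsigned int insn = 0x7c0303a6 | (rs << 21);

		printf("mtspr 3,r%-2u -> 0x%08x %s\n", rs, insn,
		       is_mtspr_dscr(insn) ? "matches" : "no match");
	}
	return 0;
}

Matching on the encoding rather than on a label keeps the handler working even when the compiler picks a different source register for the mtspr in set_dscr() or reorders the surrounding code.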