-rw-r--r--.gitignore7
-rw-r--r--Documentation/clk.txt16
-rw-r--r--Documentation/cpu-freq/core.txt12
-rw-r--r--Documentation/cpu-freq/cpu-drivers.txt6
-rw-r--r--Documentation/cpuidle/sysfs.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt20
-rw-r--r--Documentation/devicetree/bindings/clock/imx6sll-clock.txt36
-rw-r--r--Documentation/devicetree/bindings/clock/intc_stratix10.txt20
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt6
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rk3328-cru.txt1
-rw-r--r--Documentation/devicetree/bindings/clock/silabs,si544.txt25
-rw-r--r--Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.txt60
-rw-r--r--Documentation/devicetree/bindings/clock/sunxi-ccu.txt4
-rw-r--r--Documentation/devicetree/bindings/clock/ti/davinci/da8xx-cfgchip.txt93
-rw-r--r--Documentation/devicetree/bindings/clock/ti/davinci/pll.txt96
-rw-r--r--Documentation/devicetree/bindings/clock/ti/davinci/psc.txt71
-rw-r--r--Documentation/devicetree/bindings/clock/ti/divider.txt3
-rw-r--r--Documentation/devicetree/bindings/clock/ti/mux.txt3
-rw-r--r--Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt5
-rw-r--r--Documentation/devicetree/bindings/iommu/rockchip,iommu.txt7
-rw-r--r--Documentation/devicetree/bindings/pwm/ingenic,jz47xx-pwm.txt25
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-stm32-lp.txt3
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-sun4i.txt2
-rw-r--r--Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt11
-rw-r--r--Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt10
-rw-r--r--Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt2
-rw-r--r--Documentation/devicetree/bindings/watchdog/meson-wdt.txt4
-rw-r--r--Documentation/devicetree/bindings/watchdog/mtk-wdt.txt4
-rw-r--r--Documentation/devicetree/bindings/watchdog/nuvoton,npcm-wdt.txt28
-rw-r--r--Documentation/devicetree/bindings/watchdog/sirfsoc_wdt.txt4
-rw-r--r--Documentation/devicetree/bindings/watchdog/sunxi-wdt.txt4
-rw-r--r--Documentation/filesystems/afs.txt28
-rw-r--r--Documentation/filesystems/gfs2-glocks.txt5
-rw-r--r--Documentation/filesystems/overlayfs.txt39
-rw-r--r--Documentation/process/adding-syscalls.rst4
-rw-r--r--Documentation/thermal/sysfs-api.txt31
-rw-r--r--Documentation/x86/x86_64/mm.txt2
-rw-r--r--MAINTAINERS92
-rw-r--r--Makefile5
-rw-r--r--arch/arc/boot/dts/Makefile2
-rw-r--r--arch/arm/crypto/Makefile2
-rw-r--r--arch/arm/mach-npcm/npcm7xx.c2
-rw-r--r--arch/arm64/crypto/Makefile2
-rw-r--r--arch/arm64/include/asm/assembler.h136
-rw-r--r--arch/arm64/include/asm/cpucaps.h13
-rw-r--r--arch/arm64/include/asm/kvm_asm.h2
-rw-r--r--arch/arm64/kernel/Makefile2
-rw-r--r--arch/arm64/kernel/asm-offsets.c3
-rw-r--r--arch/arm64/kernel/bpi.S102
-rw-r--r--arch/arm64/kernel/cpu_errata.c97
-rw-r--r--arch/arm64/kvm/hyp/entry.S12
-rw-r--r--arch/arm64/kvm/hyp/hyp-entry.S64
-rw-r--r--arch/arm64/kvm/hyp/switch.c10
-rw-r--r--arch/microblaze/include/asm/pci.h7
-rw-r--r--arch/microblaze/include/asm/pgtable.h2
-rw-r--r--arch/microblaze/pci/pci-common.c99
-rw-r--r--arch/mips/mm/gup.c2
-rw-r--r--arch/nios2/kernel/time.c4
-rw-r--r--arch/openrisc/include/uapi/asm/unistd.h2
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/include/asm/compat.h6
-rw-r--r--arch/parisc/include/asm/elf.h69
-rw-r--r--arch/parisc/include/uapi/asm/siginfo.h7
-rw-r--r--arch/parisc/kernel/binfmt_elf32.c98
-rw-r--r--arch/parisc/kernel/cache.c2
-rw-r--r--arch/parisc/kernel/pacache.S9
-rw-r--r--arch/parisc/kernel/process.c13
-rw-r--r--arch/parisc/kernel/traps.c7
-rw-r--r--arch/powerpc/Kconfig3
-rw-r--r--arch/powerpc/include/asm/cputable.h23
-rw-r--r--arch/powerpc/include/asm/kexec.h2
-rw-r--r--arch/powerpc/include/asm/module.h12
-rw-r--r--arch/powerpc/include/asm/opal.h3
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c14
-rw-r--r--arch/powerpc/kernel/kexec_elf_64.c11
-rw-r--r--arch/powerpc/kernel/machine_kexec_file_64.c39
-rw-r--r--arch/powerpc/kernel/setup_64.c2
-rw-r--r--arch/powerpc/kernel/traps.c32
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c4
-rw-r--r--arch/powerpc/mm/slice.c1
-rw-r--r--arch/powerpc/mm/tlb-radix.c5
-rw-r--r--arch/powerpc/platforms/powernv/opal-nvram.c7
-rw-r--r--arch/s390/boot/compressed/misc.c23
-rw-r--r--arch/s390/crypto/aes_s390.c8
-rw-r--r--arch/s390/crypto/paes_s390.c8
-rw-r--r--arch/s390/include/asm/ap.h6
-rw-r--r--arch/s390/include/asm/cio.h10
-rw-r--r--arch/s390/include/asm/ipl.h25
-rw-r--r--arch/s390/include/asm/nospec-branch.h1
-rw-r--r--arch/s390/include/asm/reset.h20
-rw-r--r--arch/s390/include/uapi/asm/zcrypt.h163
-rw-r--r--arch/s390/kernel/compat_signal.c2
-rw-r--r--arch/s390/kernel/early.c14
-rw-r--r--arch/s390/kernel/ipl.c376
-rw-r--r--arch/s390/kernel/machine_kexec.c2
-rw-r--r--arch/s390/kernel/nospec-branch.c8
-rw-r--r--arch/s390/kernel/reipl.S87
-rw-r--r--arch/s390/kernel/relocate_kernel.S54
-rw-r--r--arch/s390/kernel/setup.c3
-rw-r--r--arch/s390/mm/gup.c2
-rw-r--r--arch/sh/boards/of-generic.c6
-rw-r--r--arch/sh/drivers/pci/pci.c5
-rw-r--r--arch/sh/drivers/pci/pcie-sh7786.c78
-rw-r--r--arch/sh/include/asm/futex.h5
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sh7786.h7
-rw-r--r--arch/sh/kernel/dma-nommu.c7
-rw-r--r--arch/sh/kernel/entry-common.S2
-rw-r--r--arch/sh/kernel/setup.c8
-rw-r--r--arch/sh/mm/consistent.c4
-rw-r--r--arch/sh/mm/gup.c2
-rw-r--r--arch/sparc/mm/gup.c4
-rw-r--r--arch/sparc/vdso/Makefile4
-rw-r--r--arch/um/Kconfig.net11
-rw-r--r--arch/um/drivers/Makefile4
-rw-r--r--arch/um/drivers/chan_kern.c53
-rw-r--r--arch/um/drivers/line.c2
-rw-r--r--arch/um/drivers/net_kern.c4
-rw-r--r--arch/um/drivers/random.c11
-rw-r--r--arch/um/drivers/ubd_kern.c4
-rw-r--r--arch/um/drivers/vector_kern.c1633
-rw-r--r--arch/um/drivers/vector_kern.h130
-rw-r--r--arch/um/drivers/vector_transports.c458
-rw-r--r--arch/um/drivers/vector_user.c590
-rw-r--r--arch/um/drivers/vector_user.h100
-rw-r--r--arch/um/include/asm/asm-prototypes.h1
-rw-r--r--arch/um/include/asm/irq.h12
-rw-r--r--arch/um/include/shared/irq_user.h12
-rw-r--r--arch/um/include/shared/net_kern.h2
-rw-r--r--arch/um/include/shared/os.h17
-rw-r--r--arch/um/kernel/irq.c461
-rw-r--r--arch/um/kernel/time.c6
-rw-r--r--arch/um/os-Linux/file.c1
-rw-r--r--arch/um/os-Linux/irq.c202
-rw-r--r--arch/um/os-Linux/signal.c3
-rw-r--r--arch/x86/Kconfig8
-rw-r--r--arch/x86/boot/compressed/kaslr.c3
-rw-r--r--arch/x86/entry/calling.h2
-rw-r--r--arch/x86/entry/common.c20
-rw-r--r--arch/x86/entry/entry_64.S7
-rw-r--r--arch/x86/entry/entry_64_compat.S6
-rw-r--r--arch/x86/entry/syscall_32.c15
-rw-r--r--arch/x86/entry/syscall_64.c6
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl723
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl712
-rw-r--r--arch/x86/entry/syscalls/syscalltbl.sh14
-rw-r--r--arch/x86/entry/vdso/Makefile4
-rw-r--r--arch/x86/entry/vsyscall/vsyscall_64.c18
-rw-r--r--arch/x86/events/intel/ds.c34
-rw-r--r--arch/x86/include/asm/apic.h4
-rw-r--r--arch/x86/include/asm/kexec-bzimage64.h2
-rw-r--r--arch/x86/include/asm/pgtable.h27
-rw-r--r--arch/x86/include/asm/pgtable_types.h29
-rw-r--r--arch/x86/include/asm/pti.h2
-rw-r--r--arch/x86/include/asm/syscall.h4
-rw-r--r--arch/x86/include/asm/syscall_wrapper.h209
-rw-r--r--arch/x86/include/asm/syscalls.h17
-rw-r--r--arch/x86/include/asm/tlbflush.h7
-rw-r--r--arch/x86/include/uapi/asm/bootparam.h18
-rw-r--r--arch/x86/kernel/acpi/boot.c13
-rw-r--r--arch/x86/kernel/apic/apic_common.c2
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c2
-rw-r--r--arch/x86/kernel/apic/x2apic.h2
-rw-r--r--arch/x86/kernel/apic/x2apic_phys.c2
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c2
-rw-r--r--arch/x86/kernel/cpu/common.c32
-rw-r--r--arch/x86/kernel/cpu/cpuid-deps.c2
-rw-r--r--arch/x86/kernel/crash.c334
-rw-r--r--arch/x86/kernel/espfix_64.c4
-rw-r--r--arch/x86/kernel/head64.c2
-rw-r--r--arch/x86/kernel/head_64.S11
-rw-r--r--arch/x86/kernel/kexec-bzimage64.c10
-rw-r--r--arch/x86/kernel/ldt.c6
-rw-r--r--arch/x86/kernel/machine_kexec_64.c111
-rw-r--r--arch/x86/kernel/signal_compat.c2
-rw-r--r--arch/x86/mm/cpu_entry_area.c14
-rw-r--r--arch/x86/mm/ident_map.c3
-rw-r--r--arch/x86/mm/init.c14
-rw-r--r--arch/x86/mm/init_32.c8
-rw-r--r--arch/x86/mm/init_64.c11
-rw-r--r--arch/x86/mm/iomap_32.c6
-rw-r--r--arch/x86/mm/ioremap.c3
-rw-r--r--arch/x86/mm/kasan_init_64.c14
-rw-r--r--arch/x86/mm/pageattr.c97
-rw-r--r--arch/x86/mm/pgtable.c12
-rw-r--r--arch/x86/mm/pti.c126
-rw-r--r--arch/x86/power/hibernate_64.c20
-rw-r--r--arch/x86/purgatory/Makefile3
-rw-r--r--arch/x86/purgatory/purgatory.c2
-rw-r--r--arch/x86/purgatory/string.c12
-rw-r--r--arch/x86/um/stub_segv.c3
-rw-r--r--arch/x86/xen/apic.c2
-rw-r--r--arch/x86/xen/enlighten_pv.c8
-rw-r--r--arch/x86/xen/smp_pv.c1
-rw-r--r--arch/x86/xen/xen-head.S4
-rw-r--r--block/blk-core.c35
-rw-r--r--block/blk-mq-cpumap.c5
-rw-r--r--block/blk-mq-debugfs.c1
-rw-r--r--block/blk-mq.c122
-rw-r--r--crypto/.gitignore1
-rw-r--r--crypto/Makefile14
-rw-r--r--crypto/asymmetric_keys/.gitignore1
-rw-r--r--crypto/asymmetric_keys/Makefile31
-rw-r--r--crypto/asymmetric_keys/mscode_parser.c2
-rw-r--r--crypto/asymmetric_keys/pkcs7_parser.c2
-rw-r--r--crypto/asymmetric_keys/x509_cert_parser.c4
-rw-r--r--crypto/rsa_helper.c4
-rw-r--r--drivers/acpi/arm64/iort.c111
-rw-r--r--drivers/acpi/processor_perflib.c11
-rw-r--r--drivers/block/loop.c45
-rw-r--r--drivers/clk/Kconfig35
-rw-r--r--drivers/clk/Makefile8
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c8
-rw-r--r--drivers/clk/clk-cs2000-cp.c2
-rw-r--r--drivers/clk/clk-divider.c58
-rw-r--r--drivers/clk/clk-gpio.c4
-rw-r--r--drivers/clk/clk-mux.c75
-rw-r--r--drivers/clk/clk-si544.c411
-rw-r--r--drivers/clk/clk-stm32f4.c14
-rw-r--r--drivers/clk/clk-stm32mp1.c2117
-rw-r--r--drivers/clk/clk.c85
-rw-r--r--drivers/clk/davinci/Makefile21
-rw-r--r--drivers/clk/davinci/da8xx-cfgchip.c790
-rw-r--r--drivers/clk/davinci/pll-da830.c70
-rw-r--r--drivers/clk/davinci/pll-da850.c212
-rw-r--r--drivers/clk/davinci/pll-dm355.c79
-rw-r--r--drivers/clk/davinci/pll-dm365.c145
-rw-r--r--drivers/clk/davinci/pll-dm644x.c80
-rw-r--r--drivers/clk/davinci/pll-dm646x.c84
-rw-r--r--drivers/clk/davinci/pll.c899
-rw-r--r--drivers/clk/davinci/pll.h141
-rw-r--r--drivers/clk/davinci/psc-da830.c116
-rw-r--r--drivers/clk/davinci/psc-da850.c156
-rw-r--r--drivers/clk/davinci/psc-dm355.c88
-rw-r--r--drivers/clk/davinci/psc-dm365.c96
-rw-r--r--drivers/clk/davinci/psc-dm644x.c83
-rw-r--r--drivers/clk/davinci/psc-dm646x.c80
-rw-r--r--drivers/clk/davinci/psc.c551
-rw-r--r--drivers/clk/davinci/psc.h108
-rw-r--r--drivers/clk/hisilicon/Makefile2
-rw-r--r--drivers/clk/hisilicon/clk-hisi-phase.c121
-rw-r--r--drivers/clk/hisilicon/clk.c26
-rw-r--r--drivers/clk/hisilicon/clk.h19
-rw-r--r--drivers/clk/hisilicon/crg-hi3516cv300.c2
-rw-r--r--drivers/clk/hisilicon/crg-hi3798cv200.c100
-rw-r--r--drivers/clk/imx/Makefile1
-rw-r--r--drivers/clk/imx/clk-busy.c4
-rw-r--r--drivers/clk/imx/clk-imx6sll.c340
-rw-r--r--drivers/clk/imx/clk-imx6sx.c14
-rw-r--r--drivers/clk/imx/clk-imx6ul.c5
-rw-r--r--drivers/clk/imx/clk-imx7d.c113
-rw-r--r--drivers/clk/imx/clk-pllv2.c6
-rw-r--r--drivers/clk/imx/clk.h14
-rw-r--r--drivers/clk/keystone/sci-clk.c380
-rw-r--r--drivers/clk/mediatek/Kconfig6
-rw-r--r--drivers/clk/mediatek/Makefile1
-rw-r--r--drivers/clk/mediatek/clk-mt2701-aud.c186
-rw-r--r--drivers/clk/mediatek/clk-mt2701.c15
-rw-r--r--drivers/clk/mediatek/clk-mt2712.c69
-rw-r--r--drivers/clk/mediatek/clk-mt7622-aud.c15
-rw-r--r--drivers/clk/meson/Kconfig9
-rw-r--r--drivers/clk/meson/Makefile5
-rw-r--r--drivers/clk/meson/axg.c955
-rw-r--r--drivers/clk/meson/axg.h12
-rw-r--r--drivers/clk/meson/clk-audio-divider.c63
-rw-r--r--drivers/clk/meson/clk-cpu.c178
-rw-r--r--drivers/clk/meson/clk-mpll.c125
-rw-r--r--drivers/clk/meson/clk-pll.c306
-rw-r--r--drivers/clk/meson/clk-regmap.c166
-rw-r--r--drivers/clk/meson/clk-regmap.h111
-rw-r--r--drivers/clk/meson/clkc.h107
-rw-r--r--drivers/clk/meson/gxbb-aoclk-regmap.c46
-rw-r--r--drivers/clk/meson/gxbb-aoclk.c20
-rw-r--r--drivers/clk/meson/gxbb-aoclk.h11
-rw-r--r--drivers/clk/meson/gxbb.c1591
-rw-r--r--drivers/clk/meson/gxbb.h14
-rw-r--r--drivers/clk/meson/meson8b.c705
-rw-r--r--drivers/clk/meson/meson8b.h17
-rw-r--r--drivers/clk/mvebu/armada-38x.c14
-rw-r--r--drivers/clk/mvebu/cp110-system-controller.c94
-rw-r--r--drivers/clk/nxp/clk-lpc32xx.c1
-rw-r--r--drivers/clk/qcom/clk-regmap-divider.c20
-rw-r--r--drivers/clk/qcom/clk-rpm.c79
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c9
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c8
-rw-r--r--drivers/clk/renesas/Kconfig13
-rw-r--r--drivers/clk/renesas/Makefile2
-rw-r--r--drivers/clk/renesas/clk-div6.c22
-rw-r--r--drivers/clk/renesas/clk-mstp.c4
-rw-r--r--drivers/clk/renesas/clk-r8a73a4.c11
-rw-r--r--drivers/clk/renesas/clk-r8a7740.c8
-rw-r--r--drivers/clk/renesas/clk-rcar-gen2.c17
-rw-r--r--drivers/clk/renesas/clk-rz.c4
-rw-r--r--drivers/clk/renesas/clk-sh73a0.c20
-rw-r--r--drivers/clk/renesas/r8a7743-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a7745-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a7790-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a7791-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a7792-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a7794-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a7796-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a77965-cpg-mssr.c334
-rw-r--r--drivers/clk/renesas/r8a77980-cpg-mssr.c227
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c143
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.h2
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c12
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h2
-rw-r--r--drivers/clk/rockchip/clk-mmc-phase.c78
-rw-r--r--drivers/clk/rockchip/clk-rk3228.c2
-rw-r--r--drivers/clk/rockchip/clk-rk3328.c83
-rw-r--r--drivers/clk/rockchip/clk-rk3399.c5
-rw-r--r--drivers/clk/rockchip/clk.c22
-rw-r--r--drivers/clk/samsung/Makefile2
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c4
-rw-r--r--drivers/clk/samsung/clk-exynos3250.c114
-rw-r--r--drivers/clk/samsung/clk-exynos4.c103
-rw-r--r--drivers/clk/samsung/clk-exynos5-subcmu.c189
-rw-r--r--drivers/clk/samsung/clk-exynos5-subcmu.h26
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c111
-rw-r--r--drivers/clk/samsung/clk-exynos5260.c90
-rw-r--r--drivers/clk/samsung/clk-exynos5410.c20
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c189
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c121
-rw-r--r--drivers/clk/samsung/clk-exynos7.c2
-rw-r--r--drivers/clk/samsung/clk-pll.h48
-rw-r--r--drivers/clk/samsung/clk-s3c2410.c148
-rw-r--r--drivers/clk/samsung/clk-s3c2412.c25
-rw-r--r--drivers/clk/samsung/clk-s3c2443.c55
-rw-r--r--drivers/clk/samsung/clk-s3c64xx.c17
-rw-r--r--drivers/clk/socfpga/Makefile9
-rw-r--r--drivers/clk/socfpga/clk-gate-s10.c125
-rw-r--r--drivers/clk/socfpga/clk-periph-s10.c149
-rw-r--r--drivers/clk/socfpga/clk-pll-s10.c146
-rw-r--r--drivers/clk/socfpga/clk-s10.c345
-rw-r--r--drivers/clk/socfpga/clk.h4
-rw-r--r--drivers/clk/socfpga/stratix10-clk.h80
-rw-r--r--drivers/clk/sprd/sc9860-clk.c76
-rw-r--r--drivers/clk/sunxi-ng/Kconfig12
-rw-r--r--drivers/clk/sunxi-ng/Makefile1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c1211
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.h56
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c32
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.h4
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.c58
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.h2
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.h27
-rw-r--r--drivers/clk/tegra/clk-emc.c2
-rw-r--r--drivers/clk/tegra/clk-pll.c2
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c2
-rw-r--r--drivers/clk/tegra/clk-tegra-super-gen4.c8
-rw-r--r--drivers/clk/tegra/clk-tegra114.c4
-rw-r--r--drivers/clk/tegra/clk-tegra124.c9
-rw-r--r--drivers/clk/tegra/clk-tegra20.c24
-rw-r--r--drivers/clk/tegra/clk-tegra210.c361
-rw-r--r--drivers/clk/tegra/clk-tegra30.c15
-rw-r--r--drivers/clk/tegra/clk.h7
-rw-r--r--drivers/clk/ti/clk.c38
-rw-r--r--drivers/clk/ti/clock.h13
-rw-r--r--drivers/clk/ti/divider.c26
-rw-r--r--drivers/clk/ti/mux.c13
-rw-r--r--drivers/clk/uniphier/clk-uniphier-sys.c25
-rw-r--r--drivers/clk/ux500/Makefile2
-rw-r--r--drivers/clk/ux500/abx500-clk.c16
-rw-r--r--drivers/clk/ux500/u8540_clk.c597
-rw-r--r--drivers/clk/ux500/u9540_clk.c18
-rw-r--r--drivers/clk/versatile/clk-vexpress-osc.c5
-rw-r--r--drivers/cpufreq/armada-37xx-cpufreq.c2
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c15
-rw-r--r--drivers/cpufreq/freq_table.c14
-rw-r--r--drivers/cpufreq/intel_pstate.c1
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c10
-rw-r--r--drivers/cpufreq/ti-cpufreq.c2
-rw-r--r--drivers/cpuidle/cpuidle.c10
-rw-r--r--drivers/cpuidle/governors/ladder.c3
-rw-r--r--drivers/cpuidle/governors/menu.c113
-rw-r--r--drivers/crypto/qat/qat_common/.gitignore1
-rw-r--r--drivers/firmware/dmi_scan.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c117
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c67
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c89
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c67
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c13
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h12
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/Makefile2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h10
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c23
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.c1364
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.h53
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c11
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h14
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c6
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c4
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c4
-rw-r--r--drivers/i2c/busses/i2c-i801.c16
-rw-r--r--drivers/i2c/i2c-core-base.c3
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c231
-rw-r--r--drivers/iommu/amd_iommu.c320
-rw-r--r--drivers/iommu/amd_iommu_init.c2
-rw-r--r--drivers/iommu/amd_iommu_types.h6
-rw-r--r--drivers/iommu/arm-smmu-v3.c542
-rw-r--r--drivers/iommu/dma-iommu.c8
-rw-r--r--drivers/iommu/dmar.c2
-rw-r--r--drivers/iommu/exynos-iommu.c13
-rw-r--r--drivers/iommu/intel-iommu.c6
-rw-r--r--drivers/iommu/intel-svm.c17
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c21
-rw-r--r--drivers/iommu/io-pgtable-arm.c91
-rw-r--r--drivers/iommu/io-pgtable.h4
-rw-r--r--drivers/iommu/iommu.c6
-rw-r--r--drivers/iommu/mtk_iommu.c15
-rw-r--r--drivers/iommu/mtk_iommu.h1
-rw-r--r--drivers/iommu/mtk_iommu_v1.c54
-rw-r--r--drivers/iommu/omap-iommu.c2
-rw-r--r--drivers/iommu/rockchip-iommu.c592
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c3
-rw-r--r--drivers/mfd/cros_ec_dev.c31
-rw-r--r--drivers/misc/kgdbts.c8
-rw-r--r--drivers/mmc/core/block.c1
-rw-r--r--drivers/mmc/host/jz4740_mmc.c2
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c2
-rw-r--r--drivers/mtd/ubi/block.c2
-rw-r--r--drivers/mtd/ubi/build.c11
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c63
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c30
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c2
-rw-r--r--drivers/net/slip/slhc.c5
-rw-r--r--drivers/net/tun.c33
-rw-r--r--drivers/net/usb/cdc_ether.c6
-rw-r--r--drivers/net/usb/lan78xx.c9
-rw-r--r--drivers/nvme/host/core.c33
-rw-r--r--drivers/nvme/host/fabrics.c83
-rw-r--r--drivers/nvme/host/fabrics.h33
-rw-r--r--drivers/nvme/host/fc.c12
-rw-r--r--drivers/nvme/host/nvme.h4
-rw-r--r--drivers/nvme/host/pci.c35
-rw-r--r--drivers/nvme/host/rdma.c14
-rw-r--r--drivers/nvme/target/admin-cmd.c1
-rw-r--r--drivers/nvme/target/discovery.c2
-rw-r--r--drivers/nvme/target/io-cmd.c4
-rw-r--r--drivers/nvme/target/loop.c20
-rw-r--r--drivers/of/unittest-data/Makefile6
-rw-r--r--drivers/pci/pci.c1
-rw-r--r--drivers/pci/quirks.c4
-rw-r--r--drivers/pci/setup-res.c2
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c896
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c76
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c16
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c141
-rw-r--r--drivers/pwm/Kconfig6
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c1
-rw-r--r--drivers/pwm/pwm-imx.c3
-rw-r--r--drivers/pwm/pwm-jz4740.c41
-rw-r--r--drivers/pwm/pwm-mediatek.c36
-rw-r--r--drivers/pwm/pwm-puv3.c4
-rw-r--r--drivers/pwm/pwm-rcar.c58
-rw-r--r--drivers/pwm/pwm-stm32-lp.c5
-rw-r--r--drivers/pwm/pwm-stm32.c22
-rw-r--r--drivers/pwm/pwm-sun4i.c38
-rw-r--r--drivers/pwm/sysfs.c3
-rw-r--r--drivers/s390/cio/ccwgroup.c5
-rw-r--r--drivers/s390/cio/cio.c257
-rw-r--r--drivers/s390/cio/ioasm.c24
-rw-r--r--drivers/s390/cio/ioasm.h1
-rw-r--r--drivers/s390/cio/qdio_main.c4
-rw-r--r--drivers/s390/cio/qdio_setup.c2
-rw-r--r--drivers/s390/crypto/ap_bus.c32
-rw-r--r--drivers/s390/crypto/ap_bus.h5
-rw-r--r--drivers/s390/crypto/ap_debug.h3
-rw-r--r--drivers/s390/crypto/pkey_api.c41
-rw-r--r--drivers/s390/crypto/zcrypt_api.c471
-rw-r--r--drivers/s390/crypto/zcrypt_api.h26
-rw-r--r--drivers/scsi/aacraid/commsup.c4
-rw-r--r--drivers/scsi/aacraid/linit.c1
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c8
-rw-r--r--drivers/scsi/dpt_i2o.c13
-rw-r--r--drivers/scsi/hosts.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c41
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c44
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c2
-rw-r--r--drivers/scsi/scsi_devinfo.c2
-rw-r--r--drivers/scsi/scsi_dh.c3
-rw-r--r--drivers/scsi/scsi_lib.c27
-rw-r--r--drivers/scsi/sr.c19
-rw-r--r--drivers/soc/samsung/pm_domains.c11
-rw-r--r--drivers/thermal/Kconfig7
-rw-r--r--drivers/thermal/imx_thermal.c6
-rw-r--r--drivers/thermal/thermal_core.c3
-rw-r--r--drivers/thermal/thermal_core.h10
-rw-r--r--drivers/thermal/thermal_helpers.c5
-rw-r--r--drivers/thermal/thermal_sysfs.c225
-rw-r--r--drivers/vhost/vhost.c72
-rw-r--r--drivers/vhost/vhost.h4
-rw-r--r--drivers/virtio/virtio_balloon.c6
-rw-r--r--drivers/watchdog/Kconfig12
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/ar7_wdt.c14
-rw-r--r--drivers/watchdog/asm9260_wdt.c8
-rw-r--r--drivers/watchdog/aspeed_wdt.c13
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c5
-rw-r--r--drivers/watchdog/at91sam9_wdt.c5
-rw-r--r--drivers/watchdog/at91sam9_wdt.h5
-rw-r--r--drivers/watchdog/bcm2835_wdt.c5
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c5
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c5
-rw-r--r--drivers/watchdog/bcm7038_wdt.c12
-rw-r--r--drivers/watchdog/bcm_kona_wdt.c9
-rw-r--r--drivers/watchdog/cadence_wdt.c5
-rw-r--r--drivers/watchdog/coh901327_wdt.c18
-rw-r--r--drivers/watchdog/da9052_wdt.c6
-rw-r--r--drivers/watchdog/da9055_wdt.c6
-rw-r--r--drivers/watchdog/da9062_wdt.c10
-rw-r--r--drivers/watchdog/da9063_wdt.c5
-rw-r--r--drivers/watchdog/davinci_wdt.c15
-rw-r--r--drivers/watchdog/digicolor_wdt.c5
-rw-r--r--drivers/watchdog/dw_wdt.c32
-rw-r--r--drivers/watchdog/ebc-c384_wdt.c1
-rw-r--r--drivers/watchdog/f71808e_wdt.c2
-rw-r--r--drivers/watchdog/gpio_wdt.c4
-rw-r--r--drivers/watchdog/hpwdt.c312
-rw-r--r--drivers/watchdog/imx2_wdt.c8
-rw-r--r--drivers/watchdog/lpc18xx_wdt.c2
-rw-r--r--drivers/watchdog/mei_wdt.c12
-rw-r--r--drivers/watchdog/mena21_wdt.c4
-rw-r--r--drivers/watchdog/meson_gxbb_wdt.c50
-rw-r--r--drivers/watchdog/meson_wdt.c2
-rw-r--r--drivers/watchdog/mtk_wdt.c13
-rw-r--r--drivers/watchdog/mtx-1_wdt.c11
-rw-r--r--drivers/watchdog/npcm_wdt.c254
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c8
-rw-r--r--drivers/watchdog/omap_wdt.c4
-rw-r--r--drivers/watchdog/pnx4008_wdt.c2
-rw-r--r--drivers/watchdog/renesas_wdt.c87
-rw-r--r--drivers/watchdog/sama5d4_wdt.c6
-rw-r--r--drivers/watchdog/sirfsoc_wdt.c2
-rw-r--r--drivers/watchdog/sprd_wdt.c4
-rw-r--r--drivers/watchdog/st_lpc_wdt.c6
-rw-r--r--drivers/watchdog/sunxi_wdt.c2
-rw-r--r--drivers/watchdog/tangox_wdt.c6
-rw-r--r--drivers/watchdog/tegra_wdt.c10
-rw-r--r--drivers/watchdog/uniphier_wdt.c15
-rw-r--r--drivers/watchdog/wm831x_wdt.c5
-rw-r--r--drivers/watchdog/wm8350_wdt.c5
-rw-r--r--drivers/xen/xen-acpi-processor.c30
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c16
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c4
-rw-r--r--fs/afs/Makefile2
-rw-r--r--fs/afs/addr_list.c6
-rw-r--r--fs/afs/afs.h27
-rw-r--r--fs/afs/afs_fs.h2
-rw-r--r--fs/afs/callback.c29
-rw-r--r--fs/afs/cell.c12
-rw-r--r--fs/afs/cmservice.c22
-rw-r--r--fs/afs/dir.c923
-rw-r--r--fs/afs/dir_edit.c505
-rw-r--r--fs/afs/dynroot.c209
-rw-r--r--fs/afs/file.c27
-rw-r--r--fs/afs/flock.c2
-rw-r--r--fs/afs/fsclient.c622
-rw-r--r--fs/afs/inode.c69
-rw-r--r--fs/afs/internal.h105
-rw-r--r--fs/afs/main.c44
-rw-r--r--fs/afs/proc.c326
-rw-r--r--fs/afs/rotate.c2
-rw-r--r--fs/afs/rxrpc.c9
-rw-r--r--fs/afs/security.c13
-rw-r--r--fs/afs/server.c14
-rw-r--r--fs/afs/super.c11
-rw-r--r--fs/afs/vlclient.c28
-rw-r--r--fs/afs/write.c41
-rw-r--r--fs/afs/xdr_fs.h103
-rw-r--r--fs/buffer.c25
-rw-r--r--fs/exportfs/expfs.c9
-rw-r--r--fs/gfs2/bmap.c2
-rw-r--r--fs/gfs2/glock.c47
-rw-r--r--fs/gfs2/ops_fstype.c2
-rw-r--r--fs/nfs/callback_xdr.c37
-rw-r--r--fs/nfs/delegation.c52
-rw-r--r--fs/nfs/delegation.h7
-rw-r--r--fs/nfs/dir.c15
-rw-r--r--fs/nfs/inode.c138
-rw-r--r--fs/nfs/nfs3proc.c24
-rw-r--r--fs/nfs/nfs3xdr.c7
-rw-r--r--fs/nfs/nfs4proc.c168
-rw-r--r--fs/nfs/nfs4state.c22
-rw-r--r--fs/nfs/nfs4xdr.c245
-rw-r--r--fs/nfs/proc.c19
-rw-r--r--fs/nfs/unlink.c7
-rw-r--r--fs/nfs/write.c8
-rw-r--r--fs/overlayfs/Kconfig17
-rw-r--r--fs/overlayfs/copy_up.c6
-rw-r--r--fs/overlayfs/export.c75
-rw-r--r--fs/overlayfs/inode.c188
-rw-r--r--fs/overlayfs/namei.c67
-rw-r--r--fs/overlayfs/overlayfs.h19
-rw-r--r--fs/overlayfs/ovl_entry.h21
-rw-r--r--fs/overlayfs/readdir.c45
-rw-r--r--fs/overlayfs/super.c157
-rw-r--r--fs/overlayfs/util.c39
-rw-r--r--fs/proc/generic.c23
-rw-r--r--fs/super.c105
-rw-r--r--fs/ubifs/find.c2
-rw-r--r--fs/ubifs/lprops.c4
-rw-r--r--fs/ubifs/scan.c1
-rw-r--r--fs/ubifs/super.c14
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c6
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h5
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c3
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h2
-rw-r--r--fs/xfs/libxfs/xfs_btree.c2
-rw-r--r--fs/xfs/libxfs/xfs_btree.h6
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c2
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c2
-rw-r--r--fs/xfs/libxfs/xfs_refcount.c22
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.c5
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.h3
-rw-r--r--fs/xfs/libxfs/xfs_rmap.c3
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.c5
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.h2
-rw-r--r--fs/xfs/libxfs/xfs_sb.c10
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c10
-rw-r--r--fs/xfs/xfs_bmap_item.c39
-rw-r--r--fs/xfs/xfs_bmap_util.c3
-rw-r--r--fs/xfs/xfs_buf.c1
-rw-r--r--fs/xfs/xfs_buf.h2
-rw-r--r--fs/xfs/xfs_discard.c14
-rw-r--r--fs/xfs/xfs_extfree_item.c38
-rw-r--r--fs/xfs/xfs_filestream.c21
-rw-r--r--fs/xfs/xfs_inode.c31
-rw-r--r--fs/xfs/xfs_inode.h6
-rw-r--r--fs/xfs/xfs_iops.c2
-rw-r--r--fs/xfs/xfs_log.c1
-rw-r--r--fs/xfs/xfs_log.h3
-rw-r--r--fs/xfs/xfs_log_cil.c2
-rw-r--r--fs/xfs/xfs_mru_cache.c8
-rw-r--r--fs/xfs/xfs_mru_cache.h8
-rw-r--r--fs/xfs/xfs_qm.c4
-rw-r--r--fs/xfs/xfs_refcount_item.c39
-rw-r--r--fs/xfs/xfs_rmap_item.c38
-rw-r--r--fs/xfs/xfs_super.c13
-rw-r--r--fs/xfs/xfs_symlink.c2
-rw-r--r--fs/xfs/xfs_trace.h14
-rw-r--r--include/acpi/processor.h2
-rw-r--r--include/asm-generic/io.h161
-rw-r--r--include/dt-bindings/clock/axg-clkc.h1
-rw-r--r--include/dt-bindings/clock/histb-clock.h55
-rw-r--r--include/dt-bindings/clock/imx6sll-clock.h202
-rw-r--r--include/dt-bindings/clock/mt2701-clk.h3
-rw-r--r--include/dt-bindings/clock/mt2712-clk.h12
-rw-r--r--include/dt-bindings/clock/mt7622-clk.h3
-rw-r--r--include/dt-bindings/clock/qcom,rpmcc.h5
-rw-r--r--include/dt-bindings/clock/r8a77965-cpg-mssr.h62
-rw-r--r--include/dt-bindings/clock/r8a77980-cpg-mssr.h51
-rw-r--r--include/dt-bindings/clock/rk3328-cru.h1
-rw-r--r--include/dt-bindings/clock/sprd,sc9860-clk.h21
-rw-r--r--include/dt-bindings/clock/stm32fx-clock.h7
-rw-r--r--include/dt-bindings/clock/stm32mp1-clks.h254
-rw-r--r--include/dt-bindings/clock/stratix10-clock.h84
-rw-r--r--include/dt-bindings/clock/sun50i-h6-ccu.h125
-rw-r--r--include/dt-bindings/clock/sun8i-h3-ccu.h2
-rw-r--r--include/dt-bindings/clock/tegra210-car.h2
-rw-r--r--include/dt-bindings/reset/sun50i-h6-ccu.h73
-rw-r--r--include/linux/acpi_iort.h7
-rw-r--r--include/linux/backing-dev.h1
-rw-r--r--include/linux/blk-mq.h2
-rw-r--r--include/linux/clk-provider.h23
-rw-r--r--include/linux/clk.h16
-rw-r--r--include/linux/clk/tegra.h1
-rw-r--r--include/linux/clk/ti.h2
-rw-r--r--include/linux/compat.h51
-rw-r--r--include/linux/cpufreq.h2
-rw-r--r--include/linux/cpuidle.h8
-rw-r--r--include/linux/fs.h8
-rw-r--r--include/linux/hrtimer.h1
-rw-r--r--include/linux/intel-iommu.h12
-rw-r--r--include/linux/iommu.h14
-rw-r--r--include/linux/jiffies.h7
-rw-r--r--include/linux/kexec.h81
-rw-r--r--include/linux/lockref.h1
-rw-r--r--include/linux/mfd/cros_ec.h2
-rw-r--r--include/linux/mfd/cros_ec_commands.h3
-rw-r--r--include/linux/mod_devicetable.h1
-rw-r--r--include/linux/nfs_fs.h35
-rw-r--r--include/linux/nfs_xdr.h9
-rw-r--r--include/linux/platform_data/atmel_mxt_ts.h31
-rw-r--r--include/linux/platform_data/clk-da8xx-cfgchip.h21
-rw-r--r--include/linux/platform_data/clk-davinci-pll.h21
-rw-r--r--include/linux/sched.h6
-rw-r--r--include/linux/sha256.h (renamed from arch/x86/purgatory/sha256.h)11
-rw-r--r--include/linux/sunrpc/clnt.h7
-rw-r--r--include/linux/sunrpc/xdr.h94
-rw-r--r--include/linux/sunrpc/xprt.h3
-rw-r--r--include/linux/syscalls.h42
-rw-r--r--include/linux/thermal.h1
-rw-r--r--include/linux/tick.h25
-rw-r--r--include/linux/timekeeping.h1
-rw-r--r--include/net/slhc_vj.h1
-rw-r--r--include/trace/events/afs.h113
-rw-r--r--include/trace/events/sunrpc.h106
-rw-r--r--include/uapi/asm-generic/siginfo.h3
-rw-r--r--include/uapi/linux/virtio_balloon.h4
-rw-r--r--include/xen/interface/features.h23
-rw-r--r--init/Kconfig10
-rw-r--r--ipc/shm.c23
-rw-r--r--kernel/crash_core.c1
-rw-r--r--kernel/debug/kdb/kdb_bp.c4
-rw-r--r--kernel/debug/kdb/kdb_main.c89
-rw-r--r--kernel/debug/kdb/kdb_support.c4
-rw-r--r--kernel/events/core.c14
-rw-r--r--kernel/irq/affinity.c162
-rw-r--r--kernel/kexec_file.c619
-rw-r--r--kernel/power/qos.c2
-rw-r--r--kernel/resource.c3
-rw-r--r--kernel/sched/core.c2
-rw-r--r--kernel/sched/cpufreq_schedutil.c3
-rw-r--r--kernel/sched/deadline.c2
-rw-r--r--kernel/sched/fair.c2
-rw-r--r--kernel/sched/idle.c34
-rw-r--r--kernel/sched/rt.c4
-rw-r--r--kernel/sched/sched.h17
-rw-r--r--kernel/sys_ni.c10
-rw-r--r--kernel/time/hrtimer.c53
-rw-r--r--kernel/time/ntp.c2
-rw-r--r--kernel/time/posix-stubs.c10
-rw-r--r--kernel/time/tick-sched.c250
-rw-r--r--kernel/time/tick-sched.h18
-rw-r--r--kernel/time/timekeeping_internal.h2
-rw-r--r--kernel/trace/trace_event_perf.c4
-rw-r--r--kernel/trace/trace_events_filter.c26
-rw-r--r--kernel/trace/trace_uprobe.c25
-rw-r--r--lib/lockref.c28
-rw-r--r--lib/sha256.c (renamed from arch/x86/purgatory/sha256.c)4
-rw-r--r--lib/swiotlb.c4
-rw-r--r--mm/filemap.c6
-rw-r--r--mm/gup.c9
-rw-r--r--mm/gup_benchmark.c4
-rw-r--r--mm/slab.c3
-rw-r--r--mm/util.c6
-rw-r--r--net/ipv4/ip_gre.c6
-rw-r--r--net/ipv4/netfilter/Makefile5
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic_main.c2
-rw-r--r--net/l2tp/l2tp_core.c225
-rw-r--r--net/l2tp/l2tp_core.h4
-rw-r--r--net/l2tp/l2tp_netlink.c22
-rw-r--r--net/l2tp/l2tp_ppp.c9
-rw-r--r--net/rds/send.c15
-rw-r--r--net/sunrpc/clnt.c8
-rw-r--r--net/sunrpc/sched.c10
-rw-r--r--net/sunrpc/stats.c16
-rw-r--r--net/sunrpc/sunrpc.h6
-rw-r--r--net/sunrpc/xdr.c82
-rw-r--r--net/sunrpc/xprt.c34
-rw-r--r--net/sunrpc/xprtrdma/backchannel.c7
-rw-r--r--net/sunrpc/xprtrdma/fmr_ops.c13
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c53
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c32
-rw-r--r--net/sunrpc/xprtrdma/transport.c43
-rw-r--r--net/sunrpc/xprtrdma/verbs.c44
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h4
-rw-r--r--net/sunrpc/xprtsock.c4
-rw-r--r--scripts/Kbuild.include5
-rw-r--r--scripts/Makefile.build23
-rw-r--r--scripts/Makefile.host2
-rw-r--r--scripts/Makefile.lib31
-rw-r--r--scripts/asn1_compiler.c2
-rwxr-xr-xscripts/bloat-o-meter4
-rw-r--r--scripts/dtc/.gitignore3
-rw-r--r--scripts/dtc/Makefile5
l---------scripts/dtc/include-prefixes/cris1
l---------scripts/dtc/include-prefixes/metag1
-rw-r--r--scripts/genksyms/.gitignore3
-rw-r--r--scripts/genksyms/Makefile27
-rw-r--r--scripts/genksyms/lex.lex.c_shipped2291
-rw-r--r--scripts/genksyms/parse.tab.c_shipped2394
-rw-r--r--scripts/genksyms/parse.tab.h_shipped119
-rw-r--r--scripts/kconfig/.gitignore3
-rw-r--r--scripts/kconfig/Makefile4
-rw-r--r--scripts/kconfig/conf.c14
-rw-r--r--scripts/package/Makefile34
-rwxr-xr-xscripts/package/builddeb221
-rwxr-xr-xscripts/package/mkdebian189
-rwxr-xr-xscripts/package/mkspec2
-rw-r--r--security/apparmor/.gitignore1
-rw-r--r--security/apparmor/Makefile45
-rw-r--r--security/apparmor/apparmorfs.c203
-rw-r--r--security/apparmor/capability.c2
-rw-r--r--security/apparmor/domain.c355
-rw-r--r--security/apparmor/file.c32
-rw-r--r--security/apparmor/include/apparmor.h3
-rw-r--r--security/apparmor/include/audit.h19
-rw-r--r--security/apparmor/include/cred.h (renamed from security/apparmor/include/context.h)63
-rw-r--r--security/apparmor/include/label.h28
-rw-r--r--security/apparmor/include/match.h28
-rw-r--r--security/apparmor/include/net.h106
-rw-r--r--security/apparmor/include/perms.h5
-rw-r--r--security/apparmor/include/policy.h23
-rw-r--r--security/apparmor/include/policy_unpack.h2
-rw-r--r--security/apparmor/include/sig_names.h5
-rw-r--r--security/apparmor/include/task.h94
-rw-r--r--security/apparmor/ipc.c52
-rw-r--r--security/apparmor/label.c42
-rw-r--r--security/apparmor/lib.c5
-rw-r--r--security/apparmor/lsm.c467
-rw-r--r--security/apparmor/match.c423
-rw-r--r--security/apparmor/mount.c2
-rw-r--r--security/apparmor/net.c187
-rw-r--r--security/apparmor/nulldfa.in108
-rw-r--r--security/apparmor/policy.c11
-rw-r--r--security/apparmor/policy_ns.c2
-rw-r--r--security/apparmor/policy_unpack.c70
-rw-r--r--security/apparmor/procattr.c2
-rw-r--r--security/apparmor/resource.c2
-rw-r--r--security/apparmor/stacksplitdfa.in114
-rw-r--r--security/apparmor/task.c (renamed from security/apparmor/context.c)139
-rw-r--r--sound/pci/hda/hda_intel.c4
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h2
-rw-r--r--tools/build/Build.include5
-rw-r--r--tools/include/tools/config.h34
-rw-r--r--tools/include/uapi/drm/i915_drm.h112
-rw-r--r--tools/objtool/Makefile2
-rw-r--r--tools/perf/Documentation/perf-report.txt1
-rw-r--r--tools/perf/Documentation/perf-trace.txt3
-rw-r--r--tools/perf/Documentation/perf-version.txt24
-rw-r--r--tools/perf/Makefile.config8
-rw-r--r--tools/perf/Makefile.perf3
-rw-r--r--tools/perf/builtin-trace.c11
-rw-r--r--tools/perf/builtin-version.c82
-rw-r--r--tools/perf/perf.c6
-rw-r--r--tools/perf/perf.h1
-rw-r--r--tools/perf/ui/browser.c12
-rw-r--r--tools/perf/ui/browser.h2
-rw-r--r--tools/perf/ui/browsers/annotate.c33
-rw-r--r--tools/perf/ui/browsers/hists.c138
-rw-r--r--tools/perf/util/annotate.c51
-rw-r--r--tools/perf/util/annotate.h12
-rw-r--r--tools/perf/util/auxtrace.c72
-rw-r--r--tools/perf/util/c++/clang-test.cpp2
-rw-r--r--tools/perf/util/c++/clang.cpp11
-rw-r--r--tools/perf/util/dwarf-aux.c2
-rw-r--r--tools/perf/util/hist.c81
-rw-r--r--tools/perf/util/hist.h7
-rw-r--r--tools/perf/util/map.h4
-rw-r--r--tools/perf/util/session.c2
-rw-r--r--tools/perf/util/sort.c41
-rw-r--r--tools/perf/util/sort.h1
-rw-r--r--tools/perf/util/util.h4
-rw-r--r--tools/scripts/Makefile.include2
-rwxr-xr-xtools/testing/ktest/config-bisect.pl770
-rwxr-xr-xtools/testing/ktest/ktest.pl533
-rw-r--r--tools/testing/ktest/sample.conf60
-rw-r--r--tools/testing/selftests/proc/proc-loadavg-001.c2
-rw-r--r--tools/testing/selftests/proc/proc-self-map-files-001.c2
-rw-r--r--tools/testing/selftests/proc/proc-self-map-files-002.c2
-rw-r--r--tools/testing/selftests/proc/proc-self-syscall.c15
-rw-r--r--tools/testing/selftests/proc/proc-self-wchan.c15
-rw-r--r--tools/testing/selftests/proc/proc-uptime-001.c2
-rw-r--r--tools/testing/selftests/proc/proc-uptime-002.c2
-rw-r--r--tools/testing/selftests/proc/proc-uptime.h2
-rw-r--r--tools/testing/selftests/proc/read.c2
894 files changed, 36096 insertions, 19972 deletions
diff --git a/.gitignore b/.gitignore
index a1dfd2acd9c3..97ba6b79834c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,6 +11,7 @@
#
.*
*.a
+*.asn1.[ch]
*.bin
*.bz2
*.c.[012]*.*
@@ -22,6 +23,7 @@
*.gz
*.i
*.ko
+*.lex.c
*.ll
*.lst
*.lz4
@@ -37,6 +39,7 @@
*.so.dbg
*.su
*.symtypes
+*.tab.[ch]
*.tar
*.xz
Module.symvers
@@ -129,7 +132,3 @@ all.config
# Kdevelop4
*.kdev4
-
-#Automatically generated by ASN.1 compiler
-net/ipv4/netfilter/nf_nat_snmp_basic-asn1.c
-net/ipv4/netfilter/nf_nat_snmp_basic-asn1.h
diff --git a/Documentation/clk.txt b/Documentation/clk.txt
index be909ed45970..511628bb3d3a 100644
--- a/Documentation/clk.txt
+++ b/Documentation/clk.txt
@@ -268,9 +268,19 @@ The common clock framework uses two global locks, the prepare lock and the
enable lock.
The enable lock is a spinlock and is held across calls to the .enable,
-.disable and .is_enabled operations. Those operations are thus not allowed to
-sleep, and calls to the clk_enable(), clk_disable() and clk_is_enabled() API
-functions are allowed in atomic context.
+.disable operations. Those operations are thus not allowed to sleep,
+and calls to the clk_enable(), clk_disable() API functions are allowed in
+atomic context.
+
+The clk_is_enabled() API is also designed to be usable in atomic context.
+However, it does not make sense for the core to hold the enable lock there,
+unless the caller also wants to do something else with the enable state while
+that lock is held. Otherwise, checking whether a clk is enabled is a one-shot
+read of the enabled state, which could just as easily change after the
+function returns because the lock is released. The user of this API therefore
+needs to synchronize the read of the state with whatever they are using it for,
+to make sure that the enable state does not change during that time.
The prepare lock is a mutex and is held across calls to all other operations.
All those operations are allowed to sleep, and calls to the corresponding API
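The following is a minimal sketch of how a consumer might follow the locking rules described in the hunk above; the "foo" device, its fields and helper names are hypothetical and not part of this patch. clk_prepare() is called in process context, clk_enable()/clk_disable() are called under a driver-private spinlock, and the driver tracks its own enable state rather than re-reading it from the framework, which is the caller-side synchronization the new text asks for.

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/spinlock.h>

	struct foo_device {
		struct clk *clk;
		spinlock_t lock;	/* guards 'running' and the hardware */
		bool running;
	};

	static int foo_clock_setup(struct foo_device *foo, struct device *dev)
	{
		foo->clk = devm_clk_get(dev, NULL);
		if (IS_ERR(foo->clk))
			return PTR_ERR(foo->clk);
		spin_lock_init(&foo->lock);
		return clk_prepare(foo->clk);	/* may sleep: process context only */
	}

	/* Both helpers below may be called from atomic context. */
	static void foo_start(struct foo_device *foo)
	{
		unsigned long flags;

		spin_lock_irqsave(&foo->lock, flags);
		if (!foo->running && !clk_enable(foo->clk))
			foo->running = true;
		spin_unlock_irqrestore(&foo->lock, flags);
	}

	static void foo_stop(struct foo_device *foo)
	{
		unsigned long flags;

		spin_lock_irqsave(&foo->lock, flags);
		if (foo->running) {
			clk_disable(foo->clk);
			foo->running = false;
		}
		spin_unlock_irqrestore(&foo->lock, flags);
	}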
diff --git a/Documentation/cpu-freq/core.txt b/Documentation/cpu-freq/core.txt
index 978463a7c81e..073f128af5a7 100644
--- a/Documentation/cpu-freq/core.txt
+++ b/Documentation/cpu-freq/core.txt
@@ -97,12 +97,10 @@ flags - flags of the cpufreq driver
==================================================================
For details about OPP, see Documentation/power/opp.txt
-dev_pm_opp_init_cpufreq_table - cpufreq framework typically is initialized with
- cpufreq_table_validate_and_show() which is provided with the list of
- frequencies that are available for operation. This function provides
- a ready to use conversion routine to translate the OPP layer's internal
- information about the available frequencies into a format readily
- providable to cpufreq.
+dev_pm_opp_init_cpufreq_table -
+ This function provides a ready to use conversion routine to translate
+ the OPP layer's internal information about the available frequencies
+ into a format readily providable to cpufreq.
WARNING: Do not use this function in interrupt context.
@@ -112,7 +110,7 @@ dev_pm_opp_init_cpufreq_table - cpufreq framework typically is initialized with
/* Do things */
r = dev_pm_opp_init_cpufreq_table(dev, &freq_table);
if (!r)
- cpufreq_table_validate_and_show(policy, freq_table);
+ policy->freq_table = freq_table;
/* Do other things */
}
diff --git a/Documentation/cpu-freq/cpu-drivers.txt b/Documentation/cpu-freq/cpu-drivers.txt
index 61546ac578d6..6e353d00cdc6 100644
--- a/Documentation/cpu-freq/cpu-drivers.txt
+++ b/Documentation/cpu-freq/cpu-drivers.txt
@@ -259,10 +259,8 @@ CPUFREQ_ENTRY_INVALID. The entries don't need to be in sorted in any
particular order, but if they are cpufreq core will do DVFS a bit
quickly for them as search for best match is faster.
-By calling cpufreq_table_validate_and_show(), the cpuinfo.min_freq and
-cpuinfo.max_freq values are detected, and policy->min and policy->max
-are set to the same values. This is helpful for the per-CPU
-initialization stage.
+The cpufreq table is verified automatically by the core if the policy contains a
+valid pointer in its policy->freq_table field.
cpufreq_frequency_table_verify() assures that at least one valid
frequency is within policy->min and policy->max, and all other criteria
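To make the new rule concrete, here is a minimal hypothetical driver sketch (the "foo" names and the table contents are illustrative, not from this patch): the driver simply points policy->freq_table at its table in its ->init callback and lets the cpufreq core validate it.

	#include <linux/cpufreq.h>

	/* Hypothetical static table; CPUFREQ_TABLE_END terminates it. */
	static struct cpufreq_frequency_table foo_freq_table[] = {
		{ .frequency = 400000 },	/* kHz */
		{ .frequency = 800000 },
		{ .frequency = 1000000 },
		{ .frequency = CPUFREQ_TABLE_END },
	};

	static int foo_cpufreq_init(struct cpufreq_policy *policy)
	{
		/*
		 * No explicit validation call: the core verifies the table
		 * because policy->freq_table holds a valid pointer.
		 */
		policy->freq_table = foo_freq_table;
		return 0;
	}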
diff --git a/Documentation/cpuidle/sysfs.txt b/Documentation/cpuidle/sysfs.txt
index b6f44f490ed7..d1587f434e7b 100644
--- a/Documentation/cpuidle/sysfs.txt
+++ b/Documentation/cpuidle/sysfs.txt
@@ -40,6 +40,7 @@ total 0
-r--r--r-- 1 root root 4096 Feb 8 10:42 latency
-r--r--r-- 1 root root 4096 Feb 8 10:42 name
-r--r--r-- 1 root root 4096 Feb 8 10:42 power
+-r--r--r-- 1 root root 4096 Feb 8 10:42 residency
-r--r--r-- 1 root root 4096 Feb 8 10:42 time
-r--r--r-- 1 root root 4096 Feb 8 10:42 usage
@@ -50,6 +51,7 @@ total 0
-r--r--r-- 1 root root 4096 Feb 8 10:42 latency
-r--r--r-- 1 root root 4096 Feb 8 10:42 name
-r--r--r-- 1 root root 4096 Feb 8 10:42 power
+-r--r--r-- 1 root root 4096 Feb 8 10:42 residency
-r--r--r-- 1 root root 4096 Feb 8 10:42 time
-r--r--r-- 1 root root 4096 Feb 8 10:42 usage
@@ -60,6 +62,7 @@ total 0
-r--r--r-- 1 root root 4096 Feb 8 10:42 latency
-r--r--r-- 1 root root 4096 Feb 8 10:42 name
-r--r--r-- 1 root root 4096 Feb 8 10:42 power
+-r--r--r-- 1 root root 4096 Feb 8 10:42 residency
-r--r--r-- 1 root root 4096 Feb 8 10:42 time
-r--r--r-- 1 root root 4096 Feb 8 10:42 usage
@@ -70,6 +73,7 @@ total 0
-r--r--r-- 1 root root 4096 Feb 8 10:42 latency
-r--r--r-- 1 root root 4096 Feb 8 10:42 name
-r--r--r-- 1 root root 4096 Feb 8 10:42 power
+-r--r--r-- 1 root root 4096 Feb 8 10:42 residency
-r--r--r-- 1 root root 4096 Feb 8 10:42 time
-r--r--r-- 1 root root 4096 Feb 8 10:42 usage
--------------------------------------------------------------------------------
@@ -78,6 +82,8 @@ total 0
* desc : Small description about the idle state (string)
* disable : Option to disable this idle state (bool) -> see note below
* latency : Latency to exit out of this idle state (in microseconds)
+* residency : Time after which a state becomes more efficient than any
+ shallower state (in microseconds)
* name : Name of the idle state (string)
* power : Power consumed while in this idle state (in milliwatts)
* time : Total time spent in this idle state (in microseconds)
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt
index 9b8f578d5e19..34a69ba67f13 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt
@@ -6,6 +6,7 @@ The MediaTek AUDSYS controller provides various clocks to the system.
Required Properties:
- compatible: Should be one of:
+ - "mediatek,mt2701-audsys", "syscon"
- "mediatek,mt7622-audsys", "syscon"
- #clock-cells: Must be 1
@@ -13,10 +14,19 @@ The AUDSYS controller uses the common clk binding from
Documentation/devicetree/bindings/clock/clock-bindings.txt
The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+Required sub-nodes:
+-------
+For the common binding part and usage, refer to
+../sound/mt2701-afe-pcm.txt.
+
Example:
-audsys: audsys@11220000 {
- compatible = "mediatek,mt7622-audsys", "syscon";
- reg = <0 0x11220000 0 0x1000>;
- #clock-cells = <1>;
-};
+ audsys: clock-controller@11220000 {
+ compatible = "mediatek,mt7622-audsys", "syscon";
+ reg = <0 0x11220000 0 0x2000>;
+ #clock-cells = <1>;
+
+ afe: audio-controller {
+ ...
+ };
+ };
diff --git a/Documentation/devicetree/bindings/clock/imx6sll-clock.txt b/Documentation/devicetree/bindings/clock/imx6sll-clock.txt
new file mode 100644
index 000000000000..fee849d5fdd1
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx6sll-clock.txt
@@ -0,0 +1,36 @@
+* Clock bindings for Freescale i.MX6 SLL
+
+Required properties:
+- compatible: Should be "fsl,imx6sll-ccm"
+- reg: Address and length of the register set
+- #clock-cells: Should be <1>
+- clocks: list of clock specifiers, must contain an entry for each required
+ entry in clock-names
+- clock-names: should include entries "ckil", "osc", "ipp_di0" and "ipp_di1"
+
+The clock consumer should specify the desired clock by having the clock
+ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx6sll-clock.h
+for the full list of i.MX6 SLL clock IDs.
+
+Examples:
+
+#include <dt-bindings/clock/imx6sll-clock.h>
+
+clks: clock-controller@20c4000 {
+ compatible = "fsl,imx6sll-ccm";
+ reg = <0x020c4000 0x4000>;
+ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
+ #clock-cells = <1>;
+ clocks = <&ckil>, <&osc>, <&ipp_di0>, <&ipp_di1>;
+ clock-names = "ckil", "osc", "ipp_di0", "ipp_di1";
+};
+
+uart1: serial@2020000 {
+ compatible = "fsl,imx6sl-uart", "fsl,imx6q-uart", "fsl,imx21-uart";
+ reg = <0x02020000 0x4000>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SLL_CLK_UART1_IPG>,
+ <&clks IMX6SLL_CLK_UART1_SERIAL>;
+ clock-names = "ipg", "per";
+};
diff --git a/Documentation/devicetree/bindings/clock/intc_stratix10.txt b/Documentation/devicetree/bindings/clock/intc_stratix10.txt
new file mode 100644
index 000000000000..9f4ec5cb5c6b
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/intc_stratix10.txt
@@ -0,0 +1,20 @@
+Device Tree Clock bindings for Intel's SoCFPGA Stratix10 platform
+
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be
+ "intel,stratix10-clkmgr"
+
+- reg : shall be the control register offset from CLOCK_MANAGER's base for the clock.
+
+- #clock-cells : from common clock binding, shall be set to 1.
+
+Example:
+ clkmgr: clock-controller@ffd10000 {
+ compatible = "intel,stratix10-clkmgr";
+ reg = <0xffd10000 0x1000>;
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
index f1890d0777a6..773a5226342f 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
@@ -22,7 +22,9 @@ Required Properties:
- "renesas,r8a7794-cpg-mssr" for the r8a7794 SoC (R-Car E2)
- "renesas,r8a7795-cpg-mssr" for the r8a7795 SoC (R-Car H3)
- "renesas,r8a7796-cpg-mssr" for the r8a7796 SoC (R-Car M3-W)
+ - "renesas,r8a77965-cpg-mssr" for the r8a77965 SoC (R-Car M3-N)
- "renesas,r8a77970-cpg-mssr" for the r8a77970 SoC (R-Car V3M)
+ - "renesas,r8a77980-cpg-mssr" for the r8a77980 SoC (R-Car V3H)
- "renesas,r8a77995-cpg-mssr" for the r8a77995 SoC (R-Car D3)
- reg: Base address and length of the memory resource used by the CPG/MSSR
@@ -32,8 +34,8 @@ Required Properties:
clock-names
- clock-names: List of external parent clock names. Valid names are:
- "extal" (r8a7743, r8a7745, r8a7790, r8a7791, r8a7792, r8a7793, r8a7794,
- r8a7795, r8a7796, r8a77970, r8a77995)
- - "extalr" (r8a7795, r8a7796, r8a77970)
+ r8a7795, r8a7796, r8a77965, r8a77970, r8a77980, r8a77995)
+ - "extalr" (r8a7795, r8a7796, r8a77965, r8a77970, r8a77980)
- "usb_extal" (r8a7743, r8a7745, r8a7790, r8a7791, r8a7793, r8a7794)
- #clock-cells: Must be 2
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3328-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3328-cru.txt
index e71c675ba5da..904ae682ea90 100644
--- a/Documentation/devicetree/bindings/clock/rockchip,rk3328-cru.txt
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3328-cru.txt
@@ -32,6 +32,7 @@ clock-output-names:
- "clkin_i2s" - external I2S clock - optional,
- "gmac_clkin" - external GMAC clock - optional
- "phy_50m_out" - output clock of the pll in the mac phy
+ - "hdmi_phy" - output clock of the hdmi phy pll - optional
Example: Clock controller node:
diff --git a/Documentation/devicetree/bindings/clock/silabs,si544.txt b/Documentation/devicetree/bindings/clock/silabs,si544.txt
new file mode 100644
index 000000000000..b86535b80920
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/silabs,si544.txt
@@ -0,0 +1,25 @@
+Binding for Silicon Labs 544 programmable I2C clock generator.
+
+Reference
+This binding uses the common clock binding[1]. Details about the device can be
+found in the datasheet[2].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+[2] Si544 datasheet
+ https://www.silabs.com/documents/public/data-sheets/si544-datasheet.pdf
+
+Required properties:
+ - compatible: One of "silabs,si544a", "silabs,si544b", "silabs,si544c" according
+ to the speed grade of the chip.
+ - reg: I2C device address.
+ - #clock-cells: From common clock bindings: Shall be 0.
+
+Optional properties:
+ - clock-output-names: From common clock bindings. Recommended to be "si544".
+
+Example:
+ si544: clock-controller@55 {
+ reg = <0x55>;
+ #clock-cells = <0>;
+ compatible = "silabs,si544b";
+ };
diff --git a/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.txt b/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.txt
new file mode 100644
index 000000000000..fb9495ea582c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.txt
@@ -0,0 +1,60 @@
+STMicroelectronics STM32 Peripheral Reset Clock Controller
+==========================================================
+
+The RCC IP is both a reset and a clock controller.
+
+The RCC also handles power management (resume/suspend and the wakeup interrupt).
+
+Please also refer to reset.txt for common reset controller binding usage.
+
+Please also refer to clock-bindings.txt for common clock controller
+binding usage.
+
+
+Required properties:
+- compatible: "st,stm32mp1-rcc", "syscon"
+- reg: should be register base and length as documented in the datasheet
+- #clock-cells: 1, device nodes should specify the clock in their
+ "clocks" property, containing a phandle to the clock device node,
+ and an index specifying the clock to use.
+- #reset-cells: Shall be 1
+- interrupts: Should contain a general interrupt line and an interrupt line
+ for the wake-up of the processor (CSTOP).
+
+Example:
+ rcc: rcc@50000000 {
+ compatible = "st,stm32mp1-rcc", "syscon";
+ reg = <0x50000000 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ interrupts = <GIC_SPI 5 IRQ_TYPE_NONE>,
+ <GIC_SPI 145 IRQ_TYPE_NONE>;
+ };
+
+Specifying clocks
+=================
+
+All available clocks are defined as preprocessor macros in
+dt-bindings/clock/stm32mp1-clks.h header and can be used in device
+tree sources.
+
+Specifying softreset control of devices
+=======================================
+
+Device nodes should specify the reset channel required in their "resets"
+property, containing a phandle to the reset device node and an index specifying
+which channel to use.
+The index is the bit number within the RCC registers bank, starting from RCC
+base address.
+It is calculated as: index = register_offset / 4 * 32 + bit_offset.
+Where bit_offset is the bit offset within the register.
+
+For example on STM32MP1, for LTDC reset:
+ ltdc = APB4_RSTSETR_offset / 4 * 32 + LTDC_bit_offset
+ = 0x180 / 4 * 32 + 0 = 3072
+
+The list of valid indices for STM32MP1 is available in:
+include/dt-bindings/reset-controller/stm32mp1-resets.h
+
+This file implements defines like:
+#define LTDC_R 3072
diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
index 4ca21c3a6fc9..460ef27b1008 100644
--- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
+++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
@@ -20,6 +20,7 @@ Required properties :
- "allwinner,sun50i-a64-ccu"
- "allwinner,sun50i-a64-r-ccu"
- "allwinner,sun50i-h5-ccu"
+ - "allwinner,sun50i-h6-ccu"
- "nextthing,gr8-ccu"
- reg: Must contain the registers base address and length
@@ -31,6 +32,9 @@ Required properties :
- #clock-cells : must contain 1
- #reset-cells : must contain 1
+For the main CCU on H6, one more clock is needed:
+- "iosc": the SoC's internal frequency oscillator
+
For the PRCM CCUs on A83T/H3/A64, two more clocks are needed:
- "pll-periph": the SoC's peripheral PLL from the main CCU
- "iosc": the SoC's internal frequency oscillator
diff --git a/Documentation/devicetree/bindings/clock/ti/davinci/da8xx-cfgchip.txt b/Documentation/devicetree/bindings/clock/ti/davinci/da8xx-cfgchip.txt
new file mode 100644
index 000000000000..1e03dce99a8f
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/ti/davinci/da8xx-cfgchip.txt
@@ -0,0 +1,93 @@
+Binding for TI DA8XX/OMAP-L13X/AM17XX/AM18XX CFGCHIP clocks
+
+TI DA8XX/OMAP-L13X/AM17XX/AM18XX SoCs contain a general purpose set of
+registers called CFGCHIPn. Some of these registers function as clock
+gates. This document describes the bindings for those clocks.
+
+All of the clock nodes described below must be child nodes of a CFGCHIP node
+(compatible = "ti,da830-cfgchip").
+
+USB PHY clocks
+--------------
+Required properties:
+- compatible: shall be "ti,da830-usb-phy-clocks".
+- #clock-cells: from common clock binding; shall be set to 1.
+- clocks: phandles to the parent clocks corresponding to clock-names
+- clock-names: shall be "fck", "usb_refclkin", "auxclk"
+
+This node provides two clocks. The clock at index 0 is the USB 2.0 PHY 48MHz
+clock and the clock at index 1 is the USB 1.1 PHY 48MHz clock.
+
+eHRPWM Time Base Clock (TBCLK)
+------------------------------
+Required properties:
+- compatible: shall be "ti,da830-tbclksync".
+- #clock-cells: from common clock binding; shall be set to 0.
+- clocks: phandle to the parent clock
+- clock-names: shall be "fck"
+
+PLL DIV4.5 divider
+------------------
+Required properties:
+- compatible: shall be "ti,da830-div4p5ena".
+- #clock-cells: from common clock binding; shall be set to 0.
+- clocks: phandle to the parent clock
+- clock-names: shall be "pll0_pllout"
+
+EMIFA clock source (ASYNC1)
+---------------------------
+Required properties:
+- compatible: shall be "ti,da850-async1-clksrc".
+- #clock-cells: from common clock binding; shall be set to 0.
+- clocks: phandles to the parent clocks corresponding to clock-names
+- clock-names: shall be "pll0_sysclk3", "div4.5"
+
+ASYNC3 clock source
+-------------------
+Required properties:
+- compatible: shall be "ti,da850-async3-clksrc".
+- #clock-cells: from common clock binding; shall be set to 0.
+- clocks: phandles to the parent clocks corresponding to clock-names
+- clock-names: shall be "pll0_sysclk2", "pll1_sysclk2"
+
+Examples:
+
+ cfgchip: syscon@1417c {
+ compatible = "ti,da830-cfgchip", "syscon", "simple-mfd";
+ reg = <0x1417c 0x14>;
+
+ usb_phy_clk: usb-phy-clocks {
+ compatible = "ti,da830-usb-phy-clocks";
+ #clock-cells = <1>;
+ clocks = <&psc1 1>, <&usb_refclkin>, <&pll0_auxclk>;
+ clock-names = "fck", "usb_refclkin", "auxclk";
+ };
+ ehrpwm_tbclk: ehrpwm_tbclk {
+ compatible = "ti,da830-tbclksync";
+ #clock-cells = <0>;
+ clocks = <&psc1 17>;
+ clock-names = "fck";
+ };
+ div4p5_clk: div4.5 {
+ compatible = "ti,da830-div4p5ena";
+ #clock-cells = <0>;
+ clocks = <&pll0_pllout>;
+ clock-names = "pll0_pllout";
+ };
+ async1_clk: async1 {
+ compatible = "ti,da850-async1-clksrc";
+ #clock-cells = <0>;
+ clocks = <&pll0_sysclk 3>, <&div4p5_clk>;
+ clock-names = "pll0_sysclk3", "div4.5";
+ };
+ async3_clk: async3 {
+ compatible = "ti,da850-async3-clksrc";
+ #clock-cells = <0>;
+ clocks = <&pll0_sysclk 2>, <&pll1_sysclk 2>;
+ clock-names = "pll0_sysclk2", "pll1_sysclk2";
+ };
+ };
+
+Also see:
+- Documentation/devicetree/bindings/clock/clock-bindings.txt
diff --git a/Documentation/devicetree/bindings/clock/ti/davinci/pll.txt b/Documentation/devicetree/bindings/clock/ti/davinci/pll.txt
new file mode 100644
index 000000000000..36998e184821
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/ti/davinci/pll.txt
@@ -0,0 +1,96 @@
+Binding for TI DaVinci PLL Controllers
+
+The PLL provides clocks to most of the components on the SoC. In addition
+to the PLL itself, this controller also contains bypasses, gates, dividers,
+and multiplexers for various clock signals.
+
+Required properties:
+- compatible: shall be one of:
+ - "ti,da850-pll0" for PLL0 on DA850/OMAP-L138/AM18XX
+ - "ti,da850-pll1" for PLL1 on DA850/OMAP-L138/AM18XX
+- reg: physical base address and size of the controller's register area.
+- clocks: phandles corresponding to the clock names
+- clock-names: names of the clock sources - depends on compatible string
+ - for "ti,da850-pll0", shall be "clksrc", "extclksrc"
+ - for "ti,da850-pll1", shall be "clksrc"
+
+Optional properties:
+- ti,clkmode-square-wave: Indicates that the board is supplying a square
+ wave input on the OSCIN pin instead of using a crystal oscillator.
+ This property is only valid when compatible = "ti,da850-pll0".
+
+
+Optional child nodes:
+
+pllout
+ Describes the main PLL clock output (before POSTDIV). The node name must
+ be "pllout".
+
+ Required properties:
+ - #clock-cells: shall be 0
+
+sysclk
+ Describes the PLLDIVn divider clocks that provide the SYSCLKn clock
+ domains. The node name must be "sysclk". Consumers of this node should
+ use "n" in "SYSCLKn" as the index parameter for the clock cell.
+
+ Required properties:
+ - #clock-cells: shall be 1
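+
+	For illustration only (not part of this binding), a consumer of the
+	SYSCLK2 domain would reference this node as:
+
+		clocks = <&pll0_sysclk 2>;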
+
+auxclk
+ Describes the AUXCLK output of the PLL. The node name must be "auxclk".
+ This child node is only valid when compatible = "ti,da850-pll0".
+
+ Required properties:
+ - #clock-cells: shall be 0
+
+obsclk
+ Describes the OBSCLK output of the PLL. The node name must be "obsclk".
+
+ Required properties:
+ - #clock-cells: shall be 0
+
+
+Examples:
+
+ pll0: clock-controller@11000 {
+ compatible = "ti,da850-pll0";
+ reg = <0x11000 0x1000>;
+ clocks = <&ref_clk>, <&pll1_sysclk 3>;
+ clock-names = "clksrc", "extclksrc";
+ ti,clkmode-square-wave;
+
+ pll0_pllout: pllout {
+ #clock-cells = <0>;
+ };
+
+ pll0_sysclk: sysclk {
+ #clock-cells = <1>;
+ };
+
+ pll0_auxclk: auxclk {
+ #clock-cells = <0>;
+ };
+
+ pll0_obsclk: obsclk {
+ #clock-cells = <0>;
+ };
+ };
+
+ pll1: clock-controller@21a000 {
+ compatible = "ti,da850-pll1";
+ reg = <0x21a000 0x1000>;
+ clocks = <&ref_clk>;
+ clock-names = "clksrc";
+
+		pll1_sysclk: sysclk {
+ #clock-cells = <1>;
+ };
+
+		pll1_obsclk: obsclk {
+ #clock-cells = <0>;
+ };
+ };
+
+Also see:
+- Documentation/devicetree/bindings/clock/clock-bindings.txt
diff --git a/Documentation/devicetree/bindings/clock/ti/davinci/psc.txt b/Documentation/devicetree/bindings/clock/ti/davinci/psc.txt
new file mode 100644
index 000000000000..dae4ad8e198c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/ti/davinci/psc.txt
@@ -0,0 +1,71 @@
+Binding for TI DaVinci Power Sleep Controller (PSC)
+
+The PSC provides power management, clock gating and reset functionality. It is
+primarily used for clocking.
+
+Required properties:
+- compatible: shall be one of:
+ - "ti,da850-psc0" for PSC0 on DA850/OMAP-L138/AM18XX
+ - "ti,da850-psc1" for PSC1 on DA850/OMAP-L138/AM18XX
+- reg: physical base address and size of the controller's register area
+- #clock-cells: from common clock binding; shall be set to 1
+- #power-domain-cells: from generic power domain binding; shall be set to 1.
+- clocks: phandles to clocks corresponding to the clock-names property
+- clock-names: list of parent clock names - depends on compatible value
+ - for "ti,da850-psc0", shall be "pll0_sysclk1", "pll0_sysclk2",
+ "pll0_sysclk4", "pll0_sysclk6", "async1"
+ - for "ti,da850-psc1", shall be "pll0_sysclk2", "pll0_sysclk4", "async3"
+
+Optional properties:
+- #reset-cells: from reset binding; shall be set to 1 - only applicable when
+ at least one local domain provides a local reset.
+
+Consumers:
+
+ Clock, power domain and reset consumers shall use the local power domain
+ module ID (LPSC) as the index corresponding to the clock cell. Refer to
+ the device-specific datasheet to find these numbers. NB: Most local
+ domains only provide a clock/power domain and not a reset.
+
+Examples:
+
+ psc0: clock-controller@10000 {
+ compatible = "ti,da850-psc0";
+ reg = <0x10000 0x1000>;
+ #clock-cells = <1>;
+ #power-domain-cells = <1>;
+ #reset-cells = <1>;
+ clocks = <&pll0_sysclk 1>, <&pll0_sysclk 2>,
+ <&pll0_sysclk 4>, <&pll0_sysclk 6>, <&async1_clk>;
+		clock-names = "pll0_sysclk1", "pll0_sysclk2",
+ "pll0_sysclk4", "pll0_sysclk6", "async1";
+ };
+ psc1: clock-controller@227000 {
+ compatible = "ti,da850-psc1";
+ reg = <0x227000 0x1000>;
+ #clock-cells = <1>;
+ #power-domain-cells = <1>;
+ clocks = <&pll0_sysclk 2>, <&pll0_sysclk 4>, <&async3_clk>;
+		clock-names = "pll0_sysclk2", "pll0_sysclk4", "async3";
+ };
+
+ /* consumer */
+ dsp: dsp@11800000 {
+ compatible = "ti,da850-dsp";
+ reg = <0x11800000 0x40000>,
+ <0x11e00000 0x8000>,
+ <0x11f00000 0x8000>,
+ <0x01c14044 0x4>,
+ <0x01c14174 0x8>;
+ reg-names = "l2sram", "l1pram", "l1dram", "host1cfg", "chipsig";
+ interrupt-parent = <&intc>;
+ interrupts = <28>;
+ clocks = <&psc0 15>;
+ power-domains = <&psc0 15>;
+ resets = <&psc0 15>;
+ };
+
+Also see:
+- Documentation/devicetree/bindings/clock/clock-bindings.txt
+- Documentation/devicetree/bindings/power/power_domain.txt
+- Documentation/devicetree/bindings/reset/reset.txt
diff --git a/Documentation/devicetree/bindings/clock/ti/divider.txt b/Documentation/devicetree/bindings/clock/ti/divider.txt
index 35a6f5c7e5c2..9b13b32974f9 100644
--- a/Documentation/devicetree/bindings/clock/ti/divider.txt
+++ b/Documentation/devicetree/bindings/clock/ti/divider.txt
@@ -75,6 +75,9 @@ Optional properties:
- ti,invert-autoidle-bit : autoidle is enabled by setting the bit to 0,
see [2]
- ti,set-rate-parent : clk_set_rate is propagated to parent
+- ti,latch-bit : latch the divider value to HW, only needed if the register
+  access requires this. As an example, the dra76x DPLL_GMAC H14 divider implements
+ such behavior.
Examples:
dpll_usb_m2_ck: dpll_usb_m2_ck@4a008190 {
diff --git a/Documentation/devicetree/bindings/clock/ti/mux.txt b/Documentation/devicetree/bindings/clock/ti/mux.txt
index 2d0d170f8001..eec8994b9be8 100644
--- a/Documentation/devicetree/bindings/clock/ti/mux.txt
+++ b/Documentation/devicetree/bindings/clock/ti/mux.txt
@@ -48,6 +48,9 @@ Optional properties:
zero
- ti,set-rate-parent : clk_set_rate is propagated to parent clock,
not supported by the composite-mux-clock subtype
+- ti,latch-bit : latch the mux value to HW, only needed if the register
+ access requires this. As an example, dra7x DPLL_GMAC H14 muxing
+ implements such behavior.
Examples:
diff --git a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
index 1fd5d69647ca..ffadb7c6f1f3 100644
--- a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
+++ b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
@@ -11,6 +11,8 @@ Required Properties:
the device is compatible with the R-Car Gen2 VMSA-compatible IPMMU.
- "renesas,ipmmu-r8a73a4" for the R8A73A4 (R-Mobile APE6) IPMMU.
+ - "renesas,ipmmu-r8a7743" for the R8A7743 (RZ/G1M) IPMMU.
+ - "renesas,ipmmu-r8a7745" for the R8A7745 (RZ/G1E) IPMMU.
- "renesas,ipmmu-r8a7790" for the R8A7790 (R-Car H2) IPMMU.
- "renesas,ipmmu-r8a7791" for the R8A7791 (R-Car M2-W) IPMMU.
- "renesas,ipmmu-r8a7793" for the R8A7793 (R-Car M2-N) IPMMU.
@@ -19,7 +21,8 @@ Required Properties:
- "renesas,ipmmu-r8a7796" for the R8A7796 (R-Car M3-W) IPMMU.
- "renesas,ipmmu-r8a77970" for the R8A77970 (R-Car V3M) IPMMU.
- "renesas,ipmmu-r8a77995" for the R8A77995 (R-Car D3) IPMMU.
- - "renesas,ipmmu-vmsa" for generic R-Car Gen2 VMSA-compatible IPMMU.
+ - "renesas,ipmmu-vmsa" for generic R-Car Gen2 or RZ/G1 VMSA-compatible
+ IPMMU.
- reg: Base address and size of the IPMMU registers.
- interrupts: Specifiers for the MMU fault interrupts. For instances that
diff --git a/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt
index 2098f7732264..6ecefea1c6f9 100644
--- a/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt
+++ b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt
@@ -14,6 +14,11 @@ Required properties:
"single-master" device, and needs no additional information
to associate with its master device. See:
Documentation/devicetree/bindings/iommu/iommu.txt
+- clocks : A list of clocks required for the IOMMU to be accessible by
+ the host CPU.
+- clock-names : Should contain the following:
+ "iface" - Main peripheral bus clock (PCLK/HCL) (required)
+ "aclk" - AXI bus clock (required)
Optional properties:
- rockchip,disable-mmu-reset : Don't use the mmu reset operation.
@@ -27,5 +32,7 @@ Example:
reg = <0xff940300 0x100>;
interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "vopl_mmu";
+ clocks = <&cru ACLK_VOP1>, <&cru HCLK_VOP1>;
+ clock-names = "aclk", "iface";
#iommu-cells = <0>;
};
diff --git a/Documentation/devicetree/bindings/pwm/ingenic,jz47xx-pwm.txt b/Documentation/devicetree/bindings/pwm/ingenic,jz47xx-pwm.txt
new file mode 100644
index 000000000000..7d9d3f90641b
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/ingenic,jz47xx-pwm.txt
@@ -0,0 +1,25 @@
+Ingenic JZ47xx PWM Controller
+=============================
+
+Required properties:
+- compatible: One of:
+ * "ingenic,jz4740-pwm"
+ * "ingenic,jz4770-pwm"
+ * "ingenic,jz4780-pwm"
+- #pwm-cells: Should be 3. See pwm.txt in this directory for a description
+ of the cells format.
+- clocks : phandle to the external clock.
+- clock-names : Should be "ext".
+
+
+Example:
+
+ pwm: pwm@10002000 {
+ compatible = "ingenic,jz4740-pwm";
+ reg = <0x10002000 0x1000>;
+
+ #pwm-cells = <3>;
+
+ clocks = <&ext>;
+ clock-names = "ext";
+ };
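+
+A consumer then uses the standard three-cell specifier described in pwm.txt:
+PWM channel index, period in nanoseconds, and flags. The backlight node below
+is a purely illustrative sketch, not part of this binding:
+
+	backlight {
+		...
+		pwms = <&pwm 4 5000000 0>; /* channel 4, 5 ms period, no flags */
+	};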
diff --git a/Documentation/devicetree/bindings/pwm/pwm-stm32-lp.txt b/Documentation/devicetree/bindings/pwm/pwm-stm32-lp.txt
index f8338d11fd2b..bd23302e84be 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-stm32-lp.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-stm32-lp.txt
@@ -7,6 +7,8 @@ See ../mfd/stm32-lptimer.txt for details about the parent node.
Required parameters:
- compatible: Must be "st,stm32-pwm-lp".
+- #pwm-cells: Should be set to 3. This PWM chip uses the default 3 cells
+ bindings defined in pwm.txt.
Optional properties:
- pinctrl-names: Set to "default".
@@ -18,6 +20,7 @@ Example:
...
pwm {
compatible = "st,stm32-pwm-lp";
+ #pwm-cells = <3>;
pinctrl-names = "default";
pinctrl-0 = <&lppwm1_pins>;
};
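+
+A consumer would then use the default three-cell specifier from pwm.txt
+(the node below and the "lppwm1" label are hypothetical, shown for
+illustration only):
+
+	backlight {
+		...
+		pwms = <&lppwm1 0 1000000 0>; /* channel 0, 1 ms period, no flags */
+	};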
diff --git a/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt b/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt
index 51ff54c8b8ef..2a1affbff45e 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt
@@ -7,6 +7,8 @@ Required properties:
- "allwinner,sun5i-a13-pwm"
- "allwinner,sun7i-a20-pwm"
- "allwinner,sun8i-h3-pwm"
+ - "allwinner,sun50i-a64-pwm", "allwinner,sun5i-a13-pwm"
+ - "allwinner,sun50i-h5-pwm", "allwinner,sun5i-a13-pwm"
- reg: physical base address and length of the controller's registers
- #pwm-cells: should be 3. See pwm.txt in this directory for a description of
the cells format.
diff --git a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
index 74c118015980..35a3b9761ee5 100644
--- a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
+++ b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
@@ -2,6 +2,8 @@
Required Properties:
- compatible: should be "renesas,pwm-rcar" and one of the following.
+ - "renesas,pwm-r8a7743": for RZ/G1M
+ - "renesas,pwm-r8a7745": for RZ/G1E
- "renesas,pwm-r8a7778": for R-Car M1A
- "renesas,pwm-r8a7779": for R-Car H1
- "renesas,pwm-r8a7790": for R-Car H2
@@ -9,6 +11,7 @@ Required Properties:
- "renesas,pwm-r8a7794": for R-Car E2
- "renesas,pwm-r8a7795": for R-Car H3
- "renesas,pwm-r8a7796": for R-Car M3-W
+ - "renesas,pwm-r8a77965": for R-Car M3-N
- "renesas,pwm-r8a77995": for R-Car D3
- reg: base address and length of the registers block for the PWM.
- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
@@ -17,13 +20,15 @@ Required Properties:
- pinctrl-0: phandle, referring to a default pin configuration node.
- pinctrl-names: Set to "default".
-Example: R8A7790 (R-Car H2) PWM Timer node
+Example: R8A7743 (RZ/G1M) PWM Timer node
pwm0: pwm@e6e30000 {
- compatible = "renesas,pwm-r8a7790", "renesas,pwm-rcar";
+ compatible = "renesas,pwm-r8a7743", "renesas,pwm-rcar";
reg = <0 0xe6e30000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
#pwm-cells = <2>;
- clocks = <&mstp5_clks R8A7790_CLK_PWM>;
pinctrl-0 = <&pwm0_pins>;
pinctrl-names = "default";
};
diff --git a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt
index 1aadc804dae4..d53a16715da6 100644
--- a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt
@@ -3,10 +3,12 @@
Required Properties:
- compatible: should be one of the following.
- - "renesas,tpu-r8a73a4": for R8A77A4 (R-Mobile APE6) compatible PWM controller.
+ - "renesas,tpu-r8a73a4": for R8A73A4 (R-Mobile APE6) compatible PWM controller.
- "renesas,tpu-r8a7740": for R8A7740 (R-Mobile A1) compatible PWM controller.
+ - "renesas,tpu-r8a7743": for R8A7743 (RZ/G1M) compatible PWM controller.
+ - "renesas,tpu-r8a7745": for R8A7745 (RZ/G1E) compatible PWM controller.
- "renesas,tpu-r8a7790": for R8A7790 (R-Car H2) compatible PWM controller.
- - "renesas,tpu": for generic R-Car TPU PWM controller.
+ - "renesas,tpu": for generic R-Car and RZ/G1 TPU PWM controller.
- reg: Base address and length of each memory resource used by the PWM
controller hardware module.
@@ -18,10 +20,10 @@ Required Properties:
Please refer to pwm.txt in this directory for details of the common PWM bindings
used by client devices.
-Example: R8A7740 (R-Car A1) TPU controller node
+Example: R8A7740 (R-Mobile A1) TPU controller node
tpu: pwm@e6600000 {
compatible = "renesas,tpu-r8a7740", "renesas,tpu";
- reg = <0xe6600000 0x100>;
+ reg = <0xe6600000 0x148>;
#pwm-cells = <3>;
};
diff --git a/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt b/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt
index 107280ef0025..adc6b76fcb3a 100644
--- a/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt
@@ -11,6 +11,7 @@ Optional properties:
detail please see: Documentation/devicetree/bindings/regmap/regmap.txt.
- fsl,ext-reset-output: If present the watchdog device is configured to
assert its external reset (WDOG_B) instead of issuing a software reset.
+- timeout-sec : Contains the watchdog timeout in seconds
Examples:
@@ -19,4 +20,5 @@ wdt@73f98000 {
reg = <0x73f98000 0x4000>;
interrupts = <58>;
big-endian;
+ timeout-sec = <20>;
};
diff --git a/Documentation/devicetree/bindings/watchdog/meson-wdt.txt b/Documentation/devicetree/bindings/watchdog/meson-wdt.txt
index 8a6d84cb36c9..7588cc3971bf 100644
--- a/Documentation/devicetree/bindings/watchdog/meson-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/meson-wdt.txt
@@ -9,9 +9,13 @@ Required properties:
"amlogic,meson8m2-wdt" and "amlogic,meson8b-wdt" on Meson8m2 SoCs
- reg : Specifies base physical address and size of the registers.
+Optional properties:
+- timeout-sec: contains the watchdog timeout in seconds.
+
Example:
wdt: watchdog@c1109900 {
compatible = "amlogic,meson6-wdt";
reg = <0xc1109900 0x8>;
+ timeout-sec = <10>;
};
diff --git a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
index 5b38a30e608c..859dee167b91 100644
--- a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
@@ -11,9 +11,13 @@ Required properties:
- reg : Specifies base physical address and size of the registers.
+Optional properties:
+- timeout-sec: contains the watchdog timeout in seconds.
+
Example:
wdt: watchdog@10000000 {
compatible = "mediatek,mt6589-wdt";
reg = <0x10000000 0x18>;
+ timeout-sec = <10>;
};
diff --git a/Documentation/devicetree/bindings/watchdog/nuvoton,npcm-wdt.txt b/Documentation/devicetree/bindings/watchdog/nuvoton,npcm-wdt.txt
new file mode 100644
index 000000000000..6d593003c933
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/nuvoton,npcm-wdt.txt
@@ -0,0 +1,28 @@
+Nuvoton NPCM Watchdog
+
+The Nuvoton NPCM timer module provides five 24-bit timer counters and a
+watchdog. The watchdog supports a pre-timeout interrupt that fires 10ms
+before expiry.
+
+Required properties:
+- compatible : "nuvoton,npcm750-wdt" for NPCM750 (Poleg).
+- reg : Offset and length of the register set for the device.
+- interrupts : Contain the timer interrupt with flags for
+ falling edge.
+
+Required clocking property; must be one of:
+- clocks : phandle of timer reference clock.
+- clock-frequency : The frequency in Hz of the clock that drives the NPCM7xx
+ timer (usually 25000000).
+
+Optional properties:
+- timeout-sec : Contains the watchdog timeout in seconds
+
+Example:
+
+timer@f000801c {
+ compatible = "nuvoton,npcm750-wdt";
+ interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0xf000801c 0x4>;
+ clocks = <&clk NPCM7XX_CLK_TIMER>;
+};
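+
+Alternatively, a board that does not model the clock as a phandle could use
+the clock-frequency property instead (hypothetical fragment, shown only to
+illustrate the alternative clocking property):
+
+timer@f000801c {
+	compatible = "nuvoton,npcm750-wdt";
+	interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
+	reg = <0xf000801c 0x4>;
+	clock-frequency = <25000000>;
+	timeout-sec = <30>;
+};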
diff --git a/Documentation/devicetree/bindings/watchdog/sirfsoc_wdt.txt b/Documentation/devicetree/bindings/watchdog/sirfsoc_wdt.txt
index 9cbc76c89b2b..0dce5e3100b4 100644
--- a/Documentation/devicetree/bindings/watchdog/sirfsoc_wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/sirfsoc_wdt.txt
@@ -5,10 +5,14 @@ Required properties:
- reg: Address range of tick timer/WDT register set
- interrupts: interrupt number to the cpu
+Optional properties:
+- timeout-sec : Contains the watchdog timeout in seconds
+
Example:
timer@b0020000 {
compatible = "sirf,prima2-tick";
reg = <0xb0020000 0x1000>;
interrupts = <0>;
+ timeout-sec = <30>;
};
diff --git a/Documentation/devicetree/bindings/watchdog/sunxi-wdt.txt b/Documentation/devicetree/bindings/watchdog/sunxi-wdt.txt
index 04fc368d828f..ed11ce0ac836 100644
--- a/Documentation/devicetree/bindings/watchdog/sunxi-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/sunxi-wdt.txt
@@ -8,9 +8,13 @@ Required properties:
"allwinner,sun50i-a64-wdt","allwinner,sun6i-a31-wdt"
- reg : Specifies base physical address and size of the registers.
+Optional properties:
+- timeout-sec : Contains the watchdog timeout in seconds
+
Example:
wdt: watchdog@1c20c90 {
compatible = "allwinner,sun4i-a10-wdt";
reg = <0x01c20c90 0x10>;
+ timeout-sec = <10>;
};
diff --git a/Documentation/filesystems/afs.txt b/Documentation/filesystems/afs.txt
index c5254f6d234d..8c6ea7b41048 100644
--- a/Documentation/filesystems/afs.txt
+++ b/Documentation/filesystems/afs.txt
@@ -11,7 +11,7 @@ Contents:
- Proc filesystem.
- The cell database.
- Security.
- - Examples.
+ - The @sys substitution.
========
@@ -230,3 +230,29 @@ If a file is opened with a particular key and then the file descriptor is
passed to a process that doesn't have that key (perhaps over an AF_UNIX
socket), then the operations on the file will be made with key that was used to
open the file.
+
+
+=====================
+THE @SYS SUBSTITUTION
+=====================
+
+The list of up to 16 @sys substitutions for the current network namespace can
+be configured by writing a list to /proc/fs/afs/sysname:
+
+ [root@andromeda ~]# echo foo amd64_linux_26 >/proc/fs/afs/sysname
+
+or cleared entirely by writing an empty list:
+
+ [root@andromeda ~]# echo >/proc/fs/afs/sysname
+
+The current list for the current network namespace can be retrieved by:
+
+ [root@andromeda ~]# cat /proc/fs/afs/sysname
+ foo
+ amd64_linux_26
+
+When @sys is being substituted for, each element of the list is tried in the
+order given.
+
+By default, the list will contain one item that conforms to the pattern
+"<arch>_linux_26", amd64 being the name for x86_64.
diff --git a/Documentation/filesystems/gfs2-glocks.txt b/Documentation/filesystems/gfs2-glocks.txt
index 1fb12f9dfe48..7059623635b2 100644
--- a/Documentation/filesystems/gfs2-glocks.txt
+++ b/Documentation/filesystems/gfs2-glocks.txt
@@ -100,14 +100,15 @@ indicates that it is caching uptodate data.
Glock locking order within GFS2:
- 1. i_mutex (if required)
+ 1. i_rwsem (if required)
2. Rename glock (for rename only)
3. Inode glock(s)
(Parents before children, inodes at "same level" with same parent in
lock number order)
4. Rgrp glock(s) (for (de)allocation operations)
5. Transaction glock (via gfs2_trans_begin) for non-read operations
- 6. Page lock (always last, very important!)
+ 6. i_rw_mutex (if required)
+ 7. Page lock (always last, very important!)
There are two glocks per inode. One deals with access to the inode
itself (locking order as above), and the other, known as the iopen
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index 6ea1e64d1464..961b287ef323 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -14,9 +14,13 @@ The result will inevitably fail to look exactly like a normal
filesystem for various technical reasons. The expectation is that
many use cases will be able to ignore these differences.
-This approach is 'hybrid' because the objects that appear in the
-filesystem do not all appear to belong to that filesystem. In many
-cases an object accessed in the union will be indistinguishable
+
+Overlay objects
+---------------
+
+The overlay filesystem approach is 'hybrid', because the objects that
+appear in the filesystem do not always appear to belong to that filesystem.
+In many cases, an object accessed in the union will be indistinguishable
from accessing the corresponding object from the original filesystem.
This is most obvious from the 'st_dev' field returned by stat(2).
@@ -34,6 +38,19 @@ make the overlay mount more compliant with filesystem scanners and
overlay objects will be distinguishable from the corresponding
objects in the original filesystem.
+On 64bit systems, even when the overlay layers are not all on the same
+underlying filesystem, the same compliant behavior can be achieved
+with the "xino" feature. The "xino" feature composes a unique object
+identifier from the real object st_ino and an underlying fsid index.
+If all underlying filesystems support NFS file handles and export file
+handles with 32bit inode number encoding (e.g. ext4), overlay filesystem
+will use the high inode number bits for fsid. Even when the underlying
+filesystem uses 64bit inode numbers, users can still enable the "xino"
+feature with the "-o xino=on" overlay mount option. That is useful for the
+case of underlying filesystems like xfs and tmpfs, which use 64bit inode
+numbers, but are very unlikely to use the high inode number bit.
+
+
Upper and Lower
---------------
@@ -290,10 +307,19 @@ Non-standard behavior
---------------------
The copy_up operation essentially creates a new, identical file and
-moves it over to the old name. The new file may be on a different
-filesystem, so both st_dev and st_ino of the file may change.
+moves it over to the old name. Any open files referring to this inode
+will access the old data.
+
+The new file may be on a different filesystem, so both st_dev and st_ino
+of the real file may change. The values of st_dev and st_ino returned by
+stat(2) on an overlay object are often not the same as the real file
+stat(2) values to prevent the values from changing on copy_up.
-Any open files referring to this inode will access the old data.
+Unless "xino" feature is enabled, when overlay layers are not all on the
+same underlying filesystem, the value of st_dev may be different for two
+non-directory objects in the same overlay filesystem and the value of
+st_ino for directory objects may be non persistent and could change even
+while the overlay filesystem is still mounted.
Unless "inode index" feature is enabled, if a file with multiple hard
links is copied up, then this will "break" the link. Changes will not be
@@ -302,6 +328,7 @@ propagated to other names referring to the same inode.
Unless "redirect_dir" feature is enabled, rename(2) on a lower or merged
directory will fail with EXDEV.
+
Changes to underlying filesystems
---------------------------------
diff --git a/Documentation/process/adding-syscalls.rst b/Documentation/process/adding-syscalls.rst
index 314c8bf6f2a2..0d4f29bc798b 100644
--- a/Documentation/process/adding-syscalls.rst
+++ b/Documentation/process/adding-syscalls.rst
@@ -360,7 +360,7 @@ First, the entry in ``arch/x86/entry/syscalls/syscall_32.tbl`` gets an extra
column to indicate that a 32-bit userspace program running on a 64-bit kernel
should hit the compat entry point::
- 380 i386 xyzzy sys_xyzzy compat_sys_xyzzy
+ 380 i386 xyzzy sys_xyzzy __ia32_compat_sys_xyzzy
Second, you need to figure out what should happen for the x32 ABI version of
the new system call. There's a choice here: the layout of the arguments
@@ -373,7 +373,7 @@ the compatibility wrapper::
333 64 xyzzy sys_xyzzy
...
- 555 x32 xyzzy compat_sys_xyzzy
+ 555 x32 xyzzy __x32_compat_sys_xyzzy
If no pointers are involved, then it is preferable to re-use the 64-bit system
call for the x32 ABI (and consequently the entry in
diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt
index bb9a0a53e76b..911399730c1c 100644
--- a/Documentation/thermal/sysfs-api.txt
+++ b/Documentation/thermal/sysfs-api.txt
@@ -255,6 +255,7 @@ temperature) and throttle appropriate devices.
2. sysfs attributes structure
RO read only value
+WO write only value
RW read/write value
Thermal sysfs attributes will be represented under /sys/class/thermal.
@@ -286,6 +287,11 @@ Thermal cooling device sys I/F, created once it's registered:
|---type: Type of the cooling device(processor/fan/...)
|---max_state: Maximum cooling state of the cooling device
|---cur_state: Current cooling state of the cooling device
+ |---stats: Directory containing cooling device's statistics
+ |---stats/reset: Writing any value resets the statistics
+ |---stats/time_in_state_ms: Time (msec) spent in various cooling states
+ |---stats/total_trans: Total number of times cooling state is changed
+    |---stats/trans_table:	Cooling state transition table
Then next two dynamic attributes are created/removed in pairs. They represent
@@ -490,6 +496,31 @@ cur_state
- cur_state == max_state means the maximum cooling.
RW, Required
+stats/reset
+ Writing any value resets the cooling device's statistics.
+ WO, Required
+
+stats/time_in_state_ms:
+ The amount of time spent by the cooling device in various cooling
+	states. The output will have a "<state> <time>" pair on each line,
+	meaning that this cooling device spent <time> msec of time at <state>.
+	The output will have one line for each of the supported states. The
+	time unit here is 10mS (similar to other time values exported in /proc).
+ RO, Required
+
+stats/total_trans:
+ A single positive value showing the total number of times the state of a
+ cooling device is changed.
+ RO, Required
+
+stats/trans_table:
+ This gives fine grained information about all the cooling state
+ transitions. The cat output here is a two dimensional matrix, where an
+ entry <i,j> (row i, column j) represents the number of transitions from
+ State_i to State_j. If the transition table is bigger than PAGE_SIZE,
+ reading this will return an -EFBIG error.
+ RO, Required
+
3. A simple implementation
ACPI thermal zone may support multiple trip points like critical, hot,
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index ea91cb61a602..5432a96d31ff 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -20,7 +20,7 @@ ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
... unused hole ...
ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0
-ffffffffa0000000 - [fixmap start] (~1526 MB) module mapping space (variable)
+ffffffffa0000000 - fffffffffeffffff (1520 MB) module mapping space
[fixmap start] - ffffffffff5fffff kernel-internal fixmap range
ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI
ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
diff --git a/MAINTAINERS b/MAINTAINERS
index c7182d2a9f5c..0a1410d5a621 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -934,8 +934,8 @@ F: drivers/char/apm-emulation.c
APPARMOR SECURITY MODULE
M: John Johansen <john.johansen@canonical.com>
L: apparmor@lists.ubuntu.com (subscribers-only, general discussion)
-W: apparmor.wiki.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jj/apparmor-dev.git
+W: wiki.apparmor.net
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jj/linux-apparmor
S: Supported
F: security/apparmor/
F: Documentation/admin-guide/LSM/apparmor.rst
@@ -1232,10 +1232,15 @@ F: Documentation/devicetree/bindings/i2c/i2c-aspeed.txt
ARM/ASPEED MACHINE SUPPORT
M: Joel Stanley <joel@jms.id.au>
-S: Maintained
+R: Andrew Jeffery <andrew@aj.id.au>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
+Q: https://patchwork.ozlabs.org/project/linux-aspeed/list/
+S: Supported
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/joel/aspeed.git
F: arch/arm/mach-aspeed/
F: arch/arm/boot/dts/aspeed-*
-F: drivers/*/*aspeed*
+N: aspeed
ARM/ATMEL AT91 Clock Support
M: Boris Brezillon <boris.brezillon@bootlin.com>
@@ -1743,7 +1748,7 @@ F: arch/arm/mach-orion5x/ts78xx-*
ARM/OXNAS platform support
M: Neil Armstrong <narmstrong@baylibre.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L: linux-oxnas@lists.tuxfamily.org (moderated for non-subscribers)
+L: linux-oxnas@groups.io (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-oxnas/
F: arch/arm/boot/dts/ox8*.dts*
@@ -2421,7 +2426,6 @@ T: git git://github.com/ndyer/linux.git
S: Maintained
F: Documentation/devicetree/bindings/input/atmel,maxtouch.txt
F: drivers/input/touchscreen/atmel_mxt_ts.c
-F: include/linux/platform_data/atmel_mxt_ts.h
ATMEL SAMA5D2 ADC DRIVER
M: Ludovic Desroches <ludovic.desroches@microchip.com>
@@ -5838,7 +5842,7 @@ F: scripts/Makefile.gcc-plugins
F: Documentation/gcc-plugins.txt
GCOV BASED KERNEL PROFILING
-M: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+M: Peter Oberparleiter <oberpar@linux.ibm.com>
S: Maintained
F: kernel/gcov/
F: Documentation/dev-tools/gcov.rst
@@ -5906,6 +5910,11 @@ S: Supported
F: drivers/phy/
F: include/linux/phy/
+GENERIC PINCTRL I2C DEMULTIPLEXER DRIVER
+M: Wolfram Sang <wsa+renesas@sang-engineering.com>
+S: Supported
+F: drivers/i2c/muxes/i2c-demux-pinctrl.c
+
GENERIC PM DOMAINS
M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
M: Kevin Hilman <khilman@kernel.org>
@@ -6581,15 +6590,25 @@ W: https://i2c.wiki.kernel.org/
Q: https://patchwork.ozlabs.org/project/linux-i2c/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git
S: Maintained
-F: Documentation/devicetree/bindings/i2c/
+F: Documentation/devicetree/bindings/i2c/i2c.txt
F: Documentation/i2c/
-F: drivers/i2c/
-F: drivers/i2c/*/
+F: drivers/i2c/*
F: include/linux/i2c.h
-F: include/linux/i2c-*.h
+F: include/linux/i2c-dev.h
+F: include/linux/i2c-smbus.h
F: include/uapi/linux/i2c.h
F: include/uapi/linux/i2c-*.h
+I2C SUBSYSTEM HOST DRIVERS
+L: linux-i2c@vger.kernel.org
+W: https://i2c.wiki.kernel.org/
+Q: https://patchwork.ozlabs.org/project/linux-i2c/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git
+S: Odd Fixes
+F: Documentation/devicetree/bindings/i2c/
+F: drivers/i2c/algos/
+F: drivers/i2c/busses/
+
I2C-TAOS-EVM DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-i2c@vger.kernel.org
@@ -7763,7 +7782,7 @@ F: arch/powerpc/kernel/kvm*
KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
M: Christian Borntraeger <borntraeger@de.ibm.com>
-M: Janosch Frank <frankja@linux.vnet.ibm.com>
+M: Janosch Frank <frankja@linux.ibm.com>
R: David Hildenbrand <david@redhat.com>
R: Cornelia Huck <cohuck@redhat.com>
L: linux-s390@vger.kernel.org
@@ -11901,6 +11920,11 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-drivers.git c
S: Supported
F: drivers/clk/renesas/
+RENESAS EMEV2 I2C DRIVER
+M: Wolfram Sang <wsa+renesas@sang-engineering.com>
+S: Supported
+F: drivers/i2c/busses/i2c-emev2.c
+
RENESAS ETHERNET DRIVERS
R: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
L: netdev@vger.kernel.org
@@ -11916,6 +11940,12 @@ L: linux-iio@vger.kernel.org
S: Supported
F: drivers/iio/adc/rcar_gyro_adc.c
+RENESAS R-CAR I2C DRIVERS
+M: Wolfram Sang <wsa+renesas@sang-engineering.com>
+S: Supported
+F: drivers/i2c/busses/i2c-rcar.c
+F: drivers/i2c/busses/i2c-sh_mobile.c
+
RENESAS USB PHY DRIVER
M: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
L: linux-renesas-soc@vger.kernel.org
@@ -12119,16 +12149,16 @@ F: Documentation/s390/
F: Documentation/driver-api/s390-drivers.rst
S390 COMMON I/O LAYER
-M: Sebastian Ott <sebott@linux.vnet.ibm.com>
-M: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+M: Sebastian Ott <sebott@linux.ibm.com>
+M: Peter Oberparleiter <oberpar@linux.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
F: drivers/s390/cio/
S390 DASD DRIVER
-M: Stefan Haberland <sth@linux.vnet.ibm.com>
-M: Jan Hoeppner <hoeppner@linux.vnet.ibm.com>
+M: Stefan Haberland <sth@linux.ibm.com>
+M: Jan Hoeppner <hoeppner@linux.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
@@ -12143,8 +12173,8 @@ S: Supported
F: drivers/iommu/s390-iommu.c
S390 IUCV NETWORK LAYER
-M: Julian Wiedmann <jwi@linux.vnet.ibm.com>
-M: Ursula Braun <ubraun@linux.vnet.ibm.com>
+M: Julian Wiedmann <jwi@linux.ibm.com>
+M: Ursula Braun <ubraun@linux.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
@@ -12153,15 +12183,15 @@ F: include/net/iucv/
F: net/iucv/
S390 NETWORK DRIVERS
-M: Julian Wiedmann <jwi@linux.vnet.ibm.com>
-M: Ursula Braun <ubraun@linux.vnet.ibm.com>
+M: Julian Wiedmann <jwi@linux.ibm.com>
+M: Ursula Braun <ubraun@linux.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
F: drivers/s390/net/
S390 PCI SUBSYSTEM
-M: Sebastian Ott <sebott@linux.vnet.ibm.com>
+M: Sebastian Ott <sebott@linux.ibm.com>
M: Gerald Schaefer <gerald.schaefer@de.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
@@ -12171,8 +12201,8 @@ F: drivers/pci/hotplug/s390_pci_hpc.c
S390 VFIO-CCW DRIVER
M: Cornelia Huck <cohuck@redhat.com>
-M: Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
-M: Halil Pasic <pasic@linux.vnet.ibm.com>
+M: Dong Jia Shi <bjsdjshi@linux.ibm.com>
+M: Halil Pasic <pasic@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: kvm@vger.kernel.org
S: Supported
@@ -12188,8 +12218,8 @@ S: Supported
F: drivers/s390/crypto/
S390 ZFCP DRIVER
-M: Steffen Maier <maier@linux.vnet.ibm.com>
-M: Benjamin Block <bblock@linux.vnet.ibm.com>
+M: Steffen Maier <maier@linux.ibm.com>
+M: Benjamin Block <bblock@linux.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
@@ -12334,6 +12364,7 @@ M: Tomasz Figa <tomasz.figa@gmail.com>
M: Chanwoo Choi <cw00.choi@samsung.com>
S: Supported
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/snawrocki/clk.git
F: drivers/clk/samsung/
F: include/dt-bindings/clock/exynos*.h
F: Documentation/devicetree/bindings/clock/exynos*.txt
@@ -12625,7 +12656,7 @@ S: Maintained
F: drivers/misc/sgi-xp/
SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
-M: Ursula Braun <ubraun@linux.vnet.ibm.com>
+M: Ursula Braun <ubraun@linux.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
@@ -13970,6 +14001,13 @@ F: arch/arm/mach-davinci/
F: drivers/i2c/busses/i2c-davinci.c
F: arch/arm/boot/dts/da850*
+TI DAVINCI SERIES CLOCK DRIVER
+M: David Lechner <david@lechnology.com>
+R: Sekhar Nori <nsekhar@ti.com>
+S: Maintained
+F: Documentation/devicetree/bindings/clock/ti/davinci/
+F: drivers/clk/davinci/
+
TI DAVINCI SERIES GPIO DRIVER
M: Keerthy <j-keerthy@ti.com>
L: linux-gpio@vger.kernel.org
@@ -14960,7 +14998,7 @@ F: include/uapi/linux/virtio_crypto.h
VIRTIO DRIVERS FOR S390
M: Cornelia Huck <cohuck@redhat.com>
-M: Halil Pasic <pasic@linux.vnet.ibm.com>
+M: Halil Pasic <pasic@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: virtualization@lists.linux-foundation.org
L: kvm@vger.kernel.org
diff --git a/Makefile b/Makefile
index c1a608a1bc4c..74567b0ec2f0 100644
--- a/Makefile
+++ b/Makefile
@@ -846,6 +846,9 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
# Require designated initializers for all marked structures
KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
+# change __FILE__ to the relative path from the srctree
+KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
+
# use the deterministic mode of AR if available
KBUILD_ARFLAGS := $(call ar-option,D)
@@ -1615,6 +1618,8 @@ clean: $(clean-dirs)
-o -name '*.dwo' -o -name '*.lst' \
-o -name '*.su' \
-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
+ -o -name '*.lex.c' -o -name '*.tab.[ch]' \
+ -o -name '*.asn1.[ch]' \
-o -name '*.symtypes' -o -name 'modules.order' \
-o -name modules.builtin -o -name '.tmp_*.o.*' \
-o -name .cache.mk \
diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile
index 22a4c5d4702f..a83c4f5e928b 100644
--- a/arch/arc/boot/dts/Makefile
+++ b/arch/arc/boot/dts/Makefile
@@ -9,8 +9,6 @@ endif
obj-y += $(builtindtb-y).dtb.o
dtb-y := $(builtindtb-y).dtb
-.SECONDARY: $(obj)/$(builtindtb-y).dtb.S
-
# for CONFIG_OF_ALL_DTBS test
dtstree := $(srctree)/$(src)
dtb- := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 3304e671918d..8de542c48ade 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -67,4 +67,4 @@ $(src)/sha512-core.S_shipped: $(src)/sha512-armv4.pl
$(call cmd,perl)
endif
-.PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S
+targets += sha256-core.S sha512-core.S
diff --git a/arch/arm/mach-npcm/npcm7xx.c b/arch/arm/mach-npcm/npcm7xx.c
index 5f7cd88103ef..c5f77d854c4f 100644
--- a/arch/arm/mach-npcm/npcm7xx.c
+++ b/arch/arm/mach-npcm/npcm7xx.c
@@ -17,4 +17,6 @@ static const char *const npcm7xx_dt_match[] = {
DT_MACHINE_START(NPCM7XX_DT, "NPCM7XX Chip family")
.atag_offset = 0x100,
.dt_compat = npcm7xx_dt_match,
+ .l2c_aux_val = 0x0,
+ .l2c_aux_mask = ~0x0,
MACHINE_END
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index 8df9f326f449..f35ac684b1c0 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -78,4 +78,4 @@ $(src)/sha512-core.S_shipped: $(src)/sha512-armv8.pl
$(call cmd,perlasm)
endif
-.PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S
+targets += sha256-core.S sha512-core.S
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 053d83e8db6f..0bcc98dbba56 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -565,4 +565,140 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
#endif
.endm
+ /*
+ * frame_push - Push @regcount callee saved registers to the stack,
+ * starting at x19, as well as x29/x30, and set x29 to
+ * the new value of sp. Add @extra bytes of stack space
+ * for locals.
+ */
+ .macro frame_push, regcount:req, extra
+ __frame st, \regcount, \extra
+ .endm
+
+ /*
+ * frame_pop - Pop the callee saved registers from the stack that were
+ * pushed in the most recent call to frame_push, as well
+ * as x29/x30 and any extra stack space that may have been
+ * allocated.
+ */
+ .macro frame_pop
+ __frame ld
+ .endm
+
+ .macro __frame_regs, reg1, reg2, op, num
+ .if .Lframe_regcount == \num
+ \op\()r \reg1, [sp, #(\num + 1) * 8]
+ .elseif .Lframe_regcount > \num
+ \op\()p \reg1, \reg2, [sp, #(\num + 1) * 8]
+ .endif
+ .endm
+
+ .macro __frame, op, regcount, extra=0
+ .ifc \op, st
+ .if (\regcount) < 0 || (\regcount) > 10
+ .error "regcount should be in the range [0 ... 10]"
+ .endif
+ .if ((\extra) % 16) != 0
+ .error "extra should be a multiple of 16 bytes"
+ .endif
+ .ifdef .Lframe_regcount
+ .if .Lframe_regcount != -1
+ .error "frame_push/frame_pop may not be nested"
+ .endif
+ .endif
+ .set .Lframe_regcount, \regcount
+ .set .Lframe_extra, \extra
+ .set .Lframe_local_offset, ((\regcount + 3) / 2) * 16
+ stp x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
+ mov x29, sp
+ .endif
+
+ __frame_regs x19, x20, \op, 1
+ __frame_regs x21, x22, \op, 3
+ __frame_regs x23, x24, \op, 5
+ __frame_regs x25, x26, \op, 7
+ __frame_regs x27, x28, \op, 9
+
+ .ifc \op, ld
+ .if .Lframe_regcount == -1
+ .error "frame_push/frame_pop may not be nested"
+ .endif
+ ldp x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
+ .set .Lframe_regcount, -1
+ .endif
+ .endm
+
+/*
+ * Check whether to yield to another runnable task from kernel mode NEON code
+ * (which runs with preemption disabled).
+ *
+ * if_will_cond_yield_neon
+ * // pre-yield patchup code
+ * do_cond_yield_neon
+ * // post-yield patchup code
+ * endif_yield_neon <label>
+ *
+ * where <label> is optional, and marks the point where execution will resume
+ * after a yield has been performed. If omitted, execution resumes right after
+ * the endif_yield_neon invocation. Note that the entire sequence, including
+ * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
+ * is not defined.
+ *
+ * As a convenience, in the case where no patchup code is required, the above
+ * sequence may be abbreviated to
+ *
+ * cond_yield_neon <label>
+ *
+ * Note that the patchup code does not support assembler directives that change
+ * the output section; any use of such directives is undefined.
+ *
+ * The yield itself consists of the following:
+ * - Check whether the preempt count is exactly 1, in which case disabling
+ * preemption once will make the task preemptible. If this is not the case,
+ * yielding is pointless.
+ * - Check whether TIF_NEED_RESCHED is set, and if so, disable and re-enable
+ * kernel mode NEON (which will trigger a reschedule), and branch to the
+ * yield fixup code.
+ *
+ * This macro sequence may clobber all CPU state that is not guaranteed by the
+ * AAPCS to be preserved across an ordinary function call.
+ */
+
+ .macro cond_yield_neon, lbl
+ if_will_cond_yield_neon
+ do_cond_yield_neon
+ endif_yield_neon \lbl
+ .endm
+
+ .macro if_will_cond_yield_neon
+#ifdef CONFIG_PREEMPT
+ get_thread_info x0
+ ldr w1, [x0, #TSK_TI_PREEMPT]
+ ldr x0, [x0, #TSK_TI_FLAGS]
+ cmp w1, #PREEMPT_DISABLE_OFFSET
+ csel x0, x0, xzr, eq
+ tbnz x0, #TIF_NEED_RESCHED, .Lyield_\@ // needs rescheduling?
+ /* fall through to endif_yield_neon */
+ .subsection 1
+.Lyield_\@ :
+#else
+ .section ".discard.cond_yield_neon", "ax"
+#endif
+ .endm
+
+ .macro do_cond_yield_neon
+ bl kernel_neon_end
+ bl kernel_neon_begin
+ .endm
+
+ .macro endif_yield_neon, lbl
+ .ifnb \lbl
+ b \lbl
+ .else
+ b .Lyield_out_\@
+ .endif
+ .previous
+.Lyield_out_\@ :
+ .endm
+
#endif /* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index a311880feb0f..bc51b72fafd4 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -43,13 +43,12 @@
#define ARM64_SVE 22
#define ARM64_UNMAP_KERNEL_AT_EL0 23
#define ARM64_HARDEN_BRANCH_PREDICTOR 24
-#define ARM64_HARDEN_BP_POST_GUEST_EXIT 25
-#define ARM64_HAS_RAS_EXTN 26
-#define ARM64_WORKAROUND_843419 27
-#define ARM64_HAS_CACHE_IDC 28
-#define ARM64_HAS_CACHE_DIC 29
-#define ARM64_HW_DBM 30
+#define ARM64_HAS_RAS_EXTN 25
+#define ARM64_WORKAROUND_843419 26
+#define ARM64_HAS_CACHE_IDC 27
+#define ARM64_HAS_CACHE_DIC 28
+#define ARM64_HW_DBM 29
-#define ARM64_NCAPS 31
+#define ARM64_NCAPS 30
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index d53d40704416..f6648a3e4152 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -71,8 +71,6 @@ extern u32 __kvm_get_mdcr_el2(void);
extern u32 __init_stage2_translation(void);
-extern void __qcom_hyp_sanitize_btac_predictors(void);
-
#else /* __ASSEMBLY__ */
.macro get_host_ctxt reg, tmp
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 9b55a3f24be7..bf825f38d206 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -55,8 +55,6 @@ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
-arm64-obj-$(CONFIG_KVM_INDIRECT_VECTORS)+= bpi.o
-
obj-y += $(arm64-obj-y) vdso/ probes/
obj-m += $(arm64-obj-m)
head-y := head.o
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 78e1b0a70aaf..5bdda651bd05 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -23,6 +23,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kvm_host.h>
+#include <linux/preempt.h>
#include <linux/suspend.h>
#include <asm/cpufeature.h>
#include <asm/fixmap.h>
@@ -93,6 +94,8 @@ int main(void)
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
BLANK();
+ DEFINE(PREEMPT_DISABLE_OFFSET, PREEMPT_DISABLE_OFFSET);
+ BLANK();
DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW);
diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
deleted file mode 100644
index bb0b67722e86..000000000000
--- a/arch/arm64/kernel/bpi.S
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Contains CPU specific branch predictor invalidation sequences
- *
- * Copyright (C) 2018 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <linux/arm-smccc.h>
-
-#include <asm/alternative.h>
-#include <asm/mmu.h>
-
-.macro hyp_ventry
- .align 7
-1: .rept 27
- nop
- .endr
-/*
- * The default sequence is to directly branch to the KVM vectors,
- * using the computed offset. This applies for VHE as well as
- * !ARM64_HARDEN_EL2_VECTORS.
- *
- * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
- * with:
- *
- * stp x0, x1, [sp, #-16]!
- * movz x0, #(addr & 0xffff)
- * movk x0, #((addr >> 16) & 0xffff), lsl #16
- * movk x0, #((addr >> 32) & 0xffff), lsl #32
- * br x0
- *
- * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
- * See kvm_patch_vector_branch for details.
- */
-alternative_cb kvm_patch_vector_branch
- b __kvm_hyp_vector + (1b - 0b)
- nop
- nop
- nop
- nop
-alternative_cb_end
-.endm
-
-.macro generate_vectors
-0:
- .rept 16
- hyp_ventry
- .endr
- .org 0b + SZ_2K // Safety measure
-.endm
-
-
- .text
- .pushsection .hyp.text, "ax"
-
- .align 11
-ENTRY(__bp_harden_hyp_vecs_start)
- .rept BP_HARDEN_EL2_SLOTS
- generate_vectors
- .endr
-ENTRY(__bp_harden_hyp_vecs_end)
-
- .popsection
-
-ENTRY(__qcom_hyp_sanitize_link_stack_start)
- stp x29, x30, [sp, #-16]!
- .rept 16
- bl . + 4
- .endr
- ldp x29, x30, [sp], #16
-ENTRY(__qcom_hyp_sanitize_link_stack_end)
-
-.macro smccc_workaround_1 inst
- sub sp, sp, #(8 * 4)
- stp x2, x3, [sp, #(8 * 0)]
- stp x0, x1, [sp, #(8 * 2)]
- mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
- \inst #0
- ldp x2, x3, [sp, #(8 * 0)]
- ldp x0, x1, [sp, #(8 * 2)]
- add sp, sp, #(8 * 4)
-.endm
-
-ENTRY(__smccc_workaround_1_smc_start)
- smccc_workaround_1 smc
-ENTRY(__smccc_workaround_1_smc_end)
-
-ENTRY(__smccc_workaround_1_hvc_start)
- smccc_workaround_1 hvc
-ENTRY(__smccc_workaround_1_hvc_end)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 9262ec57f5ab..a900befadfe8 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -86,13 +86,9 @@ atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
-#ifdef CONFIG_KVM
-extern char __qcom_hyp_sanitize_link_stack_start[];
-extern char __qcom_hyp_sanitize_link_stack_end[];
+#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];
-extern char __smccc_workaround_1_hvc_start[];
-extern char __smccc_workaround_1_hvc_end[];
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
const char *hyp_vecs_end)
@@ -132,12 +128,8 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
spin_unlock(&bp_lock);
}
#else
-#define __qcom_hyp_sanitize_link_stack_start NULL
-#define __qcom_hyp_sanitize_link_stack_end NULL
#define __smccc_workaround_1_smc_start NULL
#define __smccc_workaround_1_smc_end NULL
-#define __smccc_workaround_1_hvc_start NULL
-#define __smccc_workaround_1_hvc_end NULL
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
const char *hyp_vecs_start,
@@ -145,7 +137,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
{
__this_cpu_write(bp_hardening_data.fn, fn);
}
-#endif /* CONFIG_KVM */
+#endif /* CONFIG_KVM_INDIRECT_VECTORS */
static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
bp_hardening_cb_t fn,
@@ -178,12 +170,25 @@ static void call_hvc_arch_workaround_1(void)
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
+static void qcom_link_stack_sanitization(void)
+{
+ u64 tmp;
+
+ asm volatile("mov %0, x30 \n"
+ ".rept 16 \n"
+ "bl . + 4 \n"
+ ".endr \n"
+ "mov x30, %0 \n"
+ : "=&r" (tmp));
+}
+
static void
enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
{
bp_hardening_cb_t cb;
void *smccc_start, *smccc_end;
struct arm_smccc_res res;
+ u32 midr = read_cpuid_id();
if (!entry->matches(entry, SCOPE_LOCAL_CPU))
return;
@@ -198,8 +203,9 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
if ((int)res.a0 < 0)
return;
cb = call_hvc_arch_workaround_1;
- smccc_start = __smccc_workaround_1_hvc_start;
- smccc_end = __smccc_workaround_1_hvc_end;
+ /* This is a guest, no need to patch KVM vectors */
+ smccc_start = NULL;
+ smccc_end = NULL;
break;
case PSCI_CONDUIT_SMC:
@@ -216,30 +222,14 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
return;
}
+ if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
+ ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
+ cb = qcom_link_stack_sanitization;
+
install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
return;
}
-
-static void qcom_link_stack_sanitization(void)
-{
- u64 tmp;
-
- asm volatile("mov %0, x30 \n"
- ".rept 16 \n"
- "bl . + 4 \n"
- ".endr \n"
- "mov x30, %0 \n"
- : "=&r" (tmp));
-}
-
-static void
-qcom_enable_link_stack_sanitization(const struct arm64_cpu_capabilities *entry)
-{
- install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
- __qcom_hyp_sanitize_link_stack_start,
- __qcom_hyp_sanitize_link_stack_end);
-}
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
@@ -324,33 +314,23 @@ static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
- {},
-};
-
-static const struct midr_range qcom_bp_harden_cpus[] = {
MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
{},
};
-static const struct arm64_cpu_capabilities arm64_bp_harden_list[] = {
- {
- CAP_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
- .cpu_enable = enable_smccc_arch_workaround_1,
- },
- {
- CAP_MIDR_RANGE_LIST(qcom_bp_harden_cpus),
- .cpu_enable = qcom_enable_link_stack_sanitization,
- },
+#endif
+
+#ifdef CONFIG_HARDEN_EL2_VECTORS
+
+static const struct midr_range arm64_harden_el2_vectors[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
{},
};
#endif
-#ifndef ERRATA_MIDR_ALL_VERSIONS
-#define ERRATA_MIDR_ALL_VERSIONS(x) MIDR_ALL_VERSIONS(x)
-#endif
-
const struct arm64_cpu_capabilities arm64_errata[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
defined(CONFIG_ARM64_ERRATUM_827319) || \
@@ -495,25 +475,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
- .matches = multi_entry_cap_matches,
- .cpu_enable = multi_entry_cap_cpu_enable,
- .match_list = arm64_bp_harden_list,
- },
- {
- .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
- ERRATA_MIDR_RANGE_LIST(qcom_bp_harden_cpus),
+ .cpu_enable = enable_smccc_arch_workaround_1,
+ ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
},
#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS
{
- .desc = "Cortex-A57 EL2 vector hardening",
- .capability = ARM64_HARDEN_EL2_VECTORS,
- ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
- },
- {
- .desc = "Cortex-A72 EL2 vector hardening",
+ .desc = "EL2 vector hardening",
.capability = ARM64_HARDEN_EL2_VECTORS,
- ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
},
#endif
{
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 1f458f7c3b44..e41a161d313a 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -209,15 +209,3 @@ alternative_endif
eret
ENDPROC(__fpsimd_guest_restore)
-
-ENTRY(__qcom_hyp_sanitize_btac_predictors)
- /**
- * Call SMC64 with Silicon provider serviceID 23<<8 (0xc2001700)
- * 0xC2000000-0xC200FFFF: assigned to SiP Service Calls
- * b15-b0: contains SiP functionID
- */
- movz x0, #0x1700
- movk x0, #0xc200, lsl #16
- smc #0
- ret
-ENDPROC(__qcom_hyp_sanitize_btac_predictors)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 87dfecce82b1..bffece27b5c1 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 - ARM Ltd
+ * Copyright (C) 2015-2018 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -24,6 +24,7 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
+#include <asm/mmu.h>
.text
.pushsection .hyp.text, "ax"
@@ -237,3 +238,64 @@ ENTRY(__kvm_hyp_vector)
invalid_vect el1_fiq_invalid // FIQ 32-bit EL1
valid_vect el1_error // Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)
+
+#ifdef CONFIG_KVM_INDIRECT_VECTORS
+.macro hyp_ventry
+ .align 7
+1: .rept 27
+ nop
+ .endr
+/*
+ * The default sequence is to directly branch to the KVM vectors,
+ * using the computed offset. This applies for VHE as well as
+ * !ARM64_HARDEN_EL2_VECTORS.
+ *
+ * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
+ * with:
+ *
+ * stp x0, x1, [sp, #-16]!
+ * movz x0, #(addr & 0xffff)
+ * movk x0, #((addr >> 16) & 0xffff), lsl #16
+ * movk x0, #((addr >> 32) & 0xffff), lsl #32
+ * br x0
+ *
+ * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
+ * See kvm_patch_vector_branch for details.
+ */
+alternative_cb kvm_patch_vector_branch
+ b __kvm_hyp_vector + (1b - 0b)
+ nop
+ nop
+ nop
+ nop
+alternative_cb_end
+.endm
+
+.macro generate_vectors
+0:
+ .rept 16
+ hyp_ventry
+ .endr
+ .org 0b + SZ_2K // Safety measure
+.endm
+
+ .align 11
+ENTRY(__bp_harden_hyp_vecs_start)
+ .rept BP_HARDEN_EL2_SLOTS
+ generate_vectors
+ .endr
+ENTRY(__bp_harden_hyp_vecs_end)
+
+ .popsection
+
+ENTRY(__smccc_workaround_1_smc_start)
+ sub sp, sp, #(8 * 4)
+ stp x2, x3, [sp, #(8 * 0)]
+ stp x0, x1, [sp, #(8 * 2)]
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
+ smc #0
+ ldp x2, x3, [sp, #(8 * 0)]
+ ldp x0, x1, [sp, #(8 * 2)]
+ add sp, sp, #(8 * 4)
+ENTRY(__smccc_workaround_1_smc_end)
+#endif
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 07b572173265..d9645236e474 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -472,16 +472,6 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));
- if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) {
- u32 midr = read_cpuid_id();
-
- /* Apply BTAC predictors mitigation to all Falkor chips */
- if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
- ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) {
- __qcom_hyp_sanitize_btac_predictors();
- }
- }
-
fp_enabled = __fpsimd_enabled_nvhe();
__sysreg_save_state_nvhe(guest_ctxt);
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index 114b93488193..5de871eb4a59 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -47,9 +47,10 @@ extern int pci_proc_domain(struct pci_bus *bus);
struct vm_area_struct;
-/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
-#define HAVE_PCI_MMAP 1
-#define arch_can_pci_mmap_io() 1
+/* Tell PCI code what kind of PCI resource mappings we support */
+#define HAVE_PCI_MMAP 1
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
+#define arch_can_pci_mmap_io() 1
extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
size_t count);
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index e53b8532353c..db8b1fa83452 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -33,6 +33,8 @@ extern int mem_init_done;
#define PAGE_KERNEL __pgprot(0) /* these mean nothing to non MMU */
#define pgprot_noncached(x) (x)
+#define pgprot_writecombine pgprot_noncached
+#define pgprot_device pgprot_noncached
#define __swp_type(x) (0)
#define __swp_offset(x) (0)
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index ae79e8638d50..161f9758c631 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -151,72 +151,22 @@ void pcibios_set_master(struct pci_dev *dev)
}
/*
- * Platform support for /proc/bus/pci/X/Y mmap()s,
- * modelled on the sparc64 implementation by Dave Miller.
- * -- paulus.
+ * Platform support for /proc/bus/pci/X/Y mmap()s.
*/
-/*
- * Adjust vm_pgoff of VMA such that it is the physical page offset
- * corresponding to the 32-bit pci bus offset for DEV requested by the user.
- *
- * Basically, the user finds the base address for his device which he wishes
- * to mmap. They read the 32-bit value from the config space base register,
- * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
- * offset parameter of mmap on /proc/bus/pci/XXX for that device.
- *
- * Returns negative error code on failure, zero on success.
- */
-static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
- resource_size_t *offset,
- enum pci_mmap_state mmap_state)
+int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- unsigned long io_offset = 0;
- int i, res_bit;
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ resource_size_t ioaddr = pci_resource_start(pdev, bar);
if (!hose)
- return NULL; /* should never happen */
-
- /* If memory, add on the PCI bridge address offset */
- if (mmap_state == pci_mmap_mem) {
-#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
- *offset += hose->pci_mem_offset;
-#endif
- res_bit = IORESOURCE_MEM;
- } else {
- io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
- *offset += io_offset;
- res_bit = IORESOURCE_IO;
- }
-
- /*
- * Check that the offset requested corresponds to one of the
- * resources of the device.
- */
- for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
- struct resource *rp = &dev->resource[i];
- int flags = rp->flags;
+ return -EINVAL; /* should never happen */
- /* treat ROM as memory (should be already) */
- if (i == PCI_ROM_RESOURCE)
- flags |= IORESOURCE_MEM;
-
- /* Active and same type? */
- if ((flags & res_bit) == 0)
- continue;
-
- /* In the range of this resource? */
- if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
- continue;
-
- /* found it! construct the final physical address */
- if (mmap_state == pci_mmap_io)
- *offset += hose->io_base_phys - io_offset;
- return rp;
- }
+ /* Convert to an offset within this PCI controller */
+ ioaddr -= (unsigned long)hose->io_base_virt - _IO_BASE;
- return NULL;
+ vma->vm_pgoff += (ioaddr + hose->io_base_phys) >> PAGE_SHIFT;
+ return 0;
}
/*
@@ -268,37 +218,6 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
return prot;
}
-/*
- * Perform the actual remap of the pages for a PCI device mapping, as
- * appropriate for this architecture. The region in the process to map
- * is described by vm_start and vm_end members of VMA, the base physical
- * address is found in vm_pgoff.
- * The pci device structure is provided so that architectures may make mapping
- * decisions on a per-device or per-bus basis.
- *
- * Returns a negative error code on failure, zero on success.
- */
-int pci_mmap_page_range(struct pci_dev *dev, int bar, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- resource_size_t offset =
- ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
- struct resource *rp;
- int ret;
-
- rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
- if (rp == NULL)
- return -EINVAL;
-
- vma->vm_pgoff = offset >> PAGE_SHIFT;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot);
-
- return ret;
-}
-
/* This provides legacy IO read access on a bus */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
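
The new pci_iobar_pfn() above turns an I/O BAR start into a physical page offset in two steps: subtract the controller's virtual I/O window offset, then add back the physical I/O base and shift by PAGE_SHIFT. A self-contained sketch of that arithmetic; every address below is made up for illustration and merely stands in for the hose fields and _IO_BASE.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t io_base_virt = 0xe0001000;	/* hypothetical hose->io_base_virt   */
	uint64_t io_base_phys = 0x40000000;	/* hypothetical hose->io_base_phys   */
	uint64_t io_window    = 0xe0000000;	/* stand-in for _IO_BASE             */
	uint64_t ioaddr       = 0xe0002000;	/* hypothetical pci_resource_start() */

	/* offset within this controller, then the physical page frame number */
	ioaddr -= io_base_virt - io_window;
	uint64_t pgoff = (ioaddr + io_base_phys) >> PAGE_SHIFT;

	printf("vm_pgoff += %#llx\n", (unsigned long long)pgoff);
	return 0;
}
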
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index 1e4658eee13f..5a4875cac1ec 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -178,6 +178,8 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
/*
* Like get_user_pages_fast() except its IRQ-safe in that it won't fall
* back to the regular GUP.
+ * Note a difference from get_user_pages_fast(): this always returns the
+ * number of pages pinned, or 0 if no pages were pinned.

*/
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c
index 20e86209ef2e..ab88b6dd4679 100644
--- a/arch/nios2/kernel/time.c
+++ b/arch/nios2/kernel/time.c
@@ -336,9 +336,9 @@ static int __init nios2_time_init(struct device_node *timer)
return ret;
}
-void read_persistent_clock(struct timespec *ts)
+void read_persistent_clock64(struct timespec64 *ts)
{
- ts->tv_sec = mktime(2007, 1, 1, 0, 0, 0);
+ ts->tv_sec = mktime64(2007, 1, 1, 0, 0, 0);
ts->tv_nsec = 0;
}
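
The nios2 change above moves to the 64-bit persistent-clock interface, so the fixed fallback date is computed with the y2038-safe mktime64() instead of mktime(). A quick userspace check of the value it yields, using timegm() as a stand-in for the kernel helper:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct tm tm = { .tm_year = 2007 - 1900, .tm_mon = 0, .tm_mday = 1 };

	/* mktime64(2007, 1, 1, 0, 0, 0) in the kernel: 1167609600 seconds */
	time_t secs = timegm(&tm);

	printf("fallback persistent clock: %lld\n", (long long)secs);
	return 0;
}
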
diff --git a/arch/openrisc/include/uapi/asm/unistd.h b/arch/openrisc/include/uapi/asm/unistd.h
index 9a3ee389631e..11c5a58ab333 100644
--- a/arch/openrisc/include/uapi/asm/unistd.h
+++ b/arch/openrisc/include/uapi/asm/unistd.h
@@ -17,8 +17,6 @@
* (at your option) any later version.
*/
-#define __ARCH_HAVE_MMU
-
#define sys_mmap2 sys_mmap_pgoff
#define __ARCH_WANT_RENAMEAT
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 7e0bb9836b58..fc5a574c3482 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -338,6 +338,7 @@ source "mm/Kconfig"
config COMPAT
def_bool y
depends on 64BIT
+ select COMPAT_BINFMT_ELF if BINFMT_ELF
config SYSVIPC_COMPAT
def_bool y
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
index c22db5323244..57b8b2a2fd4e 100644
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -193,6 +193,12 @@ struct compat_shmid64_ds {
};
/*
+ * The type of struct elf_prstatus.pr_reg in compatible core dumps.
+ */
+#define COMPAT_ELF_NGREG 80
+typedef compat_ulong_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
+
+/*
* A pointer passed in from user mode. This should not
* be used for syscall parameters, just declare them
* as pointers because the syscall entry code will have
diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
index 382d75a2ee4f..f019d3ec0c1c 100644
--- a/arch/parisc/include/asm/elf.h
+++ b/arch/parisc/include/asm/elf.h
@@ -6,7 +6,7 @@
* ELF register definitions..
*/
-#include <asm/ptrace.h>
+#include <linux/types.h>
#define EM_PARISC 15
@@ -169,16 +169,12 @@ typedef struct elf64_fdesc {
__u64 gp;
} Elf64_Fdesc;
-#ifdef __KERNEL__
-
#ifdef CONFIG_64BIT
#define Elf_Fdesc Elf64_Fdesc
#else
#define Elf_Fdesc Elf32_Fdesc
#endif /*CONFIG_64BIT*/
-#endif /*__KERNEL__*/
-
/* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr. */
#define PT_HP_TLS (PT_LOOS + 0x0)
@@ -213,44 +209,44 @@ typedef struct elf64_fdesc {
#define PF_HP_SBP 0x08000000
/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+
+#define ELF_PLATFORM ("PARISC")
+
+/*
* The following definitions are those for 32-bit ELF binaries on a 32-bit
* kernel and for 64-bit binaries on a 64-bit kernel. To run 32-bit binaries
- * on a 64-bit kernel, arch/parisc/kernel/binfmt_elf32.c defines these
- * macros appropriately and then #includes binfmt_elf.c, which then includes
- * this file.
+ * on a 64-bit kernel, fs/compat_binfmt_elf.c defines ELF_CLASS and then
+ * #includes binfmt_elf.c, which then includes this file.
*/
#ifndef ELF_CLASS
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- *
- * Note that this header file is used by default in fs/binfmt_elf.c. So
- * the following macros are for the default case. However, for the 64
- * bit kernel we also support 32 bit parisc binaries. To do that
- * arch/parisc/kernel/binfmt_elf32.c defines its own set of these
- * macros, and then it includes fs/binfmt_elf.c to provide an alternate
- * elf binary handler for 32 bit binaries (on the 64 bit kernel).
- */
#ifdef CONFIG_64BIT
-#define ELF_CLASS ELFCLASS64
+#define ELF_CLASS ELFCLASS64
#else
#define ELF_CLASS ELFCLASS32
#endif
typedef unsigned long elf_greg_t;
-/*
- * This yields a string that ld.so will use to load implementation
- * specific libraries for optimization. This is more specific in
- * intent than poking at uname or /proc/cpuinfo.
- */
-
-#define ELF_PLATFORM ("PARISC\0")
-
#define SET_PERSONALITY(ex) \
+({ \
set_personality((current->personality & ~PER_MASK) | PER_LINUX); \
current->thread.map_base = DEFAULT_MAP_BASE; \
- current->thread.task_size = DEFAULT_TASK_SIZE \
+ current->thread.task_size = DEFAULT_TASK_SIZE; \
+ })
+
+#endif /* ! ELF_CLASS */
+
+#define COMPAT_SET_PERSONALITY(ex) \
+({ \
+ set_thread_flag(TIF_32BIT); \
+ current->thread.map_base = DEFAULT_MAP_BASE32; \
+ current->thread.task_size = DEFAULT_TASK_SIZE32; \
+ })
/*
* Fill in general registers in a core dump. This saves pretty
@@ -277,10 +273,12 @@ typedef unsigned long elf_greg_t;
#define ELF_CORE_COPY_REGS(dst, pt) \
memset(dst, 0, sizeof(dst)); /* don't leak any "random" bits */ \
- memcpy(dst + 0, pt->gr, 32 * sizeof(elf_greg_t)); \
- memcpy(dst + 32, pt->sr, 8 * sizeof(elf_greg_t)); \
- memcpy(dst + 40, pt->iaoq, 2 * sizeof(elf_greg_t)); \
- memcpy(dst + 42, pt->iasq, 2 * sizeof(elf_greg_t)); \
+ { int i; \
+ for (i = 0; i < 32; i++) dst[i] = pt->gr[i]; \
+ for (i = 0; i < 8; i++) dst[32 + i] = pt->sr[i]; \
+ } \
+ dst[40] = pt->iaoq[0]; dst[41] = pt->iaoq[1]; \
+ dst[42] = pt->iasq[0]; dst[43] = pt->iasq[1]; \
dst[44] = pt->sar; dst[45] = pt->iir; \
dst[46] = pt->isr; dst[47] = pt->ior; \
dst[48] = mfctl(22); dst[49] = mfctl(0); \
@@ -292,7 +290,7 @@ typedef unsigned long elf_greg_t;
dst[60] = mfctl(12); dst[61] = mfctl(13); \
dst[62] = mfctl(10); dst[63] = mfctl(15);
-#endif /* ! ELF_CLASS */
+#define CORE_DUMP_USE_REGSET
#define ELF_NGREG 80 /* We only need 64 at present, but leave space
for expansion. */
@@ -310,7 +308,10 @@ extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
struct pt_regs; /* forward declaration... */
-#define elf_check_arch(x) ((x)->e_machine == EM_PARISC && (x)->e_ident[EI_CLASS] == ELF_CLASS)
+#define elf_check_arch(x) \
+ ((x)->e_machine == EM_PARISC && (x)->e_ident[EI_CLASS] == ELF_CLASS)
+#define compat_elf_check_arch(x) \
+ ((x)->e_machine == EM_PARISC && (x)->e_ident[EI_CLASS] == ELFCLASS32)
/*
* These are used to set parameters in the core dumps.
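
The rewritten ELF_CORE_COPY_REGS above copies registers one element at a time instead of with memcpy(). Per-element assignment works whether the destination gregset entries have the same width as the pt_regs fields or are narrower, as in the removed 32-bit variant below, because each value is truncated individually. A standalone demonstration; the widths and register values are chosen only for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t gr[2] = { 0x1122334455667788ULL, 0x99aabbccddeeff00ULL };
	uint32_t dst_assign[2], dst_memcpy[2];

	/* per-element: the low 32 bits of each register, one per slot */
	for (int i = 0; i < 2; i++)
		dst_assign[i] = gr[i];

	/* raw bytes: both destination slots come from the first register only */
	memcpy(dst_memcpy, gr, sizeof(dst_memcpy));

	printf("assign: %#x %#x\n", dst_assign[0], dst_assign[1]);
	printf("memcpy: %#x %#x\n", dst_memcpy[0], dst_memcpy[1]);
	return 0;
}
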
diff --git a/arch/parisc/include/uapi/asm/siginfo.h b/arch/parisc/include/uapi/asm/siginfo.h
index be40331f757d..4a1062e05aaf 100644
--- a/arch/parisc/include/uapi/asm/siginfo.h
+++ b/arch/parisc/include/uapi/asm/siginfo.h
@@ -8,11 +8,4 @@
#include <asm-generic/siginfo.h>
-/*
- * SIGFPE si_codes
- */
-#ifdef __KERNEL__
-#define FPE_FIXME 0 /* Broken dup of SI_USER */
-#endif /* __KERNEL__ */
-
#endif
diff --git a/arch/parisc/kernel/binfmt_elf32.c b/arch/parisc/kernel/binfmt_elf32.c
deleted file mode 100644
index 20dfa081ed0b..000000000000
--- a/arch/parisc/kernel/binfmt_elf32.c
+++ /dev/null
@@ -1,98 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support for 32-bit Linux/Parisc ELF binaries on 64 bit kernels
- *
- * Copyright (C) 2000 John Marvin
- * Copyright (C) 2000 Hewlett Packard Co.
- *
- * Heavily inspired from various other efforts to do the same thing
- * (ia64,sparc64/mips64)
- */
-
-/* Make sure include/asm-parisc/elf.h does the right thing */
-
-#define ELF_CLASS ELFCLASS32
-
-#define ELF_CORE_COPY_REGS(dst, pt) \
- memset(dst, 0, sizeof(dst)); /* don't leak any "random" bits */ \
- { int i; \
- for (i = 0; i < 32; i++) dst[i] = (elf_greg_t) pt->gr[i]; \
- for (i = 0; i < 8; i++) dst[32 + i] = (elf_greg_t) pt->sr[i]; \
- } \
- dst[40] = (elf_greg_t) pt->iaoq[0]; dst[41] = (elf_greg_t) pt->iaoq[1]; \
- dst[42] = (elf_greg_t) pt->iasq[0]; dst[43] = (elf_greg_t) pt->iasq[1]; \
- dst[44] = (elf_greg_t) pt->sar; dst[45] = (elf_greg_t) pt->iir; \
- dst[46] = (elf_greg_t) pt->isr; dst[47] = (elf_greg_t) pt->ior; \
- dst[48] = (elf_greg_t) mfctl(22); dst[49] = (elf_greg_t) mfctl(0); \
- dst[50] = (elf_greg_t) mfctl(24); dst[51] = (elf_greg_t) mfctl(25); \
- dst[52] = (elf_greg_t) mfctl(26); dst[53] = (elf_greg_t) mfctl(27); \
- dst[54] = (elf_greg_t) mfctl(28); dst[55] = (elf_greg_t) mfctl(29); \
- dst[56] = (elf_greg_t) mfctl(30); dst[57] = (elf_greg_t) mfctl(31); \
- dst[58] = (elf_greg_t) mfctl( 8); dst[59] = (elf_greg_t) mfctl( 9); \
- dst[60] = (elf_greg_t) mfctl(12); dst[61] = (elf_greg_t) mfctl(13); \
- dst[62] = (elf_greg_t) mfctl(10); dst[63] = (elf_greg_t) mfctl(15);
-
-
-typedef unsigned int elf_greg_t;
-
-#include <linux/spinlock.h>
-#include <asm/processor.h>
-#include <linux/module.h>
-#include <linux/elfcore.h>
-#include <linux/compat.h> /* struct compat_timeval */
-
-#define elf_prstatus elf_prstatus32
-struct elf_prstatus32
-{
- struct elf_siginfo pr_info; /* Info associated with signal */
- short pr_cursig; /* Current signal */
- unsigned int pr_sigpend; /* Set of pending signals */
- unsigned int pr_sighold; /* Set of held signals */
- pid_t pr_pid;
- pid_t pr_ppid;
- pid_t pr_pgrp;
- pid_t pr_sid;
- struct compat_timeval pr_utime; /* User time */
- struct compat_timeval pr_stime; /* System time */
- struct compat_timeval pr_cutime; /* Cumulative user time */
- struct compat_timeval pr_cstime; /* Cumulative system time */
- elf_gregset_t pr_reg; /* GP registers */
- int pr_fpvalid; /* True if math co-processor being used. */
-};
-
-#define elf_prpsinfo elf_prpsinfo32
-struct elf_prpsinfo32
-{
- char pr_state; /* numeric process state */
- char pr_sname; /* char for pr_state */
- char pr_zomb; /* zombie */
- char pr_nice; /* nice val */
- unsigned int pr_flag; /* flags */
- u16 pr_uid;
- u16 pr_gid;
- pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
- /* Lots missing */
- char pr_fname[16]; /* filename of executable */
- char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
-};
-
-#define init_elf_binfmt init_elf32_binfmt
-
-#define ELF_PLATFORM ("PARISC32\0")
-
-/*
- * We should probably use this macro to set a flag somewhere to indicate
- * this is a 32 on 64 process. We could use PER_LINUX_32BIT, or we
- * could set a processor dependent flag in the thread_struct.
- */
-
-#undef SET_PERSONALITY
-#define SET_PERSONALITY(ex) \
- set_thread_flag(TIF_32BIT); \
- current->thread.map_base = DEFAULT_MAP_BASE32; \
- current->thread.task_size = DEFAULT_TASK_SIZE32 \
-
-#undef ns_to_timeval
-#define ns_to_timeval ns_to_compat_timeval
-
-#include "../../../fs/binfmt_elf.c"
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index a99da95fc9fd..bddd2acebdcc 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -254,7 +254,7 @@ parisc_cache_init(void)
}
}
-void disable_sr_hashing(void)
+void __init disable_sr_hashing(void)
{
int srhash_type, retval;
unsigned long space_bits;
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 67b0f7532e83..22e6374ece44 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -38,9 +38,10 @@
#include <asm/cache.h>
#include <asm/ldcw.h>
#include <linux/linkage.h>
+#include <linux/init.h>
- .text
- .align 128
+ .section .text.hot
+ .align 16
ENTRY_CFI(flush_tlb_all_local)
.proc
@@ -328,8 +329,6 @@ fdsync:
.procend
ENDPROC_CFI(flush_data_cache_local)
- .align 16
-
/* Macros to serialize TLB purge operations on SMP. */
.macro tlb_lock la,flags,tmp
@@ -1216,6 +1215,8 @@ ENTRY_CFI(flush_kernel_icache_range_asm)
.procend
ENDPROC_CFI(flush_kernel_icache_range_asm)
+ __INIT
+
/* align should cover use of rfi in disable_sr_hashing_asm and
* srdis_done.
*/
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index bbe46571ff96..b931745815e0 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -112,14 +112,6 @@ void machine_restart(char *cmd)
}
-void machine_halt(void)
-{
- /*
- ** The LED/ChassisCodes are updated by the led_halt()
- ** function, called by the reboot notifier chain.
- */
-}
-
void (*chassis_power_off)(void);
/*
@@ -158,6 +150,11 @@ void machine_power_off(void)
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
+void machine_halt(void)
+{
+ machine_power_off();
+}
+
void flush_thread(void)
{
/* Only needs to handle fpu stuff or perf monitors.
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index c919e6c0a687..68e671a11987 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -627,9 +627,10 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
on condition */
if(user_mode(regs)){
si.si_signo = SIGFPE;
- /* Set to zero, and let the userspace app figure it out from
- the insn pointed to by si_addr */
- si.si_code = FPE_FIXME;
+ /* Let userspace app figure it out from the insn pointed
+ * to by si_addr.
+ */
+ si.si_code = FPE_CONDTRAP;
si.si_addr = (void __user *) regs->iaoq[0];
force_sig_info(SIGFPE, &si, current);
return;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 73ce5dd07642..c32a181a7cbb 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -552,6 +552,9 @@ config KEXEC_FILE
for kernel and initramfs as opposed to a list of segments as is the
case for the older kexec call.
+config ARCH_HAS_KEXEC_PURGATORY
+ def_bool KEXEC_FILE
+
config RELOCATABLE
bool "Build a relocatable kernel"
depends on PPC64 || (FLATMEM && (44x || FSL_BOOKE))
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 931dda8be87c..66fcab13c8b4 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -545,18 +545,37 @@ enum {
#ifdef CONFIG_PPC_BOOK3E
#define CPU_FTRS_ALWAYS (CPU_FTRS_E6500 & CPU_FTRS_E5500)
#else
+
+#ifdef CONFIG_PPC_DT_CPU_FTRS
+#define CPU_FTRS_DT_CPU_BASE \
+ (CPU_FTR_LWSYNC | \
+ CPU_FTR_FPU_UNAVAILABLE | \
+ CPU_FTR_NODSISRALIGN | \
+ CPU_FTR_NOEXECUTE | \
+ CPU_FTR_COHERENT_ICACHE | \
+ CPU_FTR_STCX_CHECKS_ADDRESS | \
+ CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_DAWR | \
+ CPU_FTR_ARCH_206 | \
+ CPU_FTR_ARCH_207S)
+#else
+#define CPU_FTRS_DT_CPU_BASE (~0ul)
+#endif
+
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define CPU_FTRS_ALWAYS \
(CPU_FTRS_POSSIBLE & ~CPU_FTR_HVMODE & CPU_FTRS_POWER7 & \
CPU_FTRS_POWER8E & CPU_FTRS_POWER8 & CPU_FTRS_POWER8_DD1 & \
- CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD1 & CPU_FTRS_POWER9_DD2_1)
+ CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD1 & CPU_FTRS_POWER9_DD2_1 & \
+ CPU_FTRS_DT_CPU_BASE)
#else
#define CPU_FTRS_ALWAYS \
(CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \
CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \
CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \
CPU_FTRS_POWER8_DD1 & ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE & \
- CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD1 & CPU_FTRS_POWER9_DD2_1)
+ CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD1 & CPU_FTRS_POWER9_DD2_1 & \
+ CPU_FTRS_DT_CPU_BASE)
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
#endif
#else
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index d8b1e8e7e035..4a585cba1787 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -95,7 +95,7 @@ static inline bool kdump_in_progress(void)
}
#ifdef CONFIG_KEXEC_FILE
-extern struct kexec_file_ops kexec_elf64_ops;
+extern const struct kexec_file_ops kexec_elf64_ops;
#ifdef CONFIG_IMA_KEXEC
#define ARCH_HAS_KIMAGE_ARCH
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index 7e28442827f1..4f6573934792 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -15,9 +15,19 @@
#ifdef CC_USING_MPROFILE_KERNEL
-#define MODULE_ARCH_VERMAGIC "mprofile-kernel"
+#define MODULE_ARCH_VERMAGIC_FTRACE "mprofile-kernel "
+#else
+#define MODULE_ARCH_VERMAGIC_FTRACE ""
#endif
+#ifdef CONFIG_RELOCATABLE
+#define MODULE_ARCH_VERMAGIC_RELOCATABLE "relocatable "
+#else
+#define MODULE_ARCH_VERMAGIC_RELOCATABLE ""
+#endif
+
+#define MODULE_ARCH_VERMAGIC MODULE_ARCH_VERMAGIC_FTRACE MODULE_ARCH_VERMAGIC_RELOCATABLE
+
#ifndef __powerpc64__
/*
* Thanks to Paul M for explaining this.
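
MODULE_ARCH_VERMAGIC above is now assembled from two optional fragments via adjacent string-literal concatenation. A tiny userspace check of what the combined string looks like; both config options are assumed enabled for the sake of the example.

#include <stdio.h>

#define MODULE_ARCH_VERMAGIC_FTRACE      "mprofile-kernel "	/* as if CC_USING_MPROFILE_KERNEL */
#define MODULE_ARCH_VERMAGIC_RELOCATABLE "relocatable "		/* as if CONFIG_RELOCATABLE=y     */
#define MODULE_ARCH_VERMAGIC MODULE_ARCH_VERMAGIC_FTRACE MODULE_ARCH_VERMAGIC_RELOCATABLE

int main(void)
{
	/* prints: "mprofile-kernel relocatable " */
	printf("%s\n", MODULE_ARCH_VERMAGIC);
	return 0;
}
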
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 7159e1a6a61a..03e1a920491e 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -21,6 +21,9 @@
/* We calculate number of sg entries based on PAGE_SIZE */
#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
+/* Default time to sleep or delay between OPAL_BUSY/OPAL_BUSY_EVENT loops */
+#define OPAL_BUSY_DELAY_MS 10
+
/* /sys/firmware/opal */
extern struct kobject *opal_kobj;
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index e88fbb1fdb8f..8ab51f6ca03a 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -53,18 +53,6 @@ struct dt_cpu_feature {
int disabled;
};
-#define CPU_FTRS_BASE \
- (CPU_FTR_LWSYNC | \
- CPU_FTR_FPU_UNAVAILABLE |\
- CPU_FTR_NODSISRALIGN |\
- CPU_FTR_NOEXECUTE |\
- CPU_FTR_COHERENT_ICACHE | \
- CPU_FTR_STCX_CHECKS_ADDRESS |\
- CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
- CPU_FTR_DAWR | \
- CPU_FTR_ARCH_206 |\
- CPU_FTR_ARCH_207S)
-
#define MMU_FTRS_HASH_BASE (MMU_FTRS_POWER8)
#define COMMON_USER_BASE (PPC_FEATURE_32 | PPC_FEATURE_64 | \
@@ -124,7 +112,7 @@ static char dt_cpu_name[64];
static struct cpu_spec __initdata base_cpu_spec = {
.cpu_name = NULL,
- .cpu_features = CPU_FTRS_BASE,
+ .cpu_features = CPU_FTRS_DT_CPU_BASE,
.cpu_user_features = COMMON_USER_BASE,
.cpu_user_features2 = COMMON_USER2_BASE,
.mmu_features = 0,
diff --git a/arch/powerpc/kernel/kexec_elf_64.c b/arch/powerpc/kernel/kexec_elf_64.c
index 9a42309b091a..ba4f18a43ee8 100644
--- a/arch/powerpc/kernel/kexec_elf_64.c
+++ b/arch/powerpc/kernel/kexec_elf_64.c
@@ -572,7 +572,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
{
int ret;
unsigned int fdt_size;
- unsigned long kernel_load_addr, purgatory_load_addr;
+ unsigned long kernel_load_addr;
unsigned long initrd_load_addr = 0, fdt_load_addr;
void *fdt;
const void *slave_code;
@@ -580,6 +580,8 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
struct elf_info elf_info;
struct kexec_buf kbuf = { .image = image, .buf_min = 0,
.buf_max = ppc64_rma_size };
+ struct kexec_buf pbuf = { .image = image, .buf_min = 0,
+ .buf_max = ppc64_rma_size, .top_down = true };
ret = build_elf_exec_info(kernel_buf, kernel_len, &ehdr, &elf_info);
if (ret)
@@ -591,14 +593,13 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
pr_debug("Loaded the kernel at 0x%lx\n", kernel_load_addr);
- ret = kexec_load_purgatory(image, 0, ppc64_rma_size, true,
- &purgatory_load_addr);
+ ret = kexec_load_purgatory(image, &pbuf);
if (ret) {
pr_err("Loading purgatory failed.\n");
goto out;
}
- pr_debug("Loaded purgatory at 0x%lx\n", purgatory_load_addr);
+ pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem);
if (initrd != NULL) {
kbuf.buffer = initrd;
@@ -657,7 +658,7 @@ out:
return ret ? ERR_PTR(ret) : fdt;
}
-struct kexec_file_ops kexec_elf64_ops = {
+const struct kexec_file_ops kexec_elf64_ops = {
.probe = elf64_probe,
.load = elf64_load,
};
diff --git a/arch/powerpc/kernel/machine_kexec_file_64.c b/arch/powerpc/kernel/machine_kexec_file_64.c
index 45e0b7d5f200..0bd23dc789a4 100644
--- a/arch/powerpc/kernel/machine_kexec_file_64.c
+++ b/arch/powerpc/kernel/machine_kexec_file_64.c
@@ -31,52 +31,19 @@
#define SLAVE_CODE_SIZE 256
-static struct kexec_file_ops *kexec_file_loaders[] = {
+const struct kexec_file_ops * const kexec_file_loaders[] = {
&kexec_elf64_ops,
+ NULL
};
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
unsigned long buf_len)
{
- int i, ret = -ENOEXEC;
- struct kexec_file_ops *fops;
-
/* We don't support crash kernels yet. */
if (image->type == KEXEC_TYPE_CRASH)
return -EOPNOTSUPP;
- for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
- fops = kexec_file_loaders[i];
- if (!fops || !fops->probe)
- continue;
-
- ret = fops->probe(buf, buf_len);
- if (!ret) {
- image->fops = fops;
- return ret;
- }
- }
-
- return ret;
-}
-
-void *arch_kexec_kernel_image_load(struct kimage *image)
-{
- if (!image->fops || !image->fops->load)
- return ERR_PTR(-ENOEXEC);
-
- return image->fops->load(image, image->kernel_buf,
- image->kernel_buf_len, image->initrd_buf,
- image->initrd_buf_len, image->cmdline_buf,
- image->cmdline_buf_len);
-}
-
-int arch_kimage_file_post_load_cleanup(struct kimage *image)
-{
- if (!image->fops || !image->fops->cleanup)
- return 0;
-
- return image->fops->cleanup(image->image_loader_data);
+ return kexec_image_probe_default(image, buf, buf_len);
}
/**
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 66f2b6299c40..44c30dd38067 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -880,7 +880,7 @@ void rfi_flush_enable(bool enable)
rfi_flush = enable;
}
-static void init_fallback_flush(void)
+static void __ref init_fallback_flush(void)
{
u64 l1d_size, limit;
int cpu;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index a2ef0c0e6c31..0904492e7032 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1613,6 +1613,22 @@ void facility_unavailable_exception(struct pt_regs *regs)
value = mfspr(SPRN_FSCR);
status = value >> 56;
+ if ((hv || status >= 2) &&
+ (status < ARRAY_SIZE(facility_strings)) &&
+ facility_strings[status])
+ facility = facility_strings[status];
+
+ /* We should not have taken this interrupt in kernel */
+ if (!user_mode(regs)) {
+ pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n",
+ facility, status, regs->nip);
+ die("Unexpected facility unavailable exception", regs, SIGABRT);
+ }
+
+ /* We restore the interrupt state now */
+ if (!arch_irq_disabled_regs(regs))
+ local_irq_enable();
+
if (status == FSCR_DSCR_LG) {
/*
* User is accessing the DSCR register using the problem
@@ -1679,25 +1695,11 @@ void facility_unavailable_exception(struct pt_regs *regs)
return;
}
- if ((hv || status >= 2) &&
- (status < ARRAY_SIZE(facility_strings)) &&
- facility_strings[status])
- facility = facility_strings[status];
-
- /* We restore the interrupt state now */
- if (!arch_irq_disabled_regs(regs))
- local_irq_enable();
-
pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);
out:
- if (user_mode(regs)) {
- _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
- return;
- }
-
- die("Unexpected facility unavailable exception", regs, SIGABRT);
+ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index e1c083fbe434..78e6a392330f 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -470,8 +470,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
for (i = 0; i < npages; ++i) {
asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
"r" (rbvalues[i]), "r" (kvm->arch.lpid));
- trace_tlbie(kvm->arch.lpid, 0, rbvalues[i],
- kvm->arch.lpid, 0, 0, 0);
}
if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
@@ -492,8 +490,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
for (i = 0; i < npages; ++i) {
asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : :
"r" (rbvalues[i]), "r" (0));
- trace_tlbie(kvm->arch.lpid, 1, rbvalues[i],
- 0, 0, 0, 0);
}
asm volatile("ptesync" : : : "memory");
}
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 9cd87d11fe4e..205fe557ca10 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -35,6 +35,7 @@
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>
+#include <asm/mmu_context.h>
static DEFINE_SPINLOCK(slice_convert_lock);
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 2fba6170ab3f..a5d7309c2d05 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -33,13 +33,12 @@ static inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
{
unsigned long rb;
unsigned long rs;
- unsigned int r = 1; /* radix format */
rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
rs = ((unsigned long)pid << PPC_BITLSHIFT(31));
- asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
- : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
+ asm volatile(PPC_TLBIEL(%0, %1, %2, %3, 1)
+ : : "r"(rb), "r"(rs), "i"(ric), "i"(prs)
: "memory");
}
diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
index ba2ff06a2c98..1bceb95f422d 100644
--- a/arch/powerpc/platforms/powernv/opal-nvram.c
+++ b/arch/powerpc/platforms/powernv/opal-nvram.c
@@ -11,6 +11,7 @@
#define DEBUG
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
@@ -56,8 +57,12 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_write_nvram(__pa(buf), count, off);
- if (rc == OPAL_BUSY_EVENT)
+ if (rc == OPAL_BUSY_EVENT) {
+ msleep(OPAL_BUSY_DELAY_MS);
opal_poll_events(NULL);
+ } else if (rc == OPAL_BUSY) {
+ msleep(OPAL_BUSY_DELAY_MS);
+ }
}
if (rc)
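
The opal-nvram change above sleeps for OPAL_BUSY_DELAY_MS between retries instead of spinning on the firmware call. A self-contained sketch of that back-off pattern; the opal stand-in function and the return-code value are invented for illustration (see opal-api.h for the real codes).

#include <stdio.h>
#include <unistd.h>

#define OPAL_SUCCESS	0
#define OPAL_BUSY	(-2)	/* illustrative value only */
#define BUSY_DELAY_MS	10

/* stand-in for opal_write_nvram(): reports busy a few times, then succeeds */
static int fake_opal_write_nvram(int *attempts_left)
{
	return (*attempts_left)-- > 0 ? OPAL_BUSY : OPAL_SUCCESS;
}

int main(void)
{
	int attempts_left = 3;
	int rc;

	do {
		rc = fake_opal_write_nvram(&attempts_left);
		if (rc == OPAL_BUSY)
			usleep(BUSY_DELAY_MS * 1000);	/* stand-in for msleep() */
	} while (rc == OPAL_BUSY);

	printf("done, rc=%d\n", rc);
	return 0;
}
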
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 63838a17e56a..511b2cc9b91a 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -119,34 +119,12 @@ static void error(char *x)
asm volatile("lpsw %0" : : "Q" (psw));
}
-/*
- * Safe guard the ipl parameter block against a memory area that will be
- * overwritten. The validity check for the ipl parameter block is complex
- * (see cio_get_iplinfo and ipl_save_parameters) but if the pointer to
- * the ipl parameter block intersects with the passed memory area we can
- * safely assume that we can read from that memory. In that case just copy
- * the memory to IPL_PARMBLOCK_ORIGIN even if there is no ipl parameter
- * block.
- */
-static void check_ipl_parmblock(void *start, unsigned long size)
-{
- void *src, *dst;
-
- src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr;
- if (src + PAGE_SIZE <= start || src >= start + size)
- return;
- dst = (void *) IPL_PARMBLOCK_ORIGIN;
- memmove(dst, src, PAGE_SIZE);
- S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
-}
-
unsigned long decompress_kernel(void)
{
void *output, *kernel_end;
output = (void *) ALIGN((unsigned long) _end + HEAP_SIZE, PAGE_SIZE);
kernel_end = output + SZ__bss_start;
- check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
#ifdef CONFIG_BLK_DEV_INITRD
/*
@@ -156,7 +134,6 @@ unsigned long decompress_kernel(void)
* current bss section..
*/
if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
- check_ipl_parmblock(kernel_end, INITRD_SIZE);
memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
INITRD_START = (unsigned long) kernel_end;
}
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index fa9b7dd1a513..ad47abd08630 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -329,7 +329,7 @@ static void fallback_exit_blk(struct crypto_tfm *tfm)
static struct crypto_alg ecb_aes_alg = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-s390",
- .cra_priority = 400, /* combo: aes + ecb */
+ .cra_priority = 401, /* combo: aes + ecb + 1 */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -426,7 +426,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
static struct crypto_alg cbc_aes_alg = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-s390",
- .cra_priority = 400, /* combo: aes + cbc */
+ .cra_priority = 402, /* ecb-aes-s390 + 1 */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -633,7 +633,7 @@ static void xts_fallback_exit(struct crypto_tfm *tfm)
static struct crypto_alg xts_aes_alg = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-s390",
- .cra_priority = 400, /* combo: aes + xts */
+ .cra_priority = 402, /* ecb-aes-s390 + 1 */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -763,7 +763,7 @@ static int ctr_aes_decrypt(struct blkcipher_desc *desc,
static struct crypto_alg ctr_aes_alg = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-s390",
- .cra_priority = 400, /* combo: aes + ctr */
+ .cra_priority = 402, /* ecb-aes-s390 + 1 */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index 003932db8d12..80b27294c1de 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -138,7 +138,7 @@ static int ecb_paes_decrypt(struct blkcipher_desc *desc,
static struct crypto_alg ecb_paes_alg = {
.cra_name = "ecb(paes)",
.cra_driver_name = "ecb-paes-s390",
- .cra_priority = 400, /* combo: aes + ecb */
+ .cra_priority = 401, /* combo: aes + ecb + 1 */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_paes_ctx),
@@ -241,7 +241,7 @@ static int cbc_paes_decrypt(struct blkcipher_desc *desc,
static struct crypto_alg cbc_paes_alg = {
.cra_name = "cbc(paes)",
.cra_driver_name = "cbc-paes-s390",
- .cra_priority = 400, /* combo: aes + cbc */
+ .cra_priority = 402, /* ecb-paes-s390 + 1 */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_paes_ctx),
@@ -377,7 +377,7 @@ static int xts_paes_decrypt(struct blkcipher_desc *desc,
static struct crypto_alg xts_paes_alg = {
.cra_name = "xts(paes)",
.cra_driver_name = "xts-paes-s390",
- .cra_priority = 400, /* combo: aes + xts */
+ .cra_priority = 402, /* ecb-paes-s390 + 1 */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_pxts_ctx),
@@ -523,7 +523,7 @@ static int ctr_paes_decrypt(struct blkcipher_desc *desc,
static struct crypto_alg ctr_paes_alg = {
.cra_name = "ctr(paes)",
.cra_driver_name = "ctr-paes-s390",
- .cra_priority = 400, /* combo: aes + ctr */
+ .cra_priority = 402, /* ecb-paes-s390 + 1 */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct s390_paes_ctx),
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index cfce6835b109..c1bedb4c8de0 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -20,9 +20,9 @@
*/
typedef unsigned int ap_qid_t;
-#define AP_MKQID(_card, _queue) (((_card) & 63) << 8 | ((_queue) & 255))
-#define AP_QID_CARD(_qid) (((_qid) >> 8) & 63)
-#define AP_QID_QUEUE(_qid) ((_qid) & 255)
+#define AP_MKQID(_card, _queue) (((_card) & 0xff) << 8 | ((_queue) & 0xff))
+#define AP_QID_CARD(_qid) (((_qid) >> 8) & 0xff)
+#define AP_QID_QUEUE(_qid) ((_qid) & 0xff)
/**
* struct ap_queue_status - Holds the AP queue status.
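
The AP_MKQID widening above makes room for 8-bit card indices where the old encoding allowed only 6 bits. A minimal round-trip of the new encoding in plain C; the card and queue numbers are arbitrary examples.

#include <stdio.h>

typedef unsigned int ap_qid_t;

#define AP_MKQID(_card, _queue)	((((_card) & 0xff) << 8) | ((_queue) & 0xff))
#define AP_QID_CARD(_qid)	(((_qid) >> 8) & 0xff)
#define AP_QID_QUEUE(_qid)	((_qid) & 0xff)

int main(void)
{
	/* card 200 would not have fit in the old 6-bit field */
	ap_qid_t qid = AP_MKQID(200, 17);

	printf("qid=%#x card=%u queue=%u\n", qid,
	       AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return 0;
}
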
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 847a04262b9c..225667652069 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -328,16 +328,6 @@ static inline u8 pathmask_to_pos(u8 mask)
void channel_subsystem_reinit(void);
extern void css_schedule_reprobe(void);
-extern void reipl_ccw_dev(struct ccw_dev_id *id);
-
-struct cio_iplinfo {
- u8 ssid;
- u16 devno;
- int is_qdio;
-};
-
-extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo);
-
/* Function from drivers/s390/cio/chsc.c */
int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta);
int chsc_sstpi(void *page, void *result, size_t size);
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 186c7b5f5511..ae5135704616 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -15,8 +15,6 @@
#define NSS_NAME_SIZE 8
-#define IPL_PARMBLOCK_ORIGIN 0x2000
-
#define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \
sizeof(struct ipl_block_fcp))
@@ -29,10 +27,6 @@
#define IPL_MAX_SUPPORTED_VERSION (0)
-#define IPL_PARMBLOCK_START ((struct ipl_parameter_block *) \
- IPL_PARMBLOCK_ORIGIN)
-#define IPL_PARMBLOCK_SIZE (IPL_PARMBLOCK_START->hdr.len)
-
struct ipl_list_hdr {
u32 len;
u8 reserved1[3];
@@ -83,33 +77,21 @@ struct ipl_parameter_block {
union {
struct ipl_block_fcp fcp;
struct ipl_block_ccw ccw;
+ char raw[PAGE_SIZE - sizeof(struct ipl_list_hdr)];
} ipl_info;
} __packed __aligned(PAGE_SIZE);
-/*
- * IPL validity flags
- */
-extern u32 ipl_flags;
-
struct save_area;
struct save_area * __init save_area_alloc(bool is_boot_cpu);
struct save_area * __init save_area_boot_cpu(void);
void __init save_area_add_regs(struct save_area *, void *regs);
void __init save_area_add_vxrs(struct save_area *, __vector128 *vxrs);
-extern void do_reipl(void);
-extern void do_halt(void);
-extern void do_poff(void);
-extern void ipl_verify_parameters(void);
-extern void ipl_update_parameters(void);
+extern void s390_reset_system(void);
+extern void ipl_store_parameters(void);
extern size_t append_ipl_vmparm(char *, size_t);
extern size_t append_ipl_scpdata(char *, size_t);
-enum {
- IPL_DEVNO_VALID = 1,
- IPL_PARMBLOCK_VALID = 2,
-};
-
enum ipl_type {
IPL_TYPE_UNKNOWN = 1,
IPL_TYPE_CCW = 2,
@@ -138,6 +120,7 @@ struct ipl_info
extern struct ipl_info ipl_info;
extern void setup_ipl(void);
+extern void set_os_info_reipl_block(void);
/*
* DIAG 308 support
diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
index 35bf28fe4c64..b4bd8c41e9d3 100644
--- a/arch/s390/include/asm/nospec-branch.h
+++ b/arch/s390/include/asm/nospec-branch.h
@@ -9,6 +9,7 @@
extern int nospec_disable;
void nospec_init_branches(void);
+void nospec_auto_detect(void);
void nospec_revert(s32 *start, s32 *end);
#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/reset.h b/arch/s390/include/asm/reset.h
deleted file mode 100644
index 6450b31ade03..000000000000
--- a/arch/s390/include/asm/reset.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright IBM Corp. 2006
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
- */
-
-#ifndef _ASM_S390_RESET_H
-#define _ASM_S390_RESET_H
-
-#include <linux/list.h>
-
-struct reset_call {
- struct list_head list;
- void (*fn)(void);
-};
-
-extern void register_reset_call(struct reset_call *reset);
-extern void unregister_reset_call(struct reset_call *reset);
-extern void s390_reset_system(void);
-#endif /* _ASM_S390_RESET_H */
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h
index d568307321fc..b62e0614e440 100644
--- a/arch/s390/include/uapi/asm/zcrypt.h
+++ b/arch/s390/include/uapi/asm/zcrypt.h
@@ -203,9 +203,9 @@ struct ep11_urb {
} __attribute__((packed));
/**
- * struct zcrypt_device_status
+ * struct zcrypt_device_status_ext
* @hwtype: raw hardware type
- * @qid: 6 bit device index, 8 bit domain
+ * @qid: 8 bit device index, 8 bit domain
* @functions: AP device function bit field 'abcdef'
* a, b, c = reserved
* d = CCA coprocessor
@@ -214,28 +214,23 @@ struct ep11_urb {
* @online online status
* @reserved reserved
*/
-struct zcrypt_device_status {
+struct zcrypt_device_status_ext {
unsigned int hwtype:8;
- unsigned int qid:14;
+ unsigned int qid:16;
unsigned int online:1;
unsigned int functions:6;
- unsigned int reserved:3;
+ unsigned int reserved:1;
};
-#define MAX_ZDEV_CARDIDS 64
-#define MAX_ZDEV_DOMAINS 256
+#define MAX_ZDEV_CARDIDS_EXT 256
+#define MAX_ZDEV_DOMAINS_EXT 256
-/**
- * Maximum number of zcrypt devices
- */
-#define MAX_ZDEV_ENTRIES (MAX_ZDEV_CARDIDS * MAX_ZDEV_DOMAINS)
+/* Maximum number of zcrypt devices */
+#define MAX_ZDEV_ENTRIES_EXT (MAX_ZDEV_CARDIDS_EXT * MAX_ZDEV_DOMAINS_EXT)
-/**
- * zcrypt_device_matrix
- * Device matrix of all zcrypt devices
- */
-struct zcrypt_device_matrix {
- struct zcrypt_device_status device[MAX_ZDEV_ENTRIES];
+/* Device matrix of all zcrypt devices */
+struct zcrypt_device_matrix_ext {
+ struct zcrypt_device_status_ext device[MAX_ZDEV_ENTRIES_EXT];
};
#define AUTOSELECT ((unsigned int)0xFFFFFFFF)
@@ -270,71 +265,35 @@ struct zcrypt_device_matrix {
* ZSENDEP11CPRB
* Send an arbitrary EP11 CPRB to an EP11 coprocessor crypto card.
*
- * Z90STAT_STATUS_MASK
- * Return an 64 element array of unsigned chars for the status of
- * all devices.
+ * ZCRYPT_DEVICE_STATUS
+ * The given struct zcrypt_device_matrix_ext is updated with
+ * status information for each currently known apqn.
+ *
+ * ZCRYPT_STATUS_MASK
+ * Return a MAX_ZDEV_CARDIDS_EXT element array of unsigned chars for the
+ * status of all devices.
* 0x01: PCICA
* 0x02: PCICC
* 0x03: PCIXCC_MCL2
* 0x04: PCIXCC_MCL3
* 0x05: CEX2C
* 0x06: CEX2A
- * 0x0d: device is disabled via the proc filesystem
- *
- * Z90STAT_QDEPTH_MASK
- * Return an 64 element array of unsigned chars for the queue
- * depth of all devices.
- *
- * Z90STAT_PERDEV_REQCNT
- * Return an 64 element array of unsigned integers for the number
- * of successfully completed requests per device since the device
- * was detected and made available.
- *
- * Z90STAT_REQUESTQ_COUNT
- * Return an integer count of the number of entries waiting to be
- * sent to a device.
- *
- * Z90STAT_PENDINGQ_COUNT
- * Return an integer count of the number of entries sent to all
- * devices awaiting the reply.
- *
- * Z90STAT_TOTALOPEN_COUNT
- * Return an integer count of the number of open file handles.
- *
- * Z90STAT_DOMAIN_INDEX
- * Return the integer value of the Cryptographic Domain.
- *
- * The following ioctls are deprecated and should be no longer used:
- *
- * Z90STAT_TOTALCOUNT
- * Return an integer count of all device types together.
- *
- * Z90STAT_PCICACOUNT
- * Return an integer count of all PCICAs.
- *
- * Z90STAT_PCICCCOUNT
- * Return an integer count of all PCICCs.
- *
- * Z90STAT_PCIXCCMCL2COUNT
- * Return an integer count of all MCL2 PCIXCCs.
- *
- * Z90STAT_PCIXCCMCL3COUNT
- * Return an integer count of all MCL3 PCIXCCs.
- *
- * Z90STAT_CEX2CCOUNT
- * Return an integer count of all CEX2Cs.
+ * 0x07: CEX3C
+ * 0x08: CEX3A
+ * 0x0a: CEX4
+ * 0x0b: CEX5
+ * 0x0c: CEX6
+ * 0x0d: device is disabled
*
- * Z90STAT_CEX2ACOUNT
- * Return an integer count of all CEX2As.
+ * ZCRYPT_QDEPTH_MASK
+ * Return a MAX_ZDEV_CARDIDS_EXT element array of unsigned chars for the
+ * queue depth of all devices.
*
- * ICAZ90STATUS
- * Return some device driver status in a ica_z90_status struct
- * This takes an ica_z90_status struct as its arg.
+ * ZCRYPT_PERDEV_REQCNT
+ * Return a MAX_ZDEV_CARDIDS_EXT element array of unsigned integers for
+ * the number of successfully completed requests per device since the
+ * device was detected and made available.
*
- * Z90STAT_PCIXCCCOUNT
- * Return an integer count of all PCIXCCs (MCL2 + MCL3).
- * This is DEPRECATED now that MCL3 PCIXCCs are treated differently from
- * MCL2 PCIXCCs.
*/
/**
@@ -344,22 +303,56 @@ struct zcrypt_device_matrix {
#define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0)
#define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0)
#define ZSENDEP11CPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0)
-#define ZDEVICESTATUS _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x4f, 0)
-/* New status calls */
-#define Z90STAT_TOTALCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int)
-#define Z90STAT_PCICACOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x41, int)
-#define Z90STAT_PCICCCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x42, int)
-#define Z90STAT_PCIXCCMCL2COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4b, int)
-#define Z90STAT_PCIXCCMCL3COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4c, int)
-#define Z90STAT_CEX2CCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4d, int)
-#define Z90STAT_CEX2ACOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4e, int)
+#define ZCRYPT_DEVICE_STATUS _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x5f, 0)
+#define ZCRYPT_STATUS_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x58, char[MAX_ZDEV_CARDIDS_EXT])
+#define ZCRYPT_QDEPTH_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x59, char[MAX_ZDEV_CARDIDS_EXT])
+#define ZCRYPT_PERDEV_REQCNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x5a, int[MAX_ZDEV_CARDIDS_EXT])
+
+/*
+ * Only deprecated defines, structs and ioctls below this line.
+ */
+
+/* Deprecated: use MAX_ZDEV_CARDIDS_EXT */
+#define MAX_ZDEV_CARDIDS 64
+/* Deprecated: use MAX_ZDEV_DOMAINS_EXT */
+#define MAX_ZDEV_DOMAINS 256
+
+/* Deprecated: use MAX_ZDEV_ENTRIES_EXT */
+#define MAX_ZDEV_ENTRIES (MAX_ZDEV_CARDIDS * MAX_ZDEV_DOMAINS)
+
+/* Deprecated: use struct zcrypt_device_status_ext */
+struct zcrypt_device_status {
+ unsigned int hwtype:8;
+ unsigned int qid:14;
+ unsigned int online:1;
+ unsigned int functions:6;
+ unsigned int reserved:3;
+};
+
+/* Deprecated: use struct zcrypt_device_matrix_ext */
+struct zcrypt_device_matrix {
+ struct zcrypt_device_status device[MAX_ZDEV_ENTRIES];
+};
+
+/* Deprecated: use ZCRYPT_DEVICE_STATUS */
+#define ZDEVICESTATUS _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x4f, 0)
+/* Deprecated: use ZCRYPT_STATUS_MASK */
+#define Z90STAT_STATUS_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x48, char[64])
+/* Deprecated: use ZCRYPT_QDEPTH_MASK */
+#define Z90STAT_QDEPTH_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x49, char[64])
+/* Deprecated: use ZCRYPT_PERDEV_REQCNT */
+#define Z90STAT_PERDEV_REQCNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4a, int[64])
+
+/* Deprecated: use sysfs to query these values */
#define Z90STAT_REQUESTQ_COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x44, int)
#define Z90STAT_PENDINGQ_COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x45, int)
#define Z90STAT_TOTALOPEN_COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x46, int)
#define Z90STAT_DOMAIN_INDEX _IOR(ZCRYPT_IOCTL_MAGIC, 0x47, int)
-#define Z90STAT_STATUS_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x48, char[64])
-#define Z90STAT_QDEPTH_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x49, char[64])
-#define Z90STAT_PERDEV_REQCNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4a, int[64])
+
+/*
+ * The ioctl number ranges 0x40 - 0x42 and 0x4b - 0x4e have been used in
+ * the past; do not assign new ioctls for these.
+ */
#endif /* __ASM_S390_ZCRYPT_H */
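
A quick size check of the extended status matrix defined above: each zcrypt_device_status_ext packs into 32 bits, so the full 256 x 256 matrix comes to 256 KiB, assuming a typical ABI that lays the bit-fields out in a single unsigned int.

#include <stdio.h>

struct zcrypt_device_status_ext {
	unsigned int hwtype:8;
	unsigned int qid:16;
	unsigned int online:1;
	unsigned int functions:6;
	unsigned int reserved:1;
};

#define MAX_ZDEV_CARDIDS_EXT	256
#define MAX_ZDEV_DOMAINS_EXT	256
#define MAX_ZDEV_ENTRIES_EXT	(MAX_ZDEV_CARDIDS_EXT * MAX_ZDEV_DOMAINS_EXT)

struct zcrypt_device_matrix_ext {
	struct zcrypt_device_status_ext device[MAX_ZDEV_ENTRIES_EXT];
};

int main(void)
{
	printf("entries=%d, per-entry=%zu bytes, matrix=%zu KiB\n",
	       MAX_ZDEV_ENTRIES_EXT,
	       sizeof(struct zcrypt_device_status_ext),
	       sizeof(struct zcrypt_device_matrix_ext) / 1024);
	return 0;
}
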
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 18c1eeb847b2..6f2a193ccccc 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -279,7 +279,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
if (put_compat_sigset((compat_sigset_t __user *)frame->sc.oldmask,
set, sizeof(compat_sigset_t)))
return -EFAULT;
- if (__put_user(ptr_to_compat(&frame->sc), &frame->sc.sregs))
+ if (__put_user(ptr_to_compat(&frame->sregs), &frame->sc.sregs))
return -EFAULT;
/* Store registers needed to create the signal frame */
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index b00b515baa53..32daa0f84325 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -342,16 +342,6 @@ static __init void memmove_early(void *dst, const void *src, size_t n)
S390_lowcore.program_new_psw = old;
}
-static __init noinline void ipl_save_parameters(void)
-{
- void *src, *dst;
-
- src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr;
- dst = (void *) IPL_PARMBLOCK_ORIGIN;
- memmove_early(dst, src, PAGE_SIZE);
- S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
-}
-
static __init noinline void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
@@ -421,10 +411,8 @@ static void __init setup_boot_command_line(void)
void __init startup_init(void)
{
reset_tod_clock();
- ipl_save_parameters();
rescue_initrd();
clear_bss_section();
- ipl_verify_parameters();
time_early_init();
init_kernel_storage_key();
lockdep_off();
@@ -432,7 +420,7 @@ void __init startup_init(void)
setup_facility_list();
detect_machine_type();
setup_arch_string();
- ipl_update_parameters();
+ ipl_store_parameters();
setup_boot_command_line();
detect_diag9c();
detect_diag44();
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 34477c1aee6d..4296d7e61fb6 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -24,9 +24,7 @@
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/cpcmd.h>
-#include <asm/cio.h>
#include <asm/ebcdic.h>
-#include <asm/reset.h>
#include <asm/sclp.h>
#include <asm/checksum.h>
#include <asm/debug.h>
@@ -119,39 +117,12 @@ static char *dump_type_str(enum dump_type type)
}
}
-static u8 ipl_ssid;
-static u16 ipl_devno;
-u32 ipl_flags;
-
-enum ipl_method {
- REIPL_METHOD_CCW_CIO,
- REIPL_METHOD_CCW_DIAG,
- REIPL_METHOD_CCW_VM,
- REIPL_METHOD_FCP_RO_DIAG,
- REIPL_METHOD_FCP_RW_DIAG,
- REIPL_METHOD_FCP_RO_VM,
- REIPL_METHOD_FCP_DUMP,
- REIPL_METHOD_NSS,
- REIPL_METHOD_NSS_DIAG,
- REIPL_METHOD_DEFAULT,
-};
-
-enum dump_method {
- DUMP_METHOD_NONE,
- DUMP_METHOD_CCW_CIO,
- DUMP_METHOD_CCW_DIAG,
- DUMP_METHOD_CCW_VM,
- DUMP_METHOD_FCP_DIAG,
-};
-
-static int diag308_set_works;
-
+static int ipl_block_valid;
static struct ipl_parameter_block ipl_block;
static int reipl_capabilities = IPL_TYPE_UNKNOWN;
static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
-static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT;
static struct ipl_parameter_block *reipl_block_fcp;
static struct ipl_parameter_block *reipl_block_ccw;
static struct ipl_parameter_block *reipl_block_nss;
@@ -159,7 +130,6 @@ static struct ipl_parameter_block *reipl_block_actual;
static int dump_capabilities = DUMP_TYPE_NONE;
static enum dump_type dump_type = DUMP_TYPE_NONE;
-static enum dump_method dump_method = DUMP_METHOD_NONE;
static struct ipl_parameter_block *dump_block_fcp;
static struct ipl_parameter_block *dump_block_ccw;
@@ -260,33 +230,25 @@ static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
sys_##_prefix##_##_name##_show, \
sys_##_prefix##_##_name##_store)
-static void make_attrs_ro(struct attribute **attrs)
-{
- while (*attrs) {
- (*attrs)->mode = S_IRUGO;
- attrs++;
- }
-}
-
/*
* ipl section
*/
static __init enum ipl_type get_ipl_type(void)
{
- struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
-
- if (!(ipl_flags & IPL_DEVNO_VALID))
+ if (!ipl_block_valid)
return IPL_TYPE_UNKNOWN;
- if (!(ipl_flags & IPL_PARMBLOCK_VALID))
+
+ switch (ipl_block.hdr.pbt) {
+ case DIAG308_IPL_TYPE_CCW:
return IPL_TYPE_CCW;
- if (ipl->hdr.version > IPL_MAX_SUPPORTED_VERSION)
- return IPL_TYPE_UNKNOWN;
- if (ipl->hdr.pbt != DIAG308_IPL_TYPE_FCP)
- return IPL_TYPE_UNKNOWN;
- if (ipl->ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP)
- return IPL_TYPE_FCP_DUMP;
- return IPL_TYPE_FCP;
+ case DIAG308_IPL_TYPE_FCP:
+ if (ipl_block.ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP)
+ return IPL_TYPE_FCP_DUMP;
+ else
+ return IPL_TYPE_FCP;
+ }
+ return IPL_TYPE_UNKNOWN;
}
struct ipl_info ipl_info;
@@ -338,7 +300,7 @@ size_t append_ipl_vmparm(char *dest, size_t size)
size_t rc;
rc = 0;
- if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW))
+ if (ipl_block_valid && ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW)
rc = reipl_get_ascii_vmparm(dest, size, &ipl_block);
else
dest[0] = 0;
@@ -401,7 +363,7 @@ size_t append_ipl_scpdata(char *dest, size_t len)
size_t rc;
rc = 0;
- if (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP)
+ if (ipl_block_valid && ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP)
rc = reipl_append_ascii_scpdata(dest, len, &ipl_block);
else
dest[0] = 0;
@@ -415,14 +377,14 @@ static struct kobj_attribute sys_ipl_vm_parm_attr =
static ssize_t sys_ipl_device_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
- struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
-
switch (ipl_info.type) {
case IPL_TYPE_CCW:
- return sprintf(page, "0.%x.%04x\n", ipl_ssid, ipl_devno);
+ return sprintf(page, "0.%x.%04x\n", ipl_block.ipl_info.ccw.ssid,
+ ipl_block.ipl_info.ccw.devno);
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
- return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno);
+ return sprintf(page, "0.0.%04x\n",
+ ipl_block.ipl_info.fcp.devno);
default:
return 0;
}
@@ -435,8 +397,8 @@ static ssize_t ipl_parameter_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
- return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START,
- IPL_PARMBLOCK_SIZE);
+ return memory_read_from_buffer(buf, count, &off, &ipl_block,
+ ipl_block.hdr.len);
}
static struct bin_attribute ipl_parameter_attr =
__BIN_ATTR(binary_parameter, S_IRUGO, ipl_parameter_read, NULL,
@@ -446,8 +408,8 @@ static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
- unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len;
- void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data;
+ unsigned int size = ipl_block.ipl_info.fcp.scp_data_len;
+ void *scp_data = &ipl_block.ipl_info.fcp.scp_data;
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
@@ -462,14 +424,14 @@ static struct bin_attribute *ipl_fcp_bin_attrs[] = {
/* FCP ipl device attributes */
-DEFINE_IPL_ATTR_RO(ipl_fcp, wwpn, "0x%016llx\n", (unsigned long long)
- IPL_PARMBLOCK_START->ipl_info.fcp.wwpn);
-DEFINE_IPL_ATTR_RO(ipl_fcp, lun, "0x%016llx\n", (unsigned long long)
- IPL_PARMBLOCK_START->ipl_info.fcp.lun);
-DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n", (unsigned long long)
- IPL_PARMBLOCK_START->ipl_info.fcp.bootprog);
-DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n", (unsigned long long)
- IPL_PARMBLOCK_START->ipl_info.fcp.br_lba);
+DEFINE_IPL_ATTR_RO(ipl_fcp, wwpn, "0x%016llx\n",
+ (unsigned long long)ipl_block.ipl_info.fcp.wwpn);
+DEFINE_IPL_ATTR_RO(ipl_fcp, lun, "0x%016llx\n",
+ (unsigned long long)ipl_block.ipl_info.fcp.lun);
+DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n",
+ (unsigned long long)ipl_block.ipl_info.fcp.bootprog);
+DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n",
+ (unsigned long long)ipl_block.ipl_info.fcp.br_lba);
static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
@@ -545,10 +507,6 @@ static void __ipl_run(void *unused)
{
__bpon();
diag308(DIAG308_LOAD_CLEAR, NULL);
- if (MACHINE_IS_VM)
- __cpcmd("IPL", NULL, 0, NULL);
- else if (ipl_info.type == IPL_TYPE_CCW)
- reipl_ccw_dev(&ipl_info.data.ccw.dev_id);
}
static void ipl_run(struct shutdown_trigger *trigger)
@@ -776,6 +734,7 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
/* copy and convert to ebcdic */
memcpy(ipb->hdr.loadparm, buf, lp_len);
ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN);
+ ipb->hdr.flags |= DIAG308_FLAGS_LP_VALID;
return len;
}
@@ -938,11 +897,10 @@ static struct attribute_group reipl_nss_attr_group = {
.attrs = reipl_nss_attrs,
};
-static void set_reipl_block_actual(struct ipl_parameter_block *reipl_block)
+void set_os_info_reipl_block(void)
{
- reipl_block_actual = reipl_block;
os_info_entry_add(OS_INFO_REIPL_BLOCK, reipl_block_actual,
- reipl_block->hdr.len);
+ reipl_block_actual->hdr.len);
}
/* reipl type */
@@ -954,38 +912,16 @@ static int reipl_set_type(enum ipl_type type)
switch(type) {
case IPL_TYPE_CCW:
- if (diag308_set_works)
- reipl_method = REIPL_METHOD_CCW_DIAG;
- else if (MACHINE_IS_VM)
- reipl_method = REIPL_METHOD_CCW_VM;
- else
- reipl_method = REIPL_METHOD_CCW_CIO;
- set_reipl_block_actual(reipl_block_ccw);
+ reipl_block_actual = reipl_block_ccw;
break;
case IPL_TYPE_FCP:
- if (diag308_set_works)
- reipl_method = REIPL_METHOD_FCP_RW_DIAG;
- else if (MACHINE_IS_VM)
- reipl_method = REIPL_METHOD_FCP_RO_VM;
- else
- reipl_method = REIPL_METHOD_FCP_RO_DIAG;
- set_reipl_block_actual(reipl_block_fcp);
- break;
- case IPL_TYPE_FCP_DUMP:
- reipl_method = REIPL_METHOD_FCP_DUMP;
+ reipl_block_actual = reipl_block_fcp;
break;
case IPL_TYPE_NSS:
- if (diag308_set_works)
- reipl_method = REIPL_METHOD_NSS_DIAG;
- else
- reipl_method = REIPL_METHOD_NSS;
- set_reipl_block_actual(reipl_block_nss);
- break;
- case IPL_TYPE_UNKNOWN:
- reipl_method = REIPL_METHOD_DEFAULT;
+ reipl_block_actual = reipl_block_nss;
break;
default:
- BUG();
+ break;
}
reipl_type = type;
return 0;
@@ -1018,77 +954,25 @@ static struct kobj_attribute reipl_type_attr =
static struct kset *reipl_kset;
static struct kset *reipl_fcp_kset;
-static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
- const enum ipl_method m)
-{
- char loadparm[LOADPARM_LEN + 1] = {};
- char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
- char nss_name[NSS_NAME_SIZE + 1] = {};
- size_t pos = 0;
-
- reipl_get_ascii_loadparm(loadparm, ipb);
- reipl_get_ascii_nss_name(nss_name, ipb);
- reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
-
- switch (m) {
- case REIPL_METHOD_CCW_VM:
- pos = sprintf(dst, "IPL %X CLEAR", ipb->ipl_info.ccw.devno);
- break;
- case REIPL_METHOD_NSS:
- pos = sprintf(dst, "IPL %s", nss_name);
- break;
- default:
- break;
- }
- if (strlen(loadparm) > 0)
- pos += sprintf(dst + pos, " LOADPARM '%s'", loadparm);
- if (strlen(vmparm) > 0)
- sprintf(dst + pos, " PARM %s", vmparm);
-}
-
static void __reipl_run(void *unused)
{
- struct ccw_dev_id devid;
- static char buf[128];
-
- switch (reipl_method) {
- case REIPL_METHOD_CCW_CIO:
- devid.ssid = reipl_block_ccw->ipl_info.ccw.ssid;
- devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
- reipl_ccw_dev(&devid);
- break;
- case REIPL_METHOD_CCW_VM:
- get_ipl_string(buf, reipl_block_ccw, REIPL_METHOD_CCW_VM);
- __cpcmd(buf, NULL, 0, NULL);
- break;
- case REIPL_METHOD_CCW_DIAG:
+ switch (reipl_type) {
+ case IPL_TYPE_CCW:
diag308(DIAG308_SET, reipl_block_ccw);
diag308(DIAG308_LOAD_CLEAR, NULL);
break;
- case REIPL_METHOD_FCP_RW_DIAG:
+ case IPL_TYPE_FCP:
diag308(DIAG308_SET, reipl_block_fcp);
diag308(DIAG308_LOAD_CLEAR, NULL);
break;
- case REIPL_METHOD_FCP_RO_DIAG:
- diag308(DIAG308_LOAD_CLEAR, NULL);
- break;
- case REIPL_METHOD_FCP_RO_VM:
- __cpcmd("IPL", NULL, 0, NULL);
- break;
- case REIPL_METHOD_NSS_DIAG:
+ case IPL_TYPE_NSS:
diag308(DIAG308_SET, reipl_block_nss);
diag308(DIAG308_LOAD_CLEAR, NULL);
break;
- case REIPL_METHOD_NSS:
- get_ipl_string(buf, reipl_block_nss, REIPL_METHOD_NSS);
- __cpcmd(buf, NULL, 0, NULL);
- break;
- case REIPL_METHOD_DEFAULT:
- if (MACHINE_IS_VM)
- __cpcmd("IPL", NULL, 0, NULL);
+ case IPL_TYPE_UNKNOWN:
diag308(DIAG308_LOAD_CLEAR, NULL);
break;
- case REIPL_METHOD_FCP_DUMP:
+ case IPL_TYPE_FCP_DUMP:
break;
}
disabled_wait((unsigned long) __builtin_return_address(0));
@@ -1119,7 +1003,7 @@ static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb)
ipb->hdr.flags = DIAG308_FLAGS_LP_VALID;
/* VM PARM */
- if (MACHINE_IS_VM && diag308_set_works &&
+ if (MACHINE_IS_VM && ipl_block_valid &&
(ipl_block.ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID)) {
ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID;
@@ -1141,9 +1025,6 @@ static int __init reipl_nss_init(void)
if (!reipl_block_nss)
return -ENOMEM;
- if (!diag308_set_works)
- sys_reipl_nss_vmparm_attr.attr.mode = S_IRUGO;
-
rc = sysfs_create_group(&reipl_kset->kobj, &reipl_nss_attr_group);
if (rc)
return rc;
@@ -1161,24 +1042,16 @@ static int __init reipl_ccw_init(void)
if (!reipl_block_ccw)
return -ENOMEM;
- if (MACHINE_IS_VM) {
- if (!diag308_set_works)
- sys_reipl_ccw_vmparm_attr.attr.mode = S_IRUGO;
- rc = sysfs_create_group(&reipl_kset->kobj,
- &reipl_ccw_attr_group_vm);
- } else {
- if(!diag308_set_works)
- sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
- rc = sysfs_create_group(&reipl_kset->kobj,
- &reipl_ccw_attr_group_lpar);
- }
+ rc = sysfs_create_group(&reipl_kset->kobj,
+ MACHINE_IS_VM ? &reipl_ccw_attr_group_vm
+ : &reipl_ccw_attr_group_lpar);
if (rc)
return rc;
reipl_block_ccw_init(reipl_block_ccw);
if (ipl_info.type == IPL_TYPE_CCW) {
- reipl_block_ccw->ipl_info.ccw.ssid = ipl_ssid;
- reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
+ reipl_block_ccw->ipl_info.ccw.ssid = ipl_block.ipl_info.ccw.ssid;
+ reipl_block_ccw->ipl_info.ccw.devno = ipl_block.ipl_info.ccw.devno;
reipl_block_ccw_fill_parms(reipl_block_ccw);
}
@@ -1190,14 +1063,6 @@ static int __init reipl_fcp_init(void)
{
int rc;
- if (!diag308_set_works) {
- if (ipl_info.type == IPL_TYPE_FCP) {
- make_attrs_ro(reipl_fcp_attrs);
- sys_reipl_fcp_scp_data_attr.attr.mode = S_IRUGO;
- } else
- return 0;
- }
-
reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
if (!reipl_block_fcp)
return -ENOMEM;
@@ -1218,7 +1083,7 @@ static int __init reipl_fcp_init(void)
}
if (ipl_info.type == IPL_TYPE_FCP) {
- memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
+ memcpy(reipl_block_fcp, &ipl_block, sizeof(ipl_block));
/*
* Fix loadparm: There are systems where the (SCSI) LOADPARM
* is invalid in the SCSI IPL parameter block, so take it
@@ -1340,21 +1205,6 @@ static int dump_set_type(enum dump_type type)
{
if (!(dump_capabilities & type))
return -EINVAL;
- switch (type) {
- case DUMP_TYPE_CCW:
- if (diag308_set_works)
- dump_method = DUMP_METHOD_CCW_DIAG;
- else if (MACHINE_IS_VM)
- dump_method = DUMP_METHOD_CCW_VM;
- else
- dump_method = DUMP_METHOD_CCW_CIO;
- break;
- case DUMP_TYPE_FCP:
- dump_method = DUMP_METHOD_FCP_DIAG;
- break;
- default:
- dump_method = DUMP_METHOD_NONE;
- }
dump_type = type;
return 0;
}
@@ -1397,25 +1247,11 @@ static void diag308_dump(void *dump_block)
static void __dump_run(void *unused)
{
- struct ccw_dev_id devid;
- static char buf[100];
-
- switch (dump_method) {
- case DUMP_METHOD_CCW_CIO:
- devid.ssid = dump_block_ccw->ipl_info.ccw.ssid;
- devid.devno = dump_block_ccw->ipl_info.ccw.devno;
- reipl_ccw_dev(&devid);
- break;
- case DUMP_METHOD_CCW_VM:
- sprintf(buf, "STORE STATUS");
- __cpcmd(buf, NULL, 0, NULL);
- sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
- __cpcmd(buf, NULL, 0, NULL);
- break;
- case DUMP_METHOD_CCW_DIAG:
+ switch (dump_type) {
+ case DUMP_TYPE_CCW:
diag308_dump(dump_block_ccw);
break;
- case DUMP_METHOD_FCP_DIAG:
+ case DUMP_TYPE_FCP:
diag308_dump(dump_block_fcp);
break;
default:
@@ -1425,7 +1261,7 @@ static void __dump_run(void *unused)
static void dump_run(struct shutdown_trigger *trigger)
{
- if (dump_method == DUMP_METHOD_NONE)
+ if (dump_type == DUMP_TYPE_NONE)
return;
smp_send_stop();
smp_call_ipl_cpu(__dump_run, NULL);
@@ -1457,8 +1293,6 @@ static int __init dump_fcp_init(void)
if (!sclp_ipl_info.has_dump)
return 0; /* LDIPL DUMP is not installed */
- if (!diag308_set_works)
- return 0;
dump_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
if (!dump_block_fcp)
return -ENOMEM;
@@ -1516,18 +1350,9 @@ static void dump_reipl_run(struct shutdown_trigger *trigger)
dump_run(trigger);
}
-static int __init dump_reipl_init(void)
-{
- if (!diag308_set_works)
- return -EOPNOTSUPP;
- else
- return 0;
-}
-
static struct shutdown_action __refdata dump_reipl_action = {
.name = SHUTDOWN_ACTION_DUMP_REIPL_STR,
.fn = dump_reipl_run,
- .init = dump_reipl_init,
};
/*
@@ -1838,10 +1663,8 @@ static int __init s390_ipl_init(void)
* case the system is booted from HMC. Fortunately in this case
* READ SCP info provides the correct value.
*/
- if (memcmp(sclp_ipl_info.loadparm, str, sizeof(str)) == 0 &&
- diag308_set_works)
- memcpy(sclp_ipl_info.loadparm, ipl_block.hdr.loadparm,
- LOADPARM_LEN);
+ if (memcmp(sclp_ipl_info.loadparm, str, sizeof(str)) == 0 && ipl_block_valid)
+ memcpy(sclp_ipl_info.loadparm, ipl_block.hdr.loadparm, LOADPARM_LEN);
shutdown_actions_init();
shutdown_triggers_init();
return 0;
@@ -1921,19 +1744,20 @@ static struct notifier_block on_panic_nb = {
void __init setup_ipl(void)
{
+ BUILD_BUG_ON(sizeof(struct ipl_parameter_block) != PAGE_SIZE);
+
ipl_info.type = get_ipl_type();
switch (ipl_info.type) {
case IPL_TYPE_CCW:
- ipl_info.data.ccw.dev_id.ssid = ipl_ssid;
- ipl_info.data.ccw.dev_id.devno = ipl_devno;
+ ipl_info.data.ccw.dev_id.ssid = ipl_block.ipl_info.ccw.ssid;
+ ipl_info.data.ccw.dev_id.devno = ipl_block.ipl_info.ccw.devno;
break;
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
ipl_info.data.fcp.dev_id.ssid = 0;
- ipl_info.data.fcp.dev_id.devno =
- IPL_PARMBLOCK_START->ipl_info.fcp.devno;
- ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
- ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
+ ipl_info.data.fcp.dev_id.devno = ipl_block.ipl_info.fcp.devno;
+ ipl_info.data.fcp.wwpn = ipl_block.ipl_info.fcp.wwpn;
+ ipl_info.data.fcp.lun = ipl_block.ipl_info.fcp.lun;
break;
case IPL_TYPE_NSS:
case IPL_TYPE_UNKNOWN:
@@ -1943,85 +1767,21 @@ void __init setup_ipl(void)
atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
}
-void __init ipl_update_parameters(void)
+void __init ipl_store_parameters(void)
{
int rc;
rc = diag308(DIAG308_STORE, &ipl_block);
- if ((rc == DIAG308_RC_OK) || (rc == DIAG308_RC_NOCONFIG))
- diag308_set_works = 1;
-}
-
-void __init ipl_verify_parameters(void)
-{
- struct cio_iplinfo iplinfo;
-
- if (cio_get_iplinfo(&iplinfo))
- return;
-
- ipl_ssid = iplinfo.ssid;
- ipl_devno = iplinfo.devno;
- ipl_flags |= IPL_DEVNO_VALID;
- if (!iplinfo.is_qdio)
- return;
- ipl_flags |= IPL_PARMBLOCK_VALID;
-}
-
-static LIST_HEAD(rcall);
-static DEFINE_MUTEX(rcall_mutex);
-
-void register_reset_call(struct reset_call *reset)
-{
- mutex_lock(&rcall_mutex);
- list_add(&reset->list, &rcall);
- mutex_unlock(&rcall_mutex);
-}
-EXPORT_SYMBOL_GPL(register_reset_call);
-
-void unregister_reset_call(struct reset_call *reset)
-{
- mutex_lock(&rcall_mutex);
- list_del(&reset->list);
- mutex_unlock(&rcall_mutex);
-}
-EXPORT_SYMBOL_GPL(unregister_reset_call);
-
-static void do_reset_calls(void)
-{
- struct reset_call *reset;
-
- if (diag308_set_works) {
- diag308_reset();
- return;
- }
- list_for_each_entry(reset, &rcall, list)
- reset->fn();
+ if (rc == DIAG308_RC_OK && ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION)
+ ipl_block_valid = 1;
}
void s390_reset_system(void)
{
- struct lowcore *lc;
-
- lc = (struct lowcore *)(unsigned long) store_prefix();
-
- /* Stack for interrupt/machine check handler */
- lc->panic_stack = S390_lowcore.panic_stack;
-
/* Disable prefixing */
set_prefix(0);
/* Disable lowcore protection */
- __ctl_clear_bit(0,28);
-
- /* Set new machine check handler */
- S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
- S390_lowcore.mcck_new_psw.addr =
- (unsigned long) s390_base_mcck_handler;
-
- /* Set new program check handler */
- S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
- S390_lowcore.program_new_psw.addr =
- (unsigned long) s390_base_pgm_handler;
-
- do_reset_calls();
+ __ctl_clear_bit(0, 28);
+ diag308_reset();
}
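
The sysfs rework above replaces the old IPL_PARMBLOCK_START pointer with the static ipl_block copy filled in by ipl_store_parameters(). The DEFINE_IPL_ATTR_RO() macro itself is defined earlier in ipl.c and is not part of these hunks; as a rough, assumed sketch (names and macro body approximated, may differ from the real definition), one of the FCP attributes now boils down to something like:

	/* Approximation only - the real macro lives earlier in ipl.c and
	 * may differ in detail. */
	static ssize_t sys_ipl_fcp_wwpn_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *page)
	{
		return sprintf(page, "0x%016llx\n",
			       (unsigned long long)ipl_block.ipl_info.fcp.wwpn);
	}
	static struct kobj_attribute sys_ipl_fcp_wwpn_attr =
		__ATTR(wwpn, S_IRUGO, sys_ipl_fcp_wwpn_show, NULL);
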
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index a80050bbe2e4..b7020e721ae3 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -20,7 +20,6 @@
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
-#include <asm/reset.h>
#include <asm/ipl.h>
#include <asm/diag.h>
#include <asm/elf.h>
@@ -253,6 +252,7 @@ void machine_shutdown(void)
void machine_crash_shutdown(struct pt_regs *regs)
{
+ set_os_info_reipl_block();
}
/*
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
index 14867ec5f726..f236ce8757e8 100644
--- a/arch/s390/kernel/nospec-branch.c
+++ b/arch/s390/kernel/nospec-branch.c
@@ -72,7 +72,7 @@ static int __init nospectre_v2_setup_early(char *str)
}
early_param("nospectre_v2", nospectre_v2_setup_early);
-static int __init spectre_v2_auto_early(void)
+void __init nospec_auto_detect(void)
{
if (IS_ENABLED(CC_USING_EXPOLINE)) {
/*
@@ -87,11 +87,7 @@ static int __init spectre_v2_auto_early(void)
* nobp setting decides what is done, this depends on the
* CONFIG_KERNEL_NP option and the nobp/nospec parameters.
*/
- return 0;
}
-#ifdef CONFIG_EXPOLINE_AUTO
-early_initcall(spectre_v2_auto_early);
-#endif
static int __init spectre_v2_setup_early(char *str)
{
@@ -102,7 +98,7 @@ static int __init spectre_v2_setup_early(char *str)
if (str && !strncmp(str, "off", 3))
nospec_disable = 1;
if (str && !strncmp(str, "auto", 4))
- spectre_v2_auto_early();
+ nospec_auto_detect();
return 0;
}
early_param("spectre_v2", spectre_v2_setup_early);
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index a40ebd1d29d0..73cc3750f0d3 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -75,90 +75,3 @@ ENTRY(store_status)
.align 8
.Lclkcmp: .quad 0x0000000000000000
.previous
-
-#
-# do_reipl_asm
-# Parameter: r2 = schid of reipl device
-#
-
-ENTRY(do_reipl_asm)
- basr %r13,0
-.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
-.Lpg1: lgr %r3,%r2
- larl %r2,.Lstatus
- brasl %r14,store_status
-
-.Lstatus: lctlg %c6,%c6,.Lall-.Lpg0(%r13)
- lgr %r1,%r2
- mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
- stsch .Lschib-.Lpg0(%r13)
- oi .Lschib+5-.Lpg0(%r13),0x84
-.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
- msch .Lschib-.Lpg0(%r13)
- lghi %r0,5
-.Lssch: ssch .Liplorb-.Lpg0(%r13)
- jz .L001
- brct %r0,.Lssch
- bas %r14,.Ldisab-.Lpg0(%r13)
-.L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13)
-.Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13)
-.Lcont: c %r1,__LC_SUBCHANNEL_ID
- jnz .Ltpi
- clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
- jnz .Ltpi
- tsch .Liplirb-.Lpg0(%r13)
- tm .Liplirb+9-.Lpg0(%r13),0xbf
- jz .L002
- bas %r14,.Ldisab-.Lpg0(%r13)
-.L002: tm .Liplirb+8-.Lpg0(%r13),0xf3
- jz .L003
- bas %r14,.Ldisab-.Lpg0(%r13)
-.L003: st %r1,__LC_SUBCHANNEL_ID
- lhi %r1,0 # mode 0 = esa
- slr %r0,%r0 # set cpuid to zero
- sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esa mode
- lpsw 0
-.Ldisab: sll %r14,1
- srl %r14,1 # need to kill hi bit to avoid specification exceptions.
- st %r14,.Ldispsw+12-.Lpg0(%r13)
- lpswe .Ldispsw-.Lpg0(%r13)
- .align 8
-.Lall: .quad 0x00000000ff000000
- .align 16
-/*
- * These addresses have to be 31 bit otherwise
- * the sigp will throw a specifcation exception
- * when switching to ESA mode as bit 31 be set
- * in the ESA psw.
- * Bit 31 of the addresses has to be 0 for the
- * 31bit lpswe instruction a fact they appear to have
- * omitted from the pop.
- */
-.Lnewpsw: .quad 0x0000000080000000
- .quad .Lpg1
-.Lpcnew: .quad 0x0000000080000000
- .quad .Lecs
-.Lionew: .quad 0x0000000080000000
- .quad .Lcont
-.Lwaitpsw: .quad 0x0202000080000000
- .quad .Ltpi
-.Ldispsw: .quad 0x0002000080000000
- .quad 0x0000000000000000
-.Liplccws: .long 0x02000000,0x60000018
- .long 0x08000008,0x20000001
-.Liplorb: .long 0x0049504c,0x0040ff80
- .long 0x00000000+.Liplccws
-.Lschib: .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
-.Liplirb: .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index 9c2c96da23d0..c97c2d40fe15 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -29,33 +29,6 @@
ENTRY(relocate_kernel)
basr %r13,0 # base address
.base:
- stctg %c0,%c15,ctlregs-.base(%r13)
- stmg %r0,%r15,gprregs-.base(%r13)
- lghi %r0,3
- sllg %r0,%r0,31
- stg %r0,0x1d0(%r0)
- la %r0,.back_pgm-.base(%r13)
- stg %r0,0x1d8(%r0)
- la %r1,load_psw-.base(%r13)
- mvc 0(8,%r0),0(%r1)
- la %r0,.back-.base(%r13)
- st %r0,4(%r0)
- oi 4(%r0),0x80
- lghi %r0,0
- diag %r0,%r0,0x308
- .back:
- lhi %r1,1 # mode 1 = esame
- sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esame mode
- sam64 # switch to 64 bit addressing mode
- basr %r13,0
- .back_base:
- oi have_diag308-.back_base(%r13),0x01
- lctlg %c0,%c15,ctlregs-.back_base(%r13)
- lmg %r0,%r15,gprregs-.back_base(%r13)
- j .top
- .back_pgm:
- lmg %r0,%r15,gprregs-.base(%r13)
- .top:
lghi %r7,PAGE_SIZE # load PAGE_SIZE in r7
lghi %r9,PAGE_SIZE # load PAGE_SIZE in r9
lg %r5,0(%r2) # read another word for indirection page
@@ -64,55 +37,36 @@ ENTRY(relocate_kernel)
je .indir_check # NO, goto "indir_check"
lgr %r6,%r5 # r6 = r5
nill %r6,0xf000 # mask it out and...
- j .top # ...next iteration
+ j .base # ...next iteration
.indir_check:
tml %r5,0x2 # is it a indirection page?
je .done_test # NO, goto "done_test"
nill %r5,0xf000 # YES, mask out,
lgr %r2,%r5 # move it into the right register,
- j .top # and read next...
+ j .base # and read next...
.done_test:
tml %r5,0x4 # is it the done indicator?
je .source_test # NO! Well, then it should be the source indicator...
j .done # ok, lets finish it here...
.source_test:
tml %r5,0x8 # it should be a source indicator...
- je .top # NO, ignore it...
+ je .base # NO, ignore it...
lgr %r8,%r5 # r8 = r5
nill %r8,0xf000 # masking
0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0
jo 0b
- j .top
+ j .base
.done:
sgr %r0,%r0 # clear register r0
la %r4,load_psw-.base(%r13) # load psw-address into the register
o %r3,4(%r4) # or load address into psw
st %r3,4(%r4)
mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0
- tm have_diag308-.base(%r13),0x01
- jno .no_diag308
diag %r0,%r0,0x308
- .no_diag308:
- sam31 # 31 bit mode
- sr %r1,%r1 # erase register r1
- sr %r2,%r2 # erase register r2
- sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero
- lpsw 0 # hopefully start new kernel...
.align 8
load_psw:
.long 0x00080000,0x80000000
- ctlregs:
- .rept 16
- .quad 0
- .endr
- gprregs:
- .rept 16
- .quad 0
- .endr
- have_diag308:
- .byte 0
- .align 8
relocate_kernel_end:
.align 8
.globl relocate_kernel_len
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7b58a712f818..fc3b4aa185cc 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -894,6 +894,9 @@ void __init setup_arch(char **cmdline_p)
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = (unsigned long) _end;
+ if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
+ nospec_auto_detect();
+
parse_early_param();
#ifdef CONFIG_CRASH_DUMP
/* Deactivate elfcorehdr= kernel parameter */
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 05c8abd864f1..2809d11c7a28 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -220,6 +220,8 @@ static inline int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
/*
* Like get_user_pages_fast() except its IRQ-safe in that it won't fall
* back to the regular GUP.
+ * Note a difference from get_user_pages_fast(): this always returns the
+ * number of pages pinned, or 0 if no pages were pinned.
*/
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c
index 4feb7c86f4ac..46b2481eec90 100644
--- a/arch/sh/boards/of-generic.c
+++ b/arch/sh/boards/of-generic.c
@@ -126,12 +126,6 @@ static void __init sh_of_setup(char **cmdline_p)
{
struct device_node *root;
-#ifdef CONFIG_USE_BUILTIN_DTB
- unflatten_and_copy_device_tree();
-#else
- unflatten_device_tree();
-#endif
-
board_time_init = sh_of_time_init;
sh_mv.mv_name = "Unknown SH model";
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index 5976a2c8a3e3..e5b7437ab4af 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -49,6 +49,8 @@ static void pcibios_scanbus(struct pci_channel *hose)
for (i = 0; i < hose->nr_resources; i++) {
res = hose->resources + i;
offset = 0;
+ if (res->flags & IORESOURCE_DISABLED)
+ continue;
if (res->flags & IORESOURCE_IO)
offset = hose->io_offset;
else if (res->flags & IORESOURCE_MEM)
@@ -102,6 +104,9 @@ int register_pci_controller(struct pci_channel *hose)
for (i = 0; i < hose->nr_resources; i++) {
struct resource *res = hose->resources + i;
+ if (res->flags & IORESOURCE_DISABLED)
+ continue;
+
if (res->flags & IORESOURCE_IO) {
if (request_resource(&ioport_resource, res) < 0)
goto out;
diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c
index 0167a7352719..382e7ecf4c82 100644
--- a/arch/sh/drivers/pci/pcie-sh7786.c
+++ b/arch/sh/drivers/pci/pcie-sh7786.c
@@ -19,6 +19,7 @@
#include <linux/clk.h>
#include <linux/sh_clk.h>
#include <linux/sh_intc.h>
+#include <cpu/sh7786.h>
#include "pcie-sh7786.h"
#include <asm/sizes.h>
@@ -32,6 +33,7 @@ struct sh7786_pcie_port {
static struct sh7786_pcie_port *sh7786_pcie_ports;
static unsigned int nr_ports;
+static unsigned long dma_pfn_offset;
static struct sh7786_pcie_hwops {
int (*core_init)(void);
@@ -40,73 +42,73 @@ static struct sh7786_pcie_hwops {
static struct resource sh7786_pci0_resources[] = {
{
- .name = "PCIe0 IO",
+ .name = "PCIe0 MEM 0",
.start = 0xfd000000,
.end = 0xfd000000 + SZ_8M - 1,
- .flags = IORESOURCE_IO,
+ .flags = IORESOURCE_MEM,
}, {
- .name = "PCIe0 MEM 0",
+ .name = "PCIe0 MEM 1",
.start = 0xc0000000,
.end = 0xc0000000 + SZ_512M - 1,
.flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
}, {
- .name = "PCIe0 MEM 1",
+ .name = "PCIe0 MEM 2",
.start = 0x10000000,
.end = 0x10000000 + SZ_64M - 1,
.flags = IORESOURCE_MEM,
}, {
- .name = "PCIe0 MEM 2",
+ .name = "PCIe0 IO",
.start = 0xfe100000,
.end = 0xfe100000 + SZ_1M - 1,
- .flags = IORESOURCE_MEM,
+ .flags = IORESOURCE_IO,
},
};
static struct resource sh7786_pci1_resources[] = {
{
- .name = "PCIe1 IO",
+ .name = "PCIe1 MEM 0",
.start = 0xfd800000,
.end = 0xfd800000 + SZ_8M - 1,
- .flags = IORESOURCE_IO,
+ .flags = IORESOURCE_MEM,
}, {
- .name = "PCIe1 MEM 0",
+ .name = "PCIe1 MEM 1",
.start = 0xa0000000,
.end = 0xa0000000 + SZ_512M - 1,
.flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
}, {
- .name = "PCIe1 MEM 1",
+ .name = "PCIe1 MEM 2",
.start = 0x30000000,
.end = 0x30000000 + SZ_256M - 1,
.flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
}, {
- .name = "PCIe1 MEM 2",
+ .name = "PCIe1 IO",
.start = 0xfe300000,
.end = 0xfe300000 + SZ_1M - 1,
- .flags = IORESOURCE_MEM,
+ .flags = IORESOURCE_IO,
},
};
static struct resource sh7786_pci2_resources[] = {
{
- .name = "PCIe2 IO",
+ .name = "PCIe2 MEM 0",
.start = 0xfc800000,
.end = 0xfc800000 + SZ_4M - 1,
- .flags = IORESOURCE_IO,
+ .flags = IORESOURCE_MEM,
}, {
- .name = "PCIe2 MEM 0",
+ .name = "PCIe2 MEM 1",
.start = 0x80000000,
.end = 0x80000000 + SZ_512M - 1,
.flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
}, {
- .name = "PCIe2 MEM 1",
+ .name = "PCIe2 MEM 2",
.start = 0x20000000,
.end = 0x20000000 + SZ_256M - 1,
.flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
}, {
- .name = "PCIe2 MEM 2",
+ .name = "PCIe2 IO",
.start = 0xfcd00000,
.end = 0xfcd00000 + SZ_1M - 1,
- .flags = IORESOURCE_MEM,
+ .flags = IORESOURCE_IO,
},
};
@@ -301,7 +303,7 @@ static int __init pcie_init(struct sh7786_pcie_port *port)
{
struct pci_channel *chan = port->hose;
unsigned int data;
- phys_addr_t memphys;
+ phys_addr_t memstart, memend;
size_t memsize;
int ret, i, win;
@@ -357,15 +359,26 @@ static int __init pcie_init(struct sh7786_pcie_port *port)
data |= (0xff << 16);
pci_write_reg(chan, data, SH4A_PCIEMACCTLR);
- memphys = __pa(memory_start);
- memsize = roundup_pow_of_two(memory_end - memory_start);
+ memstart = __pa(memory_start);
+ memend = __pa(memory_end);
+ memsize = roundup_pow_of_two(memend - memstart);
+
+ /*
+ * The start address must be aligned on its size. So we round
+ * it down, and then recalculate the size so that it covers
+ * the entire memory.
+ */
+ memstart = ALIGN_DOWN(memstart, memsize);
+ memsize = roundup_pow_of_two(memend - memstart);
+
+ dma_pfn_offset = memstart >> PAGE_SHIFT;
/*
* If there's more than 512MB of memory, we need to roll over to
* LAR1/LAMR1.
*/
if (memsize > SZ_512M) {
- pci_write_reg(chan, memphys + SZ_512M, SH4A_PCIELAR1);
+ pci_write_reg(chan, memstart + SZ_512M, SH4A_PCIELAR1);
pci_write_reg(chan, ((memsize - SZ_512M) - SZ_256) | 1,
SH4A_PCIELAMR1);
memsize = SZ_512M;
@@ -381,7 +394,7 @@ static int __init pcie_init(struct sh7786_pcie_port *port)
* LAR0/LAMR0 covers up to the first 512MB, which is enough to
* cover all of lowmem on most platforms.
*/
- pci_write_reg(chan, memphys, SH4A_PCIELAR0);
+ pci_write_reg(chan, memstart, SH4A_PCIELAR0);
pci_write_reg(chan, (memsize - SZ_256) | 1, SH4A_PCIELAMR0);
/* Finish initialization */
@@ -438,6 +451,9 @@ static int __init pcie_init(struct sh7786_pcie_port *port)
* mode, so just skip them entirely.
*/
if ((res->flags & IORESOURCE_MEM_32BIT) && __in_29bit_mode())
+ res->flags |= IORESOURCE_DISABLED;
+
+ if (res->flags & IORESOURCE_DISABLED)
continue;
pci_write_reg(chan, 0x00000000, SH4A_PCIEPTCTLR(win));
@@ -472,6 +488,11 @@ int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
return evt2irq(0xae0);
}
+void pcibios_bus_add_device(struct pci_dev *pdev)
+{
+ pdev->dev.dma_pfn_offset = dma_pfn_offset;
+}
+
static int __init sh7786_pcie_core_init(void)
{
/* Return the number of ports */
@@ -527,6 +548,7 @@ static struct sh7786_pcie_hwops sh7786_65nm_pcie_hwops __initdata = {
static int __init sh7786_pcie_init(void)
{
struct clk *platclk;
+ u32 mm_sel;
int i;
printk(KERN_NOTICE "PCI: Starting initialization.\n");
@@ -560,6 +582,16 @@ static int __init sh7786_pcie_init(void)
clk_enable(platclk);
+ mm_sel = sh7786_mm_sel();
+
+ /*
+ * Depending on the MMSELR register value, the PCIe0 MEM 1
+ * area may not be available. See Table 13.11 of the SH7786
+ * datasheet.
+ */
+ if (mm_sel != 1 && mm_sel != 2 && mm_sel != 5 && mm_sel != 6)
+ sh7786_pci0_resources[2].flags |= IORESOURCE_DISABLED;
+
printk(KERN_NOTICE "PCI: probing %d ports.\n", nr_ports);
for (i = 0; i < nr_ports; i++) {
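
To make the new LAR window alignment above concrete, here is a worked example; the physical addresses are assumptions for illustration, not values taken from a real board:

	/* Assumed: __pa(memory_start) = 0x48000000, __pa(memory_end) = 0x58000000
	 *
	 *   memsize  = roundup_pow_of_two(0x58000000 - 0x48000000) = 0x10000000
	 *   memstart = ALIGN_DOWN(0x48000000, 0x10000000)          = 0x40000000
	 *   memsize  = roundup_pow_of_two(0x58000000 - 0x40000000) = 0x20000000
	 *
	 * LAR0/LAMR0 are then programmed with 0x40000000 / 512 MiB (no LAR1
	 * roll-over needed), and dma_pfn_offset = 0x40000000 >> PAGE_SHIFT,
	 * which the DMA hunks below subtract from physical addresses to form
	 * bus addresses.
	 */
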
diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h
index 15bf07bfa96b..6d192f4908a7 100644
--- a/arch/sh/include/asm/futex.h
+++ b/arch/sh/include/asm/futex.h
@@ -37,10 +37,7 @@ static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
pagefault_disable();
do {
- if (op == FUTEX_OP_SET)
- ret = oldval = 0;
- else
- ret = get_user(oldval, uaddr);
+ ret = get_user(oldval, uaddr);
if (ret) break;
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7786.h b/arch/sh/include/cpu-sh4/cpu/sh7786.h
index 0df09e638f09..96b8cb1f754a 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7786.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7786.h
@@ -14,6 +14,8 @@
#ifndef __CPU_SH7786_H__
#define __CPU_SH7786_H__
+#include <linux/io.h>
+
enum {
/* PA */
GPIO_PA7, GPIO_PA6, GPIO_PA5, GPIO_PA4,
@@ -131,4 +133,9 @@ enum {
GPIO_FN_IRL7, GPIO_FN_IRL6, GPIO_FN_IRL5, GPIO_FN_IRL4,
};
+static inline u32 sh7786_mm_sel(void)
+{
+ return __raw_readl(0xFC400020) & 0x7;
+}
+
#endif /* __CPU_SH7786_H__ */
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c
index 62b485107eae..178457d7620c 100644
--- a/arch/sh/kernel/dma-nommu.c
+++ b/arch/sh/kernel/dma-nommu.c
@@ -16,7 +16,8 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- dma_addr_t addr = page_to_phys(page) + offset;
+ dma_addr_t addr = page_to_phys(page) + offset
+ - PFN_PHYS(dev->dma_pfn_offset);
WARN_ON(size == 0);
@@ -36,12 +37,14 @@ static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
WARN_ON(nents == 0 || sg[0].length == 0);
for_each_sg(sg, s, nents, i) {
+ dma_addr_t offset = PFN_PHYS(dev->dma_pfn_offset);
+
BUG_ON(!sg_page(s));
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
sh_sync_dma_for_device(sg_virt(s), s->length, dir);
- s->dma_address = sg_phys(s);
+ s->dma_address = sg_phys(s) - offset;
s->dma_length = s->length;
}
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index c001f782c5f1..28cc61216b64 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -255,7 +255,7 @@ debug_trap:
mov.l @r8, r8
jsr @r8
nop
- bra __restore_all
+ bra ret_from_exception
nop
CFI_ENDPROC
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index b95c411d0333..d34e998b809f 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -330,6 +330,14 @@ void __init setup_arch(char **cmdline_p)
/* Let earlyprintk output early console messages */
early_platform_driver_probe("earlyprintk", 1, 1);
+#ifdef CONFIG_OF_FLATTREE
+#ifdef CONFIG_USE_BUILTIN_DTB
+ unflatten_and_copy_device_tree();
+#else
+ unflatten_device_tree();
+#endif
+#endif
+
paging_init();
#ifdef CONFIG_DUMMY_CONSOLE
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 6ea3aab508f2..8ce98691d822 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -59,7 +59,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);
- *dma_handle = virt_to_phys(ret);
+ *dma_handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset);
return ret_nocache;
}
@@ -69,7 +69,7 @@ void dma_generic_free_coherent(struct device *dev, size_t size,
unsigned long attrs)
{
int order = get_order(size);
- unsigned long pfn = dma_handle >> PAGE_SHIFT;
+ unsigned long pfn = (dma_handle >> PAGE_SHIFT) + dev->dma_pfn_offset;
int k;
for (k = 0; k < (1 << order); k++)
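
Taken together, the nommu_map_page()/nommu_map_sg() and coherent allocator changes implement one pair of conversions between CPU physical addresses and device (bus) addresses. A minimal sketch; the helper names below are hypothetical and do not exist in this patch:

	#include <linux/device.h>
	#include <linux/pfn.h>

	/* phys -> bus: what nommu_map_page() and dma_generic_alloc_coherent() do */
	static inline dma_addr_t sh_phys_to_bus(struct device *dev, phys_addr_t p)
	{
		return p - PFN_PHYS(dev->dma_pfn_offset);
	}

	/* bus -> phys: what dma_generic_free_coherent() does when recovering the pfn */
	static inline phys_addr_t sh_bus_to_phys(struct device *dev, dma_addr_t d)
	{
		return d + PFN_PHYS(dev->dma_pfn_offset);
	}
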
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
index 8045b5bb7075..56c86ca98ecf 100644
--- a/arch/sh/mm/gup.c
+++ b/arch/sh/mm/gup.c
@@ -160,6 +160,8 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
/*
* Like get_user_pages_fast() except its IRQ-safe in that it won't fall
* back to the regular GUP.
+ * Note a difference from get_user_pages_fast(): this always returns the
+ * number of pages pinned, or 0 if no pages were pinned.
*/
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 357b6047653a..aee6dba83d0e 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -193,6 +193,10 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
return 1;
}
+/*
+ * Note a difference from get_user_pages_fast(): this always returns the
+ * number of pages pinned, or 0 if no pages were pinned.
+ */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
{
diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile
index a6615d8864f7..dd0b5a92ffd0 100644
--- a/arch/sparc/vdso/Makefile
+++ b/arch/sparc/vdso/Makefile
@@ -29,9 +29,7 @@ vdso_img_cfiles := $(vdso_img-y:%=vdso-image-%.c)
vdso_img_sodbg := $(vdso_img-y:%=vdso%.so.dbg)
obj-y += $(vdso_img_objs)
targets += $(vdso_img_cfiles)
-targets += $(vdso_img_sodbg)
-.SECONDARY: $(vdso_img-y:%=$(obj)/vdso-image-%.c) \
- $(vdso_img-y:%=$(obj)/vdso%.so)
+targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
export CPPFLAGS_vdso.lds += -P -C
diff --git a/arch/um/Kconfig.net b/arch/um/Kconfig.net
index e871af24d9cd..c390f3deb0dc 100644
--- a/arch/um/Kconfig.net
+++ b/arch/um/Kconfig.net
@@ -109,6 +109,17 @@ config UML_NET_DAEMON
more than one without conflict. If you don't need UML networking,
say N.
+config UML_NET_VECTOR
+ bool "Vector I/O high performance network devices"
+ depends on UML_NET
+ help
+ This User-Mode Linux network driver uses multi-message send
+ and receive functions. The host running the UML guest must have
+ a Linux kernel version above 3.0 and a libc version > 2.13.
+ This driver provides tap, raw, GRE and L2TPv3 network transports
+ with up to 4 times higher network throughput than the UML network
+ drivers.
+
config UML_NET_VDE
bool "VDE transport"
depends on UML_NET
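
The vector_kern.c comment later in this patch stresses that configuration is key=value rather than positional. Using only the option names this patch actually parses (transport, mtu, depth, headroom, gro, vec), a kernel command line entry would look roughly like the line below; the "vec0:" prefix and the full set of accepted keys are defined by the setup/mconsole glue and the user-side code elsewhere in this series, so treat it purely as an illustration:

	vec0:transport=raw,mtu=1500,depth=128,gro=1
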
diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
index e7582e1d248c..16b3cebddafb 100644
--- a/arch/um/drivers/Makefile
+++ b/arch/um/drivers/Makefile
@@ -9,6 +9,7 @@
slip-objs := slip_kern.o slip_user.o
slirp-objs := slirp_kern.o slirp_user.o
daemon-objs := daemon_kern.o daemon_user.o
+vector-objs := vector_kern.o vector_user.o vector_transports.o
umcast-objs := umcast_kern.o umcast_user.o
net-objs := net_kern.o net_user.o
mconsole-objs := mconsole_kern.o mconsole_user.o
@@ -43,6 +44,7 @@ obj-$(CONFIG_STDERR_CONSOLE) += stderr_console.o
obj-$(CONFIG_UML_NET_SLIP) += slip.o slip_common.o
obj-$(CONFIG_UML_NET_SLIRP) += slirp.o slip_common.o
obj-$(CONFIG_UML_NET_DAEMON) += daemon.o
+obj-$(CONFIG_UML_NET_VECTOR) += vector.o
obj-$(CONFIG_UML_NET_VDE) += vde.o
obj-$(CONFIG_UML_NET_MCAST) += umcast.o
obj-$(CONFIG_UML_NET_PCAP) += pcap.o
@@ -61,7 +63,7 @@ obj-$(CONFIG_BLK_DEV_COW_COMMON) += cow_user.o
obj-$(CONFIG_UML_RANDOM) += random.o
# pcap_user.o must be added explicitly.
-USER_OBJS := fd.o null.o pty.o tty.o xterm.o slip_common.o pcap_user.o vde_user.o
+USER_OBJS := fd.o null.o pty.o tty.o xterm.o slip_common.o pcap_user.o vde_user.o vector_user.o
CFLAGS_null.o = -DDEV_NULL=$(DEV_NULL_PATH)
include arch/um/scripts/Makefile.rules
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index acbe6c67afba..05588f9466c7 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -171,56 +171,19 @@ int enable_chan(struct line *line)
return err;
}
-/* Items are added in IRQ context, when free_irq can't be called, and
- * removed in process context, when it can.
- * This handles interrupt sources which disappear, and which need to
- * be permanently disabled. This is discovered in IRQ context, but
- * the freeing of the IRQ must be done later.
- */
-static DEFINE_SPINLOCK(irqs_to_free_lock);
-static LIST_HEAD(irqs_to_free);
-
-void free_irqs(void)
-{
- struct chan *chan;
- LIST_HEAD(list);
- struct list_head *ele;
- unsigned long flags;
-
- spin_lock_irqsave(&irqs_to_free_lock, flags);
- list_splice_init(&irqs_to_free, &list);
- spin_unlock_irqrestore(&irqs_to_free_lock, flags);
-
- list_for_each(ele, &list) {
- chan = list_entry(ele, struct chan, free_list);
-
- if (chan->input && chan->enabled)
- um_free_irq(chan->line->driver->read_irq, chan);
- if (chan->output && chan->enabled)
- um_free_irq(chan->line->driver->write_irq, chan);
- chan->enabled = 0;
- }
-}
-
static void close_one_chan(struct chan *chan, int delay_free_irq)
{
- unsigned long flags;
-
if (!chan->opened)
return;
- if (delay_free_irq) {
- spin_lock_irqsave(&irqs_to_free_lock, flags);
- list_add(&chan->free_list, &irqs_to_free);
- spin_unlock_irqrestore(&irqs_to_free_lock, flags);
- }
- else {
- if (chan->input && chan->enabled)
- um_free_irq(chan->line->driver->read_irq, chan);
- if (chan->output && chan->enabled)
- um_free_irq(chan->line->driver->write_irq, chan);
- chan->enabled = 0;
- }
+ /* we can safely call free now - it will be marked
+ * as free and freed once the IRQ has stopped processing
+ */
+ if (chan->input && chan->enabled)
+ um_free_irq(chan->line->driver->read_irq, chan);
+ if (chan->output && chan->enabled)
+ um_free_irq(chan->line->driver->write_irq, chan);
+ chan->enabled = 0;
if (chan->ops->close != NULL)
(*chan->ops->close)(chan->fd, chan->data);
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 366e57f5e8d6..8d80b27502e6 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -284,7 +284,7 @@ int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
if (err)
return err;
if (output)
- err = um_request_irq(driver->write_irq, fd, IRQ_WRITE,
+ err = um_request_irq(driver->write_irq, fd, IRQ_NONE,
line_write_interrupt, IRQF_SHARED,
driver->write_irq_name, data);
return err;
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index b305f8247909..3ef1b48e064a 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -288,7 +288,7 @@ static void uml_net_user_timer_expire(struct timer_list *t)
#endif
}
-static void setup_etheraddr(struct net_device *dev, char *str)
+void uml_net_setup_etheraddr(struct net_device *dev, char *str)
{
unsigned char *addr = dev->dev_addr;
char *end;
@@ -412,7 +412,7 @@ static void eth_configure(int n, void *init, char *mac,
*/
snprintf(dev->name, sizeof(dev->name), "eth%d", n);
- setup_etheraddr(dev, mac);
+ uml_net_setup_etheraddr(dev, mac);
printk(KERN_INFO "Netdevice %d (%pM) : ", n, dev->dev_addr);
diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c
index 37c51a6be690..778a0e52d5a5 100644
--- a/arch/um/drivers/random.c
+++ b/arch/um/drivers/random.c
@@ -13,6 +13,7 @@
#include <linux/miscdevice.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
+#include <init.h>
#include <irq_kern.h>
#include <os.h>
@@ -154,7 +155,14 @@ err_out_cleanup_hw:
/*
* rng_cleanup - shutdown RNG module
*/
-static void __exit rng_cleanup (void)
+
+static void cleanup(void)
+{
+ free_irq_by_fd(random_fd);
+ os_close_file(random_fd);
+}
+
+static void __exit rng_cleanup(void)
{
os_close_file(random_fd);
misc_deregister (&rng_miscdev);
@@ -162,6 +170,7 @@ static void __exit rng_cleanup (void)
module_init (rng_init);
module_exit (rng_cleanup);
+__uml_exitcall(cleanup);
MODULE_DESCRIPTION("UML Host Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index b55fe9bf5d3e..d4e8c497ae86 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1587,11 +1587,11 @@ int io_thread(void *arg)
do {
res = os_write_file(kernel_fd, ((char *) io_req_buffer) + written, n);
- if (res > 0) {
+ if (res >= 0) {
written += res;
} else {
if (res != -EAGAIN) {
- printk("io_thread - read failed, fd = %d, "
+ printk("io_thread - write failed, fd = %d, "
"err = %d\n", kernel_fd, -n);
}
}
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
new file mode 100644
index 000000000000..02168fe25105
--- /dev/null
+++ b/arch/um/drivers/vector_kern.c
@@ -0,0 +1,1633 @@
+/*
+ * Copyright (C) 2017 - Cambridge Greys Limited
+ * Copyright (C) 2011 - 2014 Cisco Systems Inc
+ * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
+ * James Leu (jleu@mindspring.net).
+ * Copyright (C) 2001 by various other people who didn't put their name here.
+ * Licensed under the GPL.
+ */
+
+#include <linux/version.h>
+#include <linux/bootmem.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/inetdevice.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <init.h>
+#include <irq_kern.h>
+#include <irq_user.h>
+#include <net_kern.h>
+#include <os.h>
+#include "mconsole_kern.h"
+#include "vector_user.h"
+#include "vector_kern.h"
+
+/*
+ * Adapted from network devices with the following major changes:
+ * All transports are static - simplifies the code significantly
+ * Multiple FDs/IRQs per device
+ * Vector IO optionally used for read/write, falling back to legacy
+ * based on configuration and/or availability
+ * Configuration is no longer positional - L2TPv3 and GRE require up to
+ * 10 parameters; passing these positionally is not fit for purpose.
+ * Only socket transports are supported
+ */
+
+
+#define DRIVER_NAME "uml-vector"
+#define DRIVER_VERSION "01"
+struct vector_cmd_line_arg {
+ struct list_head list;
+ int unit;
+ char *arguments;
+};
+
+struct vector_device {
+ struct list_head list;
+ struct net_device *dev;
+ struct platform_device pdev;
+ int unit;
+ int opened;
+};
+
+static LIST_HEAD(vec_cmd_line);
+
+static DEFINE_SPINLOCK(vector_devices_lock);
+static LIST_HEAD(vector_devices);
+
+static int driver_registered;
+
+static void vector_eth_configure(int n, struct arglist *def);
+
+/* Argument accessors to set variables (and/or set default values)
+ * mtu, buffer sizing, default headroom, etc
+ */
+
+#define DEFAULT_HEADROOM 2
+#define SAFETY_MARGIN 32
+#define DEFAULT_VECTOR_SIZE 64
+#define TX_SMALL_PACKET 128
+#define MAX_IOV_SIZE (MAX_SKB_FRAGS + 1)
+
+static const struct {
+ const char string[ETH_GSTRING_LEN];
+} ethtool_stats_keys[] = {
+ { "rx_queue_max" },
+ { "rx_queue_running_average" },
+ { "tx_queue_max" },
+ { "tx_queue_running_average" },
+ { "rx_encaps_errors" },
+ { "tx_timeout_count" },
+ { "tx_restart_queue" },
+ { "tx_kicks" },
+ { "tx_flow_control_xon" },
+ { "tx_flow_control_xoff" },
+ { "rx_csum_offload_good" },
+ { "rx_csum_offload_errors"},
+ { "sg_ok"},
+ { "sg_linearized"},
+};
+
+#define VECTOR_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
+
+static void vector_reset_stats(struct vector_private *vp)
+{
+ vp->estats.rx_queue_max = 0;
+ vp->estats.rx_queue_running_average = 0;
+ vp->estats.tx_queue_max = 0;
+ vp->estats.tx_queue_running_average = 0;
+ vp->estats.rx_encaps_errors = 0;
+ vp->estats.tx_timeout_count = 0;
+ vp->estats.tx_restart_queue = 0;
+ vp->estats.tx_kicks = 0;
+ vp->estats.tx_flow_control_xon = 0;
+ vp->estats.tx_flow_control_xoff = 0;
+ vp->estats.sg_ok = 0;
+ vp->estats.sg_linearized = 0;
+}
+
+static int get_mtu(struct arglist *def)
+{
+ char *mtu = uml_vector_fetch_arg(def, "mtu");
+ long result;
+
+ if (mtu != NULL) {
+ if (kstrtoul(mtu, 10, &result) == 0)
+ return result;
+ }
+ return ETH_MAX_PACKET;
+}
+
+static int get_depth(struct arglist *def)
+{
+ char *mtu = uml_vector_fetch_arg(def, "depth");
+ long result;
+
+ if (mtu != NULL) {
+ if (kstrtoul(mtu, 10, &result) == 0)
+ return result;
+ }
+ return DEFAULT_VECTOR_SIZE;
+}
+
+static int get_headroom(struct arglist *def)
+{
+ char *mtu = uml_vector_fetch_arg(def, "headroom");
+ long result;
+
+ if (mtu != NULL) {
+ if (kstrtoul(mtu, 10, &result) == 0)
+ return result;
+ }
+ return DEFAULT_HEADROOM;
+}
+
+static int get_req_size(struct arglist *def)
+{
+ char *gro = uml_vector_fetch_arg(def, "gro");
+ long result;
+
+ if (gro != NULL) {
+ if (kstrtoul(gro, 10, &result) == 0) {
+ if (result > 0)
+ return 65536;
+ }
+ }
+ return get_mtu(def) + ETH_HEADER_OTHER +
+ get_headroom(def) + SAFETY_MARGIN;
+}
+
+
+static int get_transport_options(struct arglist *def)
+{
+ char *transport = uml_vector_fetch_arg(def, "transport");
+ char *vector = uml_vector_fetch_arg(def, "vec");
+
+ int vec_rx = VECTOR_RX;
+ int vec_tx = VECTOR_TX;
+ long parsed;
+
+ if (vector != NULL) {
+ if (kstrtoul(vector, 10, &parsed) == 0) {
+ if (parsed == 0) {
+ vec_rx = 0;
+ vec_tx = 0;
+ }
+ }
+ }
+
+
+ if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
+ return (vec_rx | VECTOR_BPF);
+ if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
+ return (vec_rx | vec_tx);
+ return (vec_rx | vec_tx);
+}
+
+
+/* A mini-buffer for packet-drop reads.
+ * All of our supported transports are datagram oriented and we always
+ * read using recvmsg or recvmmsg. If we pass a buffer which is smaller
+ * than the packet size, it still counts as a full packet read and will
+ * clean the incoming stream to keep sigio/epoll happy
+ */
+
+#define DROP_BUFFER_SIZE 32
+
+static char *drop_buffer;
+
+/* Array backed queues optimized for bulk enqueue/dequeue and
+ * 1:N (small values of N) or 1:1 enqueuer/dequeuer ratios.
+ * For more details and full design rationale see
+ * http://foswiki.cambridgegreys.com/Main/EatYourTailAndEnjoyIt
+ */
+
+
+/*
+ * Advance the mmsg queue head by n = advance. Resets the queue to
+ * maximum enqueue/dequeue-at-once capacity if possible. Called by
+ * dequeuers. Caller must hold the head_lock!
+ */
+
+static int vector_advancehead(struct vector_queue *qi, int advance)
+{
+ int queue_depth;
+
+ qi->head =
+ (qi->head + advance)
+ % qi->max_depth;
+
+
+ spin_lock(&qi->tail_lock);
+ qi->queue_depth -= advance;
+
+ /* we are at 0, use this to
+ * reset head and tail so we can use max size vectors
+ */
+
+ if (qi->queue_depth == 0) {
+ qi->head = 0;
+ qi->tail = 0;
+ }
+ queue_depth = qi->queue_depth;
+ spin_unlock(&qi->tail_lock);
+ return queue_depth;
+}
+
+/* Advance the queue tail by n = advance.
+ * This is called by enqueuers which should hold the
+ * tail lock already (the head lock is taken internally).
+ */
+
+static int vector_advancetail(struct vector_queue *qi, int advance)
+{
+ int queue_depth;
+
+ qi->tail =
+ (qi->tail + advance)
+ % qi->max_depth;
+ spin_lock(&qi->head_lock);
+ qi->queue_depth += advance;
+ queue_depth = qi->queue_depth;
+ spin_unlock(&qi->head_lock);
+ return queue_depth;
+}
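
A small worked example of the ring arithmetic in vector_advancehead()/vector_advancetail() above, using an assumed max_depth of 4 (the driver default is DEFAULT_VECTOR_SIZE, i.e. 64):

	/* Illustration only, max_depth = 4 assumed:
	 *
	 *   start:          head = 0, tail = 0, queue_depth = 0
	 *   enqueue 3:      tail = (0 + 3) % 4 = 3, queue_depth = 3
	 *   send/consume 2: head = (0 + 2) % 4 = 2, queue_depth = 1
	 *   send/consume 1: queue_depth hits 0, so head and tail are both
	 *                   reset to 0, restoring full-size vector capacity
	 *                   for the next burst.
	 */
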
+
+static int prep_msg(struct vector_private *vp,
+ struct sk_buff *skb,
+ struct iovec *iov)
+{
+ int iov_index = 0;
+ int nr_frags, frag;
+ skb_frag_t *skb_frag;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ if (nr_frags > MAX_IOV_SIZE) {
+ if (skb_linearize(skb) != 0)
+ goto drop;
+ }
+ if (vp->header_size > 0) {
+ iov[iov_index].iov_len = vp->header_size;
+ vp->form_header(iov[iov_index].iov_base, skb, vp);
+ iov_index++;
+ }
+ iov[iov_index].iov_base = skb->data;
+ if (nr_frags > 0) {
+ iov[iov_index].iov_len = skb->len - skb->data_len;
+ vp->estats.sg_ok++;
+ } else
+ iov[iov_index].iov_len = skb->len;
+ iov_index++;
+ for (frag = 0; frag < nr_frags; frag++) {
+ skb_frag = &skb_shinfo(skb)->frags[frag];
+ iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
+ iov[iov_index].iov_len = skb_frag_size(skb_frag);
+ iov_index++;
+ }
+ return iov_index;
+drop:
+ return -1;
+}
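
Spelling out the iovec layout that prep_msg() produces, for an assumed skb with two page fragments on a transport that uses an encapsulation header (illustrative, not taken from this patch):

	/* iov[0] -> per-message header buffer (vp->header_txbuffer for the
	 *           writev path, or the per-slot buffer allocated in
	 *           create_queue() for the vector path), header_size bytes,
	 *           filled by vp->form_header(); present only if header_size > 0
	 * iov[1] -> skb->data, skb->len - skb->data_len bytes (linear part)
	 * iov[2] -> skb_frag_address_safe(&frags[0]), skb_frag_size(&frags[0])
	 * iov[3] -> skb_frag_address_safe(&frags[1]), skb_frag_size(&frags[1])
	 *
	 * The returned count (4 here) becomes msg_iovlen for sendmmsg()/writev().
	 */
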
+/*
+ * Generic vector enqueue with support for forming headers using a
+ * transport-specific callback. Allows GRE, L2TPv3, RAW and other
+ * transports to use a common enqueue procedure in vector mode.
+ */
+
+static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb)
+{
+ struct vector_private *vp = netdev_priv(qi->dev);
+ int queue_depth;
+ int packet_len;
+ struct mmsghdr *mmsg_vector = qi->mmsg_vector;
+ int iov_count;
+
+ spin_lock(&qi->tail_lock);
+ spin_lock(&qi->head_lock);
+ queue_depth = qi->queue_depth;
+ spin_unlock(&qi->head_lock);
+
+ if (skb)
+ packet_len = skb->len;
+
+ if (queue_depth < qi->max_depth) {
+
+ *(qi->skbuff_vector + qi->tail) = skb;
+ mmsg_vector += qi->tail;
+ iov_count = prep_msg(
+ vp,
+ skb,
+ mmsg_vector->msg_hdr.msg_iov
+ );
+ if (iov_count < 1)
+ goto drop;
+ mmsg_vector->msg_hdr.msg_iovlen = iov_count;
+ mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr;
+ mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size;
+ queue_depth = vector_advancetail(qi, 1);
+ } else
+ goto drop;
+ spin_unlock(&qi->tail_lock);
+ return queue_depth;
+drop:
+ qi->dev->stats.tx_dropped++;
+ if (skb != NULL) {
+ packet_len = skb->len;
+ dev_consume_skb_any(skb);
+ netdev_completed_queue(qi->dev, 1, packet_len);
+ }
+ spin_unlock(&qi->tail_lock);
+ return queue_depth;
+}
+
+static int consume_vector_skbs(struct vector_queue *qi, int count)
+{
+ struct sk_buff *skb;
+ int skb_index;
+ int bytes_compl = 0;
+
+ for (skb_index = qi->head; skb_index < qi->head + count; skb_index++) {
+ skb = *(qi->skbuff_vector + skb_index);
+ /* mark as empty to ensure correct destruction if
+ * needed
+ */
+ bytes_compl += skb->len;
+ *(qi->skbuff_vector + skb_index) = NULL;
+ dev_consume_skb_any(skb);
+ }
+ qi->dev->stats.tx_bytes += bytes_compl;
+ qi->dev->stats.tx_packets += count;
+ netdev_completed_queue(qi->dev, count, bytes_compl);
+ return vector_advancehead(qi, count);
+}
+
+/*
+ * Generic vector dequeue via sendmmsg with support for forming headers
+ * using a transport-specific callback. Allows GRE, L2TPv3, RAW and
+ * other transports to use a common dequeue procedure in vector mode.
+ */
+
+
+static int vector_send(struct vector_queue *qi)
+{
+ struct vector_private *vp = netdev_priv(qi->dev);
+ struct mmsghdr *send_from;
+ int result = 0, send_len, queue_depth = qi->max_depth;
+
+ if (spin_trylock(&qi->head_lock)) {
+ if (spin_trylock(&qi->tail_lock)) {
+ /* update queue_depth to current value */
+ queue_depth = qi->queue_depth;
+ spin_unlock(&qi->tail_lock);
+ while (queue_depth > 0) {
+ /* Calculate the start of the vector */
+ send_len = queue_depth;
+ send_from = qi->mmsg_vector;
+ send_from += qi->head;
+ /* Adjust vector size if wraparound */
+ if (send_len + qi->head > qi->max_depth)
+ send_len = qi->max_depth - qi->head;
+ /* Try to TX as many packets as possible */
+ if (send_len > 0) {
+ result = uml_vector_sendmmsg(
+ vp->fds->tx_fd,
+ send_from,
+ send_len,
+ 0
+ );
+ vp->in_write_poll =
+ (result != send_len);
+ }
+ /* For some of the sendmmsg error scenarios
+ * we may end up being unsure of the TX success
+ * for all packets. It is safer to declare
+ * them all TX-ed and blame the network.
+ */
+ if (result < 0) {
+ if (net_ratelimit())
+ netdev_err(vp->dev, "sendmmsg err=%i\n",
+ result);
+ result = send_len;
+ }
+ if (result > 0) {
+ queue_depth =
+ consume_vector_skbs(qi, result);
+ /* This is equivalent to a TX IRQ.
+ * Restart the upper layers to feed us
+ * more packets.
+ */
+ if (result > vp->estats.tx_queue_max)
+ vp->estats.tx_queue_max = result;
+ vp->estats.tx_queue_running_average =
+ (vp->estats.tx_queue_running_average + result) >> 1;
+ }
+ netif_trans_update(qi->dev);
+ netif_wake_queue(qi->dev);
+ /* if TX is busy, break out of the send loop,
+ * poll write IRQ will reschedule xmit for us
+ */
+ if (result != send_len) {
+ vp->estats.tx_restart_queue++;
+ break;
+ }
+ }
+ }
+ spin_unlock(&qi->head_lock);
+ } else {
+ tasklet_schedule(&vp->tx_poll);
+ }
+ return queue_depth;
+}
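
The send loop above never hands a wrapped range to sendmmsg(); a worked example with assumed numbers:

	/* Assume max_depth = 8, head = 5, queue_depth = 4 (illustration only).
	 *
	 *   pass 1: send_len starts at 4, but head + send_len > max_depth,
	 *           so it is clipped to 8 - 5 = 3; entries 5..7 go out in one
	 *           sendmmsg() call and consume_vector_skbs() advances head to
	 *           (5 + 3) % 8 = 0 and queue_depth to 1.
	 *   pass 2: send_len = 1, entry 0 goes out, queue_depth reaches 0 and
	 *           vector_advancehead() resets head and tail to 0.
	 */
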
+
+/* Queue destructor. Deliberately stateless so we can use
+ * it in queue cleanup if initialization fails.
+ */
+
+static void destroy_queue(struct vector_queue *qi)
+{
+ int i;
+ struct iovec *iov;
+ struct vector_private *vp = netdev_priv(qi->dev);
+ struct mmsghdr *mmsg_vector;
+
+ if (qi == NULL)
+ return;
+ /* deallocate any skbuffs - we rely on any unused to be
+ * set to NULL.
+ */
+ if (qi->skbuff_vector != NULL) {
+ for (i = 0; i < qi->max_depth; i++) {
+ if (*(qi->skbuff_vector + i) != NULL)
+ dev_kfree_skb_any(*(qi->skbuff_vector + i));
+ }
+ kfree(qi->skbuff_vector);
+ }
+ /* deallocate matching IOV structures including header buffs */
+ if (qi->mmsg_vector != NULL) {
+ mmsg_vector = qi->mmsg_vector;
+ for (i = 0; i < qi->max_depth; i++) {
+ iov = mmsg_vector->msg_hdr.msg_iov;
+ if (iov != NULL) {
+ if ((vp->header_size > 0) &&
+ (iov->iov_base != NULL))
+ kfree(iov->iov_base);
+ kfree(iov);
+ }
+ mmsg_vector++;
+ }
+ kfree(qi->mmsg_vector);
+ }
+ kfree(qi);
+}
+
+/*
+ * Queue constructor. Create a queue with a given size.
+ */
+static struct vector_queue *create_queue(
+ struct vector_private *vp,
+ int max_size,
+ int header_size,
+ int num_extra_frags)
+{
+ struct vector_queue *result;
+ int i;
+ struct iovec *iov;
+ struct mmsghdr *mmsg_vector;
+
+ result = kmalloc(sizeof(struct vector_queue), GFP_KERNEL);
+ if (result == NULL)
+ goto out_fail;
+ result->max_depth = max_size;
+ result->dev = vp->dev;
+ result->mmsg_vector = kmalloc(
+ (sizeof(struct mmsghdr) * max_size), GFP_KERNEL);
+ result->skbuff_vector = kmalloc(
+ (sizeof(void *) * max_size), GFP_KERNEL);
+ if (result->mmsg_vector == NULL || result->skbuff_vector == NULL)
+ goto out_fail;
+
+ mmsg_vector = result->mmsg_vector;
+ for (i = 0; i < max_size; i++) {
+ /* Clear all pointers - we use non-NULL as a marker for
+ * what to free on destruction
+ */
+ *(result->skbuff_vector + i) = NULL;
+ mmsg_vector->msg_hdr.msg_iov = NULL;
+ mmsg_vector++;
+ }
+ mmsg_vector = result->mmsg_vector;
+ result->max_iov_frags = num_extra_frags;
+ for (i = 0; i < max_size; i++) {
+ if (vp->header_size > 0)
+ iov = kmalloc(
+ sizeof(struct iovec) * (3 + num_extra_frags),
+ GFP_KERNEL
+ );
+ else
+ iov = kmalloc(
+ sizeof(struct iovec) * (2 + num_extra_frags),
+ GFP_KERNEL
+ );
+ if (iov == NULL)
+ goto out_fail;
+ mmsg_vector->msg_hdr.msg_iov = iov;
+ mmsg_vector->msg_hdr.msg_iovlen = 1;
+ mmsg_vector->msg_hdr.msg_control = NULL;
+ mmsg_vector->msg_hdr.msg_controllen = 0;
+ mmsg_vector->msg_hdr.msg_flags = MSG_DONTWAIT;
+ mmsg_vector->msg_hdr.msg_name = NULL;
+ mmsg_vector->msg_hdr.msg_namelen = 0;
+ if (vp->header_size > 0) {
+ iov->iov_base = kmalloc(header_size, GFP_KERNEL);
+ if (iov->iov_base == NULL)
+ goto out_fail;
+ iov->iov_len = header_size;
+ mmsg_vector->msg_hdr.msg_iovlen = 2;
+ iov++;
+ }
+ iov->iov_base = NULL;
+ iov->iov_len = 0;
+ mmsg_vector++;
+ }
+ spin_lock_init(&result->head_lock);
+ spin_lock_init(&result->tail_lock);
+ result->queue_depth = 0;
+ result->head = 0;
+ result->tail = 0;
+ return result;
+out_fail:
+ destroy_queue(result);
+ return NULL;
+}
+
+/*
+ * We do not use the RX queue as a proper wraparound queue for now.
+ * This is not necessary because the consumption via netif_rx()
+ * happens in-line. While we could try using the return code of
+ * netif_rx() for flow control, there are no drivers doing this today.
+ * For this RX-specific use we ignore the tail/head locks and
+ * just read into a prepared queue filled with skbuffs.
+ */
+
+static struct sk_buff *prep_skb(
+ struct vector_private *vp,
+ struct user_msghdr *msg)
+{
+ int linear = vp->max_packet + vp->headroom + SAFETY_MARGIN;
+ struct sk_buff *result;
+ int iov_index = 0, len;
+ struct iovec *iov = msg->msg_iov;
+ int err, nr_frags, frag;
+ skb_frag_t *skb_frag;
+
+ if (vp->req_size <= linear)
+ len = linear;
+ else
+ len = vp->req_size;
+ result = alloc_skb_with_frags(
+ linear,
+ len - vp->max_packet,
+ 3,
+ &err,
+ GFP_ATOMIC
+ );
+ if (vp->header_size > 0)
+ iov_index++;
+ if (result == NULL) {
+ iov[iov_index].iov_base = NULL;
+ iov[iov_index].iov_len = 0;
+ goto done;
+ }
+ skb_reserve(result, vp->headroom);
+ result->dev = vp->dev;
+ skb_put(result, vp->max_packet);
+ result->data_len = len - vp->max_packet;
+ result->len += len - vp->max_packet;
+ skb_reset_mac_header(result);
+ result->ip_summed = CHECKSUM_NONE;
+ iov[iov_index].iov_base = result->data;
+ iov[iov_index].iov_len = vp->max_packet;
+ iov_index++;
+
+ nr_frags = skb_shinfo(result)->nr_frags;
+ for (frag = 0; frag < nr_frags; frag++) {
+ skb_frag = &skb_shinfo(result)->frags[frag];
+ iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
+ if (iov[iov_index].iov_base != NULL)
+ iov[iov_index].iov_len = skb_frag_size(skb_frag);
+ else
+ iov[iov_index].iov_len = 0;
+ iov_index++;
+ }
+done:
+ msg->msg_iovlen = iov_index;
+ return result;
+}
+
+
+/* Prepare queue for recvmmsg one-shot rx - fill with fresh sk_buffs*/
+
+static void prep_queue_for_rx(struct vector_queue *qi)
+{
+ struct vector_private *vp = netdev_priv(qi->dev);
+ struct mmsghdr *mmsg_vector = qi->mmsg_vector;
+ void **skbuff_vector = qi->skbuff_vector;
+ int i;
+
+ if (qi->queue_depth == 0)
+ return;
+ for (i = 0; i < qi->queue_depth; i++) {
+ /* it is OK if allocation fails - recvmmsg with NULL data in
+ * the iov argument still performs an RX, it just drops the packet.
+ * This allows us to stop faffing around with a "drop buffer".
+ */
+
+ *skbuff_vector = prep_skb(vp, &mmsg_vector->msg_hdr);
+ skbuff_vector++;
+ mmsg_vector++;
+ }
+ qi->queue_depth = 0;
+}
+
+static struct vector_device *find_device(int n)
+{
+ struct vector_device *device;
+ struct list_head *ele;
+
+ spin_lock(&vector_devices_lock);
+ list_for_each(ele, &vector_devices) {
+ device = list_entry(ele, struct vector_device, list);
+ if (device->unit == n)
+ goto out;
+ }
+ device = NULL;
+ out:
+ spin_unlock(&vector_devices_lock);
+ return device;
+}
+
+static int vector_parse(char *str, int *index_out, char **str_out,
+ char **error_out)
+{
+ int n, len, err;
+ char *start = str;
+
+ len = strlen(str);
+
+ while ((*str != ':') && (strlen(str) > 1))
+ str++;
+ if (*str != ':') {
+ *error_out = "Expected ':' after device number";
+ return -EINVAL;
+ }
+ *str = '\0';
+
+ err = kstrtouint(start, 0, &n);
+ if (err < 0) {
+ *error_out = "Bad device number";
+ return err;
+ }
+
+ str++;
+ if (find_device(n)) {
+ *error_out = "Device already configured";
+ return -EINVAL;
+ }
+
+ *index_out = n;
+ *str_out = str;
+ return 0;
+}
+
+static int vector_config(char *str, char **error_out)
+{
+ int err, n;
+ char *params;
+ struct arglist *parsed;
+
+ err = vector_parse(str, &n, &params, error_out);
+ if (err != 0)
+ return err;
+
+ /* This string is broken up and the pieces used by the underlying
+ * driver. We should copy it to make sure things do not go wrong
+ * later.
+ */
+
+ params = kstrdup(params, GFP_KERNEL);
+ if (params == NULL) {
+ *error_out = "vector_config failed to strdup string";
+ return -ENOMEM;
+ }
+
+ parsed = uml_parse_vector_ifspec(params);
+
+ if (parsed == NULL) {
+ *error_out = "vector_config failed to parse parameters";
+ return -EINVAL;
+ }
+
+ vector_eth_configure(n, parsed);
+ return 0;
+}
+
+static int vector_id(char **str, int *start_out, int *end_out)
+{
+ char *end;
+ int n;
+
+ n = simple_strtoul(*str, &end, 0);
+ if ((*end != '\0') || (end == *str))
+ return -1;
+
+ *start_out = n;
+ *end_out = n;
+ *str = end;
+ return n;
+}
+
+static int vector_remove(int n, char **error_out)
+{
+ struct vector_device *vec_d;
+ struct net_device *dev;
+ struct vector_private *vp;
+
+ vec_d = find_device(n);
+ if (vec_d == NULL)
+ return -ENODEV;
+ dev = vec_d->dev;
+ vp = netdev_priv(dev);
+ if (vp->fds != NULL)
+ return -EBUSY;
+ unregister_netdev(dev);
+ platform_device_unregister(&vec_d->pdev);
+ return 0;
+}
+
+/*
+ * There is no shared per-transport initialization code, so
+ * we will just initialize each interface one by one and
+ * add them to a list
+ */
+
+static struct platform_driver uml_net_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+
+static void vector_device_release(struct device *dev)
+{
+ struct vector_device *device = dev_get_drvdata(dev);
+ struct net_device *netdev = device->dev;
+
+ list_del(&device->list);
+ kfree(device);
+ free_netdev(netdev);
+}
+
+/* Bog standard recv using recvmsg - not used normally unless the user
+ * explicitly specifies not to use recvmmsg vector RX.
+ */
+
+static int vector_legacy_rx(struct vector_private *vp)
+{
+ int pkt_len;
+ struct user_msghdr hdr;
+ struct iovec iov[2 + MAX_IOV_SIZE]; /* header + data use case only */
+ int iovpos = 0;
+ struct sk_buff *skb;
+ int header_check;
+
+ hdr.msg_name = NULL;
+ hdr.msg_namelen = 0;
+ hdr.msg_iov = (struct iovec *) &iov;
+ hdr.msg_control = NULL;
+ hdr.msg_controllen = 0;
+ hdr.msg_flags = 0;
+
+ if (vp->header_size > 0) {
+ iov[0].iov_base = vp->header_rxbuffer;
+ iov[0].iov_len = vp->header_size;
+ }
+
+ skb = prep_skb(vp, &hdr);
+
+ if (skb == NULL) {
+ /* Read a packet into drop_buffer and don't do
+ * anything with it.
+ */
+ iov[iovpos].iov_base = drop_buffer;
+ iov[iovpos].iov_len = DROP_BUFFER_SIZE;
+ hdr.msg_iovlen = 1;
+ vp->dev->stats.rx_dropped++;
+ }
+
+ pkt_len = uml_vector_recvmsg(vp->fds->rx_fd, &hdr, 0);
+
+ if (skb != NULL) {
+ if (pkt_len > vp->header_size) {
+ if (vp->header_size > 0) {
+ header_check = vp->verify_header(
+ vp->header_rxbuffer, skb, vp);
+ if (header_check < 0) {
+ dev_kfree_skb_irq(skb);
+ vp->dev->stats.rx_dropped++;
+ vp->estats.rx_encaps_errors++;
+ return 0;
+ }
+ if (header_check > 0) {
+ vp->estats.rx_csum_offload_good++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ }
+ pskb_trim(skb, pkt_len - vp->rx_header_size);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ vp->dev->stats.rx_bytes += skb->len;
+ vp->dev->stats.rx_packets++;
+ netif_rx(skb);
+ } else {
+ dev_kfree_skb_irq(skb);
+ }
+ }
+ return pkt_len;
+}
+
+/*
+ * Packet at a time TX which falls back to vector TX if the
+ * underlying transport is busy.
+ */
+
+
+
+static int writev_tx(struct vector_private *vp, struct sk_buff *skb)
+{
+ struct iovec iov[3 + MAX_IOV_SIZE];
+ int iov_count, pkt_len = 0;
+
+ iov[0].iov_base = vp->header_txbuffer;
+ iov_count = prep_msg(vp, skb, (struct iovec *) &iov);
+
+ if (iov_count < 1)
+ goto drop;
+ pkt_len = uml_vector_writev(
+ vp->fds->tx_fd,
+ (struct iovec *) &iov,
+ iov_count
+ );
+
+ netif_trans_update(vp->dev);
+ netif_wake_queue(vp->dev);
+
+ if (pkt_len > 0) {
+ vp->dev->stats.tx_bytes += skb->len;
+ vp->dev->stats.tx_packets++;
+ } else {
+ vp->dev->stats.tx_dropped++;
+ }
+ consume_skb(skb);
+ return pkt_len;
+drop:
+ vp->dev->stats.tx_dropped++;
+ consume_skb(skb);
+ return pkt_len;
+}
+
+/*
+ * Receive as many messages as we can in one call using the special
+ * mmsg vector matched to an skb vector which we prepared earlier.
+ */
+
+static int vector_mmsg_rx(struct vector_private *vp)
+{
+ int packet_count, i;
+ struct vector_queue *qi = vp->rx_queue;
+ struct sk_buff *skb;
+ struct mmsghdr *mmsg_vector = qi->mmsg_vector;
+ void **skbuff_vector = qi->skbuff_vector;
+ int header_check;
+
+ /* Refresh the vector so it is populated with fresh skbs and the
+ * iovs are updated to point to them.
+ */
+
+ prep_queue_for_rx(qi);
+
+ /* Fire the Lazy Gun - get as many packets as we can in one go. */
+
+ packet_count = uml_vector_recvmmsg(
+ vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0);
+
+ if (packet_count <= 0)
+ return packet_count;
+
+ /* We treat packet processing as enqueue and buffer refresh as dequeue.
+ * The queue_depth tells us how many buffers have been used and how
+ * many we need to prep the next time prep_queue_for_rx() is called.
+ */
+
+ qi->queue_depth = packet_count;
+
+ for (i = 0; i < packet_count; i++) {
+ skb = (*skbuff_vector);
+ if (mmsg_vector->msg_len > vp->header_size) {
+ if (vp->header_size > 0) {
+ header_check = vp->verify_header(
+ mmsg_vector->msg_hdr.msg_iov->iov_base,
+ skb,
+ vp
+ );
+ if (header_check < 0) {
+ /* Overlay header failed to verify - discard.
+ * We can actually keep this skb and reuse it,
+ * but that will make the prep logic too
+ * complex.
+ */
+ dev_kfree_skb_irq(skb);
+ vp->estats.rx_encaps_errors++;
+ continue;
+ }
+ if (header_check > 0) {
+ vp->estats.rx_csum_offload_good++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ }
+ pskb_trim(skb,
+ mmsg_vector->msg_len - vp->rx_header_size);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ /*
+ * We do not need to lock on updating stats here
+ * The interrupt loop is non-reentrant.
+ */
+ vp->dev->stats.rx_bytes += skb->len;
+ vp->dev->stats.rx_packets++;
+ netif_rx(skb);
+ } else {
+ /* Overlay header too short to do anything - discard.
+ * We can actually keep this skb and reuse it,
+ * but that will make the prep logic too complex.
+ */
+ if (skb != NULL)
+ dev_kfree_skb_irq(skb);
+ }
+ (*skbuff_vector) = NULL;
+ /* Move to the next buffer element */
+ mmsg_vector++;
+ skbuff_vector++;
+ }
+ if (packet_count > 0) {
+ if (vp->estats.rx_queue_max < packet_count)
+ vp->estats.rx_queue_max = packet_count;
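+ /* Cheap exponential moving average - each pass halves the weight
+ * of the previous running average and adds half of the new batch.
+ */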
+ vp->estats.rx_queue_running_average =
+ (vp->estats.rx_queue_running_average + packet_count) >> 1;
+ }
+ return packet_count;
+}
+
+static void vector_rx(struct vector_private *vp)
+{
+ int err;
+
+ if ((vp->options & VECTOR_RX) > 0)
+ while ((err = vector_mmsg_rx(vp)) > 0)
+ ;
+ else
+ while ((err = vector_legacy_rx(vp)) > 0)
+ ;
+ if ((err != 0) && net_ratelimit())
+ netdev_err(vp->dev, "vector_rx: error(%d)\n", err);
+}
+
+static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct vector_private *vp = netdev_priv(dev);
+ int queue_depth = 0;
+
+ if ((vp->options & VECTOR_TX) == 0) {
+ writev_tx(vp, skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* We do BQL only in the vector path, no point doing it in
+ * packet at a time mode as there is no device queue
+ */
+
+ netdev_sent_queue(vp->dev, skb->len);
+ queue_depth = vector_enqueue(vp->tx_queue, skb);
+
+ /* if the device queue is full, stop the upper layers and
+ * flush it.
+ */
+
+ if (queue_depth >= vp->tx_queue->max_depth - 1) {
+ vp->estats.tx_kicks++;
+ netif_stop_queue(dev);
+ vector_send(vp->tx_queue);
+ return NETDEV_TX_OK;
+ }
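+ /* More frames are queued behind this one in the stack (xmit_more) -
+ * defer the flush and rely on the coalesce timer as a backstop.
+ */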
+ if (skb->xmit_more) {
+ mod_timer(&vp->tl, jiffies + vp->coalesce);
+ return NETDEV_TX_OK;
+ }
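+ /* Small packets are usually latency sensitive - flush the queue
+ * right away; batch larger ones via the TX tasklet.
+ */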
+ if (skb->len < TX_SMALL_PACKET) {
+ vp->estats.tx_kicks++;
+ vector_send(vp->tx_queue);
+ } else
+ tasklet_schedule(&vp->tx_poll);
+ return NETDEV_TX_OK;
+}
+
+static irqreturn_t vector_rx_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct vector_private *vp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return IRQ_NONE;
+ vector_rx(vp);
+ return IRQ_HANDLED;
+
+}
+
+static irqreturn_t vector_tx_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct vector_private *vp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return IRQ_NONE;
+ /* We need to pay attention to this only if we got
+ * -EAGAIN or -ENOBUFS from sendmmsg; otherwise
+ * we ignore it. In the future, it may be worth
+ * improving the IRQ controller a bit to make
+ * tweaking the IRQ mask less costly.
+ */
+
+ if (vp->in_write_poll)
+ tasklet_schedule(&vp->tx_poll);
+ return IRQ_HANDLED;
+
+}
+
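+/* Round-robin index into the vector IRQ range; rx/tx IRQs are handed
+ * out as irq_rr + VECTOR_BASE_IRQ, modulo VECTOR_IRQ_SPACE, as
+ * interfaces are opened.
+ */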
+static int irq_rr;
+
+static int vector_net_close(struct net_device *dev)
+{
+ struct vector_private *vp = netdev_priv(dev);
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+ del_timer(&vp->tl);
+
+ if (vp->fds == NULL)
+ return 0;
+
+ /* Disable and free all IRQs */
+ if (vp->rx_irq > 0) {
+ um_free_irq(vp->rx_irq, dev);
+ vp->rx_irq = 0;
+ }
+ if (vp->tx_irq > 0) {
+ um_free_irq(vp->tx_irq, dev);
+ vp->tx_irq = 0;
+ }
+ tasklet_kill(&vp->tx_poll);
+ if (vp->fds->rx_fd > 0) {
+ os_close_file(vp->fds->rx_fd);
+ vp->fds->rx_fd = -1;
+ }
+ if (vp->fds->tx_fd > 0) {
+ os_close_file(vp->fds->tx_fd);
+ vp->fds->tx_fd = -1;
+ }
+ kfree(vp->bpf);
+ kfree(vp->fds->remote_addr);
+ kfree(vp->transport_data);
+ kfree(vp->header_rxbuffer);
+ kfree(vp->header_txbuffer);
+ if (vp->rx_queue != NULL)
+ destroy_queue(vp->rx_queue);
+ if (vp->tx_queue != NULL)
+ destroy_queue(vp->tx_queue);
+ kfree(vp->fds);
+ vp->fds = NULL;
+ spin_lock_irqsave(&vp->lock, flags);
+ vp->opened = false;
+ spin_unlock_irqrestore(&vp->lock, flags);
+ return 0;
+}
+
+/* TX tasklet */
+
+static void vector_tx_poll(unsigned long data)
+{
+ struct vector_private *vp = (struct vector_private *)data;
+
+ vp->estats.tx_kicks++;
+ vector_send(vp->tx_queue);
+}
+static void vector_reset_tx(struct work_struct *work)
+{
+ struct vector_private *vp =
+ container_of(work, struct vector_private, reset_tx);
+ netdev_reset_queue(vp->dev);
+ netif_start_queue(vp->dev);
+ netif_wake_queue(vp->dev);
+}
+static int vector_net_open(struct net_device *dev)
+{
+ struct vector_private *vp = netdev_priv(dev);
+ unsigned long flags;
+ int err = -EINVAL;
+ struct vector_device *vdevice;
+
+ spin_lock_irqsave(&vp->lock, flags);
+ if (vp->opened) {
+ spin_unlock_irqrestore(&vp->lock, flags);
+ return -ENXIO;
+ }
+ vp->opened = true;
+ spin_unlock_irqrestore(&vp->lock, flags);
+
+ vp->fds = uml_vector_user_open(vp->unit, vp->parsed);
+
+ if (vp->fds == NULL)
+ goto out_close;
+
+ if (build_transport_data(vp) < 0)
+ goto out_close;
+
+ if ((vp->options & VECTOR_RX) > 0) {
+ vp->rx_queue = create_queue(
+ vp,
+ get_depth(vp->parsed),
+ vp->rx_header_size,
+ MAX_IOV_SIZE
+ );
+ if (vp->rx_queue == NULL)
+ goto out_close;
+ vp->rx_queue->queue_depth = get_depth(vp->parsed);
+ } else {
+ vp->header_rxbuffer = kmalloc(
+ vp->rx_header_size,
+ GFP_KERNEL
+ );
+ if (vp->header_rxbuffer == NULL)
+ goto out_close;
+ }
+ if ((vp->options & VECTOR_TX) > 0) {
+ vp->tx_queue = create_queue(
+ vp,
+ get_depth(vp->parsed),
+ vp->header_size,
+ MAX_IOV_SIZE
+ );
+ if (vp->tx_queue == NULL)
+ goto out_close;
+ } else {
+ vp->header_txbuffer = kmalloc(vp->header_size, GFP_KERNEL);
+ if (vp->header_txbuffer == NULL)
+ goto out_close;
+ }
+
+ /* READ IRQ */
+ err = um_request_irq(
+ irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd,
+ IRQ_READ, vector_rx_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (err != 0) {
+ netdev_err(dev, "vector_open: failed to get rx irq(%d)\n", err);
+ err = -ENETUNREACH;
+ goto out_close;
+ }
+ vp->rx_irq = irq_rr + VECTOR_BASE_IRQ;
+ dev->irq = irq_rr + VECTOR_BASE_IRQ;
+ irq_rr = (irq_rr + 1) % VECTOR_IRQ_SPACE;
+
+ /* WRITE IRQ - we need it only if we have vector TX */
+ if ((vp->options & VECTOR_TX) > 0) {
+ err = um_request_irq(
+ irq_rr + VECTOR_BASE_IRQ, vp->fds->tx_fd,
+ IRQ_WRITE, vector_tx_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (err != 0) {
+ netdev_err(dev,
+ "vector_open: failed to get tx irq(%d)\n", err);
+ err = -ENETUNREACH;
+ goto out_close;
+ }
+ vp->tx_irq = irq_rr + VECTOR_BASE_IRQ;
+ irq_rr = (irq_rr + 1) % VECTOR_IRQ_SPACE;
+ }
+
+ if ((vp->options & VECTOR_QDISC_BYPASS) != 0) {
+ if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd))
+ vp->options = vp->options | VECTOR_BPF;
+ }
+
+ if ((vp->options & VECTOR_BPF) != 0)
+ vp->bpf = uml_vector_default_bpf(vp->fds->rx_fd, dev->dev_addr);
+
+ netif_start_queue(dev);
+
+ /* clear buffer - it can happen that the host side of the interface
+ * is full when we get here. In this case, new data is never queued,
+ * SIGIOs never arrive, and the net never works.
+ */
+
+ vector_rx(vp);
+
+ vector_reset_stats(vp);
+ vdevice = find_device(vp->unit);
+ vdevice->opened = 1;
+
+ if ((vp->options & VECTOR_TX) != 0)
+ add_timer(&vp->tl);
+ return 0;
+out_close:
+ vector_net_close(dev);
+ return err;
+}
+
+
+static void vector_net_set_multicast_list(struct net_device *dev)
+{
+ /* TODO: we could do some BPF games here */
+}
+
+static void vector_net_tx_timeout(struct net_device *dev)
+{
+ struct vector_private *vp = netdev_priv(dev);
+
+ vp->estats.tx_timeout_count++;
+ netif_trans_update(dev);
+ schedule_work(&vp->reset_tx);
+}
+
+static netdev_features_t vector_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
+ return features;
+}
+
+static int vector_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct vector_private *vp = netdev_priv(dev);
+ /* Adjust buffer sizes for GSO/GRO. Unfortunately, there is
+ * no way to negotiate it on raw sockets, so we can change
+ * only our side.
+ */
+ if (features & NETIF_F_GRO)
+ /* All new frame buffers will be GRO-sized */
+ vp->req_size = 65536;
+ else
+ /* All new frame buffers will be normal sized */
+ vp->req_size = vp->max_packet + vp->headroom + SAFETY_MARGIN;
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void vector_net_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ vector_rx_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static void vector_net_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+}
+
+static void vector_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct vector_private *vp = netdev_priv(netdev);
+
+ ring->rx_max_pending = vp->rx_queue->max_depth;
+ ring->tx_max_pending = vp->tx_queue->max_depth;
+ ring->rx_pending = vp->rx_queue->max_depth;
+ ring->tx_pending = vp->tx_queue->max_depth;
+}
+
+static void vector_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_TEST:
+ *buf = '\0';
+ break;
+ case ETH_SS_STATS:
+ memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static int vector_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return 0;
+ case ETH_SS_STATS:
+ return VECTOR_NUM_STATS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void vector_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *estats,
+ u64 *tmp_stats)
+{
+ struct vector_private *vp = netdev_priv(dev);
+
+ memcpy(tmp_stats, &vp->estats, sizeof(struct vector_estats));
+}
+
+static int vector_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct vector_private *vp = netdev_priv(netdev);
+
+ ec->tx_coalesce_usecs = (vp->coalesce * 1000000) / HZ;
+ return 0;
+}
+
+static int vector_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct vector_private *vp = netdev_priv(netdev);
+
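+ /* vp->coalesce is kept in jiffies; convert from the usec value
+ * supplied by ethtool and clamp it to at least one jiffy.
+ */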
+ vp->coalesce = (ec->tx_coalesce_usecs * HZ) / 1000000;
+ if (vp->coalesce == 0)
+ vp->coalesce = 1;
+ return 0;
+}
+
+static const struct ethtool_ops vector_net_ethtool_ops = {
+ .get_drvinfo = vector_net_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_ringparam = vector_get_ringparam,
+ .get_strings = vector_get_strings,
+ .get_sset_count = vector_get_sset_count,
+ .get_ethtool_stats = vector_get_ethtool_stats,
+ .get_coalesce = vector_get_coalesce,
+ .set_coalesce = vector_set_coalesce,
+};
+
+
+static const struct net_device_ops vector_netdev_ops = {
+ .ndo_open = vector_net_open,
+ .ndo_stop = vector_net_close,
+ .ndo_start_xmit = vector_net_start_xmit,
+ .ndo_set_rx_mode = vector_net_set_multicast_list,
+ .ndo_tx_timeout = vector_net_tx_timeout,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_fix_features = vector_fix_features,
+ .ndo_set_features = vector_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = vector_net_poll_controller,
+#endif
+};
+
+
+static void vector_timer_expire(struct timer_list *t)
+{
+ struct vector_private *vp = from_timer(vp, t, tl);
+
+ vp->estats.tx_kicks++;
+ vector_send(vp->tx_queue);
+}
+
+static void vector_eth_configure(
+ int n,
+ struct arglist *def
+ )
+{
+ struct vector_device *device;
+ struct net_device *dev;
+ struct vector_private *vp;
+ int err;
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (device == NULL) {
+ printk(KERN_ERR "eth_configure failed to allocate struct "
+ "vector_device\n");
+ return;
+ }
+ dev = alloc_etherdev(sizeof(struct vector_private));
+ if (dev == NULL) {
+ printk(KERN_ERR "eth_configure: failed to allocate struct "
+ "net_device for vec%d\n", n);
+ goto out_free_device;
+ }
+
+ dev->mtu = get_mtu(def);
+
+ INIT_LIST_HEAD(&device->list);
+ device->unit = n;
+
+ /* If this name ends up conflicting with an existing registered
+ * netdevice, that is OK, register_netdev{,ice}() will notice this
+ * and fail.
+ */
+ snprintf(dev->name, sizeof(dev->name), "vec%d", n);
+ uml_net_setup_etheraddr(dev, uml_vector_fetch_arg(def, "mac"));
+ vp = netdev_priv(dev);
+
+ /* sysfs register */
+ if (!driver_registered) {
+ platform_driver_register(&uml_net_driver);
+ driver_registered = 1;
+ }
+ device->pdev.id = n;
+ device->pdev.name = DRIVER_NAME;
+ device->pdev.dev.release = vector_device_release;
+ dev_set_drvdata(&device->pdev.dev, device);
+ if (platform_device_register(&device->pdev))
+ goto out_free_netdev;
+ SET_NETDEV_DEV(dev, &device->pdev.dev);
+
+ device->dev = dev;
+
+ *vp = ((struct vector_private)
+ {
+ .list = LIST_HEAD_INIT(vp->list),
+ .dev = dev,
+ .unit = n,
+ .options = get_transport_options(def),
+ .rx_irq = 0,
+ .tx_irq = 0,
+ .parsed = def,
+ .max_packet = get_mtu(def) + ETH_HEADER_OTHER,
+ /* TODO - we need to calculate headroom so that ip header
+ * is 16 byte aligned all the time
+ */
+ .headroom = get_headroom(def),
+ .form_header = NULL,
+ .verify_header = NULL,
+ .header_rxbuffer = NULL,
+ .header_txbuffer = NULL,
+ .header_size = 0,
+ .rx_header_size = 0,
+ .rexmit_scheduled = false,
+ .opened = false,
+ .transport_data = NULL,
+ .in_write_poll = false,
+ .coalesce = 2,
+ .req_size = get_req_size(def)
+ });
+
+ dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST);
+ tasklet_init(&vp->tx_poll, vector_tx_poll, (unsigned long)vp);
+ INIT_WORK(&vp->reset_tx, vector_reset_tx);
+
+ timer_setup(&vp->tl, vector_timer_expire, 0);
+ spin_lock_init(&vp->lock);
+
+ /* FIXME */
+ dev->netdev_ops = &vector_netdev_ops;
+ dev->ethtool_ops = &vector_net_ethtool_ops;
+ dev->watchdog_timeo = (HZ >> 1);
+ /* primary IRQ - fixme */
+ dev->irq = 0; /* we will adjust this once opened */
+
+ rtnl_lock();
+ err = register_netdevice(dev);
+ rtnl_unlock();
+ if (err)
+ goto out_undo_user_init;
+
+ spin_lock(&vector_devices_lock);
+ list_add(&device->list, &vector_devices);
+ spin_unlock(&vector_devices_lock);
+
+ return;
+
+out_undo_user_init:
+ platform_device_unregister(&device->pdev);
+ return;
+out_free_netdev:
+ free_netdev(dev);
+out_free_device:
+ kfree(device);
+}
+
+
+
+
+/*
+ * Invoked late in the init
+ */
+
+static int __init vector_init(void)
+{
+ struct list_head *ele;
+ struct vector_cmd_line_arg *def;
+ struct arglist *parsed;
+
+ list_for_each(ele, &vec_cmd_line) {
+ def = list_entry(ele, struct vector_cmd_line_arg, list);
+ parsed = uml_parse_vector_ifspec(def->arguments);
+ if (parsed != NULL)
+ vector_eth_configure(def->unit, parsed);
+ }
+ return 0;
+}
+
+
+/* Invoked at initial argument parsing, only stores
+ * arguments until a proper vector_init is called
+ * later
+ */
+
+static int __init vector_setup(char *str)
+{
+ char *error;
+ int n, err;
+ struct vector_cmd_line_arg *new;
+
+ err = vector_parse(str, &n, &str, &error);
+ if (err) {
+ printk(KERN_ERR "vector_setup - Couldn't parse '%s' : %s\n",
+ str, error);
+ return 1;
+ }
+ new = alloc_bootmem(sizeof(*new));
+ INIT_LIST_HEAD(&new->list);
+ new->unit = n;
+ new->arguments = str;
+ list_add_tail(&new->list, &vec_cmd_line);
+ return 1;
+}
+
+__setup("vec", vector_setup);
+__uml_help(vector_setup,
+"vec[0-9]+:<option>=<value>,<option>=<value>\n"
+" Configure a vector io network device.\n\n"
+);
+
+late_initcall(vector_init);
+
+static struct mc_device vector_mc = {
+ .list = LIST_HEAD_INIT(vector_mc.list),
+ .name = "vec",
+ .config = vector_config,
+ .get_config = NULL,
+ .id = vector_id,
+ .remove = vector_remove,
+};
+
+#ifdef CONFIG_INET
+static int vector_inetaddr_event(
+ struct notifier_block *this,
+ unsigned long event,
+ void *ptr)
+{
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block vector_inetaddr_notifier = {
+ .notifier_call = vector_inetaddr_event,
+};
+
+static void inet_register(void)
+{
+ register_inetaddr_notifier(&vector_inetaddr_notifier);
+}
+#else
+static inline void inet_register(void)
+{
+}
+#endif
+
+static int vector_net_init(void)
+{
+ mconsole_register_dev(&vector_mc);
+ inet_register();
+ return 0;
+}
+
+__initcall(vector_net_init);
+
+
+
diff --git a/arch/um/drivers/vector_kern.h b/arch/um/drivers/vector_kern.h
new file mode 100644
index 000000000000..0b0a767b9076
--- /dev/null
+++ b/arch/um/drivers/vector_kern.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2002 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_VECTOR_KERN_H
+#define __UM_VECTOR_KERN_H
+
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include "vector_user.h"
+
+/* Queue structure specially adapted for multiple enqueue/dequeue
+ * in a recvmmsg/sendmmsg context
+ */
+
+/* Dequeue method */
+
+#define QUEUE_SENDMSG 0
+#define QUEUE_SENDMMSG 1
+
+#define VECTOR_RX 1
+#define VECTOR_TX (1 << 1)
+#define VECTOR_BPF (1 << 2)
+#define VECTOR_QDISC_BYPASS (1 << 3)
+
+#define ETH_MAX_PACKET 1500
+#define ETH_HEADER_OTHER 32 /* just in case someone decides to go mad on QinQ */
+
+struct vector_queue {
+ struct mmsghdr *mmsg_vector;
+ void **skbuff_vector;
+ /* backlink to device which owns us */
+ struct net_device *dev;
+ spinlock_t head_lock;
+ spinlock_t tail_lock;
+ int queue_depth, head, tail, max_depth, max_iov_frags;
+ short options;
+};
+
+struct vector_estats {
+ uint64_t rx_queue_max;
+ uint64_t rx_queue_running_average;
+ uint64_t tx_queue_max;
+ uint64_t tx_queue_running_average;
+ uint64_t rx_encaps_errors;
+ uint64_t tx_timeout_count;
+ uint64_t tx_restart_queue;
+ uint64_t tx_kicks;
+ uint64_t tx_flow_control_xon;
+ uint64_t tx_flow_control_xoff;
+ uint64_t rx_csum_offload_good;
+ uint64_t rx_csum_offload_errors;
+ uint64_t sg_ok;
+ uint64_t sg_linearized;
+};
+
+#define VERIFY_HEADER_NOK -1
+#define VERIFY_HEADER_OK 0
+#define VERIFY_CSUM_OK 1
+
+struct vector_private {
+ struct list_head list;
+ spinlock_t lock;
+ struct net_device *dev;
+
+ int unit;
+
+ /* Timeout timer in TX */
+
+ struct timer_list tl;
+
+ /* Scheduled "reset the TX queue" work */
+ struct work_struct reset_tx;
+ struct vector_fds *fds;
+
+ struct vector_queue *rx_queue;
+ struct vector_queue *tx_queue;
+
+ int rx_irq;
+ int tx_irq;
+
+ struct arglist *parsed;
+
+ void *transport_data; /* transport specific params if needed */
+
+ int max_packet;
+ int req_size; /* different from max packet - used for TSO */
+ int headroom;
+
+ int options;
+
+ /* remote address if any - some transports will leave this as null */
+
+ int header_size;
+ int rx_header_size;
+ int coalesce;
+
+ void *header_rxbuffer;
+ void *header_txbuffer;
+
+ int (*form_header)(uint8_t *header,
+ struct sk_buff *skb, struct vector_private *vp);
+ int (*verify_header)(uint8_t *header,
+ struct sk_buff *skb, struct vector_private *vp);
+
+ spinlock_t stats_lock;
+
+ struct tasklet_struct tx_poll;
+ bool rexmit_scheduled;
+ bool opened;
+ bool in_write_poll;
+
+ /* ethtool stats */
+
+ struct vector_estats estats;
+ void *bpf;
+
+ char user[0];
+};
+
+extern int build_transport_data(struct vector_private *vp);
+
+#endif
diff --git a/arch/um/drivers/vector_transports.c b/arch/um/drivers/vector_transports.c
new file mode 100644
index 000000000000..9065047f844b
--- /dev/null
+++ b/arch/um/drivers/vector_transports.c
@@ -0,0 +1,458 @@
+/*
+ * Copyright (C) 2017 - Cambridge Greys Limited
+ * Copyright (C) 2011 - 2014 Cisco Systems Inc
+ * Licensed under the GPL.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/virtio_net.h>
+#include <linux/virtio_net.h>
+#include <linux/virtio_byteorder.h>
+#include <linux/netdev_features.h>
+#include "vector_user.h"
+#include "vector_kern.h"
+
+#define GOOD_LINEAR 512
+#define GSO_ERROR "Incoming GSO frames and GRO disabled on the interface"
+
+struct gre_minimal_header {
+ uint16_t header;
+ uint16_t arptype;
+};
+
+
+struct uml_gre_data {
+ uint32_t rx_key;
+ uint32_t tx_key;
+ uint32_t sequence;
+
+ bool ipv6;
+ bool has_sequence;
+ bool pin_sequence;
+ bool checksum;
+ bool key;
+ struct gre_minimal_header expected_header;
+
+ uint32_t checksum_offset;
+ uint32_t key_offset;
+ uint32_t sequence_offset;
+
+};
+
+struct uml_l2tpv3_data {
+ uint64_t rx_cookie;
+ uint64_t tx_cookie;
+ uint64_t rx_session;
+ uint64_t tx_session;
+ uint32_t counter;
+
+ bool udp;
+ bool ipv6;
+ bool has_counter;
+ bool pin_counter;
+ bool cookie;
+ bool cookie_is_64;
+
+ uint32_t cookie_offset;
+ uint32_t session_offset;
+ uint32_t counter_offset;
+};
+
+static int l2tpv3_form_header(uint8_t *header,
+ struct sk_buff *skb, struct vector_private *vp)
+{
+ struct uml_l2tpv3_data *td = vp->transport_data;
+ uint32_t *counter;
+
+ if (td->udp)
+ *(uint32_t *) header = cpu_to_be32(L2TPV3_DATA_PACKET);
+ (*(uint32_t *) (header + td->session_offset)) = td->tx_session;
+
+ if (td->cookie) {
+ if (td->cookie_is_64)
+ (*(uint64_t *)(header + td->cookie_offset)) =
+ td->tx_cookie;
+ else
+ (*(uint32_t *)(header + td->cookie_offset)) =
+ td->tx_cookie;
+ }
+ if (td->has_counter) {
+ counter = (uint32_t *)(header + td->counter_offset);
+ if (td->pin_counter) {
+ *counter = 0;
+ } else {
+ td->counter++;
+ *counter = cpu_to_be32(td->counter);
+ }
+ }
+ return 0;
+}
+
+static int gre_form_header(uint8_t *header,
+ struct sk_buff *skb, struct vector_private *vp)
+{
+ struct uml_gre_data *td = vp->transport_data;
+ uint32_t *sequence;
+ *((uint32_t *) header) = *((uint32_t *) &td->expected_header);
+ if (td->key)
+ (*(uint32_t *) (header + td->key_offset)) = td->tx_key;
+ if (td->has_sequence) {
+ sequence = (uint32_t *)(header + td->sequence_offset);
+ if (td->pin_sequence)
+ *sequence = 0;
+ else
+ *sequence = cpu_to_be32(++td->sequence);
+ }
+ return 0;
+}
+
+static int raw_form_header(uint8_t *header,
+ struct sk_buff *skb, struct vector_private *vp)
+{
+ struct virtio_net_hdr *vheader = (struct virtio_net_hdr *) header;
+
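+ /* Fill in the virtio_net header in front of the frame with the skb's
+ * checksum and GSO state so the host side can complete the offloads.
+ */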
+ virtio_net_hdr_from_skb(
+ skb,
+ vheader,
+ virtio_legacy_is_little_endian(),
+ false
+ );
+
+ return 0;
+}
+
+static int l2tpv3_verify_header(
+ uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
+{
+ struct uml_l2tpv3_data *td = vp->transport_data;
+ uint32_t *session;
+ uint64_t cookie;
+
+ if ((!td->udp) && (!td->ipv6))
+ header += sizeof(struct iphdr) /* fix for ipv4 raw */;
+
+ /* we do not do a strict check for "data" packets as per
+ * the RFC spec because the pure IP spec does not have
+ * that anyway.
+ */
+
+ if (td->cookie) {
+ if (td->cookie_is_64)
+ cookie = *(uint64_t *)(header + td->cookie_offset);
+ else
+ cookie = *(uint32_t *)(header + td->cookie_offset);
+ if (cookie != td->rx_cookie) {
+ if (net_ratelimit())
+ netdev_err(vp->dev, "uml_l2tpv3: unknown cookie id");
+ return -1;
+ }
+ }
+ session = (uint32_t *) (header + td->session_offset);
+ if (*session != td->rx_session) {
+ if (net_ratelimit())
+ netdev_err(vp->dev, "uml_l2tpv3: session mismatch");
+ return -1;
+ }
+ return 0;
+}
+
+static int gre_verify_header(
+ uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
+{
+
+ uint32_t key;
+ struct uml_gre_data *td = vp->transport_data;
+
+ if (!td->ipv6)
+ header += sizeof(struct iphdr) /* fix for ipv4 raw */;
+
+ if (*((uint32_t *) header) != *((uint32_t *) &td->expected_header)) {
+ if (net_ratelimit())
+ netdev_err(vp->dev, "header type disagreement, expecting %0x, got %0x",
+ *((uint32_t *) &td->expected_header),
+ *((uint32_t *) header)
+ );
+ return -1;
+ }
+
+ if (td->key) {
+ key = (*(uint32_t *)(header + td->key_offset));
+ if (key != td->rx_key) {
+ if (net_ratelimit())
+ netdev_err(vp->dev, "unknown key id %0x, expecting %0x",
+ key, td->rx_key);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int raw_verify_header(
+ uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
+{
+ struct virtio_net_hdr *vheader = (struct virtio_net_hdr *) header;
+
+ if ((vheader->gso_type != VIRTIO_NET_HDR_GSO_NONE) &&
+ (vp->req_size != 65536)) {
+ if (net_ratelimit())
+ netdev_err(
+ vp->dev,
+ GSO_ERROR
+ );
+ }
+ if ((vheader->flags & VIRTIO_NET_HDR_F_DATA_VALID) > 0)
+ return 1;
+
+ virtio_net_hdr_to_skb(skb, vheader, virtio_legacy_is_little_endian());
+ return 0;
+}
+
+static bool get_uint_param(
+ struct arglist *def, char *param, unsigned int *result)
+{
+ char *arg = uml_vector_fetch_arg(def, param);
+
+ if (arg != NULL) {
+ if (kstrtouint(arg, 0, result) == 0)
+ return true;
+ }
+ return false;
+}
+
+static bool get_ulong_param(
+ struct arglist *def, char *param, unsigned long *result)
+{
+ char *arg = uml_vector_fetch_arg(def, param);
+
+ if (arg != NULL) {
+ if (kstrtoul(arg, 0, result) == 0)
+ return true;
+ }
+ return false;
+}
+
+static int build_gre_transport_data(struct vector_private *vp)
+{
+ struct uml_gre_data *td;
+ unsigned int temp_int;
+ unsigned int temp_rx;
+ unsigned int temp_tx;
+
+ vp->transport_data = kmalloc(sizeof(struct uml_gre_data), GFP_KERNEL);
+ if (vp->transport_data == NULL)
+ return -ENOMEM;
+ td = vp->transport_data;
+ td->sequence = 0;
+
+ td->expected_header.arptype = GRE_IRB;
+ td->expected_header.header = 0;
+
+ vp->form_header = &gre_form_header;
+ vp->verify_header = &gre_verify_header;
+ vp->header_size = 4;
+ td->key_offset = 4;
+ td->sequence_offset = 4;
+ td->checksum_offset = 4;
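+ /* Optional GRE fields (checksum, key, sequence) all start right
+ * after the 4 byte base header; the offsets are bumped below as
+ * earlier optional fields are enabled.
+ */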
+
+ td->ipv6 = false;
+ if (get_uint_param(vp->parsed, "v6", &temp_int)) {
+ if (temp_int > 0)
+ td->ipv6 = true;
+ }
+ td->key = false;
+ if (get_uint_param(vp->parsed, "rx_key", &temp_rx)) {
+ if (get_uint_param(vp->parsed, "tx_key", &temp_tx)) {
+ td->key = true;
+ td->expected_header.header |= GRE_MODE_KEY;
+ td->rx_key = cpu_to_be32(temp_rx);
+ td->tx_key = cpu_to_be32(temp_tx);
+ vp->header_size += 4;
+ td->sequence_offset += 4;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ td->has_sequence = false;
+ td->pin_sequence = false;
+ if (get_uint_param(vp->parsed, "sequence", &temp_int)) {
+ if (temp_int > 0) {
+ vp->header_size += 4;
+ td->has_sequence = true;
+ td->expected_header.header |= GRE_MODE_SEQUENCE;
+ if (get_uint_param(
+ vp->parsed, "pin_sequence", &temp_int)) {
+ if (temp_int > 0)
+ td->pin_sequence = true;
+ }
+ }
+ }
+ vp->rx_header_size = vp->header_size;
+ if (!td->ipv6)
+ vp->rx_header_size += sizeof(struct iphdr);
+ return 0;
+}
+
+static int build_l2tpv3_transport_data(struct vector_private *vp)
+{
+
+ struct uml_l2tpv3_data *td;
+ unsigned int temp_int, temp_rxs, temp_txs;
+ unsigned long temp_rx;
+ unsigned long temp_tx;
+
+ vp->transport_data = kmalloc(
+ sizeof(struct uml_l2tpv3_data), GFP_KERNEL);
+
+ if (vp->transport_data == NULL)
+ return -ENOMEM;
+
+ td = vp->transport_data;
+
+ vp->form_header = &l2tpv3_form_header;
+ vp->verify_header = &l2tpv3_verify_header;
+ td->counter = 0;
+
+ vp->header_size = 4;
+ td->session_offset = 0;
+ td->cookie_offset = 4;
+ td->counter_offset = 4;
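+ /* The session ID is the first word of the L2TPv3 header; the cookie
+ * and counter follow it. The offsets grow below as the cookie and
+ * the UDP encapsulation header are enabled.
+ */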
+
+
+ td->ipv6 = false;
+ td->udp = false;
+ td->pin_counter = false;
+ if (get_uint_param(vp->parsed, "v6", &temp_int)) {
+ if (temp_int > 0)
+ td->ipv6 = true;
+ }
+
+ if (get_uint_param(vp->parsed, "rx_session", &temp_rxs)) {
+ if (get_uint_param(vp->parsed, "tx_session", &temp_txs)) {
+ td->tx_session = cpu_to_be32(temp_txs);
+ td->rx_session = cpu_to_be32(temp_rxs);
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ td->cookie_is_64 = false;
+ if (get_uint_param(vp->parsed, "cookie64", &temp_int)) {
+ if (temp_int > 0)
+ td->cookie_is_64 = true;
+ }
+ td->cookie = false;
+ if (get_ulong_param(vp->parsed, "rx_cookie", &temp_rx)) {
+ if (get_ulong_param(vp->parsed, "tx_cookie", &temp_tx)) {
+ td->cookie = true;
+ if (td->cookie_is_64) {
+ td->rx_cookie = cpu_to_be64(temp_rx);
+ td->tx_cookie = cpu_to_be64(temp_tx);
+ vp->header_size += 8;
+ td->counter_offset += 8;
+ } else {
+ td->rx_cookie = cpu_to_be32(temp_rx);
+ td->tx_cookie = cpu_to_be32(temp_tx);
+ vp->header_size += 4;
+ td->counter_offset += 4;
+ }
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ td->has_counter = false;
+ if (get_uint_param(vp->parsed, "counter", &temp_int)) {
+ if (temp_int > 0) {
+ td->has_counter = true;
+ vp->header_size += 4;
+ if (get_uint_param(
+ vp->parsed, "pin_counter", &temp_int)) {
+ if (temp_int > 0)
+ td->pin_counter = true;
+ }
+ }
+ }
+
+ if (get_uint_param(vp->parsed, "udp", &temp_int)) {
+ if (temp_int > 0) {
+ td->udp = true;
+ vp->header_size += 4;
+ td->counter_offset += 4;
+ td->session_offset += 4;
+ td->cookie_offset += 4;
+ }
+ }
+
+ vp->rx_header_size = vp->header_size;
+ if ((!td->ipv6) && (!td->udp))
+ vp->rx_header_size += sizeof(struct iphdr);
+
+ return 0;
+}
+
+static int build_raw_transport_data(struct vector_private *vp)
+{
+ if (uml_raw_enable_vnet_headers(vp->fds->rx_fd)) {
+ if (!uml_raw_enable_vnet_headers(vp->fds->tx_fd))
+ return -1;
+ vp->form_header = &raw_form_header;
+ vp->verify_header = &raw_verify_header;
+ vp->header_size = sizeof(struct virtio_net_hdr);
+ vp->rx_header_size = sizeof(struct virtio_net_hdr);
+ vp->dev->hw_features |= (NETIF_F_TSO | NETIF_F_GRO);
+ vp->dev->features |=
+ (NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ NETIF_F_TSO | NETIF_F_GRO);
+ netdev_info(
+ vp->dev,
+ "raw: using vnet headers for tso and tx/rx checksum"
+ );
+ }
+ return 0;
+}
+
+static int build_tap_transport_data(struct vector_private *vp)
+{
+ if (uml_raw_enable_vnet_headers(vp->fds->rx_fd)) {
+ vp->form_header = &raw_form_header;
+ vp->verify_header = &raw_verify_header;
+ vp->header_size = sizeof(struct virtio_net_hdr);
+ vp->rx_header_size = sizeof(struct virtio_net_hdr);
+ vp->dev->hw_features |=
+ (NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
+ vp->dev->features |=
+ (NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
+ netdev_info(
+ vp->dev,
+ "tap/raw: using vnet headers for tso and tx/rx checksum"
+ );
+ } else {
+ return 0; /* do not try to enable tap too if raw failed */
+ }
+ if (uml_tap_enable_vnet_headers(vp->fds->tx_fd))
+ return 0;
+ return -1;
+}
+
+int build_transport_data(struct vector_private *vp)
+{
+ char *transport = uml_vector_fetch_arg(vp->parsed, "transport");
+
+ if (strncmp(transport, TRANS_GRE, TRANS_GRE_LEN) == 0)
+ return build_gre_transport_data(vp);
+ if (strncmp(transport, TRANS_L2TPV3, TRANS_L2TPV3_LEN) == 0)
+ return build_l2tpv3_transport_data(vp);
+ if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
+ return build_raw_transport_data(vp);
+ if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
+ return build_tap_transport_data(vp);
+ return 0;
+}
+
diff --git a/arch/um/drivers/vector_user.c b/arch/um/drivers/vector_user.c
new file mode 100644
index 000000000000..4d6a78e31089
--- /dev/null
+++ b/arch/um/drivers/vector_user.c
@@ -0,0 +1,590 @@
+/*
+ * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stddef.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <linux/if_tun.h>
+#include <arpa/inet.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <net/ethernet.h>
+#include <netinet/ip.h>
+#include <netinet/ether.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <sys/socket.h>
+#include <sys/wait.h>
+#include <linux/virtio_net.h>
+#include <netdb.h>
+#include <stdlib.h>
+#include <os.h>
+#include <um_malloc.h>
+#include "vector_user.h"
+
+#define ID_GRE 0
+#define ID_L2TPV3 1
+#define ID_MAX 1
+
+#define TOKEN_IFNAME "ifname"
+
+#define TRANS_RAW "raw"
+#define TRANS_RAW_LEN strlen(TRANS_RAW)
+
+#define VNET_HDR_FAIL "could not enable vnet headers on fd %d"
+#define TUN_GET_F_FAIL "tapraw: TUNGETFEATURES failed: %s"
+#define L2TPV3_BIND_FAIL "l2tpv3_open : could not bind socket err=%i"
+#define BPF_ATTACH_FAIL "Failed to attach filter size %d to %d, err %d\n"
+
+/* This is a very ugly, brute-force lookup, but it is done
+ * only once at initialization so it is not worth doing hashes or
+ * anything more intelligent.
+ */
+
+char *uml_vector_fetch_arg(struct arglist *ifspec, char *token)
+{
+ int i;
+
+ for (i = 0; i < ifspec->numargs; i++) {
+ if (strcmp(ifspec->tokens[i], token) == 0)
+ return ifspec->values[i];
+ }
+ return NULL;
+
+}
+
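+/* Split a "token=value,token=value" interface specification in place
+ * into an arglist, e.g. "transport=tap,ifname=tap0" (illustrative)
+ * becomes tokens {"transport", "ifname"} and values {"tap", "tap0"}.
+ */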
+struct arglist *uml_parse_vector_ifspec(char *arg)
+{
+ struct arglist *result;
+ int pos, len;
+ bool parsing_token = true, next_starts = true;
+
+ if (arg == NULL)
+ return NULL;
+ result = uml_kmalloc(sizeof(struct arglist), UM_GFP_KERNEL);
+ if (result == NULL)
+ return NULL;
+ result->numargs = 0;
+ len = strlen(arg);
+ for (pos = 0; pos < len; pos++) {
+ if (next_starts) {
+ if (parsing_token) {
+ result->tokens[result->numargs] = arg + pos;
+ } else {
+ result->values[result->numargs] = arg + pos;
+ result->numargs++;
+ }
+ next_starts = false;
+ }
+ if (*(arg + pos) == '=') {
+ if (parsing_token)
+ parsing_token = false;
+ else
+ goto cleanup;
+ next_starts = true;
+ (*(arg + pos)) = '\0';
+ }
+ if (*(arg + pos) == ',') {
+ parsing_token = true;
+ next_starts = true;
+ (*(arg + pos)) = '\0';
+ }
+ }
+ return result;
+cleanup:
+ printk(UM_KERN_ERR "vector_setup - Couldn't parse '%s'\n", arg);
+ kfree(result);
+ return NULL;
+}
+
+/*
+ * Socket/FD configuration functions. These return a structure
+ * of rx and tx descriptors to cover cases where these are not
+ * the same (e.g. read via raw socket and write via tap).
+ */
+
+#define PATH_NET_TUN "/dev/net/tun"
+
+static struct vector_fds *user_init_tap_fds(struct arglist *ifspec)
+{
+ struct ifreq ifr;
+ int fd = -1;
+ struct sockaddr_ll sock;
+ int err = -ENOMEM, offload;
+ char *iface;
+ struct vector_fds *result = NULL;
+
+ iface = uml_vector_fetch_arg(ifspec, TOKEN_IFNAME);
+ if (iface == NULL) {
+ printk(UM_KERN_ERR "uml_tap: failed to parse interface spec\n");
+ goto tap_cleanup;
+ }
+
+ result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL);
+ if (result == NULL) {
+ printk(UM_KERN_ERR "uml_tap: failed to allocate file descriptors\n");
+ goto tap_cleanup;
+ }
+ result->rx_fd = -1;
+ result->tx_fd = -1;
+ result->remote_addr = NULL;
+ result->remote_addr_size = 0;
+
+ /* TAP */
+
+ fd = open(PATH_NET_TUN, O_RDWR);
+ if (fd < 0) {
+ printk(UM_KERN_ERR "uml_tap: failed to open tun device\n");
+ goto tap_cleanup;
+ }
+ result->tx_fd = fd;
+ memset(&ifr, 0, sizeof(ifr));
+ ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
+ strncpy((char *)&ifr.ifr_name, iface, sizeof(ifr.ifr_name) - 1);
+
+ err = ioctl(fd, TUNSETIFF, (void *) &ifr);
+ if (err != 0) {
+ printk(UM_KERN_ERR "uml_tap: failed to select tap interface\n");
+ goto tap_cleanup;
+ }
+
+ offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
+ ioctl(fd, TUNSETOFFLOAD, offload);
+
+ /* RAW */
+
+ fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
+ if (fd == -1) {
+ printk(UM_KERN_ERR
+ "uml_tap: failed to create socket: %i\n", -errno);
+ goto tap_cleanup;
+ }
+ result->rx_fd = fd;
+ memset(&ifr, 0, sizeof(ifr));
+ strncpy((char *)&ifr.ifr_name, iface, sizeof(ifr.ifr_name) - 1);
+ if (ioctl(fd, SIOCGIFINDEX, (void *) &ifr) < 0) {
+ printk(UM_KERN_ERR
+ "uml_tap: failed to set interface: %i\n", -errno);
+ goto tap_cleanup;
+ }
+
+ sock.sll_family = AF_PACKET;
+ sock.sll_protocol = htons(ETH_P_ALL);
+ sock.sll_ifindex = ifr.ifr_ifindex;
+
+ if (bind(fd,
+ (struct sockaddr *) &sock, sizeof(struct sockaddr_ll)) < 0) {
+ printk(UM_KERN_ERR
+ "user_init_tap: failed to bind raw pair, err %d\n",
+ -errno);
+ goto tap_cleanup;
+ }
+ return result;
+tap_cleanup:
+ printk(UM_KERN_ERR "user_init_tap: init failed, error %d", err);
+ if (result != NULL) {
+ if (result->rx_fd >= 0)
+ os_close_file(result->rx_fd);
+ if (result->tx_fd >= 0)
+ os_close_file(result->tx_fd);
+ kfree(result);
+ }
+ return NULL;
+}
+
+
+static struct vector_fds *user_init_raw_fds(struct arglist *ifspec)
+{
+ struct ifreq ifr;
+ int rxfd = -1, txfd = -1;
+ struct sockaddr_ll sock;
+ int err = -ENOMEM;
+ char *iface;
+ struct vector_fds *result = NULL;
+
+ iface = uml_vector_fetch_arg(ifspec, TOKEN_IFNAME);
+ if (iface == NULL)
+ goto cleanup;
+
+ rxfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
+ if (rxfd == -1) {
+ err = -errno;
+ goto cleanup;
+ }
+ txfd = socket(AF_PACKET, SOCK_RAW, 0); /* Turn off RX on this fd */
+ if (txfd == -1) {
+ err = -errno;
+ goto cleanup;
+ }
+ memset(&ifr, 0, sizeof(ifr));
+ strncpy((char *)&ifr.ifr_name, iface, sizeof(ifr.ifr_name) - 1);
+ if (ioctl(rxfd, SIOCGIFINDEX, (void *) &ifr) < 0) {
+ err = -errno;
+ goto cleanup;
+ }
+
+ sock.sll_family = AF_PACKET;
+ sock.sll_protocol = htons(ETH_P_ALL);
+ sock.sll_ifindex = ifr.ifr_ifindex;
+
+ if (bind(rxfd,
+ (struct sockaddr *) &sock, sizeof(struct sockaddr_ll)) < 0) {
+ err = -errno;
+ goto cleanup;
+ }
+
+ sock.sll_family = AF_PACKET;
+ sock.sll_protocol = htons(ETH_P_IP);
+ sock.sll_ifindex = ifr.ifr_ifindex;
+
+ if (bind(txfd,
+ (struct sockaddr *) &sock, sizeof(struct sockaddr_ll)) < 0) {
+ err = -errno;
+ goto cleanup;
+ }
+
+ result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL);
+ if (result != NULL) {
+ result->rx_fd = rxfd;
+ result->tx_fd = txfd;
+ result->remote_addr = NULL;
+ result->remote_addr_size = 0;
+ }
+ return result;
+cleanup:
+ printk(UM_KERN_ERR "user_init_raw: init failed, error %d", err);
+ if (rxfd >= 0)
+ os_close_file(rxfd);
+ if (txfd >= 0)
+ os_close_file(txfd);
+ if (result != NULL)
+ kfree(result);
+ return NULL;
+}
+
+
+bool uml_raw_enable_qdisc_bypass(int fd)
+{
+ int optval = 1;
+
+ if (setsockopt(fd,
+ SOL_PACKET, PACKET_QDISC_BYPASS,
+ &optval, sizeof(optval)) != 0) {
+ return false;
+ }
+ return true;
+}
+
+bool uml_raw_enable_vnet_headers(int fd)
+{
+ int optval = 1;
+
+ if (setsockopt(fd,
+ SOL_PACKET, PACKET_VNET_HDR,
+ &optval, sizeof(optval)) != 0) {
+ printk(UM_KERN_INFO VNET_HDR_FAIL, fd);
+ return false;
+ }
+ return true;
+}
+bool uml_tap_enable_vnet_headers(int fd)
+{
+ unsigned int features;
+ int len = sizeof(struct virtio_net_hdr);
+
+ if (ioctl(fd, TUNGETFEATURES, &features) == -1) {
+ printk(UM_KERN_INFO TUN_GET_F_FAIL, strerror(errno));
+ return false;
+ }
+ if ((features & IFF_VNET_HDR) == 0) {
+ printk(UM_KERN_INFO "tapraw: No VNET HEADER support");
+ return false;
+ }
+ ioctl(fd, TUNSETVNETHDRSZ, &len);
+ return true;
+}
+
+static struct vector_fds *user_init_socket_fds(struct arglist *ifspec, int id)
+{
+ int err = -ENOMEM;
+ int fd = -1, gairet;
+ struct addrinfo srchints;
+ struct addrinfo dsthints;
+ bool v6, udp;
+ char *value;
+ char *src, *dst, *srcport, *dstport;
+ struct addrinfo *gairesult = NULL;
+ struct vector_fds *result = NULL;
+
+
+ value = uml_vector_fetch_arg(ifspec, "v6");
+ v6 = false;
+ udp = false;
+ if (value != NULL) {
+ if (strtol((const char *) value, NULL, 10) > 0)
+ v6 = true;
+ }
+
+ value = uml_vector_fetch_arg(ifspec, "udp");
+ if (value != NULL) {
+ if (strtol((const char *) value, NULL, 10) > 0)
+ udp = true;
+ }
+ src = uml_vector_fetch_arg(ifspec, "src");
+ dst = uml_vector_fetch_arg(ifspec, "dst");
+ srcport = uml_vector_fetch_arg(ifspec, "srcport");
+ dstport = uml_vector_fetch_arg(ifspec, "dstport");
+
+ memset(&dsthints, 0, sizeof(dsthints));
+
+ if (v6)
+ dsthints.ai_family = AF_INET6;
+ else
+ dsthints.ai_family = AF_INET;
+
+ switch (id) {
+ case ID_GRE:
+ dsthints.ai_socktype = SOCK_RAW;
+ dsthints.ai_protocol = IPPROTO_GRE;
+ break;
+ case ID_L2TPV3:
+ if (udp) {
+ dsthints.ai_socktype = SOCK_DGRAM;
+ dsthints.ai_protocol = 0;
+ } else {
+ dsthints.ai_socktype = SOCK_RAW;
+ dsthints.ai_protocol = IPPROTO_L2TP;
+ }
+ break;
+ default:
+ printk(KERN_ERR "Unsupported socket type\n");
+ return NULL;
+ }
+ memcpy(&srchints, &dsthints, sizeof(struct addrinfo));
+
+ gairet = getaddrinfo(src, srcport, &srchints, &gairesult);
+ if ((gairet != 0) || (gairesult == NULL)) {
+ printk(UM_KERN_ERR
+ "socket_open : could not resolve src, error = %s",
+ gai_strerror(gairet)
+ );
+ return NULL;
+ }
+ fd = socket(gairesult->ai_family,
+ gairesult->ai_socktype, gairesult->ai_protocol);
+ if (fd == -1) {
+ printk(UM_KERN_ERR
+ "socket_open : could not open socket, error = %d",
+ -errno
+ );
+ goto cleanup;
+ }
+ if (bind(fd,
+ (struct sockaddr *) gairesult->ai_addr,
+ gairesult->ai_addrlen)) {
+ printk(UM_KERN_ERR L2TPV3_BIND_FAIL, errno);
+ goto cleanup;
+ }
+
+ if (gairesult != NULL)
+ freeaddrinfo(gairesult);
+
+ gairesult = NULL;
+
+ gairet = getaddrinfo(dst, dstport, &dsthints, &gairesult);
+ if ((gairet != 0) || (gairesult == NULL)) {
+ printk(UM_KERN_ERR
+ "socket_open : could not resolve dst, error = %s",
+ gai_strerror(gairet)
+ );
+ return NULL;
+ }
+
+ result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL);
+ if (result != NULL) {
+ result->rx_fd = fd;
+ result->tx_fd = fd;
+ result->remote_addr = uml_kmalloc(
+ gairesult->ai_addrlen, UM_GFP_KERNEL);
+ if (result->remote_addr == NULL)
+ goto cleanup;
+ result->remote_addr_size = gairesult->ai_addrlen;
+ memcpy(
+ result->remote_addr,
+ gairesult->ai_addr,
+ gairesult->ai_addrlen
+ );
+ }
+ freeaddrinfo(gairesult);
+ return result;
+cleanup:
+ if (gairesult != NULL)
+ freeaddrinfo(gairesult);
+ printk(UM_KERN_ERR "user_init_socket: init failed, error %d", err);
+ if (fd >= 0)
+ os_close_file(fd);
+ if (result != NULL) {
+ if (result->remote_addr != NULL)
+ kfree(result->remote_addr);
+ kfree(result);
+ }
+ return NULL;
+}
+
+struct vector_fds *uml_vector_user_open(
+ int unit,
+ struct arglist *parsed
+)
+{
+ char *transport;
+
+ if (parsed == NULL) {
+ printk(UM_KERN_ERR "no parsed config for unit %d\n", unit);
+ return NULL;
+ }
+ transport = uml_vector_fetch_arg(parsed, "transport");
+ if (transport == NULL) {
+ printk(UM_KERN_ERR "missing transport for unit %d\n", unit);
+ return NULL;
+ }
+ if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
+ return user_init_raw_fds(parsed);
+ if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
+ return user_init_tap_fds(parsed);
+ if (strncmp(transport, TRANS_GRE, TRANS_GRE_LEN) == 0)
+ return user_init_socket_fds(parsed, ID_GRE);
+ if (strncmp(transport, TRANS_L2TPV3, TRANS_L2TPV3_LEN) == 0)
+ return user_init_socket_fds(parsed, ID_L2TPV3);
+ return NULL;
+}
+
+
+int uml_vector_sendmsg(int fd, void *hdr, int flags)
+{
+ int n;
+
+ CATCH_EINTR(n = sendmsg(fd, (struct msghdr *) hdr, flags));
+ if ((n < 0) && (errno == EAGAIN))
+ return 0;
+ if (n >= 0)
+ return n;
+ else
+ return -errno;
+}
+
+int uml_vector_recvmsg(int fd, void *hdr, int flags)
+{
+ int n;
+
+ CATCH_EINTR(n = recvmsg(fd, (struct msghdr *) hdr, flags));
+ if ((n < 0) && (errno == EAGAIN))
+ return 0;
+ if (n >= 0)
+ return n;
+ else
+ return -errno;
+}
+
+int uml_vector_writev(int fd, void *hdr, int iovcount)
+{
+ int n;
+
+ CATCH_EINTR(n = writev(fd, (struct iovec *) hdr, iovcount));
+ if ((n < 0) && (errno == EAGAIN))
+ return 0;
+ if (n >= 0)
+ return n;
+ else
+ return -errno;
+}
+
+int uml_vector_sendmmsg(
+ int fd,
+ void *msgvec,
+ unsigned int vlen,
+ unsigned int flags)
+{
+ int n;
+
+ CATCH_EINTR(n = sendmmsg(fd, (struct mmsghdr *) msgvec, vlen, flags));
+ if ((n < 0) && (errno == EAGAIN))
+ return 0;
+ if (n >= 0)
+ return n;
+ else
+ return -errno;
+}
+
+int uml_vector_recvmmsg(
+ int fd,
+ void *msgvec,
+ unsigned int vlen,
+ unsigned int flags)
+{
+ int n;
+
+ CATCH_EINTR(
+ n = recvmmsg(fd, (struct mmsghdr *) msgvec, vlen, flags, 0));
+ if ((n < 0) && (errno == EAGAIN))
+ return 0;
+ if (n >= 0)
+ return n;
+ else
+ return -errno;
+}
+int uml_vector_attach_bpf(int fd, void *bpf, int bpf_len)
+{
+ int err = setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, bpf, bpf_len);
+
+ if (err < 0)
+ printk(UM_KERN_ERR BPF_ATTACH_FAIL, bpf_len, fd, -errno);
+ return err;
+}
+
+#define DEFAULT_BPF_LEN 6
+
+void *uml_vector_default_bpf(int fd, void *mac)
+{
+ struct sock_filter *bpf;
+ uint32_t *mac1 = (uint32_t *)(mac + 2);
+ uint16_t *mac2 = (uint16_t *) mac;
+ struct sock_fprog bpf_prog = {
+ .len = 6,
+ .filter = NULL,
+ };
+
+ bpf = uml_kmalloc(
+ sizeof(struct sock_filter) * DEFAULT_BPF_LEN, UM_GFP_KERNEL);
+ if (bpf != NULL) {
+ bpf_prog.filter = bpf;
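+ /* Drop frames whose Ethernet source MAC (frame bytes 6-11) matches
+ * our own address, so we do not receive our own transmissions back
+ * on the raw socket; accept everything else.
+ */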
+ /* ld [8] */
+ bpf[0] = (struct sock_filter){ 0x20, 0, 0, 0x00000008 };
+ /* jeq #0xMAC[2-5] jt 2 jf 5 */
+ bpf[1] = (struct sock_filter){ 0x15, 0, 3, ntohl(*mac1)};
+ /* ldh [6] */
+ bpf[2] = (struct sock_filter){ 0x28, 0, 0, 0x00000006 };
+ /* jeq #0xMAC[0-1] jt 4 jf 5 */
+ bpf[3] = (struct sock_filter){ 0x15, 0, 1, ntohs(*mac2)};
+ /* ret #0 */
+ bpf[4] = (struct sock_filter){ 0x6, 0, 0, 0x00000000 };
+ /* ret #0x40000 */
+ bpf[5] = (struct sock_filter){ 0x6, 0, 0, 0x00040000 };
+ if (uml_vector_attach_bpf(
+ fd, &bpf_prog, sizeof(struct sock_fprog)) < 0) {
+ kfree(bpf);
+ bpf = NULL;
+ }
+ }
+ return bpf;
+}
+
diff --git a/arch/um/drivers/vector_user.h b/arch/um/drivers/vector_user.h
new file mode 100644
index 000000000000..d7cbff73b7ff
--- /dev/null
+++ b/arch/um/drivers/vector_user.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2002 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_VECTOR_USER_H
+#define __UM_VECTOR_USER_H
+
+#define MAXVARGS 20
+
+#define TOKEN_IFNAME "ifname"
+
+#define TRANS_RAW "raw"
+#define TRANS_RAW_LEN strlen(TRANS_RAW)
+
+#define TRANS_TAP "tap"
+#define TRANS_TAP_LEN strlen(TRANS_TAP)
+
+
+#define TRANS_GRE "gre"
+#define TRANS_GRE_LEN strlen(TRANS_GRE)
+
+#define TRANS_L2TPV3 "l2tpv3"
+#define TRANS_L2TPV3_LEN strlen(TRANS_L2TPV3)
+
+#ifndef IPPROTO_GRE
+#define IPPROTO_GRE 0x2F
+#endif
+
+#define GRE_MODE_CHECKSUM cpu_to_be16(8 << 12) /* checksum */
+#define GRE_MODE_RESERVED cpu_to_be16(4 << 12) /* unused */
+#define GRE_MODE_KEY cpu_to_be16(2 << 12) /* KEY present */
+#define GRE_MODE_SEQUENCE cpu_to_be16(1 << 12) /* sequence */
+
+#define GRE_IRB cpu_to_be16(0x6558)
+
+#define L2TPV3_DATA_PACKET 0x30000
+
+/* IANA-assigned IP protocol ID for L2TPv3 */
+
+#ifndef IPPROTO_L2TP
+#define IPPROTO_L2TP 0x73
+#endif
+
+struct arglist {
+ int numargs;
+ char *tokens[MAXVARGS];
+ char *values[MAXVARGS];
+};
+
+/* Separating read and write FDs allows us to have different
+ * rx and tx methods. Example - read a tap via a raw socket using
+ * recvmmsg, write using legacy tap write calls.
+ */
+
+struct vector_fds {
+ int rx_fd;
+ int tx_fd;
+ void *remote_addr;
+ int remote_addr_size;
+};
+
+#define VECTOR_READ 1
+#define VECTOR_WRITE (1 << 1)
+#define VECTOR_HEADERS (1 << 2)
+
+extern struct arglist *uml_parse_vector_ifspec(char *arg);
+
+extern struct vector_fds *uml_vector_user_open(
+ int unit,
+ struct arglist *parsed
+);
+
+extern char *uml_vector_fetch_arg(
+ struct arglist *ifspec,
+ char *token
+);
+
+extern int uml_vector_recvmsg(int fd, void *hdr, int flags);
+extern int uml_vector_sendmsg(int fd, void *hdr, int flags);
+extern int uml_vector_writev(int fd, void *hdr, int iovcount);
+extern int uml_vector_sendmmsg(
+ int fd, void *msgvec,
+ unsigned int vlen,
+ unsigned int flags
+);
+extern int uml_vector_recvmmsg(
+ int fd,
+ void *msgvec,
+ unsigned int vlen,
+ unsigned int flags
+);
+extern void *uml_vector_default_bpf(int fd, void *mac);
+extern int uml_vector_attach_bpf(int fd, void *bpf, int bpf_len);
+extern bool uml_raw_enable_qdisc_bypass(int fd);
+extern bool uml_raw_enable_vnet_headers(int fd);
+extern bool uml_tap_enable_vnet_headers(int fd);
+
+
+#endif
diff --git a/arch/um/include/asm/asm-prototypes.h b/arch/um/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..5898a26daa0d
--- /dev/null
+++ b/arch/um/include/asm/asm-prototypes.h
@@ -0,0 +1 @@
+#include <asm-generic/asm-prototypes.h>
diff --git a/arch/um/include/asm/irq.h b/arch/um/include/asm/irq.h
index b5cdd3f91157..49ed3e35b35a 100644
--- a/arch/um/include/asm/irq.h
+++ b/arch/um/include/asm/irq.h
@@ -18,7 +18,19 @@
#define XTERM_IRQ 13
#define RANDOM_IRQ 14
+#ifdef CONFIG_UML_NET_VECTOR
+
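+/* Reserve a block of IRQs after the fixed ones for vector network
+ * devices; vector_net_open() hands them out round-robin.
+ */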
+#define VECTOR_BASE_IRQ 15
+#define VECTOR_IRQ_SPACE 8
+
+#define LAST_IRQ (VECTOR_IRQ_SPACE + VECTOR_BASE_IRQ)
+
+#else
+
#define LAST_IRQ RANDOM_IRQ
+
+#endif
+
#define NR_IRQS (LAST_IRQ + 1)
#endif
diff --git a/arch/um/include/shared/irq_user.h b/arch/um/include/shared/irq_user.h
index df5633053957..a7a6120f19d5 100644
--- a/arch/um/include/shared/irq_user.h
+++ b/arch/um/include/shared/irq_user.h
@@ -7,6 +7,7 @@
#define __IRQ_USER_H__
#include <sysdep/ptrace.h>
+#include <stdbool.h>
struct irq_fd {
struct irq_fd *next;
@@ -15,10 +16,17 @@ struct irq_fd {
int type;
int irq;
int events;
- int current_events;
+ bool active;
+ bool pending;
+ bool purge;
};
-enum { IRQ_READ, IRQ_WRITE };
+#define IRQ_READ 0
+#define IRQ_WRITE 1
+#define IRQ_NONE 2
+#define MAX_IRQ_TYPE (IRQ_NONE + 1)
+
+
struct siginfo;
extern void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs);
diff --git a/arch/um/include/shared/net_kern.h b/arch/um/include/shared/net_kern.h
index 012ac87d4900..40442b98b173 100644
--- a/arch/um/include/shared/net_kern.h
+++ b/arch/um/include/shared/net_kern.h
@@ -65,5 +65,7 @@ extern int tap_setup_common(char *str, char *type, char **dev_name,
char **mac_out, char **gate_addr);
extern void register_transport(struct transport *new);
extern unsigned short eth_protocol(struct sk_buff *skb);
+extern void uml_net_setup_etheraddr(struct net_device *dev, char *str);
+
#endif
diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
index d8ddaf9790d2..048ae37eb5aa 100644
--- a/arch/um/include/shared/os.h
+++ b/arch/um/include/shared/os.h
@@ -290,15 +290,16 @@ extern void halt_skas(void);
extern void reboot_skas(void);
/* irq.c */
-extern int os_waiting_for_events(struct irq_fd *active_fds);
-extern int os_create_pollfd(int fd, int events, void *tmp_pfd, int size_tmpfds);
-extern void os_free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg,
- struct irq_fd *active_fds, struct irq_fd ***last_irq_ptr2);
-extern void os_free_irq_later(struct irq_fd *active_fds,
- int irq, void *dev_id);
-extern int os_get_pollfd(int i);
-extern void os_set_pollfd(int i, int fd);
+extern int os_waiting_for_events_epoll(void);
+extern void *os_epoll_get_data_pointer(int index);
+extern int os_epoll_triggered(int index, int events);
+extern int os_event_mask(int irq_type);
+extern int os_setup_epoll(void);
+extern int os_add_epoll_fd(int events, int fd, void *data);
+extern int os_mod_epoll_fd(int events, int fd, void *data);
+extern int os_del_epoll_fd(int fd);
extern void os_set_ioignore(void);
+extern void os_close_epoll_fd(void);
/* sigio.c */
extern int add_sigio_fd(int fd);
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 23cb9350d47e..6b7f3827d6e4 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -1,4 +1,6 @@
/*
+ * Copyright (C) 2017 - Cambridge Greys Ltd
+ * Copyright (C) 2011 - 2014 Cisco Systems Inc
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
* Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
@@ -16,243 +18,362 @@
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
+#include <irq_user.h>
-/*
- * This list is accessed under irq_lock, except in sigio_handler,
- * where it is safe from being modified. IRQ handlers won't change it -
- * if an IRQ source has vanished, it will be freed by free_irqs just
- * before returning from sigio_handler. That will process a separate
- * list of irqs to free, with its own locking, coming back here to
- * remove list elements, taking the irq_lock to do so.
+
+/* When epoll triggers we do not know why it did so;
+ * we can also have different IRQs for read and write.
+ * This is why we keep a small irq_fd array for each fd -
+ * one entry per IRQ type.
*/
-static struct irq_fd *active_fds = NULL;
-static struct irq_fd **last_irq_ptr = &active_fds;
-extern void free_irqs(void);
+struct irq_entry {
+ struct irq_entry *next;
+ int fd;
+ struct irq_fd *irq_array[MAX_IRQ_TYPE + 1];
+};
+
+static struct irq_entry *active_fds;
+
+static DEFINE_SPINLOCK(irq_lock);
+
+static void irq_io_loop(struct irq_fd *irq, struct uml_pt_regs *regs)
+{
+/*
+ * irq->active guards against reentry
+ * irq->pending accumulates pending requests
+ * if pending is raised the irq_handler is re-run
+ * until pending is cleared
+ */
+ if (irq->active) {
+ irq->active = false;
+ do {
+ irq->pending = false;
+ do_IRQ(irq->irq, regs);
+ } while (irq->pending && (!irq->purge));
+ if (!irq->purge)
+ irq->active = true;
+ } else {
+ irq->pending = true;
+ }
+}
void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
- struct irq_fd *irq_fd;
- int n;
+ struct irq_entry *irq_entry;
+ struct irq_fd *irq;
+
+ int n, i, j;
while (1) {
- n = os_waiting_for_events(active_fds);
+		/* This is now lockless - epoll keeps back-references to the irqs
+		 * which have triggered it, so there is no need to walk the irq
+		 * list and lock it every time. We avoid locking by turning off
+		 * IO for a specific fd by executing os_del_epoll_fd(fd) before
+		 * we do any changes to the actual data structures
+		 */
+ n = os_waiting_for_events_epoll();
+
if (n <= 0) {
if (n == -EINTR)
continue;
- else break;
+ else
+ break;
}
- for (irq_fd = active_fds; irq_fd != NULL;
- irq_fd = irq_fd->next) {
- if (irq_fd->current_events != 0) {
- irq_fd->current_events = 0;
- do_IRQ(irq_fd->irq, regs);
+ for (i = 0; i < n ; i++) {
+ /* Epoll back reference is the entry with 3 irq_fd
+ * leaves - one for each irq type.
+ */
+ irq_entry = (struct irq_entry *)
+ os_epoll_get_data_pointer(i);
+ for (j = 0; j < MAX_IRQ_TYPE ; j++) {
+ irq = irq_entry->irq_array[j];
+ if (irq == NULL)
+ continue;
+ if (os_epoll_triggered(i, irq->events) > 0)
+ irq_io_loop(irq, regs);
+ if (irq->purge) {
+ irq_entry->irq_array[j] = NULL;
+ kfree(irq);
+ }
}
}
}
+}
+
+static int assign_epoll_events_to_irq(struct irq_entry *irq_entry)
+{
+ int i;
+ int events = 0;
+ struct irq_fd *irq;
- free_irqs();
+ for (i = 0; i < MAX_IRQ_TYPE ; i++) {
+ irq = irq_entry->irq_array[i];
+ if (irq != NULL)
+ events = irq->events | events;
+ }
+ if (events > 0) {
+ /* os_add_epoll will call os_mod_epoll if this already exists */
+ return os_add_epoll_fd(events, irq_entry->fd, irq_entry);
+ }
+ /* No events - delete */
+ return os_del_epoll_fd(irq_entry->fd);
}
-static DEFINE_SPINLOCK(irq_lock);
+
static int activate_fd(int irq, int fd, int type, void *dev_id)
{
- struct pollfd *tmp_pfd;
- struct irq_fd *new_fd, *irq_fd;
+ struct irq_fd *new_fd;
+ struct irq_entry *irq_entry;
+ int i, err, events;
unsigned long flags;
- int events, err, n;
err = os_set_fd_async(fd);
if (err < 0)
goto out;
- err = -ENOMEM;
- new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
- if (new_fd == NULL)
- goto out;
+ spin_lock_irqsave(&irq_lock, flags);
- if (type == IRQ_READ)
- events = UM_POLLIN | UM_POLLPRI;
- else events = UM_POLLOUT;
- *new_fd = ((struct irq_fd) { .next = NULL,
- .id = dev_id,
- .fd = fd,
- .type = type,
- .irq = irq,
- .events = events,
- .current_events = 0 } );
+ /* Check if we have an entry for this fd */
err = -EBUSY;
- spin_lock_irqsave(&irq_lock, flags);
- for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
- if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
- printk(KERN_ERR "Registering fd %d twice\n", fd);
- printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
- printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
- dev_id);
+ for (irq_entry = active_fds;
+ irq_entry != NULL; irq_entry = irq_entry->next) {
+ if (irq_entry->fd == fd)
+ break;
+ }
+
+ if (irq_entry == NULL) {
+ /* This needs to be atomic as it may be called from an
+ * IRQ context.
+ */
+ irq_entry = kmalloc(sizeof(struct irq_entry), GFP_ATOMIC);
+ if (irq_entry == NULL) {
+ printk(KERN_ERR
+ "Failed to allocate new IRQ entry\n");
goto out_unlock;
}
+ irq_entry->fd = fd;
+ for (i = 0; i < MAX_IRQ_TYPE; i++)
+ irq_entry->irq_array[i] = NULL;
+ irq_entry->next = active_fds;
+ active_fds = irq_entry;
}
- if (type == IRQ_WRITE)
- fd = -1;
-
- tmp_pfd = NULL;
- n = 0;
+ /* Check if we are trying to re-register an interrupt for a
+ * particular fd
+ */
- while (1) {
- n = os_create_pollfd(fd, events, tmp_pfd, n);
- if (n == 0)
- break;
+ if (irq_entry->irq_array[type] != NULL) {
+ printk(KERN_ERR
+ "Trying to reregister IRQ %d FD %d TYPE %d ID %p\n",
+ irq, fd, type, dev_id
+ );
+ goto out_unlock;
+ } else {
+ /* New entry for this fd */
+
+ err = -ENOMEM;
+ new_fd = kmalloc(sizeof(struct irq_fd), GFP_ATOMIC);
+ if (new_fd == NULL)
+ goto out_unlock;
- /*
- * n > 0
- * It means we couldn't put new pollfd to current pollfds
- * and tmp_fds is NULL or too small for new pollfds array.
- * Needed size is equal to n as minimum.
- *
- * Here we have to drop the lock in order to call
- * kmalloc, which might sleep.
- * If something else came in and changed the pollfds array
- * so we will not be able to put new pollfd struct to pollfds
- * then we free the buffer tmp_fds and try again.
+ events = os_event_mask(type);
+
+ *new_fd = ((struct irq_fd) {
+ .id = dev_id,
+ .irq = irq,
+ .type = type,
+ .events = events,
+ .active = true,
+ .pending = false,
+ .purge = false
+ });
+ /* Turn off any IO on this fd - allows us to
+ * avoid locking the IRQ loop
*/
- spin_unlock_irqrestore(&irq_lock, flags);
- kfree(tmp_pfd);
-
- tmp_pfd = kmalloc(n, GFP_KERNEL);
- if (tmp_pfd == NULL)
- goto out_kfree;
-
- spin_lock_irqsave(&irq_lock, flags);
+ os_del_epoll_fd(irq_entry->fd);
+ irq_entry->irq_array[type] = new_fd;
}
- *last_irq_ptr = new_fd;
- last_irq_ptr = &new_fd->next;
-
+ /* Turn back IO on with the correct (new) IO event mask */
+ assign_epoll_events_to_irq(irq_entry);
spin_unlock_irqrestore(&irq_lock, flags);
-
- /*
- * This calls activate_fd, so it has to be outside the critical
- * section.
- */
- maybe_sigio_broken(fd, (type == IRQ_READ));
+ maybe_sigio_broken(fd, (type != IRQ_NONE));
return 0;
-
- out_unlock:
+out_unlock:
spin_unlock_irqrestore(&irq_lock, flags);
- out_kfree:
- kfree(new_fd);
- out:
+out:
return err;
}
-static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
+/*
+ * Walk the IRQ list and dispose of any unused entries.
+ * Should be done under irq_lock.
+ */
+
+static void garbage_collect_irq_entries(void)
{
- unsigned long flags;
+ int i;
+ bool reap;
+ struct irq_entry *walk;
+ struct irq_entry *previous = NULL;
+ struct irq_entry *to_free;
- spin_lock_irqsave(&irq_lock, flags);
- os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
- spin_unlock_irqrestore(&irq_lock, flags);
+ if (active_fds == NULL)
+ return;
+ walk = active_fds;
+ while (walk != NULL) {
+ reap = true;
+ for (i = 0; i < MAX_IRQ_TYPE ; i++) {
+ if (walk->irq_array[i] != NULL) {
+ reap = false;
+ break;
+ }
+ }
+ if (reap) {
+ if (previous == NULL)
+ active_fds = walk->next;
+ else
+ previous->next = walk->next;
+ to_free = walk;
+ } else {
+ to_free = NULL;
+ }
+ walk = walk->next;
+ if (to_free != NULL)
+ kfree(to_free);
+ }
}
-struct irq_and_dev {
- int irq;
- void *dev;
-};
+/*
+ * Walk the IRQ list and get the descriptor for our FD
+ */
-static int same_irq_and_dev(struct irq_fd *irq, void *d)
+static struct irq_entry *get_irq_entry_by_fd(int fd)
{
- struct irq_and_dev *data = d;
+ struct irq_entry *walk = active_fds;
- return ((irq->irq == data->irq) && (irq->id == data->dev));
+ while (walk != NULL) {
+ if (walk->fd == fd)
+ return walk;
+ walk = walk->next;
+ }
+ return NULL;
}
-static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
-{
- struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq,
- .dev = dev });
- free_irq_by_cb(same_irq_and_dev, &data);
-}
+/*
+ * Walk the IRQ list and dispose of an entry for a specific
+ * device, fd and number. Note - if sharing an IRQ for read
+ * and write for the same FD it will be disposed of in either case.
+ * If this behaviour is undesirable use different IRQ ids.
+ */
-static int same_fd(struct irq_fd *irq, void *fd)
-{
- return (irq->fd == *((int *)fd));
-}
+#define IGNORE_IRQ 1
+#define IGNORE_DEV (1<<1)
-void free_irq_by_fd(int fd)
+static void do_free_by_irq_and_dev(
+ struct irq_entry *irq_entry,
+ unsigned int irq,
+ void *dev,
+ int flags
+)
{
- free_irq_by_cb(same_fd, &fd);
+ int i;
+ struct irq_fd *to_free;
+
+ for (i = 0; i < MAX_IRQ_TYPE ; i++) {
+ if (irq_entry->irq_array[i] != NULL) {
+ if (
+ ((flags & IGNORE_IRQ) ||
+ (irq_entry->irq_array[i]->irq == irq)) &&
+ ((flags & IGNORE_DEV) ||
+ (irq_entry->irq_array[i]->id == dev))
+ ) {
+ /* Turn off any IO on this fd - allows us to
+ * avoid locking the IRQ loop
+ */
+ os_del_epoll_fd(irq_entry->fd);
+ to_free = irq_entry->irq_array[i];
+ irq_entry->irq_array[i] = NULL;
+ assign_epoll_events_to_irq(irq_entry);
+ if (to_free->active)
+ to_free->purge = true;
+ else
+ kfree(to_free);
+ }
+ }
+ }
}
-/* Must be called with irq_lock held */
-static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
+void free_irq_by_fd(int fd)
{
- struct irq_fd *irq;
- int i = 0;
- int fdi;
+ struct irq_entry *to_free;
+ unsigned long flags;
- for (irq = active_fds; irq != NULL; irq = irq->next) {
- if ((irq->fd == fd) && (irq->irq == irqnum))
- break;
- i++;
- }
- if (irq == NULL) {
- printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
- fd);
- goto out;
- }
- fdi = os_get_pollfd(i);
- if ((fdi != -1) && (fdi != fd)) {
- printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
- "and pollfds, fd %d vs %d, need %d\n", irq->fd,
- fdi, fd);
- irq = NULL;
- goto out;
+ spin_lock_irqsave(&irq_lock, flags);
+ to_free = get_irq_entry_by_fd(fd);
+ if (to_free != NULL) {
+ do_free_by_irq_and_dev(
+ to_free,
+ -1,
+ NULL,
+ IGNORE_IRQ | IGNORE_DEV
+ );
}
- *index_out = i;
- out:
- return irq;
+ garbage_collect_irq_entries();
+ spin_unlock_irqrestore(&irq_lock, flags);
}
+EXPORT_SYMBOL(free_irq_by_fd);
-void reactivate_fd(int fd, int irqnum)
+static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
- struct irq_fd *irq;
+ struct irq_entry *to_free;
unsigned long flags;
- int i;
spin_lock_irqsave(&irq_lock, flags);
- irq = find_irq_by_fd(fd, irqnum, &i);
- if (irq == NULL) {
- spin_unlock_irqrestore(&irq_lock, flags);
- return;
+ to_free = active_fds;
+ while (to_free != NULL) {
+ do_free_by_irq_and_dev(
+ to_free,
+ irq,
+ dev,
+ 0
+ );
+ to_free = to_free->next;
}
- os_set_pollfd(i, irq->fd);
+ garbage_collect_irq_entries();
spin_unlock_irqrestore(&irq_lock, flags);
+}
- add_sigio_fd(fd);
+
+void reactivate_fd(int fd, int irqnum)
+{
+ /** NOP - we do auto-EOI now **/
}
void deactivate_fd(int fd, int irqnum)
{
- struct irq_fd *irq;
+ struct irq_entry *to_free;
unsigned long flags;
- int i;
+ os_del_epoll_fd(fd);
spin_lock_irqsave(&irq_lock, flags);
- irq = find_irq_by_fd(fd, irqnum, &i);
- if (irq == NULL) {
- spin_unlock_irqrestore(&irq_lock, flags);
- return;
+ to_free = get_irq_entry_by_fd(fd);
+ if (to_free != NULL) {
+ do_free_by_irq_and_dev(
+ to_free,
+ irqnum,
+ NULL,
+ IGNORE_DEV
+ );
}
-
- os_set_pollfd(i, -1);
+ garbage_collect_irq_entries();
spin_unlock_irqrestore(&irq_lock, flags);
-
ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);
@@ -265,17 +386,28 @@ EXPORT_SYMBOL(deactivate_fd);
*/
int deactivate_all_fds(void)
{
- struct irq_fd *irq;
- int err;
+ unsigned long flags;
+ struct irq_entry *to_free;
- for (irq = active_fds; irq != NULL; irq = irq->next) {
- err = os_clear_fd_async(irq->fd);
- if (err)
- return err;
- }
- /* If there is a signal already queued, after unblocking ignore it */
+ spin_lock_irqsave(&irq_lock, flags);
+ /* Stop IO. The IRQ loop has no lock so this is our
+ * only way of making sure we are safe to dispose
+ * of all IRQ handlers
+ */
os_set_ioignore();
-
+ to_free = active_fds;
+ while (to_free != NULL) {
+ do_free_by_irq_and_dev(
+ to_free,
+ -1,
+ NULL,
+ IGNORE_IRQ | IGNORE_DEV
+ );
+ to_free = to_free->next;
+ }
+ garbage_collect_irq_entries();
+ spin_unlock_irqrestore(&irq_lock, flags);
+ os_close_epoll_fd();
return 0;
}
@@ -353,8 +485,11 @@ void __init init_IRQ(void)
irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);
+
for (i = 1; i < NR_IRQS; i++)
irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
+ /* Initialize EPOLL Loop */
+ os_setup_epoll();
}
/*
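A quick illustration of the data structure the new irq.c builds around: one irq_entry per fd, with one irq_fd slot per IRQ type, whose event masks get OR-ed together before the fd is (re)armed in epoll. The sketch below is a simplified userspace model, not kernel code; all demo_* names are invented for the example and the constants stand in for the real EPOLL/IRQ defines.

    /* Illustrative sketch only - simplified model of the per-fd irq_entry. */
    #include <stdio.h>

    #define DEMO_MAX_IRQ_TYPE 3              /* stand-in for MAX_IRQ_TYPE */

    struct demo_irq_fd {
    	int irq;
    	int events;
    };

    struct demo_irq_entry {
    	int fd;
    	struct demo_irq_fd *irq_array[DEMO_MAX_IRQ_TYPE];
    };

    /* Combine the event masks of all registered IRQ types for one fd,
     * the same way assign_epoll_events_to_irq() does before re-arming epoll. */
    static int demo_combined_events(const struct demo_irq_entry *e)
    {
    	int i, events = 0;

    	for (i = 0; i < DEMO_MAX_IRQ_TYPE; i++)
    		if (e->irq_array[i])
    			events |= e->irq_array[i]->events;
    	return events;
    }

    int main(void)
    {
    	struct demo_irq_fd rd = { .irq = 5, .events = 1 };  /* e.g. EPOLLIN */
    	struct demo_irq_fd wr = { .irq = 6, .events = 4 };  /* e.g. EPOLLOUT */
    	struct demo_irq_entry e = { .fd = 3, .irq_array = { &rd, &wr, NULL } };

    	printf("fd %d arms epoll with mask 0x%x\n", e.fd, demo_combined_events(&e));
    	return 0;
    }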
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 7f69d17de354..052de4c8acb2 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -121,12 +121,12 @@ static void __init um_timer_setup(void)
clockevents_register_device(&timer_clockevent);
}
-void read_persistent_clock(struct timespec *ts)
+void read_persistent_clock64(struct timespec64 *ts)
{
long long nsecs = os_persistent_clock_emulation();
- set_normalized_timespec(ts, nsecs / NSEC_PER_SEC,
- nsecs % NSEC_PER_SEC);
+ set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC,
+ nsecs % NSEC_PER_SEC);
}
void __init time_init(void)
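For reference, the sec/nsec split that set_normalized_timespec64() performs on the emulated persistent clock is just integer division and remainder; a trivial standalone check (plain C, not kernel code, with an arbitrary sample value):

    #include <stdio.h>

    #define DEMO_NSEC_PER_SEC 1000000000LL

    int main(void)
    {
    	long long nsecs = 1525000123456789LL;   /* hypothetical host clock value */
    	long long sec  = nsecs / DEMO_NSEC_PER_SEC;
    	long long nsec = nsecs % DEMO_NSEC_PER_SEC;

    	printf("tv_sec=%lld tv_nsec=%lld\n", sec, nsec);
    	return 0;
    }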
diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c
index 2db18cbbb0ea..c0197097c86e 100644
--- a/arch/um/os-Linux/file.c
+++ b/arch/um/os-Linux/file.c
@@ -12,6 +12,7 @@
#include <sys/mount.h>
#include <sys/socket.h>
#include <sys/stat.h>
+#include <sys/sysmacros.h>
#include <sys/un.h>
#include <sys/types.h>
#include <os.h>
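The added <sys/sysmacros.h> include is presumably needed because recent glibc no longer declares major()/minor()/makedev() via <sys/types.h>. A minimal userspace example of code that now requires the explicit include:

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/stat.h>
    #include <sys/sysmacros.h>   /* major()/minor() live here on modern glibc */

    int main(void)
    {
    	struct stat st;

    	if (stat("/dev/null", &st) == 0)
    		printf("/dev/null is device %u:%u\n",
    		       major(st.st_rdev), minor(st.st_rdev));
    	return 0;
    }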
diff --git a/arch/um/os-Linux/irq.c b/arch/um/os-Linux/irq.c
index b9afb74b79ad..365823010346 100644
--- a/arch/um/os-Linux/irq.c
+++ b/arch/um/os-Linux/irq.c
@@ -1,135 +1,147 @@
/*
+ * Copyright (C) 2017 - Cambridge Greys Ltd
+ * Copyright (C) 2011 - 2014 Cisco Systems Inc
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <stdlib.h>
#include <errno.h>
-#include <poll.h>
+#include <sys/epoll.h>
#include <signal.h>
#include <string.h>
#include <irq_user.h>
#include <os.h>
#include <um_malloc.h>
+/* Epoll support */
+
+static int epollfd = -1;
+
+#define MAX_EPOLL_EVENTS 64
+
+static struct epoll_event epoll_events[MAX_EPOLL_EVENTS];
+
+/* Helper to return an Epoll data pointer from an epoll event structure.
+ * We need to keep this one on the userspace side to keep includes separate
+ */
+
+void *os_epoll_get_data_pointer(int index)
+{
+ return epoll_events[index].data.ptr;
+}
+
+/* Helper to compare events versus the events in the epoll structure.
+ * Same as above - needs to be on the userspace side
+ */
+
+
+int os_epoll_triggered(int index, int events)
+{
+ return epoll_events[index].events & events;
+}
+/* Helper to set the event mask.
+ * The event mask is opaque to the kernel side, because it does not have
+ * access to the right includes/defines for EPOLL constants.
+ */
+
+int os_event_mask(int irq_type)
+{
+ if (irq_type == IRQ_READ)
+ return EPOLLIN | EPOLLPRI;
+ if (irq_type == IRQ_WRITE)
+ return EPOLLOUT;
+ return 0;
+}
+
/*
- * Locked by irq_lock in arch/um/kernel/irq.c. Changed by os_create_pollfd
- * and os_free_irq_by_cb, which are called under irq_lock.
+ * Initial Epoll Setup
*/
-static struct pollfd *pollfds = NULL;
-static int pollfds_num = 0;
-static int pollfds_size = 0;
+int os_setup_epoll(void)
+{
+ epollfd = epoll_create(MAX_EPOLL_EVENTS);
+ return epollfd;
+}
-int os_waiting_for_events(struct irq_fd *active_fds)
+/*
+ * Helper to run the actual epoll_wait
+ */
+int os_waiting_for_events_epoll(void)
{
- struct irq_fd *irq_fd;
- int i, n, err;
+ int n, err;
- n = poll(pollfds, pollfds_num, 0);
+ n = epoll_wait(epollfd,
+ (struct epoll_event *) &epoll_events, MAX_EPOLL_EVENTS, 0);
if (n < 0) {
err = -errno;
if (errno != EINTR)
- printk(UM_KERN_ERR "os_waiting_for_events:"
- " poll returned %d, errno = %d\n", n, errno);
+ printk(
+ UM_KERN_ERR "os_waiting_for_events:"
+ " epoll returned %d, error = %s\n", n,
+ strerror(errno)
+ );
return err;
}
-
- if (n == 0)
- return 0;
-
- irq_fd = active_fds;
-
- for (i = 0; i < pollfds_num; i++) {
- if (pollfds[i].revents != 0) {
- irq_fd->current_events = pollfds[i].revents;
- pollfds[i].fd = -1;
- }
- irq_fd = irq_fd->next;
- }
return n;
}
-int os_create_pollfd(int fd, int events, void *tmp_pfd, int size_tmpfds)
-{
- if (pollfds_num == pollfds_size) {
- if (size_tmpfds <= pollfds_size * sizeof(pollfds[0])) {
- /* return min size needed for new pollfds area */
- return (pollfds_size + 1) * sizeof(pollfds[0]);
- }
-
- if (pollfds != NULL) {
- memcpy(tmp_pfd, pollfds,
- sizeof(pollfds[0]) * pollfds_size);
- /* remove old pollfds */
- kfree(pollfds);
- }
- pollfds = tmp_pfd;
- pollfds_size++;
- } else
- kfree(tmp_pfd); /* remove not used tmp_pfd */
-
- pollfds[pollfds_num] = ((struct pollfd) { .fd = fd,
- .events = events,
- .revents = 0 });
- pollfds_num++;
-
- return 0;
-}
-void os_free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg,
- struct irq_fd *active_fds, struct irq_fd ***last_irq_ptr2)
+/*
+ * Helper to add a fd to epoll
+ */
+int os_add_epoll_fd(int events, int fd, void *data)
{
- struct irq_fd **prev;
- int i = 0;
-
- prev = &active_fds;
- while (*prev != NULL) {
- if ((*test)(*prev, arg)) {
- struct irq_fd *old_fd = *prev;
- if ((pollfds[i].fd != -1) &&
- (pollfds[i].fd != (*prev)->fd)) {
- printk(UM_KERN_ERR "os_free_irq_by_cb - "
- "mismatch between active_fds and "
- "pollfds, fd %d vs %d\n",
- (*prev)->fd, pollfds[i].fd);
- goto out;
- }
-
- pollfds_num--;
-
- /*
- * This moves the *whole* array after pollfds[i]
- * (though it doesn't spot as such)!
- */
- memmove(&pollfds[i], &pollfds[i + 1],
- (pollfds_num - i) * sizeof(pollfds[0]));
- if (*last_irq_ptr2 == &old_fd->next)
- *last_irq_ptr2 = prev;
-
- *prev = (*prev)->next;
- if (old_fd->type == IRQ_WRITE)
- ignore_sigio_fd(old_fd->fd);
- kfree(old_fd);
- continue;
- }
- prev = &(*prev)->next;
- i++;
- }
- out:
- return;
+ struct epoll_event event;
+ int result;
+
+ event.data.ptr = data;
+ event.events = events | EPOLLET;
+ result = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &event);
+ if ((result) && (errno == EEXIST))
+ result = os_mod_epoll_fd(events, fd, data);
+ if (result)
+ printk("epollctl add err fd %d, %s\n", fd, strerror(errno));
+ return result;
}
-int os_get_pollfd(int i)
+/*
+ * Helper to mod the fd event mask and/or data backreference
+ */
+int os_mod_epoll_fd(int events, int fd, void *data)
{
- return pollfds[i].fd;
+ struct epoll_event event;
+ int result;
+
+ event.data.ptr = data;
+ event.events = events;
+ result = epoll_ctl(epollfd, EPOLL_CTL_MOD, fd, &event);
+ if (result)
+ printk(UM_KERN_ERR
+ "epollctl mod err fd %d, %s\n", fd, strerror(errno));
+ return result;
}
-void os_set_pollfd(int i, int fd)
+/*
+ * Helper to delete the epoll fd
+ */
+int os_del_epoll_fd(int fd)
{
- pollfds[i].fd = fd;
+ struct epoll_event event;
+ int result;
+ /* This is quiet as we use this as IO ON/OFF - so it is often
+ * invoked on a non-existent fd
+ */
+ result = epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, &event);
+ return result;
}
void os_set_ioignore(void)
{
signal(SIGIO, SIG_IGN);
}
+
+void os_close_epoll_fd(void)
+{
+ /* Needed so we do not leak an fd when rebooting */
+ os_close_file(epollfd);
+}
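The helpers above boil down to the standard edge-triggered epoll pattern with a back-reference stored in event.data.ptr and recovered after epoll_wait(). A self-contained userspace sketch of that pattern, using only plain Linux APIs (the pipe fd is just a stand-in for a UML device fd):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/epoll.h>

    struct backref { int fd; const char *name; };

    int main(void)
    {
    	int pfd[2], epfd, n, i;
    	struct epoll_event ev, out[8];
    	struct backref ref;

    	if (pipe(pfd) < 0)
    		return 1;
    	ref.fd = pfd[0];
    	ref.name = "demo-read-end";

    	epfd = epoll_create1(0);
    	ev.events = EPOLLIN | EPOLLET;   /* edge triggered, as in os_add_epoll_fd() */
    	ev.data.ptr = &ref;              /* back-reference instead of a lookup table */
    	epoll_ctl(epfd, EPOLL_CTL_ADD, pfd[0], &ev);

    	write(pfd[1], "x", 1);           /* make the read end ready */

    	n = epoll_wait(epfd, out, 8, 0);
    	for (i = 0; i < n; i++) {
    		struct backref *r = out[i].data.ptr;
    		printf("fd %d (%s) events 0x%x\n", r->fd, r->name, out[i].events);
    	}
    	close(pfd[0]);
    	close(pfd[1]);
    	close(epfd);
    	return 0;
    }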
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index a86d7cc2c2d8..bf0acb8aad8b 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -16,6 +16,7 @@
#include <os.h>
#include <sysdep/mcontext.h>
#include <um_malloc.h>
+#include <sys/ucontext.h>
void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
[SIGTRAP] = relay_signal,
@@ -159,7 +160,7 @@ static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
static void hard_handler(int sig, siginfo_t *si, void *p)
{
- struct ucontext *uc = p;
+ ucontext_t *uc = p;
mcontext_t *mc = &uc->uc_mcontext;
unsigned long pending = 1UL << sig;
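The struct ucontext -> ucontext_t switch matches what POSIX actually defines; modern glibc only provides the ucontext_t typedef, not a struct tag. A minimal userspace example of a SA_SIGINFO handler making the same cast (illustrative only; the mcontext layout itself is arch-specific):

    #include <stdio.h>
    #include <string.h>
    #include <signal.h>
    #include <ucontext.h>

    static volatile sig_atomic_t seen;

    static void handler(int sig, siginfo_t *si, void *p)
    {
    	ucontext_t *uc = p;   /* same cast hard_handler() now uses */
    	(void)si;
    	(void)uc;
    	seen = sig;
    }

    int main(void)
    {
    	struct sigaction sa;

    	memset(&sa, 0, sizeof(sa));
    	sa.sa_sigaction = handler;
    	sa.sa_flags = SA_SIGINFO;
    	sigaction(SIGUSR1, &sa, NULL);
    	raise(SIGUSR1);
    	printf("handler saw signal %d\n", (int)seen);
    	return 0;
    }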
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d234cca296db..00fcf81f2c56 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -29,6 +29,7 @@ config X86_64
select HAVE_ARCH_SOFT_DIRTY
select MODULES_USE_ELF_RELA
select X86_DEV_DMA_OPS
+ select ARCH_HAS_SYSCALL_WRAPPER
#
# Arch settings
@@ -2008,6 +2009,9 @@ config KEXEC_FILE
for kernel and initramfs as opposed to list of segments as
accepted by previous system call.
+config ARCH_HAS_KEXEC_PURGATORY
+ def_bool KEXEC_FILE
+
config KEXEC_VERIFY_SIG
bool "Verify kernel signature during kexec_file_load() syscall"
depends on KEXEC_FILE
@@ -2760,11 +2764,9 @@ config OLPC_XO1_RTC
config OLPC_XO1_SCI
bool "OLPC XO-1 SCI extras"
- depends on OLPC && OLPC_XO1_PM
+ depends on OLPC && OLPC_XO1_PM && GPIO_CS5535=y
depends on INPUT=y
select POWER_SUPPLY
- select GPIO_CS5535
- select MFD_CORE
---help---
Add support for SCI-based features of the OLPC XO-1 laptop:
- EC-driven system wakeups
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index 66e42a098d70..a0a50b91ecef 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -54,6 +54,9 @@ unsigned int ptrs_per_p4d __ro_after_init = 1;
extern unsigned long get_cmd_line_ptr(void);
+/* Used by PAGE_KERN* macros: */
+pteval_t __default_kernel_pte_mask __read_mostly = ~0;
+
/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index be63330c5511..352e70cd33e8 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -114,7 +114,9 @@ For 32-bit we have the following conventions - kernel is built with
pushq %rsi /* pt_regs->si */
.endif
pushq \rdx /* pt_regs->dx */
+ xorl %edx, %edx /* nospec dx */
pushq %rcx /* pt_regs->cx */
+ xorl %ecx, %ecx /* nospec cx */
pushq \rax /* pt_regs->ax */
pushq %r8 /* pt_regs->r8 */
xorl %r8d, %r8d /* nospec r8 */
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 74f6eee15179..fbf6a6c3fd2d 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -266,14 +266,13 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
}
#ifdef CONFIG_X86_64
-__visible void do_syscall_64(struct pt_regs *regs)
+__visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
- struct thread_info *ti = current_thread_info();
- unsigned long nr = regs->orig_ax;
+ struct thread_info *ti;
enter_from_user_mode();
local_irq_enable();
-
+ ti = current_thread_info();
if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
nr = syscall_trace_enter(regs);
@@ -282,11 +281,10 @@ __visible void do_syscall_64(struct pt_regs *regs)
* table. The only functional difference is the x32 bit in
* regs->orig_ax, which changes the behavior of some syscalls.
*/
- if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
- nr = array_index_nospec(nr & __SYSCALL_MASK, NR_syscalls);
- regs->ax = sys_call_table[nr](
- regs->di, regs->si, regs->dx,
- regs->r10, regs->r8, regs->r9);
+ nr &= __SYSCALL_MASK;
+ if (likely(nr < NR_syscalls)) {
+ nr = array_index_nospec(nr, NR_syscalls);
+ regs->ax = sys_call_table[nr](regs);
}
syscall_return_slowpath(regs);
@@ -321,6 +319,9 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
if (likely(nr < IA32_NR_syscalls)) {
nr = array_index_nospec(nr, IA32_NR_syscalls);
+#ifdef CONFIG_IA32_EMULATION
+ regs->ax = ia32_sys_call_table[nr](regs);
+#else
/*
* It's possible that a 32-bit syscall implementation
* takes a 64-bit parameter but nonetheless assumes that
@@ -331,6 +332,7 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
(unsigned int)regs->bx, (unsigned int)regs->cx,
(unsigned int)regs->dx, (unsigned int)regs->si,
(unsigned int)regs->di, (unsigned int)regs->bp);
+#endif /* CONFIG_IA32_EMULATION */
}
syscall_return_slowpath(regs);
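With the syscall-wrapper convention above, each sys_call_table entry takes a single struct pt_regs pointer and unpacks its own arguments, so the dispatcher only has to bounds-check the number (plus array_index_nospec() in the real kernel). A toy userspace model of that calling convention, with invented demo_* names:

    #include <stdio.h>

    struct demo_regs { long di, si, dx, ax, orig_ax; };

    typedef long (*demo_sys_call_ptr_t)(const struct demo_regs *);

    static long demo_sys_write(const struct demo_regs *regs)
    {
    	/* arguments live in the register frame, not in the C parameter list */
    	printf("write(fd=%ld, buf=%ld, count=%ld)\n",
    	       regs->di, regs->si, regs->dx);
    	return regs->dx;
    }

    static long demo_ni_syscall(const struct demo_regs *regs)
    {
    	(void)regs;
    	return -38;   /* -ENOSYS */
    }

    static const demo_sys_call_ptr_t demo_table[] = {
    	[0] = demo_ni_syscall,
    	[1] = demo_sys_write,
    };

    #define DEMO_NR (sizeof(demo_table) / sizeof(demo_table[0]))

    int main(void)
    {
    	struct demo_regs regs = { .di = 1, .si = 4096, .dx = 5, .orig_ax = 1 };
    	unsigned long nr = regs.orig_ax;

    	if (nr < DEMO_NR)
    		regs.ax = demo_table[nr](&regs);
    	printf("ax = %ld\n", regs.ax);
    	return 0;
    }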
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index b0a4649e55ce..3166b9674429 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -233,7 +233,8 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
TRACE_IRQS_OFF
/* IRQs are off. */
- movq %rsp, %rdi
+ movq %rax, %rdi
+ movq %rsp, %rsi
call do_syscall_64 /* returns with IRQs disabled */
TRACE_IRQS_IRETQ /* we're about to change IF */
@@ -913,7 +914,7 @@ ENTRY(\sym)
pushq $-1 /* ORIG_RAX: no syscall to restart */
.endif
- .if \paranoid < 2
+ .if \paranoid == 1
testb $3, CS-ORIG_RAX(%rsp) /* If coming from userspace, switch stacks */
jnz .Lfrom_usermode_switch_stack_\@
.endif
@@ -960,7 +961,7 @@ ENTRY(\sym)
jmp error_exit
.endif
- .if \paranoid < 2
+ .if \paranoid == 1
/*
* Entry from userspace. Switch stacks and treat it
* as a normal entry. This means that paranoid handlers
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 08425c42f8b7..9af927e59d49 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -220,8 +220,11 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
pushq %rax /* pt_regs->orig_ax */
pushq %rdi /* pt_regs->di */
pushq %rsi /* pt_regs->si */
+ xorl %esi, %esi /* nospec si */
pushq %rdx /* pt_regs->dx */
+ xorl %edx, %edx /* nospec dx */
pushq %rbp /* pt_regs->cx (stashed in bp) */
+ xorl %ecx, %ecx /* nospec cx */
pushq $-ENOSYS /* pt_regs->ax */
pushq $0 /* pt_regs->r8 = 0 */
xorl %r8d, %r8d /* nospec r8 */
@@ -365,8 +368,11 @@ ENTRY(entry_INT80_compat)
pushq (%rdi) /* pt_regs->di */
pushq %rsi /* pt_regs->si */
+ xorl %esi, %esi /* nospec si */
pushq %rdx /* pt_regs->dx */
+ xorl %edx, %edx /* nospec dx */
pushq %rcx /* pt_regs->cx */
+ xorl %ecx, %ecx /* nospec cx */
pushq $-ENOSYS /* pt_regs->ax */
pushq $0 /* pt_regs->r8 = 0 */
xorl %r8d, %r8d /* nospec r8 */
diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
index 95c294963612..aa3336a7cb15 100644
--- a/arch/x86/entry/syscall_32.c
+++ b/arch/x86/entry/syscall_32.c
@@ -7,14 +7,23 @@
#include <asm/asm-offsets.h>
#include <asm/syscall.h>
-#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+#ifdef CONFIG_IA32_EMULATION
+/* On X86_64, we use struct pt_regs * to pass parameters to syscalls */
+#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(const struct pt_regs *);
+
+/* this is a lie, but it does not hurt as sys_ni_syscall just returns -EINVAL */
+extern asmlinkage long sys_ni_syscall(const struct pt_regs *);
+
+#else /* CONFIG_IA32_EMULATION */
+#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
+extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
+#endif /* CONFIG_IA32_EMULATION */
+
#include <asm/syscalls_32.h>
#undef __SYSCALL_I386
#define __SYSCALL_I386(nr, sym, qual) [nr] = sym,
-extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
-
__visible const sys_call_ptr_t ia32_sys_call_table[__NR_syscall_compat_max+1] = {
/*
* Smells like a compiler bug -- it doesn't work
diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
index c176d2fab1da..d5252bc1e380 100644
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -7,14 +7,14 @@
#include <asm/asm-offsets.h>
#include <asm/syscall.h>
-#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
+/* this is a lie, but it does not hurt as sys_ni_syscall just returns -EINVAL */
+extern asmlinkage long sys_ni_syscall(const struct pt_regs *);
+#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(const struct pt_regs *);
#include <asm/syscalls_64.h>
#undef __SYSCALL_64
#define __SYSCALL_64(nr, sym, qual) [nr] = sym,
-extern long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
-
asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
/*
* Smells like a compiler bug -- it doesn't work
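syscall_64.c (and syscall_32.c above) rely on expanding the same __SYSCALL_* list twice - once into prototypes, once into designated table initializers. A self-contained sketch of that include-twice macro trick, with invented DEMO_* names standing in for the generated syscalls_64.h list:

    #include <stdio.h>

    struct demo_regs { long ax; };

    /* the "table definition", normally a generated header */
    #define DEMO_SYSCALLS(X)	\
    	X(0, demo_sys_zero)	\
    	X(1, demo_sys_one)

    /* first expansion: prototypes */
    #define DEMO_PROTO(nr, sym) static long sym(const struct demo_regs *regs);
    DEMO_SYSCALLS(DEMO_PROTO)
    #undef DEMO_PROTO

    /* second expansion: [nr] = sym, table entries */
    #define DEMO_ENTRY(nr, sym) [nr] = sym,
    static long (*const demo_table[])(const struct demo_regs *) = {
    	DEMO_SYSCALLS(DEMO_ENTRY)
    };
    #undef DEMO_ENTRY

    static long demo_sys_zero(const struct demo_regs *regs) { return regs->ax; }
    static long demo_sys_one(const struct demo_regs *regs)  { return regs->ax + 1; }

    int main(void)
    {
    	struct demo_regs r = { .ax = 41 };

    	printf("%ld %ld\n", demo_table[0](&r), demo_table[1](&r));
    	return 0;
    }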
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index c58f75b088c5..d6b27dab1b30 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -4,390 +4,395 @@
# The format is:
# <number> <abi> <name> <entry point> <compat entry point>
#
+# The __ia32_sys and __ia32_compat_sys stubs are created on-the-fly for
+# sys_*() system calls and compat_sys_*() compat system calls if
+# IA32_EMULATION is defined, and expect struct pt_regs *regs as their only
+# parameter.
+#
# The abi is always "i386" for this file.
#
-0 i386 restart_syscall sys_restart_syscall
-1 i386 exit sys_exit
-2 i386 fork sys_fork
-3 i386 read sys_read
-4 i386 write sys_write
-5 i386 open sys_open compat_sys_open
-6 i386 close sys_close
-7 i386 waitpid sys_waitpid
-8 i386 creat sys_creat
-9 i386 link sys_link
-10 i386 unlink sys_unlink
-11 i386 execve sys_execve compat_sys_execve
-12 i386 chdir sys_chdir
-13 i386 time sys_time compat_sys_time
-14 i386 mknod sys_mknod
-15 i386 chmod sys_chmod
-16 i386 lchown sys_lchown16
+0 i386 restart_syscall sys_restart_syscall __ia32_sys_restart_syscall
+1 i386 exit sys_exit __ia32_sys_exit
+2 i386 fork sys_fork __ia32_sys_fork
+3 i386 read sys_read __ia32_sys_read
+4 i386 write sys_write __ia32_sys_write
+5 i386 open sys_open __ia32_compat_sys_open
+6 i386 close sys_close __ia32_sys_close
+7 i386 waitpid sys_waitpid __ia32_sys_waitpid
+8 i386 creat sys_creat __ia32_sys_creat
+9 i386 link sys_link __ia32_sys_link
+10 i386 unlink sys_unlink __ia32_sys_unlink
+11 i386 execve sys_execve __ia32_compat_sys_execve
+12 i386 chdir sys_chdir __ia32_sys_chdir
+13 i386 time sys_time __ia32_compat_sys_time
+14 i386 mknod sys_mknod __ia32_sys_mknod
+15 i386 chmod sys_chmod __ia32_sys_chmod
+16 i386 lchown sys_lchown16 __ia32_sys_lchown16
17 i386 break
-18 i386 oldstat sys_stat
-19 i386 lseek sys_lseek compat_sys_lseek
-20 i386 getpid sys_getpid
-21 i386 mount sys_mount compat_sys_mount
-22 i386 umount sys_oldumount
-23 i386 setuid sys_setuid16
-24 i386 getuid sys_getuid16
-25 i386 stime sys_stime compat_sys_stime
-26 i386 ptrace sys_ptrace compat_sys_ptrace
-27 i386 alarm sys_alarm
-28 i386 oldfstat sys_fstat
-29 i386 pause sys_pause
-30 i386 utime sys_utime compat_sys_utime
+18 i386 oldstat sys_stat __ia32_sys_stat
+19 i386 lseek sys_lseek __ia32_compat_sys_lseek
+20 i386 getpid sys_getpid __ia32_sys_getpid
+21 i386 mount sys_mount __ia32_compat_sys_mount
+22 i386 umount sys_oldumount __ia32_sys_oldumount
+23 i386 setuid sys_setuid16 __ia32_sys_setuid16
+24 i386 getuid sys_getuid16 __ia32_sys_getuid16
+25 i386 stime sys_stime __ia32_compat_sys_stime
+26 i386 ptrace sys_ptrace __ia32_compat_sys_ptrace
+27 i386 alarm sys_alarm __ia32_sys_alarm
+28 i386 oldfstat sys_fstat __ia32_sys_fstat
+29 i386 pause sys_pause __ia32_sys_pause
+30 i386 utime sys_utime __ia32_compat_sys_utime
31 i386 stty
32 i386 gtty
-33 i386 access sys_access
-34 i386 nice sys_nice
+33 i386 access sys_access __ia32_sys_access
+34 i386 nice sys_nice __ia32_sys_nice
35 i386 ftime
-36 i386 sync sys_sync
-37 i386 kill sys_kill
-38 i386 rename sys_rename
-39 i386 mkdir sys_mkdir
-40 i386 rmdir sys_rmdir
-41 i386 dup sys_dup
-42 i386 pipe sys_pipe
-43 i386 times sys_times compat_sys_times
+36 i386 sync sys_sync __ia32_sys_sync
+37 i386 kill sys_kill __ia32_sys_kill
+38 i386 rename sys_rename __ia32_sys_rename
+39 i386 mkdir sys_mkdir __ia32_sys_mkdir
+40 i386 rmdir sys_rmdir __ia32_sys_rmdir
+41 i386 dup sys_dup __ia32_sys_dup
+42 i386 pipe sys_pipe __ia32_sys_pipe
+43 i386 times sys_times __ia32_compat_sys_times
44 i386 prof
-45 i386 brk sys_brk
-46 i386 setgid sys_setgid16
-47 i386 getgid sys_getgid16
-48 i386 signal sys_signal
-49 i386 geteuid sys_geteuid16
-50 i386 getegid sys_getegid16
-51 i386 acct sys_acct
-52 i386 umount2 sys_umount
+45 i386 brk sys_brk __ia32_sys_brk
+46 i386 setgid sys_setgid16 __ia32_sys_setgid16
+47 i386 getgid sys_getgid16 __ia32_sys_getgid16
+48 i386 signal sys_signal __ia32_sys_signal
+49 i386 geteuid sys_geteuid16 __ia32_sys_geteuid16
+50 i386 getegid sys_getegid16 __ia32_sys_getegid16
+51 i386 acct sys_acct __ia32_sys_acct
+52 i386 umount2 sys_umount __ia32_sys_umount
53 i386 lock
-54 i386 ioctl sys_ioctl compat_sys_ioctl
-55 i386 fcntl sys_fcntl compat_sys_fcntl64
+54 i386 ioctl sys_ioctl __ia32_compat_sys_ioctl
+55 i386 fcntl sys_fcntl __ia32_compat_sys_fcntl64
56 i386 mpx
-57 i386 setpgid sys_setpgid
+57 i386 setpgid sys_setpgid __ia32_sys_setpgid
58 i386 ulimit
-59 i386 oldolduname sys_olduname
-60 i386 umask sys_umask
-61 i386 chroot sys_chroot
-62 i386 ustat sys_ustat compat_sys_ustat
-63 i386 dup2 sys_dup2
-64 i386 getppid sys_getppid
-65 i386 getpgrp sys_getpgrp
-66 i386 setsid sys_setsid
-67 i386 sigaction sys_sigaction compat_sys_sigaction
-68 i386 sgetmask sys_sgetmask
-69 i386 ssetmask sys_ssetmask
-70 i386 setreuid sys_setreuid16
-71 i386 setregid sys_setregid16
-72 i386 sigsuspend sys_sigsuspend
-73 i386 sigpending sys_sigpending compat_sys_sigpending
-74 i386 sethostname sys_sethostname
-75 i386 setrlimit sys_setrlimit compat_sys_setrlimit
-76 i386 getrlimit sys_old_getrlimit compat_sys_old_getrlimit
-77 i386 getrusage sys_getrusage compat_sys_getrusage
-78 i386 gettimeofday sys_gettimeofday compat_sys_gettimeofday
-79 i386 settimeofday sys_settimeofday compat_sys_settimeofday
-80 i386 getgroups sys_getgroups16
-81 i386 setgroups sys_setgroups16
-82 i386 select sys_old_select compat_sys_old_select
-83 i386 symlink sys_symlink
-84 i386 oldlstat sys_lstat
-85 i386 readlink sys_readlink
-86 i386 uselib sys_uselib
-87 i386 swapon sys_swapon
-88 i386 reboot sys_reboot
-89 i386 readdir sys_old_readdir compat_sys_old_readdir
-90 i386 mmap sys_old_mmap compat_sys_x86_mmap
-91 i386 munmap sys_munmap
-92 i386 truncate sys_truncate compat_sys_truncate
-93 i386 ftruncate sys_ftruncate compat_sys_ftruncate
-94 i386 fchmod sys_fchmod
-95 i386 fchown sys_fchown16
-96 i386 getpriority sys_getpriority
-97 i386 setpriority sys_setpriority
+59 i386 oldolduname sys_olduname __ia32_sys_olduname
+60 i386 umask sys_umask __ia32_sys_umask
+61 i386 chroot sys_chroot __ia32_sys_chroot
+62 i386 ustat sys_ustat __ia32_compat_sys_ustat
+63 i386 dup2 sys_dup2 __ia32_sys_dup2
+64 i386 getppid sys_getppid __ia32_sys_getppid
+65 i386 getpgrp sys_getpgrp __ia32_sys_getpgrp
+66 i386 setsid sys_setsid __ia32_sys_setsid
+67 i386 sigaction sys_sigaction __ia32_compat_sys_sigaction
+68 i386 sgetmask sys_sgetmask __ia32_sys_sgetmask
+69 i386 ssetmask sys_ssetmask __ia32_sys_ssetmask
+70 i386 setreuid sys_setreuid16 __ia32_sys_setreuid16
+71 i386 setregid sys_setregid16 __ia32_sys_setregid16
+72 i386 sigsuspend sys_sigsuspend __ia32_sys_sigsuspend
+73 i386 sigpending sys_sigpending __ia32_compat_sys_sigpending
+74 i386 sethostname sys_sethostname __ia32_sys_sethostname
+75 i386 setrlimit sys_setrlimit __ia32_compat_sys_setrlimit
+76 i386 getrlimit sys_old_getrlimit __ia32_compat_sys_old_getrlimit
+77 i386 getrusage sys_getrusage __ia32_compat_sys_getrusage
+78 i386 gettimeofday sys_gettimeofday __ia32_compat_sys_gettimeofday
+79 i386 settimeofday sys_settimeofday __ia32_compat_sys_settimeofday
+80 i386 getgroups sys_getgroups16 __ia32_sys_getgroups16
+81 i386 setgroups sys_setgroups16 __ia32_sys_setgroups16
+82 i386 select sys_old_select __ia32_compat_sys_old_select
+83 i386 symlink sys_symlink __ia32_sys_symlink
+84 i386 oldlstat sys_lstat __ia32_sys_lstat
+85 i386 readlink sys_readlink __ia32_sys_readlink
+86 i386 uselib sys_uselib __ia32_sys_uselib
+87 i386 swapon sys_swapon __ia32_sys_swapon
+88 i386 reboot sys_reboot __ia32_sys_reboot
+89 i386 readdir sys_old_readdir __ia32_compat_sys_old_readdir
+90 i386 mmap sys_old_mmap __ia32_compat_sys_x86_mmap
+91 i386 munmap sys_munmap __ia32_sys_munmap
+92 i386 truncate sys_truncate __ia32_compat_sys_truncate
+93 i386 ftruncate sys_ftruncate __ia32_compat_sys_ftruncate
+94 i386 fchmod sys_fchmod __ia32_sys_fchmod
+95 i386 fchown sys_fchown16 __ia32_sys_fchown16
+96 i386 getpriority sys_getpriority __ia32_sys_getpriority
+97 i386 setpriority sys_setpriority __ia32_sys_setpriority
98 i386 profil
-99 i386 statfs sys_statfs compat_sys_statfs
-100 i386 fstatfs sys_fstatfs compat_sys_fstatfs
-101 i386 ioperm sys_ioperm
-102 i386 socketcall sys_socketcall compat_sys_socketcall
-103 i386 syslog sys_syslog
-104 i386 setitimer sys_setitimer compat_sys_setitimer
-105 i386 getitimer sys_getitimer compat_sys_getitimer
-106 i386 stat sys_newstat compat_sys_newstat
-107 i386 lstat sys_newlstat compat_sys_newlstat
-108 i386 fstat sys_newfstat compat_sys_newfstat
-109 i386 olduname sys_uname
-110 i386 iopl sys_iopl
-111 i386 vhangup sys_vhangup
+99 i386 statfs sys_statfs __ia32_compat_sys_statfs
+100 i386 fstatfs sys_fstatfs __ia32_compat_sys_fstatfs
+101 i386 ioperm sys_ioperm __ia32_sys_ioperm
+102 i386 socketcall sys_socketcall __ia32_compat_sys_socketcall
+103 i386 syslog sys_syslog __ia32_sys_syslog
+104 i386 setitimer sys_setitimer __ia32_compat_sys_setitimer
+105 i386 getitimer sys_getitimer __ia32_compat_sys_getitimer
+106 i386 stat sys_newstat __ia32_compat_sys_newstat
+107 i386 lstat sys_newlstat __ia32_compat_sys_newlstat
+108 i386 fstat sys_newfstat __ia32_compat_sys_newfstat
+109 i386 olduname sys_uname __ia32_sys_uname
+110 i386 iopl sys_iopl __ia32_sys_iopl
+111 i386 vhangup sys_vhangup __ia32_sys_vhangup
112 i386 idle
113 i386 vm86old sys_vm86old sys_ni_syscall
-114 i386 wait4 sys_wait4 compat_sys_wait4
-115 i386 swapoff sys_swapoff
-116 i386 sysinfo sys_sysinfo compat_sys_sysinfo
-117 i386 ipc sys_ipc compat_sys_ipc
-118 i386 fsync sys_fsync
+114 i386 wait4 sys_wait4 __ia32_compat_sys_wait4
+115 i386 swapoff sys_swapoff __ia32_sys_swapoff
+116 i386 sysinfo sys_sysinfo __ia32_compat_sys_sysinfo
+117 i386 ipc sys_ipc __ia32_compat_sys_ipc
+118 i386 fsync sys_fsync __ia32_sys_fsync
119 i386 sigreturn sys_sigreturn sys32_sigreturn
-120 i386 clone sys_clone compat_sys_x86_clone
-121 i386 setdomainname sys_setdomainname
-122 i386 uname sys_newuname
-123 i386 modify_ldt sys_modify_ldt
-124 i386 adjtimex sys_adjtimex compat_sys_adjtimex
-125 i386 mprotect sys_mprotect
-126 i386 sigprocmask sys_sigprocmask compat_sys_sigprocmask
+120 i386 clone sys_clone __ia32_compat_sys_x86_clone
+121 i386 setdomainname sys_setdomainname __ia32_sys_setdomainname
+122 i386 uname sys_newuname __ia32_sys_newuname
+123 i386 modify_ldt sys_modify_ldt __ia32_sys_modify_ldt
+124 i386 adjtimex sys_adjtimex __ia32_compat_sys_adjtimex
+125 i386 mprotect sys_mprotect __ia32_sys_mprotect
+126 i386 sigprocmask sys_sigprocmask __ia32_compat_sys_sigprocmask
127 i386 create_module
-128 i386 init_module sys_init_module
-129 i386 delete_module sys_delete_module
+128 i386 init_module sys_init_module __ia32_sys_init_module
+129 i386 delete_module sys_delete_module __ia32_sys_delete_module
130 i386 get_kernel_syms
-131 i386 quotactl sys_quotactl compat_sys_quotactl32
-132 i386 getpgid sys_getpgid
-133 i386 fchdir sys_fchdir
-134 i386 bdflush sys_bdflush
-135 i386 sysfs sys_sysfs
-136 i386 personality sys_personality
+131 i386 quotactl sys_quotactl __ia32_compat_sys_quotactl32
+132 i386 getpgid sys_getpgid __ia32_sys_getpgid
+133 i386 fchdir sys_fchdir __ia32_sys_fchdir
+134 i386 bdflush sys_bdflush __ia32_sys_bdflush
+135 i386 sysfs sys_sysfs __ia32_sys_sysfs
+136 i386 personality sys_personality __ia32_sys_personality
137 i386 afs_syscall
-138 i386 setfsuid sys_setfsuid16
-139 i386 setfsgid sys_setfsgid16
-140 i386 _llseek sys_llseek
-141 i386 getdents sys_getdents compat_sys_getdents
-142 i386 _newselect sys_select compat_sys_select
-143 i386 flock sys_flock
-144 i386 msync sys_msync
-145 i386 readv sys_readv compat_sys_readv
-146 i386 writev sys_writev compat_sys_writev
-147 i386 getsid sys_getsid
-148 i386 fdatasync sys_fdatasync
-149 i386 _sysctl sys_sysctl compat_sys_sysctl
-150 i386 mlock sys_mlock
-151 i386 munlock sys_munlock
-152 i386 mlockall sys_mlockall
-153 i386 munlockall sys_munlockall
-154 i386 sched_setparam sys_sched_setparam
-155 i386 sched_getparam sys_sched_getparam
-156 i386 sched_setscheduler sys_sched_setscheduler
-157 i386 sched_getscheduler sys_sched_getscheduler
-158 i386 sched_yield sys_sched_yield
-159 i386 sched_get_priority_max sys_sched_get_priority_max
-160 i386 sched_get_priority_min sys_sched_get_priority_min
-161 i386 sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval
-162 i386 nanosleep sys_nanosleep compat_sys_nanosleep
-163 i386 mremap sys_mremap
-164 i386 setresuid sys_setresuid16
-165 i386 getresuid sys_getresuid16
+138 i386 setfsuid sys_setfsuid16 __ia32_sys_setfsuid16
+139 i386 setfsgid sys_setfsgid16 __ia32_sys_setfsgid16
+140 i386 _llseek sys_llseek __ia32_sys_llseek
+141 i386 getdents sys_getdents __ia32_compat_sys_getdents
+142 i386 _newselect sys_select __ia32_compat_sys_select
+143 i386 flock sys_flock __ia32_sys_flock
+144 i386 msync sys_msync __ia32_sys_msync
+145 i386 readv sys_readv __ia32_compat_sys_readv
+146 i386 writev sys_writev __ia32_compat_sys_writev
+147 i386 getsid sys_getsid __ia32_sys_getsid
+148 i386 fdatasync sys_fdatasync __ia32_sys_fdatasync
+149 i386 _sysctl sys_sysctl __ia32_compat_sys_sysctl
+150 i386 mlock sys_mlock __ia32_sys_mlock
+151 i386 munlock sys_munlock __ia32_sys_munlock
+152 i386 mlockall sys_mlockall __ia32_sys_mlockall
+153 i386 munlockall sys_munlockall __ia32_sys_munlockall
+154 i386 sched_setparam sys_sched_setparam __ia32_sys_sched_setparam
+155 i386 sched_getparam sys_sched_getparam __ia32_sys_sched_getparam
+156 i386 sched_setscheduler sys_sched_setscheduler __ia32_sys_sched_setscheduler
+157 i386 sched_getscheduler sys_sched_getscheduler __ia32_sys_sched_getscheduler
+158 i386 sched_yield sys_sched_yield __ia32_sys_sched_yield
+159 i386 sched_get_priority_max sys_sched_get_priority_max __ia32_sys_sched_get_priority_max
+160 i386 sched_get_priority_min sys_sched_get_priority_min __ia32_sys_sched_get_priority_min
+161 i386 sched_rr_get_interval sys_sched_rr_get_interval __ia32_compat_sys_sched_rr_get_interval
+162 i386 nanosleep sys_nanosleep __ia32_compat_sys_nanosleep
+163 i386 mremap sys_mremap __ia32_sys_mremap
+164 i386 setresuid sys_setresuid16 __ia32_sys_setresuid16
+165 i386 getresuid sys_getresuid16 __ia32_sys_getresuid16
166 i386 vm86 sys_vm86 sys_ni_syscall
167 i386 query_module
-168 i386 poll sys_poll
+168 i386 poll sys_poll __ia32_sys_poll
169 i386 nfsservctl
-170 i386 setresgid sys_setresgid16
-171 i386 getresgid sys_getresgid16
-172 i386 prctl sys_prctl
+170 i386 setresgid sys_setresgid16 __ia32_sys_setresgid16
+171 i386 getresgid sys_getresgid16 __ia32_sys_getresgid16
+172 i386 prctl sys_prctl __ia32_sys_prctl
173 i386 rt_sigreturn sys_rt_sigreturn sys32_rt_sigreturn
-174 i386 rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
-175 i386 rt_sigprocmask sys_rt_sigprocmask
-176 i386 rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
-177 i386 rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
-178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
-179 i386 rt_sigsuspend sys_rt_sigsuspend
-180 i386 pread64 sys_pread64 compat_sys_x86_pread
-181 i386 pwrite64 sys_pwrite64 compat_sys_x86_pwrite
-182 i386 chown sys_chown16
-183 i386 getcwd sys_getcwd
-184 i386 capget sys_capget
-185 i386 capset sys_capset
-186 i386 sigaltstack sys_sigaltstack compat_sys_sigaltstack
-187 i386 sendfile sys_sendfile compat_sys_sendfile
+174 i386 rt_sigaction sys_rt_sigaction __ia32_compat_sys_rt_sigaction
+175 i386 rt_sigprocmask sys_rt_sigprocmask __ia32_sys_rt_sigprocmask
+176 i386 rt_sigpending sys_rt_sigpending __ia32_compat_sys_rt_sigpending
+177 i386 rt_sigtimedwait sys_rt_sigtimedwait __ia32_compat_sys_rt_sigtimedwait
+178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo __ia32_compat_sys_rt_sigqueueinfo
+179 i386 rt_sigsuspend sys_rt_sigsuspend __ia32_sys_rt_sigsuspend
+180 i386 pread64 sys_pread64 __ia32_compat_sys_x86_pread
+181 i386 pwrite64 sys_pwrite64 __ia32_compat_sys_x86_pwrite
+182 i386 chown sys_chown16 __ia32_sys_chown16
+183 i386 getcwd sys_getcwd __ia32_sys_getcwd
+184 i386 capget sys_capget __ia32_sys_capget
+185 i386 capset sys_capset __ia32_sys_capset
+186 i386 sigaltstack sys_sigaltstack __ia32_compat_sys_sigaltstack
+187 i386 sendfile sys_sendfile __ia32_compat_sys_sendfile
188 i386 getpmsg
189 i386 putpmsg
-190 i386 vfork sys_vfork
-191 i386 ugetrlimit sys_getrlimit compat_sys_getrlimit
-192 i386 mmap2 sys_mmap_pgoff
-193 i386 truncate64 sys_truncate64 compat_sys_x86_truncate64
-194 i386 ftruncate64 sys_ftruncate64 compat_sys_x86_ftruncate64
-195 i386 stat64 sys_stat64 compat_sys_x86_stat64
-196 i386 lstat64 sys_lstat64 compat_sys_x86_lstat64
-197 i386 fstat64 sys_fstat64 compat_sys_x86_fstat64
-198 i386 lchown32 sys_lchown
-199 i386 getuid32 sys_getuid
-200 i386 getgid32 sys_getgid
-201 i386 geteuid32 sys_geteuid
-202 i386 getegid32 sys_getegid
-203 i386 setreuid32 sys_setreuid
-204 i386 setregid32 sys_setregid
-205 i386 getgroups32 sys_getgroups
-206 i386 setgroups32 sys_setgroups
-207 i386 fchown32 sys_fchown
-208 i386 setresuid32 sys_setresuid
-209 i386 getresuid32 sys_getresuid
-210 i386 setresgid32 sys_setresgid
-211 i386 getresgid32 sys_getresgid
-212 i386 chown32 sys_chown
-213 i386 setuid32 sys_setuid
-214 i386 setgid32 sys_setgid
-215 i386 setfsuid32 sys_setfsuid
-216 i386 setfsgid32 sys_setfsgid
-217 i386 pivot_root sys_pivot_root
-218 i386 mincore sys_mincore
-219 i386 madvise sys_madvise
-220 i386 getdents64 sys_getdents64
-221 i386 fcntl64 sys_fcntl64 compat_sys_fcntl64
+190 i386 vfork sys_vfork __ia32_sys_vfork
+191 i386 ugetrlimit sys_getrlimit __ia32_compat_sys_getrlimit
+192 i386 mmap2 sys_mmap_pgoff __ia32_sys_mmap_pgoff
+193 i386 truncate64 sys_truncate64 __ia32_compat_sys_x86_truncate64
+194 i386 ftruncate64 sys_ftruncate64 __ia32_compat_sys_x86_ftruncate64
+195 i386 stat64 sys_stat64 __ia32_compat_sys_x86_stat64
+196 i386 lstat64 sys_lstat64 __ia32_compat_sys_x86_lstat64
+197 i386 fstat64 sys_fstat64 __ia32_compat_sys_x86_fstat64
+198 i386 lchown32 sys_lchown __ia32_sys_lchown
+199 i386 getuid32 sys_getuid __ia32_sys_getuid
+200 i386 getgid32 sys_getgid __ia32_sys_getgid
+201 i386 geteuid32 sys_geteuid __ia32_sys_geteuid
+202 i386 getegid32 sys_getegid __ia32_sys_getegid
+203 i386 setreuid32 sys_setreuid __ia32_sys_setreuid
+204 i386 setregid32 sys_setregid __ia32_sys_setregid
+205 i386 getgroups32 sys_getgroups __ia32_sys_getgroups
+206 i386 setgroups32 sys_setgroups __ia32_sys_setgroups
+207 i386 fchown32 sys_fchown __ia32_sys_fchown
+208 i386 setresuid32 sys_setresuid __ia32_sys_setresuid
+209 i386 getresuid32 sys_getresuid __ia32_sys_getresuid
+210 i386 setresgid32 sys_setresgid __ia32_sys_setresgid
+211 i386 getresgid32 sys_getresgid __ia32_sys_getresgid
+212 i386 chown32 sys_chown __ia32_sys_chown
+213 i386 setuid32 sys_setuid __ia32_sys_setuid
+214 i386 setgid32 sys_setgid __ia32_sys_setgid
+215 i386 setfsuid32 sys_setfsuid __ia32_sys_setfsuid
+216 i386 setfsgid32 sys_setfsgid __ia32_sys_setfsgid
+217 i386 pivot_root sys_pivot_root __ia32_sys_pivot_root
+218 i386 mincore sys_mincore __ia32_sys_mincore
+219 i386 madvise sys_madvise __ia32_sys_madvise
+220 i386 getdents64 sys_getdents64 __ia32_sys_getdents64
+221 i386 fcntl64 sys_fcntl64 __ia32_compat_sys_fcntl64
# 222 is unused
# 223 is unused
-224 i386 gettid sys_gettid
-225 i386 readahead sys_readahead compat_sys_x86_readahead
-226 i386 setxattr sys_setxattr
-227 i386 lsetxattr sys_lsetxattr
-228 i386 fsetxattr sys_fsetxattr
-229 i386 getxattr sys_getxattr
-230 i386 lgetxattr sys_lgetxattr
-231 i386 fgetxattr sys_fgetxattr
-232 i386 listxattr sys_listxattr
-233 i386 llistxattr sys_llistxattr
-234 i386 flistxattr sys_flistxattr
-235 i386 removexattr sys_removexattr
-236 i386 lremovexattr sys_lremovexattr
-237 i386 fremovexattr sys_fremovexattr
-238 i386 tkill sys_tkill
-239 i386 sendfile64 sys_sendfile64
-240 i386 futex sys_futex compat_sys_futex
-241 i386 sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
-242 i386 sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
-243 i386 set_thread_area sys_set_thread_area
-244 i386 get_thread_area sys_get_thread_area
-245 i386 io_setup sys_io_setup compat_sys_io_setup
-246 i386 io_destroy sys_io_destroy
-247 i386 io_getevents sys_io_getevents compat_sys_io_getevents
-248 i386 io_submit sys_io_submit compat_sys_io_submit
-249 i386 io_cancel sys_io_cancel
-250 i386 fadvise64 sys_fadvise64 compat_sys_x86_fadvise64
+224 i386 gettid sys_gettid __ia32_sys_gettid
+225 i386 readahead sys_readahead __ia32_compat_sys_x86_readahead
+226 i386 setxattr sys_setxattr __ia32_sys_setxattr
+227 i386 lsetxattr sys_lsetxattr __ia32_sys_lsetxattr
+228 i386 fsetxattr sys_fsetxattr __ia32_sys_fsetxattr
+229 i386 getxattr sys_getxattr __ia32_sys_getxattr
+230 i386 lgetxattr sys_lgetxattr __ia32_sys_lgetxattr
+231 i386 fgetxattr sys_fgetxattr __ia32_sys_fgetxattr
+232 i386 listxattr sys_listxattr __ia32_sys_listxattr
+233 i386 llistxattr sys_llistxattr __ia32_sys_llistxattr
+234 i386 flistxattr sys_flistxattr __ia32_sys_flistxattr
+235 i386 removexattr sys_removexattr __ia32_sys_removexattr
+236 i386 lremovexattr sys_lremovexattr __ia32_sys_lremovexattr
+237 i386 fremovexattr sys_fremovexattr __ia32_sys_fremovexattr
+238 i386 tkill sys_tkill __ia32_sys_tkill
+239 i386 sendfile64 sys_sendfile64 __ia32_sys_sendfile64
+240 i386 futex sys_futex __ia32_compat_sys_futex
+241 i386 sched_setaffinity sys_sched_setaffinity __ia32_compat_sys_sched_setaffinity
+242 i386 sched_getaffinity sys_sched_getaffinity __ia32_compat_sys_sched_getaffinity
+243 i386 set_thread_area sys_set_thread_area __ia32_sys_set_thread_area
+244 i386 get_thread_area sys_get_thread_area __ia32_sys_get_thread_area
+245 i386 io_setup sys_io_setup __ia32_compat_sys_io_setup
+246 i386 io_destroy sys_io_destroy __ia32_sys_io_destroy
+247 i386 io_getevents sys_io_getevents __ia32_compat_sys_io_getevents
+248 i386 io_submit sys_io_submit __ia32_compat_sys_io_submit
+249 i386 io_cancel sys_io_cancel __ia32_sys_io_cancel
+250 i386 fadvise64 sys_fadvise64 __ia32_compat_sys_x86_fadvise64
# 251 is available for reuse (was briefly sys_set_zone_reclaim)
-252 i386 exit_group sys_exit_group
-253 i386 lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
-254 i386 epoll_create sys_epoll_create
-255 i386 epoll_ctl sys_epoll_ctl
-256 i386 epoll_wait sys_epoll_wait
-257 i386 remap_file_pages sys_remap_file_pages
-258 i386 set_tid_address sys_set_tid_address
-259 i386 timer_create sys_timer_create compat_sys_timer_create
-260 i386 timer_settime sys_timer_settime compat_sys_timer_settime
-261 i386 timer_gettime sys_timer_gettime compat_sys_timer_gettime
-262 i386 timer_getoverrun sys_timer_getoverrun
-263 i386 timer_delete sys_timer_delete
-264 i386 clock_settime sys_clock_settime compat_sys_clock_settime
-265 i386 clock_gettime sys_clock_gettime compat_sys_clock_gettime
-266 i386 clock_getres sys_clock_getres compat_sys_clock_getres
-267 i386 clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep
-268 i386 statfs64 sys_statfs64 compat_sys_statfs64
-269 i386 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
-270 i386 tgkill sys_tgkill
-271 i386 utimes sys_utimes compat_sys_utimes
-272 i386 fadvise64_64 sys_fadvise64_64 compat_sys_x86_fadvise64_64
+252 i386 exit_group sys_exit_group __ia32_sys_exit_group
+253 i386 lookup_dcookie sys_lookup_dcookie __ia32_compat_sys_lookup_dcookie
+254 i386 epoll_create sys_epoll_create __ia32_sys_epoll_create
+255 i386 epoll_ctl sys_epoll_ctl __ia32_sys_epoll_ctl
+256 i386 epoll_wait sys_epoll_wait __ia32_sys_epoll_wait
+257 i386 remap_file_pages sys_remap_file_pages __ia32_sys_remap_file_pages
+258 i386 set_tid_address sys_set_tid_address __ia32_sys_set_tid_address
+259 i386 timer_create sys_timer_create __ia32_compat_sys_timer_create
+260 i386 timer_settime sys_timer_settime __ia32_compat_sys_timer_settime
+261 i386 timer_gettime sys_timer_gettime __ia32_compat_sys_timer_gettime
+262 i386 timer_getoverrun sys_timer_getoverrun __ia32_sys_timer_getoverrun
+263 i386 timer_delete sys_timer_delete __ia32_sys_timer_delete
+264 i386 clock_settime sys_clock_settime __ia32_compat_sys_clock_settime
+265 i386 clock_gettime sys_clock_gettime __ia32_compat_sys_clock_gettime
+266 i386 clock_getres sys_clock_getres __ia32_compat_sys_clock_getres
+267 i386 clock_nanosleep sys_clock_nanosleep __ia32_compat_sys_clock_nanosleep
+268 i386 statfs64 sys_statfs64 __ia32_compat_sys_statfs64
+269 i386 fstatfs64 sys_fstatfs64 __ia32_compat_sys_fstatfs64
+270 i386 tgkill sys_tgkill __ia32_sys_tgkill
+271 i386 utimes sys_utimes __ia32_compat_sys_utimes
+272 i386 fadvise64_64 sys_fadvise64_64 __ia32_compat_sys_x86_fadvise64_64
273 i386 vserver
-274 i386 mbind sys_mbind
-275 i386 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
-276 i386 set_mempolicy sys_set_mempolicy
-277 i386 mq_open sys_mq_open compat_sys_mq_open
-278 i386 mq_unlink sys_mq_unlink
-279 i386 mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend
-280 i386 mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive
-281 i386 mq_notify sys_mq_notify compat_sys_mq_notify
-282 i386 mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
-283 i386 kexec_load sys_kexec_load compat_sys_kexec_load
-284 i386 waitid sys_waitid compat_sys_waitid
+274 i386 mbind sys_mbind __ia32_sys_mbind
+275 i386 get_mempolicy sys_get_mempolicy __ia32_compat_sys_get_mempolicy
+276 i386 set_mempolicy sys_set_mempolicy __ia32_sys_set_mempolicy
+277 i386 mq_open sys_mq_open __ia32_compat_sys_mq_open
+278 i386 mq_unlink sys_mq_unlink __ia32_sys_mq_unlink
+279 i386 mq_timedsend sys_mq_timedsend __ia32_compat_sys_mq_timedsend
+280 i386 mq_timedreceive sys_mq_timedreceive __ia32_compat_sys_mq_timedreceive
+281 i386 mq_notify sys_mq_notify __ia32_compat_sys_mq_notify
+282 i386 mq_getsetattr sys_mq_getsetattr __ia32_compat_sys_mq_getsetattr
+283 i386 kexec_load sys_kexec_load __ia32_compat_sys_kexec_load
+284 i386 waitid sys_waitid __ia32_compat_sys_waitid
# 285 sys_setaltroot
-286 i386 add_key sys_add_key
-287 i386 request_key sys_request_key
-288 i386 keyctl sys_keyctl compat_sys_keyctl
-289 i386 ioprio_set sys_ioprio_set
-290 i386 ioprio_get sys_ioprio_get
-291 i386 inotify_init sys_inotify_init
-292 i386 inotify_add_watch sys_inotify_add_watch
-293 i386 inotify_rm_watch sys_inotify_rm_watch
-294 i386 migrate_pages sys_migrate_pages
-295 i386 openat sys_openat compat_sys_openat
-296 i386 mkdirat sys_mkdirat
-297 i386 mknodat sys_mknodat
-298 i386 fchownat sys_fchownat
-299 i386 futimesat sys_futimesat compat_sys_futimesat
-300 i386 fstatat64 sys_fstatat64 compat_sys_x86_fstatat
-301 i386 unlinkat sys_unlinkat
-302 i386 renameat sys_renameat
-303 i386 linkat sys_linkat
-304 i386 symlinkat sys_symlinkat
-305 i386 readlinkat sys_readlinkat
-306 i386 fchmodat sys_fchmodat
-307 i386 faccessat sys_faccessat
-308 i386 pselect6 sys_pselect6 compat_sys_pselect6
-309 i386 ppoll sys_ppoll compat_sys_ppoll
-310 i386 unshare sys_unshare
-311 i386 set_robust_list sys_set_robust_list compat_sys_set_robust_list
-312 i386 get_robust_list sys_get_robust_list compat_sys_get_robust_list
-313 i386 splice sys_splice
-314 i386 sync_file_range sys_sync_file_range compat_sys_x86_sync_file_range
-315 i386 tee sys_tee
-316 i386 vmsplice sys_vmsplice compat_sys_vmsplice
-317 i386 move_pages sys_move_pages compat_sys_move_pages
-318 i386 getcpu sys_getcpu
-319 i386 epoll_pwait sys_epoll_pwait
-320 i386 utimensat sys_utimensat compat_sys_utimensat
-321 i386 signalfd sys_signalfd compat_sys_signalfd
-322 i386 timerfd_create sys_timerfd_create
-323 i386 eventfd sys_eventfd
-324 i386 fallocate sys_fallocate compat_sys_x86_fallocate
-325 i386 timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime
-326 i386 timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime
-327 i386 signalfd4 sys_signalfd4 compat_sys_signalfd4
-328 i386 eventfd2 sys_eventfd2
-329 i386 epoll_create1 sys_epoll_create1
-330 i386 dup3 sys_dup3
-331 i386 pipe2 sys_pipe2
-332 i386 inotify_init1 sys_inotify_init1
-333 i386 preadv sys_preadv compat_sys_preadv
-334 i386 pwritev sys_pwritev compat_sys_pwritev
-335 i386 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
-336 i386 perf_event_open sys_perf_event_open
-337 i386 recvmmsg sys_recvmmsg compat_sys_recvmmsg
-338 i386 fanotify_init sys_fanotify_init
-339 i386 fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
-340 i386 prlimit64 sys_prlimit64
-341 i386 name_to_handle_at sys_name_to_handle_at
-342 i386 open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
-343 i386 clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime
-344 i386 syncfs sys_syncfs
-345 i386 sendmmsg sys_sendmmsg compat_sys_sendmmsg
-346 i386 setns sys_setns
-347 i386 process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
-348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
-349 i386 kcmp sys_kcmp
-350 i386 finit_module sys_finit_module
-351 i386 sched_setattr sys_sched_setattr
-352 i386 sched_getattr sys_sched_getattr
-353 i386 renameat2 sys_renameat2
-354 i386 seccomp sys_seccomp
-355 i386 getrandom sys_getrandom
-356 i386 memfd_create sys_memfd_create
-357 i386 bpf sys_bpf
-358 i386 execveat sys_execveat compat_sys_execveat
-359 i386 socket sys_socket
-360 i386 socketpair sys_socketpair
-361 i386 bind sys_bind
-362 i386 connect sys_connect
-363 i386 listen sys_listen
-364 i386 accept4 sys_accept4
-365 i386 getsockopt sys_getsockopt compat_sys_getsockopt
-366 i386 setsockopt sys_setsockopt compat_sys_setsockopt
-367 i386 getsockname sys_getsockname
-368 i386 getpeername sys_getpeername
-369 i386 sendto sys_sendto
-370 i386 sendmsg sys_sendmsg compat_sys_sendmsg
-371 i386 recvfrom sys_recvfrom compat_sys_recvfrom
-372 i386 recvmsg sys_recvmsg compat_sys_recvmsg
-373 i386 shutdown sys_shutdown
-374 i386 userfaultfd sys_userfaultfd
-375 i386 membarrier sys_membarrier
-376 i386 mlock2 sys_mlock2
-377 i386 copy_file_range sys_copy_file_range
-378 i386 preadv2 sys_preadv2 compat_sys_preadv2
-379 i386 pwritev2 sys_pwritev2 compat_sys_pwritev2
-380 i386 pkey_mprotect sys_pkey_mprotect
-381 i386 pkey_alloc sys_pkey_alloc
-382 i386 pkey_free sys_pkey_free
-383 i386 statx sys_statx
-384 i386 arch_prctl sys_arch_prctl compat_sys_arch_prctl
+286 i386 add_key sys_add_key __ia32_sys_add_key
+287 i386 request_key sys_request_key __ia32_sys_request_key
+288 i386 keyctl sys_keyctl __ia32_compat_sys_keyctl
+289 i386 ioprio_set sys_ioprio_set __ia32_sys_ioprio_set
+290 i386 ioprio_get sys_ioprio_get __ia32_sys_ioprio_get
+291 i386 inotify_init sys_inotify_init __ia32_sys_inotify_init
+292 i386 inotify_add_watch sys_inotify_add_watch __ia32_sys_inotify_add_watch
+293 i386 inotify_rm_watch sys_inotify_rm_watch __ia32_sys_inotify_rm_watch
+294 i386 migrate_pages sys_migrate_pages __ia32_sys_migrate_pages
+295 i386 openat sys_openat __ia32_compat_sys_openat
+296 i386 mkdirat sys_mkdirat __ia32_sys_mkdirat
+297 i386 mknodat sys_mknodat __ia32_sys_mknodat
+298 i386 fchownat sys_fchownat __ia32_sys_fchownat
+299 i386 futimesat sys_futimesat __ia32_compat_sys_futimesat
+300 i386 fstatat64 sys_fstatat64 __ia32_compat_sys_x86_fstatat
+301 i386 unlinkat sys_unlinkat __ia32_sys_unlinkat
+302 i386 renameat sys_renameat __ia32_sys_renameat
+303 i386 linkat sys_linkat __ia32_sys_linkat
+304 i386 symlinkat sys_symlinkat __ia32_sys_symlinkat
+305 i386 readlinkat sys_readlinkat __ia32_sys_readlinkat
+306 i386 fchmodat sys_fchmodat __ia32_sys_fchmodat
+307 i386 faccessat sys_faccessat __ia32_sys_faccessat
+308 i386 pselect6 sys_pselect6 __ia32_compat_sys_pselect6
+309 i386 ppoll sys_ppoll __ia32_compat_sys_ppoll
+310 i386 unshare sys_unshare __ia32_sys_unshare
+311 i386 set_robust_list sys_set_robust_list __ia32_compat_sys_set_robust_list
+312 i386 get_robust_list sys_get_robust_list __ia32_compat_sys_get_robust_list
+313 i386 splice sys_splice __ia32_sys_splice
+314 i386 sync_file_range sys_sync_file_range __ia32_compat_sys_x86_sync_file_range
+315 i386 tee sys_tee __ia32_sys_tee
+316 i386 vmsplice sys_vmsplice __ia32_compat_sys_vmsplice
+317 i386 move_pages sys_move_pages __ia32_compat_sys_move_pages
+318 i386 getcpu sys_getcpu __ia32_sys_getcpu
+319 i386 epoll_pwait sys_epoll_pwait __ia32_sys_epoll_pwait
+320 i386 utimensat sys_utimensat __ia32_compat_sys_utimensat
+321 i386 signalfd sys_signalfd __ia32_compat_sys_signalfd
+322 i386 timerfd_create sys_timerfd_create __ia32_sys_timerfd_create
+323 i386 eventfd sys_eventfd __ia32_sys_eventfd
+324 i386 fallocate sys_fallocate __ia32_compat_sys_x86_fallocate
+325 i386 timerfd_settime sys_timerfd_settime __ia32_compat_sys_timerfd_settime
+326 i386 timerfd_gettime sys_timerfd_gettime __ia32_compat_sys_timerfd_gettime
+327 i386 signalfd4 sys_signalfd4 __ia32_compat_sys_signalfd4
+328 i386 eventfd2 sys_eventfd2 __ia32_sys_eventfd2
+329 i386 epoll_create1 sys_epoll_create1 __ia32_sys_epoll_create1
+330 i386 dup3 sys_dup3 __ia32_sys_dup3
+331 i386 pipe2 sys_pipe2 __ia32_sys_pipe2
+332 i386 inotify_init1 sys_inotify_init1 __ia32_sys_inotify_init1
+333 i386 preadv sys_preadv __ia32_compat_sys_preadv
+334 i386 pwritev sys_pwritev __ia32_compat_sys_pwritev
+335 i386 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo __ia32_compat_sys_rt_tgsigqueueinfo
+336 i386 perf_event_open sys_perf_event_open __ia32_sys_perf_event_open
+337 i386 recvmmsg sys_recvmmsg __ia32_compat_sys_recvmmsg
+338 i386 fanotify_init sys_fanotify_init __ia32_sys_fanotify_init
+339 i386 fanotify_mark sys_fanotify_mark __ia32_compat_sys_fanotify_mark
+340 i386 prlimit64 sys_prlimit64 __ia32_sys_prlimit64
+341 i386 name_to_handle_at sys_name_to_handle_at __ia32_sys_name_to_handle_at
+342 i386 open_by_handle_at sys_open_by_handle_at __ia32_compat_sys_open_by_handle_at
+343 i386 clock_adjtime sys_clock_adjtime __ia32_compat_sys_clock_adjtime
+344 i386 syncfs sys_syncfs __ia32_sys_syncfs
+345 i386 sendmmsg sys_sendmmsg __ia32_compat_sys_sendmmsg
+346 i386 setns sys_setns __ia32_sys_setns
+347 i386 process_vm_readv sys_process_vm_readv __ia32_compat_sys_process_vm_readv
+348 i386 process_vm_writev sys_process_vm_writev __ia32_compat_sys_process_vm_writev
+349 i386 kcmp sys_kcmp __ia32_sys_kcmp
+350 i386 finit_module sys_finit_module __ia32_sys_finit_module
+351 i386 sched_setattr sys_sched_setattr __ia32_sys_sched_setattr
+352 i386 sched_getattr sys_sched_getattr __ia32_sys_sched_getattr
+353 i386 renameat2 sys_renameat2 __ia32_sys_renameat2
+354 i386 seccomp sys_seccomp __ia32_sys_seccomp
+355 i386 getrandom sys_getrandom __ia32_sys_getrandom
+356 i386 memfd_create sys_memfd_create __ia32_sys_memfd_create
+357 i386 bpf sys_bpf __ia32_sys_bpf
+358 i386 execveat sys_execveat __ia32_compat_sys_execveat
+359 i386 socket sys_socket __ia32_sys_socket
+360 i386 socketpair sys_socketpair __ia32_sys_socketpair
+361 i386 bind sys_bind __ia32_sys_bind
+362 i386 connect sys_connect __ia32_sys_connect
+363 i386 listen sys_listen __ia32_sys_listen
+364 i386 accept4 sys_accept4 __ia32_sys_accept4
+365 i386 getsockopt sys_getsockopt __ia32_compat_sys_getsockopt
+366 i386 setsockopt sys_setsockopt __ia32_compat_sys_setsockopt
+367 i386 getsockname sys_getsockname __ia32_sys_getsockname
+368 i386 getpeername sys_getpeername __ia32_sys_getpeername
+369 i386 sendto sys_sendto __ia32_sys_sendto
+370 i386 sendmsg sys_sendmsg __ia32_compat_sys_sendmsg
+371 i386 recvfrom sys_recvfrom __ia32_compat_sys_recvfrom
+372 i386 recvmsg sys_recvmsg __ia32_compat_sys_recvmsg
+373 i386 shutdown sys_shutdown __ia32_sys_shutdown
+374 i386 userfaultfd sys_userfaultfd __ia32_sys_userfaultfd
+375 i386 membarrier sys_membarrier __ia32_sys_membarrier
+376 i386 mlock2 sys_mlock2 __ia32_sys_mlock2
+377 i386 copy_file_range sys_copy_file_range __ia32_sys_copy_file_range
+378 i386 preadv2 sys_preadv2 __ia32_compat_sys_preadv2
+379 i386 pwritev2 sys_pwritev2 __ia32_compat_sys_pwritev2
+380 i386 pkey_mprotect sys_pkey_mprotect __ia32_sys_pkey_mprotect
+381 i386 pkey_alloc sys_pkey_alloc __ia32_sys_pkey_alloc
+382 i386 pkey_free sys_pkey_free __ia32_sys_pkey_free
+383 i386 statx sys_statx __ia32_sys_statx
+384 i386 arch_prctl sys_arch_prctl __ia32_compat_sys_arch_prctl
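
The fourth column above now names the stub that the IA32-emulation entry code calls; each __ia32_sys_*() / __ia32_compat_sys_*() stub takes a single struct pt_regs pointer and decodes the i386 argument registers itself. As a hand-expanded sketch (illustrative only, not the literal macro output), one such stub amounts to:

    /* Sketch: rough expansion of the __ia32_sys_mkdirat stub */
    asmlinkage long __ia32_sys_mkdirat(const struct pt_regs *regs)
    {
            /* i386 argument order: bx, cx, dx, si, di, bp */
            return __se_sys_mkdirat((unsigned int)regs->bx,
                                    (unsigned int)regs->cx,
                                    (unsigned int)regs->dx);
    }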
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 5aef183e2f85..4dfe42666d0c 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -4,379 +4,383 @@
# The format is:
# <number> <abi> <name> <entry point>
#
+# The __x64_sys_*() stubs are created on-the-fly for sys_*() system calls
+#
# The abi is "common", "64" or "x32" for this file.
#
-0 common read sys_read
-1 common write sys_write
-2 common open sys_open
-3 common close sys_close
-4 common stat sys_newstat
-5 common fstat sys_newfstat
-6 common lstat sys_newlstat
-7 common poll sys_poll
-8 common lseek sys_lseek
-9 common mmap sys_mmap
-10 common mprotect sys_mprotect
-11 common munmap sys_munmap
-12 common brk sys_brk
-13 64 rt_sigaction sys_rt_sigaction
-14 common rt_sigprocmask sys_rt_sigprocmask
-15 64 rt_sigreturn sys_rt_sigreturn/ptregs
-16 64 ioctl sys_ioctl
-17 common pread64 sys_pread64
-18 common pwrite64 sys_pwrite64
-19 64 readv sys_readv
-20 64 writev sys_writev
-21 common access sys_access
-22 common pipe sys_pipe
-23 common select sys_select
-24 common sched_yield sys_sched_yield
-25 common mremap sys_mremap
-26 common msync sys_msync
-27 common mincore sys_mincore
-28 common madvise sys_madvise
-29 common shmget sys_shmget
-30 common shmat sys_shmat
-31 common shmctl sys_shmctl
-32 common dup sys_dup
-33 common dup2 sys_dup2
-34 common pause sys_pause
-35 common nanosleep sys_nanosleep
-36 common getitimer sys_getitimer
-37 common alarm sys_alarm
-38 common setitimer sys_setitimer
-39 common getpid sys_getpid
-40 common sendfile sys_sendfile64
-41 common socket sys_socket
-42 common connect sys_connect
-43 common accept sys_accept
-44 common sendto sys_sendto
-45 64 recvfrom sys_recvfrom
-46 64 sendmsg sys_sendmsg
-47 64 recvmsg sys_recvmsg
-48 common shutdown sys_shutdown
-49 common bind sys_bind
-50 common listen sys_listen
-51 common getsockname sys_getsockname
-52 common getpeername sys_getpeername
-53 common socketpair sys_socketpair
-54 64 setsockopt sys_setsockopt
-55 64 getsockopt sys_getsockopt
-56 common clone sys_clone/ptregs
-57 common fork sys_fork/ptregs
-58 common vfork sys_vfork/ptregs
-59 64 execve sys_execve/ptregs
-60 common exit sys_exit
-61 common wait4 sys_wait4
-62 common kill sys_kill
-63 common uname sys_newuname
-64 common semget sys_semget
-65 common semop sys_semop
-66 common semctl sys_semctl
-67 common shmdt sys_shmdt
-68 common msgget sys_msgget
-69 common msgsnd sys_msgsnd
-70 common msgrcv sys_msgrcv
-71 common msgctl sys_msgctl
-72 common fcntl sys_fcntl
-73 common flock sys_flock
-74 common fsync sys_fsync
-75 common fdatasync sys_fdatasync
-76 common truncate sys_truncate
-77 common ftruncate sys_ftruncate
-78 common getdents sys_getdents
-79 common getcwd sys_getcwd
-80 common chdir sys_chdir
-81 common fchdir sys_fchdir
-82 common rename sys_rename
-83 common mkdir sys_mkdir
-84 common rmdir sys_rmdir
-85 common creat sys_creat
-86 common link sys_link
-87 common unlink sys_unlink
-88 common symlink sys_symlink
-89 common readlink sys_readlink
-90 common chmod sys_chmod
-91 common fchmod sys_fchmod
-92 common chown sys_chown
-93 common fchown sys_fchown
-94 common lchown sys_lchown
-95 common umask sys_umask
-96 common gettimeofday sys_gettimeofday
-97 common getrlimit sys_getrlimit
-98 common getrusage sys_getrusage
-99 common sysinfo sys_sysinfo
-100 common times sys_times
-101 64 ptrace sys_ptrace
-102 common getuid sys_getuid
-103 common syslog sys_syslog
-104 common getgid sys_getgid
-105 common setuid sys_setuid
-106 common setgid sys_setgid
-107 common geteuid sys_geteuid
-108 common getegid sys_getegid
-109 common setpgid sys_setpgid
-110 common getppid sys_getppid
-111 common getpgrp sys_getpgrp
-112 common setsid sys_setsid
-113 common setreuid sys_setreuid
-114 common setregid sys_setregid
-115 common getgroups sys_getgroups
-116 common setgroups sys_setgroups
-117 common setresuid sys_setresuid
-118 common getresuid sys_getresuid
-119 common setresgid sys_setresgid
-120 common getresgid sys_getresgid
-121 common getpgid sys_getpgid
-122 common setfsuid sys_setfsuid
-123 common setfsgid sys_setfsgid
-124 common getsid sys_getsid
-125 common capget sys_capget
-126 common capset sys_capset
-127 64 rt_sigpending sys_rt_sigpending
-128 64 rt_sigtimedwait sys_rt_sigtimedwait
-129 64 rt_sigqueueinfo sys_rt_sigqueueinfo
-130 common rt_sigsuspend sys_rt_sigsuspend
-131 64 sigaltstack sys_sigaltstack
-132 common utime sys_utime
-133 common mknod sys_mknod
+0 common read __x64_sys_read
+1 common write __x64_sys_write
+2 common open __x64_sys_open
+3 common close __x64_sys_close
+4 common stat __x64_sys_newstat
+5 common fstat __x64_sys_newfstat
+6 common lstat __x64_sys_newlstat
+7 common poll __x64_sys_poll
+8 common lseek __x64_sys_lseek
+9 common mmap __x64_sys_mmap
+10 common mprotect __x64_sys_mprotect
+11 common munmap __x64_sys_munmap
+12 common brk __x64_sys_brk
+13 64 rt_sigaction __x64_sys_rt_sigaction
+14 common rt_sigprocmask __x64_sys_rt_sigprocmask
+15 64 rt_sigreturn __x64_sys_rt_sigreturn/ptregs
+16 64 ioctl __x64_sys_ioctl
+17 common pread64 __x64_sys_pread64
+18 common pwrite64 __x64_sys_pwrite64
+19 64 readv __x64_sys_readv
+20 64 writev __x64_sys_writev
+21 common access __x64_sys_access
+22 common pipe __x64_sys_pipe
+23 common select __x64_sys_select
+24 common sched_yield __x64_sys_sched_yield
+25 common mremap __x64_sys_mremap
+26 common msync __x64_sys_msync
+27 common mincore __x64_sys_mincore
+28 common madvise __x64_sys_madvise
+29 common shmget __x64_sys_shmget
+30 common shmat __x64_sys_shmat
+31 common shmctl __x64_sys_shmctl
+32 common dup __x64_sys_dup
+33 common dup2 __x64_sys_dup2
+34 common pause __x64_sys_pause
+35 common nanosleep __x64_sys_nanosleep
+36 common getitimer __x64_sys_getitimer
+37 common alarm __x64_sys_alarm
+38 common setitimer __x64_sys_setitimer
+39 common getpid __x64_sys_getpid
+40 common sendfile __x64_sys_sendfile64
+41 common socket __x64_sys_socket
+42 common connect __x64_sys_connect
+43 common accept __x64_sys_accept
+44 common sendto __x64_sys_sendto
+45 64 recvfrom __x64_sys_recvfrom
+46 64 sendmsg __x64_sys_sendmsg
+47 64 recvmsg __x64_sys_recvmsg
+48 common shutdown __x64_sys_shutdown
+49 common bind __x64_sys_bind
+50 common listen __x64_sys_listen
+51 common getsockname __x64_sys_getsockname
+52 common getpeername __x64_sys_getpeername
+53 common socketpair __x64_sys_socketpair
+54 64 setsockopt __x64_sys_setsockopt
+55 64 getsockopt __x64_sys_getsockopt
+56 common clone __x64_sys_clone/ptregs
+57 common fork __x64_sys_fork/ptregs
+58 common vfork __x64_sys_vfork/ptregs
+59 64 execve __x64_sys_execve/ptregs
+60 common exit __x64_sys_exit
+61 common wait4 __x64_sys_wait4
+62 common kill __x64_sys_kill
+63 common uname __x64_sys_newuname
+64 common semget __x64_sys_semget
+65 common semop __x64_sys_semop
+66 common semctl __x64_sys_semctl
+67 common shmdt __x64_sys_shmdt
+68 common msgget __x64_sys_msgget
+69 common msgsnd __x64_sys_msgsnd
+70 common msgrcv __x64_sys_msgrcv
+71 common msgctl __x64_sys_msgctl
+72 common fcntl __x64_sys_fcntl
+73 common flock __x64_sys_flock
+74 common fsync __x64_sys_fsync
+75 common fdatasync __x64_sys_fdatasync
+76 common truncate __x64_sys_truncate
+77 common ftruncate __x64_sys_ftruncate
+78 common getdents __x64_sys_getdents
+79 common getcwd __x64_sys_getcwd
+80 common chdir __x64_sys_chdir
+81 common fchdir __x64_sys_fchdir
+82 common rename __x64_sys_rename
+83 common mkdir __x64_sys_mkdir
+84 common rmdir __x64_sys_rmdir
+85 common creat __x64_sys_creat
+86 common link __x64_sys_link
+87 common unlink __x64_sys_unlink
+88 common symlink __x64_sys_symlink
+89 common readlink __x64_sys_readlink
+90 common chmod __x64_sys_chmod
+91 common fchmod __x64_sys_fchmod
+92 common chown __x64_sys_chown
+93 common fchown __x64_sys_fchown
+94 common lchown __x64_sys_lchown
+95 common umask __x64_sys_umask
+96 common gettimeofday __x64_sys_gettimeofday
+97 common getrlimit __x64_sys_getrlimit
+98 common getrusage __x64_sys_getrusage
+99 common sysinfo __x64_sys_sysinfo
+100 common times __x64_sys_times
+101 64 ptrace __x64_sys_ptrace
+102 common getuid __x64_sys_getuid
+103 common syslog __x64_sys_syslog
+104 common getgid __x64_sys_getgid
+105 common setuid __x64_sys_setuid
+106 common setgid __x64_sys_setgid
+107 common geteuid __x64_sys_geteuid
+108 common getegid __x64_sys_getegid
+109 common setpgid __x64_sys_setpgid
+110 common getppid __x64_sys_getppid
+111 common getpgrp __x64_sys_getpgrp
+112 common setsid __x64_sys_setsid
+113 common setreuid __x64_sys_setreuid
+114 common setregid __x64_sys_setregid
+115 common getgroups __x64_sys_getgroups
+116 common setgroups __x64_sys_setgroups
+117 common setresuid __x64_sys_setresuid
+118 common getresuid __x64_sys_getresuid
+119 common setresgid __x64_sys_setresgid
+120 common getresgid __x64_sys_getresgid
+121 common getpgid __x64_sys_getpgid
+122 common setfsuid __x64_sys_setfsuid
+123 common setfsgid __x64_sys_setfsgid
+124 common getsid __x64_sys_getsid
+125 common capget __x64_sys_capget
+126 common capset __x64_sys_capset
+127 64 rt_sigpending __x64_sys_rt_sigpending
+128 64 rt_sigtimedwait __x64_sys_rt_sigtimedwait
+129 64 rt_sigqueueinfo __x64_sys_rt_sigqueueinfo
+130 common rt_sigsuspend __x64_sys_rt_sigsuspend
+131 64 sigaltstack __x64_sys_sigaltstack
+132 common utime __x64_sys_utime
+133 common mknod __x64_sys_mknod
134 64 uselib
-135 common personality sys_personality
-136 common ustat sys_ustat
-137 common statfs sys_statfs
-138 common fstatfs sys_fstatfs
-139 common sysfs sys_sysfs
-140 common getpriority sys_getpriority
-141 common setpriority sys_setpriority
-142 common sched_setparam sys_sched_setparam
-143 common sched_getparam sys_sched_getparam
-144 common sched_setscheduler sys_sched_setscheduler
-145 common sched_getscheduler sys_sched_getscheduler
-146 common sched_get_priority_max sys_sched_get_priority_max
-147 common sched_get_priority_min sys_sched_get_priority_min
-148 common sched_rr_get_interval sys_sched_rr_get_interval
-149 common mlock sys_mlock
-150 common munlock sys_munlock
-151 common mlockall sys_mlockall
-152 common munlockall sys_munlockall
-153 common vhangup sys_vhangup
-154 common modify_ldt sys_modify_ldt
-155 common pivot_root sys_pivot_root
-156 64 _sysctl sys_sysctl
-157 common prctl sys_prctl
-158 common arch_prctl sys_arch_prctl
-159 common adjtimex sys_adjtimex
-160 common setrlimit sys_setrlimit
-161 common chroot sys_chroot
-162 common sync sys_sync
-163 common acct sys_acct
-164 common settimeofday sys_settimeofday
-165 common mount sys_mount
-166 common umount2 sys_umount
-167 common swapon sys_swapon
-168 common swapoff sys_swapoff
-169 common reboot sys_reboot
-170 common sethostname sys_sethostname
-171 common setdomainname sys_setdomainname
-172 common iopl sys_iopl/ptregs
-173 common ioperm sys_ioperm
+135 common personality __x64_sys_personality
+136 common ustat __x64_sys_ustat
+137 common statfs __x64_sys_statfs
+138 common fstatfs __x64_sys_fstatfs
+139 common sysfs __x64_sys_sysfs
+140 common getpriority __x64_sys_getpriority
+141 common setpriority __x64_sys_setpriority
+142 common sched_setparam __x64_sys_sched_setparam
+143 common sched_getparam __x64_sys_sched_getparam
+144 common sched_setscheduler __x64_sys_sched_setscheduler
+145 common sched_getscheduler __x64_sys_sched_getscheduler
+146 common sched_get_priority_max __x64_sys_sched_get_priority_max
+147 common sched_get_priority_min __x64_sys_sched_get_priority_min
+148 common sched_rr_get_interval __x64_sys_sched_rr_get_interval
+149 common mlock __x64_sys_mlock
+150 common munlock __x64_sys_munlock
+151 common mlockall __x64_sys_mlockall
+152 common munlockall __x64_sys_munlockall
+153 common vhangup __x64_sys_vhangup
+154 common modify_ldt __x64_sys_modify_ldt
+155 common pivot_root __x64_sys_pivot_root
+156 64 _sysctl __x64_sys_sysctl
+157 common prctl __x64_sys_prctl
+158 common arch_prctl __x64_sys_arch_prctl
+159 common adjtimex __x64_sys_adjtimex
+160 common setrlimit __x64_sys_setrlimit
+161 common chroot __x64_sys_chroot
+162 common sync __x64_sys_sync
+163 common acct __x64_sys_acct
+164 common settimeofday __x64_sys_settimeofday
+165 common mount __x64_sys_mount
+166 common umount2 __x64_sys_umount
+167 common swapon __x64_sys_swapon
+168 common swapoff __x64_sys_swapoff
+169 common reboot __x64_sys_reboot
+170 common sethostname __x64_sys_sethostname
+171 common setdomainname __x64_sys_setdomainname
+172 common iopl __x64_sys_iopl/ptregs
+173 common ioperm __x64_sys_ioperm
174 64 create_module
-175 common init_module sys_init_module
-176 common delete_module sys_delete_module
+175 common init_module __x64_sys_init_module
+176 common delete_module __x64_sys_delete_module
177 64 get_kernel_syms
178 64 query_module
-179 common quotactl sys_quotactl
+179 common quotactl __x64_sys_quotactl
180 64 nfsservctl
181 common getpmsg
182 common putpmsg
183 common afs_syscall
184 common tuxcall
185 common security
-186 common gettid sys_gettid
-187 common readahead sys_readahead
-188 common setxattr sys_setxattr
-189 common lsetxattr sys_lsetxattr
-190 common fsetxattr sys_fsetxattr
-191 common getxattr sys_getxattr
-192 common lgetxattr sys_lgetxattr
-193 common fgetxattr sys_fgetxattr
-194 common listxattr sys_listxattr
-195 common llistxattr sys_llistxattr
-196 common flistxattr sys_flistxattr
-197 common removexattr sys_removexattr
-198 common lremovexattr sys_lremovexattr
-199 common fremovexattr sys_fremovexattr
-200 common tkill sys_tkill
-201 common time sys_time
-202 common futex sys_futex
-203 common sched_setaffinity sys_sched_setaffinity
-204 common sched_getaffinity sys_sched_getaffinity
+186 common gettid __x64_sys_gettid
+187 common readahead __x64_sys_readahead
+188 common setxattr __x64_sys_setxattr
+189 common lsetxattr __x64_sys_lsetxattr
+190 common fsetxattr __x64_sys_fsetxattr
+191 common getxattr __x64_sys_getxattr
+192 common lgetxattr __x64_sys_lgetxattr
+193 common fgetxattr __x64_sys_fgetxattr
+194 common listxattr __x64_sys_listxattr
+195 common llistxattr __x64_sys_llistxattr
+196 common flistxattr __x64_sys_flistxattr
+197 common removexattr __x64_sys_removexattr
+198 common lremovexattr __x64_sys_lremovexattr
+199 common fremovexattr __x64_sys_fremovexattr
+200 common tkill __x64_sys_tkill
+201 common time __x64_sys_time
+202 common futex __x64_sys_futex
+203 common sched_setaffinity __x64_sys_sched_setaffinity
+204 common sched_getaffinity __x64_sys_sched_getaffinity
205 64 set_thread_area
-206 64 io_setup sys_io_setup
-207 common io_destroy sys_io_destroy
-208 common io_getevents sys_io_getevents
-209 64 io_submit sys_io_submit
-210 common io_cancel sys_io_cancel
+206 64 io_setup __x64_sys_io_setup
+207 common io_destroy __x64_sys_io_destroy
+208 common io_getevents __x64_sys_io_getevents
+209 64 io_submit __x64_sys_io_submit
+210 common io_cancel __x64_sys_io_cancel
211 64 get_thread_area
-212 common lookup_dcookie sys_lookup_dcookie
-213 common epoll_create sys_epoll_create
+212 common lookup_dcookie __x64_sys_lookup_dcookie
+213 common epoll_create __x64_sys_epoll_create
214 64 epoll_ctl_old
215 64 epoll_wait_old
-216 common remap_file_pages sys_remap_file_pages
-217 common getdents64 sys_getdents64
-218 common set_tid_address sys_set_tid_address
-219 common restart_syscall sys_restart_syscall
-220 common semtimedop sys_semtimedop
-221 common fadvise64 sys_fadvise64
-222 64 timer_create sys_timer_create
-223 common timer_settime sys_timer_settime
-224 common timer_gettime sys_timer_gettime
-225 common timer_getoverrun sys_timer_getoverrun
-226 common timer_delete sys_timer_delete
-227 common clock_settime sys_clock_settime
-228 common clock_gettime sys_clock_gettime
-229 common clock_getres sys_clock_getres
-230 common clock_nanosleep sys_clock_nanosleep
-231 common exit_group sys_exit_group
-232 common epoll_wait sys_epoll_wait
-233 common epoll_ctl sys_epoll_ctl
-234 common tgkill sys_tgkill
-235 common utimes sys_utimes
+216 common remap_file_pages __x64_sys_remap_file_pages
+217 common getdents64 __x64_sys_getdents64
+218 common set_tid_address __x64_sys_set_tid_address
+219 common restart_syscall __x64_sys_restart_syscall
+220 common semtimedop __x64_sys_semtimedop
+221 common fadvise64 __x64_sys_fadvise64
+222 64 timer_create __x64_sys_timer_create
+223 common timer_settime __x64_sys_timer_settime
+224 common timer_gettime __x64_sys_timer_gettime
+225 common timer_getoverrun __x64_sys_timer_getoverrun
+226 common timer_delete __x64_sys_timer_delete
+227 common clock_settime __x64_sys_clock_settime
+228 common clock_gettime __x64_sys_clock_gettime
+229 common clock_getres __x64_sys_clock_getres
+230 common clock_nanosleep __x64_sys_clock_nanosleep
+231 common exit_group __x64_sys_exit_group
+232 common epoll_wait __x64_sys_epoll_wait
+233 common epoll_ctl __x64_sys_epoll_ctl
+234 common tgkill __x64_sys_tgkill
+235 common utimes __x64_sys_utimes
236 64 vserver
-237 common mbind sys_mbind
-238 common set_mempolicy sys_set_mempolicy
-239 common get_mempolicy sys_get_mempolicy
-240 common mq_open sys_mq_open
-241 common mq_unlink sys_mq_unlink
-242 common mq_timedsend sys_mq_timedsend
-243 common mq_timedreceive sys_mq_timedreceive
-244 64 mq_notify sys_mq_notify
-245 common mq_getsetattr sys_mq_getsetattr
-246 64 kexec_load sys_kexec_load
-247 64 waitid sys_waitid
-248 common add_key sys_add_key
-249 common request_key sys_request_key
-250 common keyctl sys_keyctl
-251 common ioprio_set sys_ioprio_set
-252 common ioprio_get sys_ioprio_get
-253 common inotify_init sys_inotify_init
-254 common inotify_add_watch sys_inotify_add_watch
-255 common inotify_rm_watch sys_inotify_rm_watch
-256 common migrate_pages sys_migrate_pages
-257 common openat sys_openat
-258 common mkdirat sys_mkdirat
-259 common mknodat sys_mknodat
-260 common fchownat sys_fchownat
-261 common futimesat sys_futimesat
-262 common newfstatat sys_newfstatat
-263 common unlinkat sys_unlinkat
-264 common renameat sys_renameat
-265 common linkat sys_linkat
-266 common symlinkat sys_symlinkat
-267 common readlinkat sys_readlinkat
-268 common fchmodat sys_fchmodat
-269 common faccessat sys_faccessat
-270 common pselect6 sys_pselect6
-271 common ppoll sys_ppoll
-272 common unshare sys_unshare
-273 64 set_robust_list sys_set_robust_list
-274 64 get_robust_list sys_get_robust_list
-275 common splice sys_splice
-276 common tee sys_tee
-277 common sync_file_range sys_sync_file_range
-278 64 vmsplice sys_vmsplice
-279 64 move_pages sys_move_pages
-280 common utimensat sys_utimensat
-281 common epoll_pwait sys_epoll_pwait
-282 common signalfd sys_signalfd
-283 common timerfd_create sys_timerfd_create
-284 common eventfd sys_eventfd
-285 common fallocate sys_fallocate
-286 common timerfd_settime sys_timerfd_settime
-287 common timerfd_gettime sys_timerfd_gettime
-288 common accept4 sys_accept4
-289 common signalfd4 sys_signalfd4
-290 common eventfd2 sys_eventfd2
-291 common epoll_create1 sys_epoll_create1
-292 common dup3 sys_dup3
-293 common pipe2 sys_pipe2
-294 common inotify_init1 sys_inotify_init1
-295 64 preadv sys_preadv
-296 64 pwritev sys_pwritev
-297 64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
-298 common perf_event_open sys_perf_event_open
-299 64 recvmmsg sys_recvmmsg
-300 common fanotify_init sys_fanotify_init
-301 common fanotify_mark sys_fanotify_mark
-302 common prlimit64 sys_prlimit64
-303 common name_to_handle_at sys_name_to_handle_at
-304 common open_by_handle_at sys_open_by_handle_at
-305 common clock_adjtime sys_clock_adjtime
-306 common syncfs sys_syncfs
-307 64 sendmmsg sys_sendmmsg
-308 common setns sys_setns
-309 common getcpu sys_getcpu
-310 64 process_vm_readv sys_process_vm_readv
-311 64 process_vm_writev sys_process_vm_writev
-312 common kcmp sys_kcmp
-313 common finit_module sys_finit_module
-314 common sched_setattr sys_sched_setattr
-315 common sched_getattr sys_sched_getattr
-316 common renameat2 sys_renameat2
-317 common seccomp sys_seccomp
-318 common getrandom sys_getrandom
-319 common memfd_create sys_memfd_create
-320 common kexec_file_load sys_kexec_file_load
-321 common bpf sys_bpf
-322 64 execveat sys_execveat/ptregs
-323 common userfaultfd sys_userfaultfd
-324 common membarrier sys_membarrier
-325 common mlock2 sys_mlock2
-326 common copy_file_range sys_copy_file_range
-327 64 preadv2 sys_preadv2
-328 64 pwritev2 sys_pwritev2
-329 common pkey_mprotect sys_pkey_mprotect
-330 common pkey_alloc sys_pkey_alloc
-331 common pkey_free sys_pkey_free
-332 common statx sys_statx
+237 common mbind __x64_sys_mbind
+238 common set_mempolicy __x64_sys_set_mempolicy
+239 common get_mempolicy __x64_sys_get_mempolicy
+240 common mq_open __x64_sys_mq_open
+241 common mq_unlink __x64_sys_mq_unlink
+242 common mq_timedsend __x64_sys_mq_timedsend
+243 common mq_timedreceive __x64_sys_mq_timedreceive
+244 64 mq_notify __x64_sys_mq_notify
+245 common mq_getsetattr __x64_sys_mq_getsetattr
+246 64 kexec_load __x64_sys_kexec_load
+247 64 waitid __x64_sys_waitid
+248 common add_key __x64_sys_add_key
+249 common request_key __x64_sys_request_key
+250 common keyctl __x64_sys_keyctl
+251 common ioprio_set __x64_sys_ioprio_set
+252 common ioprio_get __x64_sys_ioprio_get
+253 common inotify_init __x64_sys_inotify_init
+254 common inotify_add_watch __x64_sys_inotify_add_watch
+255 common inotify_rm_watch __x64_sys_inotify_rm_watch
+256 common migrate_pages __x64_sys_migrate_pages
+257 common openat __x64_sys_openat
+258 common mkdirat __x64_sys_mkdirat
+259 common mknodat __x64_sys_mknodat
+260 common fchownat __x64_sys_fchownat
+261 common futimesat __x64_sys_futimesat
+262 common newfstatat __x64_sys_newfstatat
+263 common unlinkat __x64_sys_unlinkat
+264 common renameat __x64_sys_renameat
+265 common linkat __x64_sys_linkat
+266 common symlinkat __x64_sys_symlinkat
+267 common readlinkat __x64_sys_readlinkat
+268 common fchmodat __x64_sys_fchmodat
+269 common faccessat __x64_sys_faccessat
+270 common pselect6 __x64_sys_pselect6
+271 common ppoll __x64_sys_ppoll
+272 common unshare __x64_sys_unshare
+273 64 set_robust_list __x64_sys_set_robust_list
+274 64 get_robust_list __x64_sys_get_robust_list
+275 common splice __x64_sys_splice
+276 common tee __x64_sys_tee
+277 common sync_file_range __x64_sys_sync_file_range
+278 64 vmsplice __x64_sys_vmsplice
+279 64 move_pages __x64_sys_move_pages
+280 common utimensat __x64_sys_utimensat
+281 common epoll_pwait __x64_sys_epoll_pwait
+282 common signalfd __x64_sys_signalfd
+283 common timerfd_create __x64_sys_timerfd_create
+284 common eventfd __x64_sys_eventfd
+285 common fallocate __x64_sys_fallocate
+286 common timerfd_settime __x64_sys_timerfd_settime
+287 common timerfd_gettime __x64_sys_timerfd_gettime
+288 common accept4 __x64_sys_accept4
+289 common signalfd4 __x64_sys_signalfd4
+290 common eventfd2 __x64_sys_eventfd2
+291 common epoll_create1 __x64_sys_epoll_create1
+292 common dup3 __x64_sys_dup3
+293 common pipe2 __x64_sys_pipe2
+294 common inotify_init1 __x64_sys_inotify_init1
+295 64 preadv __x64_sys_preadv
+296 64 pwritev __x64_sys_pwritev
+297 64 rt_tgsigqueueinfo __x64_sys_rt_tgsigqueueinfo
+298 common perf_event_open __x64_sys_perf_event_open
+299 64 recvmmsg __x64_sys_recvmmsg
+300 common fanotify_init __x64_sys_fanotify_init
+301 common fanotify_mark __x64_sys_fanotify_mark
+302 common prlimit64 __x64_sys_prlimit64
+303 common name_to_handle_at __x64_sys_name_to_handle_at
+304 common open_by_handle_at __x64_sys_open_by_handle_at
+305 common clock_adjtime __x64_sys_clock_adjtime
+306 common syncfs __x64_sys_syncfs
+307 64 sendmmsg __x64_sys_sendmmsg
+308 common setns __x64_sys_setns
+309 common getcpu __x64_sys_getcpu
+310 64 process_vm_readv __x64_sys_process_vm_readv
+311 64 process_vm_writev __x64_sys_process_vm_writev
+312 common kcmp __x64_sys_kcmp
+313 common finit_module __x64_sys_finit_module
+314 common sched_setattr __x64_sys_sched_setattr
+315 common sched_getattr __x64_sys_sched_getattr
+316 common renameat2 __x64_sys_renameat2
+317 common seccomp __x64_sys_seccomp
+318 common getrandom __x64_sys_getrandom
+319 common memfd_create __x64_sys_memfd_create
+320 common kexec_file_load __x64_sys_kexec_file_load
+321 common bpf __x64_sys_bpf
+322 64 execveat __x64_sys_execveat/ptregs
+323 common userfaultfd __x64_sys_userfaultfd
+324 common membarrier __x64_sys_membarrier
+325 common mlock2 __x64_sys_mlock2
+326 common copy_file_range __x64_sys_copy_file_range
+327 64 preadv2 __x64_sys_preadv2
+328 64 pwritev2 __x64_sys_pwritev2
+329 common pkey_mprotect __x64_sys_pkey_mprotect
+330 common pkey_alloc __x64_sys_pkey_alloc
+331 common pkey_free __x64_sys_pkey_free
+332 common statx __x64_sys_statx
#
# x32-specific system call numbers start at 512 to avoid cache impact
-# for native 64-bit operation.
+# for native 64-bit operation. The __x32_compat_sys stubs are created
+# on-the-fly for compat_sys_*() compatibility system calls if X86_X32
+# is defined.
#
-512 x32 rt_sigaction compat_sys_rt_sigaction
+512 x32 rt_sigaction __x32_compat_sys_rt_sigaction
513 x32 rt_sigreturn sys32_x32_rt_sigreturn
-514 x32 ioctl compat_sys_ioctl
-515 x32 readv compat_sys_readv
-516 x32 writev compat_sys_writev
-517 x32 recvfrom compat_sys_recvfrom
-518 x32 sendmsg compat_sys_sendmsg
-519 x32 recvmsg compat_sys_recvmsg
-520 x32 execve compat_sys_execve/ptregs
-521 x32 ptrace compat_sys_ptrace
-522 x32 rt_sigpending compat_sys_rt_sigpending
-523 x32 rt_sigtimedwait compat_sys_rt_sigtimedwait
-524 x32 rt_sigqueueinfo compat_sys_rt_sigqueueinfo
-525 x32 sigaltstack compat_sys_sigaltstack
-526 x32 timer_create compat_sys_timer_create
-527 x32 mq_notify compat_sys_mq_notify
-528 x32 kexec_load compat_sys_kexec_load
-529 x32 waitid compat_sys_waitid
-530 x32 set_robust_list compat_sys_set_robust_list
-531 x32 get_robust_list compat_sys_get_robust_list
-532 x32 vmsplice compat_sys_vmsplice
-533 x32 move_pages compat_sys_move_pages
-534 x32 preadv compat_sys_preadv64
-535 x32 pwritev compat_sys_pwritev64
-536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
-537 x32 recvmmsg compat_sys_recvmmsg
-538 x32 sendmmsg compat_sys_sendmmsg
-539 x32 process_vm_readv compat_sys_process_vm_readv
-540 x32 process_vm_writev compat_sys_process_vm_writev
-541 x32 setsockopt compat_sys_setsockopt
-542 x32 getsockopt compat_sys_getsockopt
-543 x32 io_setup compat_sys_io_setup
-544 x32 io_submit compat_sys_io_submit
-545 x32 execveat compat_sys_execveat/ptregs
-546 x32 preadv2 compat_sys_preadv64v2
-547 x32 pwritev2 compat_sys_pwritev64v2
+514 x32 ioctl __x32_compat_sys_ioctl
+515 x32 readv __x32_compat_sys_readv
+516 x32 writev __x32_compat_sys_writev
+517 x32 recvfrom __x32_compat_sys_recvfrom
+518 x32 sendmsg __x32_compat_sys_sendmsg
+519 x32 recvmsg __x32_compat_sys_recvmsg
+520 x32 execve __x32_compat_sys_execve/ptregs
+521 x32 ptrace __x32_compat_sys_ptrace
+522 x32 rt_sigpending __x32_compat_sys_rt_sigpending
+523 x32 rt_sigtimedwait __x32_compat_sys_rt_sigtimedwait
+524 x32 rt_sigqueueinfo __x32_compat_sys_rt_sigqueueinfo
+525 x32 sigaltstack __x32_compat_sys_sigaltstack
+526 x32 timer_create __x32_compat_sys_timer_create
+527 x32 mq_notify __x32_compat_sys_mq_notify
+528 x32 kexec_load __x32_compat_sys_kexec_load
+529 x32 waitid __x32_compat_sys_waitid
+530 x32 set_robust_list __x32_compat_sys_set_robust_list
+531 x32 get_robust_list __x32_compat_sys_get_robust_list
+532 x32 vmsplice __x32_compat_sys_vmsplice
+533 x32 move_pages __x32_compat_sys_move_pages
+534 x32 preadv __x32_compat_sys_preadv64
+535 x32 pwritev __x32_compat_sys_pwritev64
+536 x32 rt_tgsigqueueinfo __x32_compat_sys_rt_tgsigqueueinfo
+537 x32 recvmmsg __x32_compat_sys_recvmmsg
+538 x32 sendmmsg __x32_compat_sys_sendmmsg
+539 x32 process_vm_readv __x32_compat_sys_process_vm_readv
+540 x32 process_vm_writev __x32_compat_sys_process_vm_writev
+541 x32 setsockopt __x32_compat_sys_setsockopt
+542 x32 getsockopt __x32_compat_sys_getsockopt
+543 x32 io_setup __x32_compat_sys_io_setup
+544 x32 io_submit __x32_compat_sys_io_submit
+545 x32 execveat __x32_compat_sys_execveat/ptregs
+546 x32 preadv2 __x32_compat_sys_preadv64v2
+547 x32 pwritev2 __x32_compat_sys_pwritev64v2
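
For the 64-bit and x32 tables above, the entry-point column likewise names pt_regs-based stubs (__x64_sys_*, __x32_compat_sys_*). The visible change in the prototypes, sketched here with read() as the example:

    /* before: arguments arrived as individual register values */
    asmlinkage long sys_read(unsigned int fd, char __user *buf, size_t count);

    /* after: the stub receives the saved register frame and decodes it itself */
    asmlinkage long __x64_sys_read(const struct pt_regs *regs);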
diff --git a/arch/x86/entry/syscalls/syscalltbl.sh b/arch/x86/entry/syscalls/syscalltbl.sh
index d71ef4bd3615..94fcd1951aca 100644
--- a/arch/x86/entry/syscalls/syscalltbl.sh
+++ b/arch/x86/entry/syscalls/syscalltbl.sh
@@ -25,15 +25,27 @@ emit() {
nr="$2"
entry="$3"
compat="$4"
+ umlentry=""
if [ "$abi" = "64" -a -n "$compat" ]; then
echo "a compat entry for a 64-bit syscall makes no sense" >&2
exit 1
fi
+ # For CONFIG_UML, we need to strip the __x64_sys prefix
+ if [ "$abi" = "64" -a "${entry}" != "${entry#__x64_sys}" ]; then
+ umlentry="sys${entry#__x64_sys}"
+ fi
+
if [ -z "$compat" ]; then
- if [ -n "$entry" ]; then
+ if [ -n "$entry" -a -z "$umlentry" ]; then
syscall_macro "$abi" "$nr" "$entry"
+ elif [ -n "$umlentry" ]; then # implies -n "$entry"
+ echo "#ifdef CONFIG_X86"
+ syscall_macro "$abi" "$nr" "$entry"
+ echo "#else /* CONFIG_UML */"
+ syscall_macro "$abi" "$nr" "$umlentry"
+ echo "#endif"
fi
else
echo "#ifdef CONFIG_X86_32"
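
The CONFIG_UML special case above means the generated syscall header carries both spellings, selected at compile time. Assuming the table entry `0 common read __x64_sys_read`, the emitted fragment would look roughly like this (sketch, not verbatim tool output):

    #ifdef CONFIG_X86
    __SYSCALL_64(0, __x64_sys_read, )
    #else /* CONFIG_UML */
    __SYSCALL_64(0, sys_read, )
    #endif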
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 1943aebadede..d998a487c9b1 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -42,9 +42,7 @@ vdso_img_cfiles := $(vdso_img-y:%=vdso-image-%.c)
vdso_img_sodbg := $(vdso_img-y:%=vdso%.so.dbg)
obj-y += $(vdso_img_objs)
targets += $(vdso_img_cfiles)
-targets += $(vdso_img_sodbg)
-.SECONDARY: $(vdso_img-y:%=$(obj)/vdso-image-%.c) \
- $(vdso_img-y:%=$(obj)/vdso%.so)
+targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
export CPPFLAGS_vdso.lds += -P -C
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 317be365bce3..70b7845434cb 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -127,6 +127,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
int vsyscall_nr, syscall_nr, tmp;
int prev_sig_on_uaccess_err;
long ret;
+ unsigned long orig_dx;
/*
* No point in checking CS -- the only way to get here is a user mode
@@ -227,19 +228,22 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
ret = -EFAULT;
switch (vsyscall_nr) {
case 0:
- ret = sys_gettimeofday(
- (struct timeval __user *)regs->di,
- (struct timezone __user *)regs->si);
+ /* this decodes regs->di and regs->si on its own */
+ ret = __x64_sys_gettimeofday(regs);
break;
case 1:
- ret = sys_time((time_t __user *)regs->di);
+ /* this decodes regs->di on its own */
+ ret = __x64_sys_time(regs);
break;
case 2:
- ret = sys_getcpu((unsigned __user *)regs->di,
- (unsigned __user *)regs->si,
- NULL);
+ /* while we could clobber regs->dx, we didn't in the past... */
+ orig_dx = regs->dx;
+ regs->dx = 0;
+ /* this decodes regs->di, regs->si and regs->dx on its own */
+ ret = __x64_sys_getcpu(regs);
+ regs->dx = orig_dx;
break;
}
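
Since the in-kernel sys_*() symbols go away on x86-64, the vsyscall emulation above now calls the pt_regs-based stubs directly; saving and restoring regs->dx keeps the historical behaviour of not clobbering that register for getcpu. A hedged sketch of what the called stub itself boils down to (hand-expanded from the wrapper macros, illustrative only):

    asmlinkage long __x64_sys_getcpu(const struct pt_regs *regs)
    {
            /* decodes its own arguments from the saved user registers */
            return __se_sys_getcpu(regs->di, regs->si, regs->dx);
    }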
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index da6780122786..8a10a045b57b 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1153,7 +1153,6 @@ static void setup_pebs_sample_data(struct perf_event *event,
if (pebs == NULL)
return;
- regs->flags &= ~PERF_EFLAGS_EXACT;
sample_type = event->attr.sample_type;
dsrc = sample_type & PERF_SAMPLE_DATA_SRC;
@@ -1197,7 +1196,13 @@ static void setup_pebs_sample_data(struct perf_event *event,
* and PMI.
*/
*regs = *iregs;
- regs->flags = pebs->flags;
+
+ /*
+ * Initialize regs->flags from PEBS and clear the exact bit
+ * (which uses x86 EFLAGS Reserved bit 3), i.e., do not rely
+ * on it being zero:
+ */
+ regs->flags = pebs->flags & ~PERF_EFLAGS_EXACT;
if (sample_type & PERF_SAMPLE_REGS_INTR) {
regs->ax = pebs->ax;
@@ -1217,10 +1222,6 @@ static void setup_pebs_sample_data(struct perf_event *event,
regs->sp = pebs->sp;
}
- /*
- * Preserve PERF_EFLAGS_VM from set_linear_ip().
- */
- regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM);
#ifndef CONFIG_X86_32
regs->r8 = pebs->r8;
regs->r9 = pebs->r9;
@@ -1234,20 +1235,33 @@ static void setup_pebs_sample_data(struct perf_event *event,
}
if (event->attr.precise_ip > 1) {
- /* Haswell and later have the eventing IP, so use it: */
+ /*
+ * Haswell and later processors have an 'eventing IP'
+ * (real IP) which fixes the off-by-1 skid in hardware.
+ * Use it when precise_ip >= 2 :
+ */
if (x86_pmu.intel_cap.pebs_format >= 2) {
set_linear_ip(regs, pebs->real_ip);
regs->flags |= PERF_EFLAGS_EXACT;
} else {
- /* Otherwise use PEBS off-by-1 IP: */
+ /* Otherwise, use PEBS off-by-1 IP: */
set_linear_ip(regs, pebs->ip);
- /* ... and try to fix it up using the LBR entries: */
+ /*
+ * With precise_ip >= 2, try to fix up the off-by-1 IP
+ * using the LBR. If successful, the fixup function
+ * corrects regs->ip and calls set_linear_ip() on regs:
+ */
if (intel_pmu_pebs_fixup_ip(regs))
regs->flags |= PERF_EFLAGS_EXACT;
}
- } else
+ } else {
+ /*
+ * When precise_ip == 1, return the PEBS off-by-1 IP,
+ * no fixup attempted:
+ */
set_linear_ip(regs, pebs->ip);
+ }
if ((sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) &&
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 40a3d3642f3a..08acd954f00e 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -313,7 +313,7 @@ struct apic {
/* Probe, setup and smpboot functions */
int (*probe)(void);
int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
- int (*apic_id_valid)(int apicid);
+ int (*apic_id_valid)(u32 apicid);
int (*apic_id_registered)(void);
bool (*check_apicid_used)(physid_mask_t *map, int apicid);
@@ -486,7 +486,7 @@ static inline unsigned int read_apic_id(void)
return apic->get_apic_id(reg);
}
-extern int default_apic_id_valid(int apicid);
+extern int default_apic_id_valid(u32 apicid);
extern int default_acpi_madt_oem_check(char *, char *);
extern void default_setup_apic_routing(void);
diff --git a/arch/x86/include/asm/kexec-bzimage64.h b/arch/x86/include/asm/kexec-bzimage64.h
index 9f07cff43705..df89ee7d3e9e 100644
--- a/arch/x86/include/asm/kexec-bzimage64.h
+++ b/arch/x86/include/asm/kexec-bzimage64.h
@@ -2,6 +2,6 @@
#ifndef _ASM_KEXEC_BZIMAGE64_H
#define _ASM_KEXEC_BZIMAGE64_H
-extern struct kexec_file_ops kexec_bzImage64_ops;
+extern const struct kexec_file_ops kexec_bzImage64_ops;
#endif /* _ASM_KEXE_BZIMAGE64_H */
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 89d5c8886c85..5f49b4ff0c24 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -526,22 +526,39 @@ static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
return protval;
}
+static inline pgprotval_t check_pgprot(pgprot_t pgprot)
+{
+ pgprotval_t massaged_val = massage_pgprot(pgprot);
+
+ /* mmdebug.h cannot be included here because of dependencies */
+#ifdef CONFIG_DEBUG_VM
+ WARN_ONCE(pgprot_val(pgprot) != massaged_val,
+ "attempted to set unsupported pgprot: %016llx "
+ "bits: %016llx supported: %016llx\n",
+ (u64)pgprot_val(pgprot),
+ (u64)pgprot_val(pgprot) ^ massaged_val,
+ (u64)__supported_pte_mask);
+#endif
+
+ return massaged_val;
+}
+
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
- massage_pgprot(pgprot));
+ check_pgprot(pgprot));
}
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
- massage_pgprot(pgprot));
+ check_pgprot(pgprot));
}
static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) |
- massage_pgprot(pgprot));
+ check_pgprot(pgprot));
}
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
@@ -553,7 +570,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
* the newprot (if present):
*/
val &= _PAGE_CHG_MASK;
- val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
+ val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
return __pte(val);
}
@@ -563,7 +580,7 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
pmdval_t val = pmd_val(pmd);
val &= _HPAGE_CHG_MASK;
- val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
+ val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
return __pmd(val);
}
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index acfe755562a6..1e5a40673953 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -196,19 +196,21 @@ enum page_cache_mode {
#define __PAGE_KERNEL_NOENC (__PAGE_KERNEL)
#define __PAGE_KERNEL_NOENC_WP (__PAGE_KERNEL_WP)
-#define PAGE_KERNEL __pgprot(__PAGE_KERNEL | _PAGE_ENC)
-#define PAGE_KERNEL_NOENC __pgprot(__PAGE_KERNEL)
-#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC)
-#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC | _PAGE_ENC)
-#define PAGE_KERNEL_EXEC_NOENC __pgprot(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX | _PAGE_ENC)
-#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC)
-#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC)
-#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC)
-#define PAGE_KERNEL_VVAR __pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC)
-
-#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO)
-#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE)
+#define default_pgprot(x) __pgprot((x) & __default_kernel_pte_mask)
+
+#define PAGE_KERNEL default_pgprot(__PAGE_KERNEL | _PAGE_ENC)
+#define PAGE_KERNEL_NOENC default_pgprot(__PAGE_KERNEL)
+#define PAGE_KERNEL_RO default_pgprot(__PAGE_KERNEL_RO | _PAGE_ENC)
+#define PAGE_KERNEL_EXEC default_pgprot(__PAGE_KERNEL_EXEC | _PAGE_ENC)
+#define PAGE_KERNEL_EXEC_NOENC default_pgprot(__PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_RX default_pgprot(__PAGE_KERNEL_RX | _PAGE_ENC)
+#define PAGE_KERNEL_NOCACHE default_pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC)
+#define PAGE_KERNEL_LARGE default_pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC)
+#define PAGE_KERNEL_LARGE_EXEC default_pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC)
+#define PAGE_KERNEL_VVAR default_pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC)
+
+#define PAGE_KERNEL_IO default_pgprot(__PAGE_KERNEL_IO)
+#define PAGE_KERNEL_IO_NOCACHE default_pgprot(__PAGE_KERNEL_IO_NOCACHE)
#endif /* __ASSEMBLY__ */
@@ -483,6 +485,7 @@ static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
typedef struct page *pgtable_t;
extern pteval_t __supported_pte_mask;
+extern pteval_t __default_kernel_pte_mask;
extern void set_nx(void);
extern int nx_enabled;
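
default_pgprot() filters every PAGE_KERNEL* constant through __default_kernel_pte_mask, so protection bits the boot code chooses not to use are dropped in one place. A sketch under the assumption that _PAGE_GLOBAL is the bit being masked out (for example with page-table isolation active); the real mask setup lives elsewhere in the mm init code:

    /* illustrative only: drop the Global bit from all default kernel mappings */
    __default_kernel_pte_mask = __supported_pte_mask & ~(pteval_t)_PAGE_GLOBAL;

    /* PAGE_KERNEL now expands to
     *   __pgprot((__PAGE_KERNEL | _PAGE_ENC) & __default_kernel_pte_mask)
     * i.e. the filtered value, wherever the constant is used. */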
diff --git a/arch/x86/include/asm/pti.h b/arch/x86/include/asm/pti.h
index 0b5ef05b2d2d..38a17f1d5c9d 100644
--- a/arch/x86/include/asm/pti.h
+++ b/arch/x86/include/asm/pti.h
@@ -6,8 +6,10 @@
#ifdef CONFIG_PAGE_TABLE_ISOLATION
extern void pti_init(void);
extern void pti_check_boottime_disable(void);
+extern void pti_clone_kernel_text(void);
#else
static inline void pti_check_boottime_disable(void) { }
+static inline void pti_clone_kernel_text(void) { }
#endif
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 03eedc21246d..d653139857af 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -20,9 +20,13 @@
#include <asm/thread_info.h> /* for TS_COMPAT */
#include <asm/unistd.h>
+#ifdef CONFIG_X86_64
+typedef asmlinkage long (*sys_call_ptr_t)(const struct pt_regs *);
+#else
typedef asmlinkage long (*sys_call_ptr_t)(unsigned long, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);
+#endif /* CONFIG_X86_64 */
extern const sys_call_ptr_t sys_call_table[];
#if defined(CONFIG_X86_32)
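
With sys_call_ptr_t taking a single const struct pt_regs * on x86-64, the dispatcher passes the register frame straight through the table instead of forwarding six argument registers. A hedged sketch of the call site (assuming the do_syscall_64() dispatcher; illustrative):

    if (likely(nr < NR_syscalls)) {
            nr = array_index_nospec(nr, NR_syscalls);
            regs->ax = sys_call_table[nr](regs);    /* one pt_regs argument */
    }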
diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h
new file mode 100644
index 000000000000..e046a405743d
--- /dev/null
+++ b/arch/x86/include/asm/syscall_wrapper.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * syscall_wrapper.h - x86 specific wrappers to syscall definitions
+ */
+
+#ifndef _ASM_X86_SYSCALL_WRAPPER_H
+#define _ASM_X86_SYSCALL_WRAPPER_H
+
+/* Mapping of registers to parameters for syscalls on x86-64 and x32 */
+#define SC_X86_64_REGS_TO_ARGS(x, ...) \
+ __MAP(x,__SC_ARGS \
+ ,,regs->di,,regs->si,,regs->dx \
+ ,,regs->r10,,regs->r8,,regs->r9) \
+
+/* Mapping of registers to parameters for syscalls on i386 */
+#define SC_IA32_REGS_TO_ARGS(x, ...) \
+ __MAP(x,__SC_ARGS \
+ ,,(unsigned int)regs->bx,,(unsigned int)regs->cx \
+ ,,(unsigned int)regs->dx,,(unsigned int)regs->si \
+ ,,(unsigned int)regs->di,,(unsigned int)regs->bp)
+
+#ifdef CONFIG_IA32_EMULATION
+/*
+ * For IA32 emulation, we need to handle "compat" syscalls *and* create
+ * additional wrappers (aptly named __ia32_sys_xyzzy) which decode the
+ * ia32 regs in the proper order for shared or "common" syscalls. As some
+ * syscalls may not be implemented, we need to expand COND_SYSCALL in
+ * kernel/sys_ni.c and SYS_NI in kernel/time/posix-stubs.c to cover this
+ * case as well.
+ */
+#define __IA32_COMPAT_SYS_STUBx(x, name, ...) \
+ asmlinkage long __ia32_compat_sys##name(const struct pt_regs *regs);\
+ ALLOW_ERROR_INJECTION(__ia32_compat_sys##name, ERRNO); \
+ asmlinkage long __ia32_compat_sys##name(const struct pt_regs *regs)\
+ { \
+ return __se_compat_sys##name(SC_IA32_REGS_TO_ARGS(x,__VA_ARGS__));\
+ } \
+
+#define __IA32_SYS_STUBx(x, name, ...) \
+ asmlinkage long __ia32_sys##name(const struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(__ia32_sys##name, ERRNO); \
+ asmlinkage long __ia32_sys##name(const struct pt_regs *regs) \
+ { \
+ return __se_sys##name(SC_IA32_REGS_TO_ARGS(x,__VA_ARGS__));\
+ }
+
+/*
+ * To keep the naming coherent, re-define SYSCALL_DEFINE0 to create an alias
+ * named __ia32_sys_*()
+ */
+#define SYSCALL_DEFINE0(sname) \
+ SYSCALL_METADATA(_##sname, 0); \
+ asmlinkage long __x64_sys_##sname(void); \
+ ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \
+ SYSCALL_ALIAS(__ia32_sys_##sname, __x64_sys_##sname); \
+ asmlinkage long __x64_sys_##sname(void)
+
+#define COND_SYSCALL(name) \
+ cond_syscall(__x64_sys_##name); \
+ cond_syscall(__ia32_sys_##name)
+
+#define SYS_NI(name) \
+ SYSCALL_ALIAS(__x64_sys_##name, sys_ni_posix_timers); \
+ SYSCALL_ALIAS(__ia32_sys_##name, sys_ni_posix_timers)
+
+#else /* CONFIG_IA32_EMULATION */
+#define __IA32_COMPAT_SYS_STUBx(x, name, ...)
+#define __IA32_SYS_STUBx(x, fullname, name, ...)
+#endif /* CONFIG_IA32_EMULATION */
+
+
+#ifdef CONFIG_X86_X32
+/*
+ * For the x32 ABI, we need to create a stub for compat_sys_*() which is aware
+ * of the x86-64-style parameter ordering of x32 syscalls. The syscalls common
+ * with x86_64 obviously do not need such care.
+ */
+#define __X32_COMPAT_SYS_STUBx(x, name, ...) \
+ asmlinkage long __x32_compat_sys##name(const struct pt_regs *regs);\
+ ALLOW_ERROR_INJECTION(__x32_compat_sys##name, ERRNO); \
+ asmlinkage long __x32_compat_sys##name(const struct pt_regs *regs)\
+ { \
+ return __se_compat_sys##name(SC_X86_64_REGS_TO_ARGS(x,__VA_ARGS__));\
+ } \
+
+#else /* CONFIG_X86_X32 */
+#define __X32_COMPAT_SYS_STUBx(x, name, ...)
+#endif /* CONFIG_X86_X32 */
+
+
+#ifdef CONFIG_COMPAT
+/*
+ * Compat means IA32_EMULATION and/or X86_X32. As they use a different
+ * mapping of registers to parameters, we need to generate stubs for each
+ * of them.
+ */
+#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+ static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
+ __IA32_COMPAT_SYS_STUBx(x, name, __VA_ARGS__) \
+ __X32_COMPAT_SYS_STUBx(x, name, __VA_ARGS__) \
+ static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+ } \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+/*
+ * As some compat syscalls may not be implemented, we need to expand
+ * COND_SYSCALL_COMPAT in kernel/sys_ni.c and COMPAT_SYS_NI in
+ * kernel/time/posix-stubs.c to cover this case as well.
+ */
+#define COND_SYSCALL_COMPAT(name) \
+ cond_syscall(__ia32_compat_sys_##name); \
+ cond_syscall(__x32_compat_sys_##name)
+
+#define COMPAT_SYS_NI(name) \
+ SYSCALL_ALIAS(__ia32_compat_sys_##name, sys_ni_posix_timers); \
+ SYSCALL_ALIAS(__x32_compat_sys_##name, sys_ni_posix_timers)
+
+#endif /* CONFIG_COMPAT */
+
+
+/*
+ * Instead of the generic __SYSCALL_DEFINEx() definition, this macro takes
+ * struct pt_regs *regs as the only argument of the syscall stub named
+ * __x64_sys_*(). It decodes just the registers it needs and passes them on to
+ * the __se_sys_*() wrapper performing sign extension and then to the
+ * __do_sys_*() function doing the actual job. These wrappers and functions
+ * are inlined (at least in the vast majority of cases), meaning that the assembly looks
+ * as follows (slightly re-ordered for better readability):
+ *
+ * <__x64_sys_recv>: <-- syscall with 4 parameters
+ * callq <__fentry__>
+ *
+ * mov 0x70(%rdi),%rdi <-- decode regs->di
+ * mov 0x68(%rdi),%rsi <-- decode regs->si
+ * mov 0x60(%rdi),%rdx <-- decode regs->dx
+ * mov 0x38(%rdi),%rcx <-- decode regs->r10
+ *
+ * xor %r9d,%r9d <-- clear %r9
+ * xor %r8d,%r8d <-- clear %r8
+ *
+ * callq __sys_recvfrom <-- do the actual work in __sys_recvfrom()
+ * which takes 6 arguments
+ *
+ * cltq <-- extend return value to 64-bit
+ * retq <-- return
+ *
+ * This approach avoids leaking random user-provided register content down
+ * the call chain.
+ *
+ * If IA32_EMULATION is enabled, this macro generates an additional wrapper
+ * named __ia32_sys_*() which decodes the struct pt_regs *regs according
+ * to the i386 calling convention (bx, cx, dx, si, di, bp).
+ */
+#define __SYSCALL_DEFINEx(x, name, ...) \
+ asmlinkage long __x64_sys##name(const struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(__x64_sys##name, ERRNO); \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
+ asmlinkage long __x64_sys##name(const struct pt_regs *regs) \
+ { \
+ return __se_sys##name(SC_X86_64_REGS_TO_ARGS(x,__VA_ARGS__));\
+ } \
+ __IA32_SYS_STUBx(x, name, __VA_ARGS__) \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__));\
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
+ return ret; \
+ } \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+/*
+ * As the generic SYSCALL_DEFINE0() macro does not decode any parameters for
+ * obvious reasons, and passing struct pt_regs *regs to it in %rdi does not
+ * hurt, we only need to re-define it here to keep the naming congruent to
+ * SYSCALL_DEFINEx() -- which is essential for the COND_SYSCALL() and SYS_NI()
+ * macros to work correctly.
+ */
+#ifndef SYSCALL_DEFINE0
+#define SYSCALL_DEFINE0(sname) \
+ SYSCALL_METADATA(_##sname, 0); \
+ asmlinkage long __x64_sys_##sname(void); \
+ ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \
+ asmlinkage long __x64_sys_##sname(void)
+#endif
+
+#ifndef COND_SYSCALL
+#define COND_SYSCALL(name) cond_syscall(__x64_sys_##name)
+#endif
+
+#ifndef SYS_NI
+#define SYS_NI(name) SYSCALL_ALIAS(__x64_sys_##name, sys_ni_posix_timers);
+#endif
+
+
+/*
+ * For VSYSCALLS, we need to declare these three syscalls with the new
+ * pt_regs-based calling convention for in-kernel use.
+ */
+struct pt_regs;
+asmlinkage long __x64_sys_getcpu(const struct pt_regs *regs);
+asmlinkage long __x64_sys_gettimeofday(const struct pt_regs *regs);
+asmlinkage long __x64_sys_time(const struct pt_regs *regs);
+
+#endif /* _ASM_X86_SYSCALL_WRAPPER_H */
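
To make the wrapper machinery above concrete, here is a hand-expanded sketch of what SYSCALL_DEFINE2(listen, int, fd, int, backlog) would roughly produce on x86-64 without IA32 emulation (SYSCALL_METADATA and ALLOW_ERROR_INJECTION omitted, functions re-ordered so the sketch reads top-down; illustrative only):

    static inline long __do_sys_listen(int fd, int backlog)
    {
            /* body written by the SYSCALL_DEFINE2(listen, ...) user */
            return 0;
    }

    static long __se_sys_listen(long fd, long backlog)
    {
            /* narrow back to the declared types, then do the actual work */
            return __do_sys_listen((int)fd, (int)backlog);
    }

    asmlinkage long __x64_sys_listen(const struct pt_regs *regs)
    {
            /* decode only the registers this syscall needs */
            return __se_sys_listen(regs->di, regs->si);
    }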
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index ae6e05fdc24b..9fa979dd0d9d 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -18,6 +18,12 @@
/* Common in X86_32 and X86_64 */
/* kernel/ioport.c */
long ksys_ioperm(unsigned long from, unsigned long num, int turn_on);
+
+#ifdef CONFIG_X86_32
+/*
+ * These definitions are only valid on pure 32-bit systems; x86-64 uses a
+ * different syscall calling convention
+ */
asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
asmlinkage long sys_iopl(unsigned int);
@@ -32,7 +38,6 @@ asmlinkage long sys_set_thread_area(struct user_desc __user *);
asmlinkage long sys_get_thread_area(struct user_desc __user *);
/* X86_32 only */
-#ifdef CONFIG_X86_32
/* kernel/signal.c */
asmlinkage long sys_sigreturn(void);
@@ -42,15 +47,5 @@ struct vm86_struct;
asmlinkage long sys_vm86old(struct vm86_struct __user *);
asmlinkage long sys_vm86(unsigned long, unsigned long);
-#else /* CONFIG_X86_32 */
-
-/* X86_64 only */
-/* kernel/process_64.c */
-asmlinkage long sys_arch_prctl(int, unsigned long);
-
-/* kernel/sys_x86_64.c */
-asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long,
- unsigned long, unsigned long, unsigned long);
-
#endif /* CONFIG_X86_32 */
#endif /* _ASM_X86_SYSCALLS_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 84137c22fdfa..6690cd3fc8b1 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -131,7 +131,12 @@ static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
{
VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
- VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID));
+ /*
+ * Use boot_cpu_has() instead of this_cpu_has() as this function
+ * might be called during early boot. This should work even after
+ * boot because all CPUs have the same capabilities:
+ */
+ VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));
return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
}
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index aebf60357758..a06cbf019744 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -137,15 +137,15 @@ struct boot_e820_entry {
* setup data structure.
*/
struct jailhouse_setup_data {
- u16 version;
- u16 compatible_version;
- u16 pm_timer_address;
- u16 num_cpus;
- u64 pci_mmconfig_base;
- u32 tsc_khz;
- u32 apic_khz;
- u8 standard_ioapic;
- u8 cpu_ids[255];
+ __u16 version;
+ __u16 compatible_version;
+ __u16 pm_timer_address;
+ __u16 num_cpus;
+ __u64 pci_mmconfig_base;
+ __u32 tsc_khz;
+ __u32 apic_khz;
+ __u8 standard_ioapic;
+ __u8 cpu_ids[255];
} __attribute__((packed));
/* The so-called "zeropage" */
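
The switch from u16/u32/u64 to __u16/__u32/__u64 above is needed because bootparam.h is a UAPI header: the double-underscore types come from <linux/types.h> and are visible to userspace, while the plain kernel typedefs are not exported. A minimal illustration (hypothetical struct, for demonstration only):

    #include <linux/types.h>        /* exported __u8/__u16/__u32/__u64 */

    struct example_uapi_blob {      /* hypothetical, illustration only */
            __u16 version;          /* plain u16 exists only inside the kernel */
            __u64 base;
    } __attribute__((packed));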
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 7a37d9357bc4..dde444f932c1 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -200,7 +200,7 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
{
struct acpi_madt_local_x2apic *processor = NULL;
#ifdef CONFIG_X86_X2APIC
- int apic_id;
+ u32 apic_id;
u8 enabled;
#endif
@@ -222,10 +222,13 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
* to not preallocating memory for all NR_CPUS
* when we use CPU hotplug.
*/
- if (!apic->apic_id_valid(apic_id) && enabled)
- printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
- else
- acpi_register_lapic(apic_id, processor->uid, enabled);
+ if (!apic->apic_id_valid(apic_id)) {
+ if (enabled)
+ pr_warn(PREFIX "x2apic entry ignored\n");
+ return 0;
+ }
+
+ acpi_register_lapic(apic_id, processor->uid, enabled);
#else
printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
#endif
diff --git a/arch/x86/kernel/apic/apic_common.c b/arch/x86/kernel/apic/apic_common.c
index a360801779ae..02b4839478b1 100644
--- a/arch/x86/kernel/apic/apic_common.c
+++ b/arch/x86/kernel/apic/apic_common.c
@@ -40,7 +40,7 @@ int default_check_phys_apicid_present(int phys_apicid)
return physid_isset(phys_apicid, phys_cpu_present_map);
}
-int default_apic_id_valid(int apicid)
+int default_apic_id_valid(u32 apicid)
{
return (apicid < 255);
}
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 134e04506ab4..78778b54f904 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -56,7 +56,7 @@ static u32 numachip2_set_apic_id(unsigned int id)
return id << 24;
}
-static int numachip_apic_id_valid(int apicid)
+static int numachip_apic_id_valid(u32 apicid)
{
/* Trust what bootloader passes in MADT */
return 1;
diff --git a/arch/x86/kernel/apic/x2apic.h b/arch/x86/kernel/apic/x2apic.h
index b107de381cb5..a49b3604027f 100644
--- a/arch/x86/kernel/apic/x2apic.h
+++ b/arch/x86/kernel/apic/x2apic.h
@@ -1,6 +1,6 @@
/* Common bits for X2APIC cluster/physical modes. */
-int x2apic_apic_id_valid(int apicid);
+int x2apic_apic_id_valid(u32 apicid);
int x2apic_apic_id_registered(void);
void __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest);
unsigned int x2apic_get_apic_id(unsigned long id);
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index e2829bf40e4a..b5cf9e7b3830 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -101,7 +101,7 @@ static int x2apic_phys_probe(void)
}
/* Common x2apic functions, also used by x2apic_cluster */
-int x2apic_apic_id_valid(int apicid)
+int x2apic_apic_id_valid(u32 apicid)
{
return 1;
}
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index f11910b44638..efaf2d4f9c3c 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -557,7 +557,7 @@ static void uv_send_IPI_all(int vector)
uv_send_IPI_mask(cpu_online_mask, vector);
}
-static int uv_apic_id_valid(int apicid)
+static int uv_apic_id_valid(u32 apicid)
{
return 1;
}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4702fbd98f92..8a5b185735e1 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -848,18 +848,6 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
c->x86_power = edx;
}
- if (c->extended_cpuid_level >= 0x80000008) {
- cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
-
- c->x86_virt_bits = (eax >> 8) & 0xff;
- c->x86_phys_bits = eax & 0xff;
- c->x86_capability[CPUID_8000_0008_EBX] = ebx;
- }
-#ifdef CONFIG_X86_32
- else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
- c->x86_phys_bits = 36;
-#endif
-
if (c->extended_cpuid_level >= 0x8000000a)
c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
@@ -874,6 +862,23 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
apply_forced_caps(c);
}
+static void get_cpu_address_sizes(struct cpuinfo_x86 *c)
+{
+ u32 eax, ebx, ecx, edx;
+
+ if (c->extended_cpuid_level >= 0x80000008) {
+ cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
+
+ c->x86_virt_bits = (eax >> 8) & 0xff;
+ c->x86_phys_bits = eax & 0xff;
+ c->x86_capability[CPUID_8000_0008_EBX] = ebx;
+ }
+#ifdef CONFIG_X86_32
+ else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
+ c->x86_phys_bits = 36;
+#endif
+}
+
static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
@@ -965,6 +970,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
cpu_detect(c);
get_cpu_vendor(c);
get_cpu_cap(c);
+ get_cpu_address_sizes(c);
setup_force_cpu_cap(X86_FEATURE_CPUID);
if (this_cpu->c_early_init)
@@ -1097,6 +1103,8 @@ static void generic_identify(struct cpuinfo_x86 *c)
get_cpu_cap(c);
+ get_cpu_address_sizes(c);
+
if (c->cpuid_level >= 0x00000001) {
c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
index 904b0a3c4e53..2c0bd38a44ab 100644
--- a/arch/x86/kernel/cpu/cpuid-deps.c
+++ b/arch/x86/kernel/cpu/cpuid-deps.c
@@ -19,7 +19,7 @@ struct cpuid_dep {
* called from cpu hotplug. It shouldn't do anything in this case,
* but it's difficult to tell that to the init reference checker.
*/
-const static struct cpuid_dep cpuid_deps[] = {
+static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_XSAVEOPT, X86_FEATURE_XSAVE },
{ X86_FEATURE_XSAVEC, X86_FEATURE_XSAVE },
{ X86_FEATURE_XSAVES, X86_FEATURE_XSAVE },
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 1f6680427ff0..f631a3f15587 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -38,37 +38,6 @@
#include <asm/virtext.h>
#include <asm/intel_pt.h>
-/* Alignment required for elf header segment */
-#define ELF_CORE_HEADER_ALIGN 4096
-
-/* This primarily represents number of split ranges due to exclusion */
-#define CRASH_MAX_RANGES 16
-
-struct crash_mem_range {
- u64 start, end;
-};
-
-struct crash_mem {
- unsigned int nr_ranges;
- struct crash_mem_range ranges[CRASH_MAX_RANGES];
-};
-
-/* Misc data about ram ranges needed to prepare elf headers */
-struct crash_elf_data {
- struct kimage *image;
- /*
- * Total number of ram ranges we have after various adjustments for
- * crash reserved region, etc.
- */
- unsigned int max_nr_ranges;
-
- /* Pointer to elf header */
- void *ehdr;
- /* Pointer to next phdr */
- void *bufp;
- struct crash_mem mem;
-};
-
/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
struct boot_params *params;
@@ -218,124 +187,49 @@ static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
return 0;
}
-
/* Gather all the required information to prepare elf headers for ram regions */
-static void fill_up_crash_elf_data(struct crash_elf_data *ced,
- struct kimage *image)
+static struct crash_mem *fill_up_crash_elf_data(void)
{
unsigned int nr_ranges = 0;
-
- ced->image = image;
+ struct crash_mem *cmem;
walk_system_ram_res(0, -1, &nr_ranges,
get_nr_ram_ranges_callback);
+ if (!nr_ranges)
+ return NULL;
- ced->max_nr_ranges = nr_ranges;
-
- /* Exclusion of crash region could split memory ranges */
- ced->max_nr_ranges++;
-
- /* If crashk_low_res is not 0, another range split possible */
- if (crashk_low_res.end)
- ced->max_nr_ranges++;
-}
-
-static int exclude_mem_range(struct crash_mem *mem,
- unsigned long long mstart, unsigned long long mend)
-{
- int i, j;
- unsigned long long start, end;
- struct crash_mem_range temp_range = {0, 0};
-
- for (i = 0; i < mem->nr_ranges; i++) {
- start = mem->ranges[i].start;
- end = mem->ranges[i].end;
-
- if (mstart > end || mend < start)
- continue;
-
- /* Truncate any area outside of range */
- if (mstart < start)
- mstart = start;
- if (mend > end)
- mend = end;
-
- /* Found completely overlapping range */
- if (mstart == start && mend == end) {
- mem->ranges[i].start = 0;
- mem->ranges[i].end = 0;
- if (i < mem->nr_ranges - 1) {
- /* Shift rest of the ranges to left */
- for (j = i; j < mem->nr_ranges - 1; j++) {
- mem->ranges[j].start =
- mem->ranges[j+1].start;
- mem->ranges[j].end =
- mem->ranges[j+1].end;
- }
- }
- mem->nr_ranges--;
- return 0;
- }
-
- if (mstart > start && mend < end) {
- /* Split original range */
- mem->ranges[i].end = mstart - 1;
- temp_range.start = mend + 1;
- temp_range.end = end;
- } else if (mstart != start)
- mem->ranges[i].end = mstart - 1;
- else
- mem->ranges[i].start = mend + 1;
- break;
- }
+ /*
+ * Exclusion of crash region and/or crashk_low_res may cause
+	 * another range split. So add two extra slots here.
+ */
+ nr_ranges += 2;
+ cmem = vzalloc(sizeof(struct crash_mem) +
+ sizeof(struct crash_mem_range) * nr_ranges);
+ if (!cmem)
+ return NULL;
- /* If a split happend, add the split to array */
- if (!temp_range.end)
- return 0;
+ cmem->max_nr_ranges = nr_ranges;
+ cmem->nr_ranges = 0;
- /* Split happened */
- if (i == CRASH_MAX_RANGES - 1) {
- pr_err("Too many crash ranges after split\n");
- return -ENOMEM;
- }
-
- /* Location where new range should go */
- j = i + 1;
- if (j < mem->nr_ranges) {
- /* Move over all ranges one slot towards the end */
- for (i = mem->nr_ranges - 1; i >= j; i--)
- mem->ranges[i + 1] = mem->ranges[i];
- }
-
- mem->ranges[j].start = temp_range.start;
- mem->ranges[j].end = temp_range.end;
- mem->nr_ranges++;
- return 0;
+ return cmem;
}
/*
* Look for any unwanted ranges between mstart, mend and remove them. This
- * might lead to split and split ranges are put in ced->mem.ranges[] array
+ * might lead to split and split ranges are put in cmem->ranges[] array
*/
-static int elf_header_exclude_ranges(struct crash_elf_data *ced,
- unsigned long long mstart, unsigned long long mend)
+static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
- struct crash_mem *cmem = &ced->mem;
int ret = 0;
- memset(cmem->ranges, 0, sizeof(cmem->ranges));
-
- cmem->ranges[0].start = mstart;
- cmem->ranges[0].end = mend;
- cmem->nr_ranges = 1;
-
/* Exclude crashkernel region */
- ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
+ ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
if (ret)
return ret;
if (crashk_low_res.end) {
- ret = exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
+ ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
+ crashk_low_res.end);
if (ret)
return ret;
}
@@ -345,144 +239,12 @@ static int elf_header_exclude_ranges(struct crash_elf_data *ced,
static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
- struct crash_elf_data *ced = arg;
- Elf64_Ehdr *ehdr;
- Elf64_Phdr *phdr;
- unsigned long mstart, mend;
- struct kimage *image = ced->image;
- struct crash_mem *cmem;
- int ret, i;
-
- ehdr = ced->ehdr;
-
- /* Exclude unwanted mem ranges */
- ret = elf_header_exclude_ranges(ced, res->start, res->end);
- if (ret)
- return ret;
-
- /* Go through all the ranges in ced->mem.ranges[] and prepare phdr */
- cmem = &ced->mem;
-
- for (i = 0; i < cmem->nr_ranges; i++) {
- mstart = cmem->ranges[i].start;
- mend = cmem->ranges[i].end;
-
- phdr = ced->bufp;
- ced->bufp += sizeof(Elf64_Phdr);
-
- phdr->p_type = PT_LOAD;
- phdr->p_flags = PF_R|PF_W|PF_X;
- phdr->p_offset = mstart;
-
- /*
- * If a range matches backup region, adjust offset to backup
- * segment.
- */
- if (mstart == image->arch.backup_src_start &&
- (mend - mstart + 1) == image->arch.backup_src_sz)
- phdr->p_offset = image->arch.backup_load_addr;
-
- phdr->p_paddr = mstart;
- phdr->p_vaddr = (unsigned long long) __va(mstart);
- phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
- phdr->p_align = 0;
- ehdr->e_phnum++;
- pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
- phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
- ehdr->e_phnum, phdr->p_offset);
- }
-
- return ret;
-}
-
-static int prepare_elf64_headers(struct crash_elf_data *ced,
- void **addr, unsigned long *sz)
-{
- Elf64_Ehdr *ehdr;
- Elf64_Phdr *phdr;
- unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
- unsigned char *buf, *bufp;
- unsigned int cpu;
- unsigned long long notes_addr;
- int ret;
+ struct crash_mem *cmem = arg;
- /* extra phdr for vmcoreinfo elf note */
- nr_phdr = nr_cpus + 1;
- nr_phdr += ced->max_nr_ranges;
-
- /*
- * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
- * area on x86_64 (ffffffff80000000 - ffffffffa0000000).
- * I think this is required by tools like gdb. So same physical
- * memory will be mapped in two elf headers. One will contain kernel
- * text virtual addresses and other will have __va(physical) addresses.
- */
+ cmem->ranges[cmem->nr_ranges].start = res->start;
+ cmem->ranges[cmem->nr_ranges].end = res->end;
+ cmem->nr_ranges++;
- nr_phdr++;
- elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
- elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);
-
- buf = vzalloc(elf_sz);
- if (!buf)
- return -ENOMEM;
-
- bufp = buf;
- ehdr = (Elf64_Ehdr *)bufp;
- bufp += sizeof(Elf64_Ehdr);
- memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
- ehdr->e_ident[EI_CLASS] = ELFCLASS64;
- ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
- ehdr->e_ident[EI_VERSION] = EV_CURRENT;
- ehdr->e_ident[EI_OSABI] = ELF_OSABI;
- memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
- ehdr->e_type = ET_CORE;
- ehdr->e_machine = ELF_ARCH;
- ehdr->e_version = EV_CURRENT;
- ehdr->e_phoff = sizeof(Elf64_Ehdr);
- ehdr->e_ehsize = sizeof(Elf64_Ehdr);
- ehdr->e_phentsize = sizeof(Elf64_Phdr);
-
- /* Prepare one phdr of type PT_NOTE for each present cpu */
- for_each_present_cpu(cpu) {
- phdr = (Elf64_Phdr *)bufp;
- bufp += sizeof(Elf64_Phdr);
- phdr->p_type = PT_NOTE;
- notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
- phdr->p_offset = phdr->p_paddr = notes_addr;
- phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
- (ehdr->e_phnum)++;
- }
-
- /* Prepare one PT_NOTE header for vmcoreinfo */
- phdr = (Elf64_Phdr *)bufp;
- bufp += sizeof(Elf64_Phdr);
- phdr->p_type = PT_NOTE;
- phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
- phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
- (ehdr->e_phnum)++;
-
-#ifdef CONFIG_X86_64
- /* Prepare PT_LOAD type program header for kernel text region */
- phdr = (Elf64_Phdr *)bufp;
- bufp += sizeof(Elf64_Phdr);
- phdr->p_type = PT_LOAD;
- phdr->p_flags = PF_R|PF_W|PF_X;
- phdr->p_vaddr = (Elf64_Addr)_text;
- phdr->p_filesz = phdr->p_memsz = _end - _text;
- phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
- (ehdr->e_phnum)++;
-#endif
-
- /* Prepare PT_LOAD headers for system ram chunks. */
- ced->ehdr = ehdr;
- ced->bufp = bufp;
- ret = walk_system_ram_res(0, -1, ced,
- prepare_elf64_ram_headers_callback);
- if (ret < 0)
- return ret;
-
- *addr = buf;
- *sz = elf_sz;
return 0;
}
@@ -490,18 +252,46 @@ static int prepare_elf64_headers(struct crash_elf_data *ced,
static int prepare_elf_headers(struct kimage *image, void **addr,
unsigned long *sz)
{
- struct crash_elf_data *ced;
- int ret;
+ struct crash_mem *cmem;
+ Elf64_Ehdr *ehdr;
+ Elf64_Phdr *phdr;
+ int ret, i;
- ced = kzalloc(sizeof(*ced), GFP_KERNEL);
- if (!ced)
+ cmem = fill_up_crash_elf_data();
+ if (!cmem)
return -ENOMEM;
- fill_up_crash_elf_data(ced, image);
+ ret = walk_system_ram_res(0, -1, cmem,
+ prepare_elf64_ram_headers_callback);
+ if (ret)
+ goto out;
+
+ /* Exclude unwanted mem ranges */
+ ret = elf_header_exclude_ranges(cmem);
+ if (ret)
+ goto out;
/* By default prepare 64bit headers */
- ret = prepare_elf64_headers(ced, addr, sz);
- kfree(ced);
+ ret = crash_prepare_elf64_headers(cmem,
+ IS_ENABLED(CONFIG_X86_64), addr, sz);
+ if (ret)
+ goto out;
+
+ /*
+ * If a range matches backup region, adjust offset to backup
+ * segment.
+ */
+ ehdr = (Elf64_Ehdr *)*addr;
+ phdr = (Elf64_Phdr *)(ehdr + 1);
+ for (i = 0; i < ehdr->e_phnum; phdr++, i++)
+ if (phdr->p_type == PT_LOAD &&
+ phdr->p_paddr == image->arch.backup_src_start &&
+ phdr->p_memsz == image->arch.backup_src_sz) {
+ phdr->p_offset = image->arch.backup_load_addr;
+ break;
+ }
+out:
+ vfree(cmem);
return ret;
}
@@ -547,14 +337,14 @@ static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
/* Exclude Backup region */
start = image->arch.backup_load_addr;
end = start + image->arch.backup_src_sz - 1;
- ret = exclude_mem_range(cmem, start, end);
+ ret = crash_exclude_mem_range(cmem, start, end);
if (ret)
return ret;
/* Exclude elf header region */
start = image->arch.elf_load_addr;
end = start + image->arch.elf_headers_sz - 1;
- return exclude_mem_range(cmem, start, end);
+ return crash_exclude_mem_range(cmem, start, end);
}
/* Prepare memory map for crash dump kernel */
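The open-coded exclusion logic removed above is now handled by the generic crash_exclude_mem_range() helper; the case it has to cover is that carving a window out of one range can shrink it from either side or split it in two, which is also why fill_up_crash_elf_data() reserves two extra slots. A simplified standalone sketch of that splitting (illustrative, not the kexec API):

        #include <stdio.h>

        struct range { unsigned long long start, end; };

        /* Remove [estart, eend] from r; returns how many pieces survive (0-2). */
        static int exclude_range(struct range r, unsigned long long estart,
                                 unsigned long long eend, struct range out[2])
        {
                int n = 0;

                if (eend < r.start || estart > r.end) {         /* no overlap */
                        out[n++] = r;
                        return n;
                }
                if (estart > r.start)                           /* left piece kept */
                        out[n++] = (struct range){ r.start, estart - 1 };
                if (eend < r.end)                               /* right piece kept */
                        out[n++] = (struct range){ eend + 1, r.end };
                return n;
        }

        int main(void)
        {
                struct range out[2];
                int i, n = exclude_range((struct range){ 0x1000, 0x9000 },
                                         0x4000, 0x5000, out);

                for (i = 0; i < n; i++)                         /* two pieces */
                        printf("[%llx-%llx]\n", out[i].start, out[i].end);
                return 0;
        }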
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index e5ec3cafa72e..aebd0d5bc086 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -195,6 +195,10 @@ void init_espfix_ap(int cpu)
pte_p = pte_offset_kernel(&pmd, addr);
stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));
+ /*
+ * __PAGE_KERNEL_* includes _PAGE_GLOBAL, which we want since
+ * this is mapped to userspace.
+ */
pte = __pte(__pa(stack_page) | ((__PAGE_KERNEL_RO | _PAGE_ENC) & ptemask));
for (n = 0; n < ESPFIX_PTE_CLONES; n++)
set_pte(&pte_p[n*PTE_STRIDE], pte);
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 0c855deee165..0c408f8c4ed4 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -195,6 +195,8 @@ unsigned long __head __startup_64(unsigned long physaddr,
pud[i + 1] = (pudval_t)pmd + pgtable_flags;
pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
+ /* Filter out unsupported __PAGE_KERNEL_* bits: */
+ pmd_entry &= __supported_pte_mask;
pmd_entry += sme_get_me_mask();
pmd_entry += physaddr;
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 48385c1074a5..8344dd2f310a 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -399,8 +399,13 @@ NEXT_PAGE(level3_ident_pgt)
.quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
.fill 511, 8, 0
NEXT_PAGE(level2_ident_pgt)
- /* Since I easily can, map the first 1G.
+ /*
+ * Since I easily can, map the first 1G.
* Don't set NX because code runs from these pages.
+ *
+	 * Note: This sets _PAGE_GLOBAL regardless of whether
+	 * the CPU supports it or whether it is enabled. But,
+ * the CPU should ignore the bit.
*/
PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#else
@@ -431,6 +436,10 @@ NEXT_PAGE(level2_kernel_pgt)
* (NOTE: at +512MB starts the module area, see MODULES_VADDR.
* If you want to increase this then increase MODULES_VADDR
* too.)
+ *
+ * This table is eventually used by the kernel during normal
+ * runtime. Care must be taken to clear out undesired bits
+ * later, like _PAGE_RW or _PAGE_GLOBAL in some cases.
*/
PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
KERNEL_IMAGE_SIZE/PMD_SIZE)
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index fb095ba0c02f..3182908b7e6c 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -334,7 +334,6 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
unsigned long setup_header_size, params_cmdline_sz;
struct boot_params *params;
unsigned long bootparam_load_addr, kernel_load_addr, initrd_load_addr;
- unsigned long purgatory_load_addr;
struct bzimage64_data *ldata;
struct kexec_entry64_regs regs64;
void *stack;
@@ -342,6 +341,8 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
unsigned int efi_map_offset, efi_map_sz, efi_setup_data_offset;
struct kexec_buf kbuf = { .image = image, .buf_max = ULONG_MAX,
.top_down = true };
+ struct kexec_buf pbuf = { .image = image, .buf_min = MIN_PURGATORY_ADDR,
+ .buf_max = ULONG_MAX, .top_down = true };
header = (struct setup_header *)(kernel + setup_hdr_offset);
setup_sects = header->setup_sects;
@@ -379,14 +380,13 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
* Load purgatory. For 64bit entry point, purgatory code can be
* anywhere.
*/
- ret = kexec_load_purgatory(image, MIN_PURGATORY_ADDR, ULONG_MAX, 1,
- &purgatory_load_addr);
+ ret = kexec_load_purgatory(image, &pbuf);
if (ret) {
pr_err("Loading purgatory failed\n");
return ERR_PTR(ret);
}
- pr_debug("Loaded purgatory at 0x%lx\n", purgatory_load_addr);
+ pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem);
/*
@@ -538,7 +538,7 @@ static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
}
#endif
-struct kexec_file_ops kexec_bzImage64_ops = {
+const struct kexec_file_ops kexec_bzImage64_ops = {
.probe = bzImage64_probe,
.load = bzImage64_load,
.cleanup = bzImage64_cleanup,
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 26d713ecad34..d41d896481b8 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -145,6 +145,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
unsigned long offset = i << PAGE_SHIFT;
const void *src = (char *)ldt->entries + offset;
unsigned long pfn;
+ pgprot_t pte_prot;
pte_t pte, *ptep;
va = (unsigned long)ldt_slot_va(slot) + offset;
@@ -163,7 +164,10 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
* target via some kernel interface which misses a
* permission check.
*/
- pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL));
+ pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL);
+	/* Filter out unsupported __PAGE_KERNEL* bits: */
+	pgprot_val(pte_prot) &= __supported_pte_mask;
+ pte = pfn_pte(pfn, pte_prot);
set_pte_at(mm, va, ptep, pte);
pte_unmap_unlock(ptep, ptl);
}
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 93bd4fb603d1..a5e55d832d0a 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -30,8 +30,9 @@
#include <asm/set_memory.h>
#ifdef CONFIG_KEXEC_FILE
-static struct kexec_file_ops *kexec_file_loaders[] = {
+const struct kexec_file_ops * const kexec_file_loaders[] = {
&kexec_bzImage64_ops,
+ NULL
};
#endif
@@ -364,27 +365,6 @@ void arch_crash_save_vmcoreinfo(void)
/* arch-dependent functionality related to kexec file-based syscall */
#ifdef CONFIG_KEXEC_FILE
-int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
- unsigned long buf_len)
-{
- int i, ret = -ENOEXEC;
- struct kexec_file_ops *fops;
-
- for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
- fops = kexec_file_loaders[i];
- if (!fops || !fops->probe)
- continue;
-
- ret = fops->probe(buf, buf_len);
- if (!ret) {
- image->fops = fops;
- return ret;
- }
- }
-
- return ret;
-}
-
void *arch_kexec_kernel_image_load(struct kimage *image)
{
vfree(image->arch.elf_headers);
@@ -399,88 +379,53 @@ void *arch_kexec_kernel_image_load(struct kimage *image)
image->cmdline_buf_len);
}
-int arch_kimage_file_post_load_cleanup(struct kimage *image)
-{
- if (!image->fops || !image->fops->cleanup)
- return 0;
-
- return image->fops->cleanup(image->image_loader_data);
-}
-
-#ifdef CONFIG_KEXEC_VERIFY_SIG
-int arch_kexec_kernel_verify_sig(struct kimage *image, void *kernel,
- unsigned long kernel_len)
-{
- if (!image->fops || !image->fops->verify_sig) {
- pr_debug("kernel loader does not support signature verification.");
- return -EKEYREJECTED;
- }
-
- return image->fops->verify_sig(kernel, kernel_len);
-}
-#endif
-
/*
* Apply purgatory relocations.
*
- * ehdr: Pointer to elf headers
- * sechdrs: Pointer to section headers.
- * relsec: section index of SHT_RELA section.
+ * @pi: Purgatory to be relocated.
+ * @section:	Section the relocations apply to.
+ * @relsec: Section containing RELAs.
+ * @symtabsec: Corresponding symtab.
*
* TODO: Some of the code belongs to generic code. Move that in kexec.c.
*/
-int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
- Elf64_Shdr *sechdrs, unsigned int relsec)
+int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+ Elf_Shdr *section, const Elf_Shdr *relsec,
+ const Elf_Shdr *symtabsec)
{
unsigned int i;
Elf64_Rela *rel;
Elf64_Sym *sym;
void *location;
- Elf64_Shdr *section, *symtabsec;
unsigned long address, sec_base, value;
const char *strtab, *name, *shstrtab;
+ const Elf_Shdr *sechdrs;
- /*
- * ->sh_offset has been modified to keep the pointer to section
- * contents in memory
- */
- rel = (void *)sechdrs[relsec].sh_offset;
-
- /* Section to which relocations apply */
- section = &sechdrs[sechdrs[relsec].sh_info];
-
- pr_debug("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
-
- /* Associated symbol table */
- symtabsec = &sechdrs[sechdrs[relsec].sh_link];
-
- /* String table */
- if (symtabsec->sh_link >= ehdr->e_shnum) {
- /* Invalid strtab section number */
- pr_err("Invalid string table section index %d\n",
- symtabsec->sh_link);
- return -ENOEXEC;
- }
+ /* String & section header string table */
+ sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
+ strtab = (char *)pi->ehdr + sechdrs[symtabsec->sh_link].sh_offset;
+ shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset;
- strtab = (char *)sechdrs[symtabsec->sh_link].sh_offset;
+ rel = (void *)pi->ehdr + relsec->sh_offset;
- /* section header string table */
- shstrtab = (char *)sechdrs[ehdr->e_shstrndx].sh_offset;
+ pr_debug("Applying relocate section %s to %u\n",
+ shstrtab + relsec->sh_name, relsec->sh_info);
- for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ for (i = 0; i < relsec->sh_size / sizeof(*rel); i++) {
/*
* rel[i].r_offset contains byte offset from beginning
* of section to the storage unit affected.
*
- * This is location to update (->sh_offset). This is temporary
- * buffer where section is currently loaded. This will finally
- * be loaded to a different address later, pointed to by
+ * This is location to update. This is temporary buffer
+ * where section is currently loaded. This will finally be
+ * loaded to a different address later, pointed to by
* ->sh_addr. kexec takes care of moving it
* (kexec_load_segment()).
*/
- location = (void *)(section->sh_offset + rel[i].r_offset);
+ location = pi->purgatory_buf;
+ location += section->sh_offset;
+ location += rel[i].r_offset;
/* Final address of the location */
address = section->sh_addr + rel[i].r_offset;
@@ -491,8 +436,8 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
* to apply. ELF64_R_SYM() and ELF64_R_TYPE() macros get
* these respectively.
*/
- sym = (Elf64_Sym *)symtabsec->sh_offset +
- ELF64_R_SYM(rel[i].r_info);
+ sym = (void *)pi->ehdr + symtabsec->sh_offset;
+ sym += ELF64_R_SYM(rel[i].r_info);
if (sym->st_name)
name = strtab + sym->st_name;
@@ -515,12 +460,12 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
if (sym->st_shndx == SHN_ABS)
sec_base = 0;
- else if (sym->st_shndx >= ehdr->e_shnum) {
+ else if (sym->st_shndx >= pi->ehdr->e_shnum) {
pr_err("Invalid section %d for symbol %s\n",
sym->st_shndx, name);
return -ENOEXEC;
} else
- sec_base = sechdrs[sym->st_shndx].sh_addr;
+ sec_base = pi->sechdrs[sym->st_shndx].sh_addr;
value = sym->st_value;
value += sec_base;
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index df92605d8724..14c057f29979 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -26,7 +26,7 @@ static inline void signal_compat_build_tests(void)
* new fields are handled in copy_siginfo_to_user32()!
*/
BUILD_BUG_ON(NSIGILL != 11);
- BUILD_BUG_ON(NSIGFPE != 14);
+ BUILD_BUG_ON(NSIGFPE != 15);
BUILD_BUG_ON(NSIGSEGV != 7);
BUILD_BUG_ON(NSIGBUS != 5);
BUILD_BUG_ON(NSIGTRAP != 4);
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 476d810639a8..b45f5aaefd74 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -27,8 +27,20 @@ EXPORT_SYMBOL(get_cpu_entry_area);
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
unsigned long va = (unsigned long) cea_vaddr;
+ pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);
- set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags));
+ /*
+ * The cpu_entry_area is shared between the user and kernel
+ * page tables. All of its ptes can safely be global.
+ * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
+ * non-present PTEs, so be careful not to set it in that
+ * case to avoid confusion.
+ */
+ if (boot_cpu_has(X86_FEATURE_PGE) &&
+ (pgprot_val(flags) & _PAGE_PRESENT))
+ pte = pte_set_flags(pte, _PAGE_GLOBAL);
+
+ set_pte_vaddr(va, pte);
}
static void __init
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index 9aa22be8331e..a2f0c7e20fb0 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -98,6 +98,9 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
if (!info->kernpg_flag)
info->kernpg_flag = _KERNPG_TABLE;
+ /* Filter out unsupported __PAGE_KERNEL_* bits: */
+ info->kernpg_flag &= __default_kernel_pte_mask;
+
for (; addr < end; addr = next) {
pgd_t *pgd = pgd_page + pgd_index(addr);
p4d_t *p4d;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 82f5252c723a..fec82b577c18 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -161,12 +161,6 @@ struct map_range {
static int page_size_mask;
-static void enable_global_pages(void)
-{
- if (!static_cpu_has(X86_FEATURE_PTI))
- __supported_pte_mask |= _PAGE_GLOBAL;
-}
-
static void __init probe_page_size_mask(void)
{
/*
@@ -187,9 +181,15 @@ static void __init probe_page_size_mask(void)
__supported_pte_mask &= ~_PAGE_GLOBAL;
if (boot_cpu_has(X86_FEATURE_PGE)) {
cr4_set_bits_and_update_boot(X86_CR4_PGE);
- enable_global_pages();
+ __supported_pte_mask |= _PAGE_GLOBAL;
}
+	/* By default, everything is supported: */
+	__default_kernel_pte_mask = __supported_pte_mask;
+	/* Except with PTI, where the kernel is mostly non-Global: */
+ if (cpu_feature_enabled(X86_FEATURE_PTI))
+ __default_kernel_pte_mask &= ~_PAGE_GLOBAL;
+
/* Enable 1 GB linear kernel mappings if available: */
if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
printk(KERN_INFO "Using GB pages for direct mapping\n");
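The new __default_kernel_pte_mask is applied the same way throughout the series: build the desired protection, then AND it with the mask before installing the PTE. A standalone sketch of that filtering step (bit values are illustrative, not the real _PAGE_* layout):

        #include <stdio.h>

        typedef unsigned long pteval_t;

        #define FAKE_PAGE_PRESENT (1UL << 0)
        #define FAKE_PAGE_RW      (1UL << 1)
        #define FAKE_PAGE_GLOBAL  (1UL << 8)

        int main(void)
        {
                /* Bits the hardware supports ... */
                pteval_t supported = ~0UL;
                /* ... and bits normal kernel mappings may use; with PTI the
                 * kernel is mostly non-Global, so Global is dropped here. */
                pteval_t dflt = supported & ~FAKE_PAGE_GLOBAL;

                pteval_t prot = FAKE_PAGE_PRESENT | FAKE_PAGE_RW | FAKE_PAGE_GLOBAL;

                prot &= dflt;   /* the "filter out unsupported bits" step */

                printf("global bit %s\n",
                       (prot & FAKE_PAGE_GLOBAL) ? "kept" : "dropped");
                return 0;
        }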
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 8008db2bddb3..c893c6a3d707 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -558,8 +558,14 @@ static void __init pagetable_init(void)
permanent_kmaps_init(pgd_base);
}
-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
+#define DEFAULT_PTE_MASK ~(_PAGE_NX | _PAGE_GLOBAL)
+/* Bits supported by the hardware: */
+pteval_t __supported_pte_mask __read_mostly = DEFAULT_PTE_MASK;
+/* Bits allowed in normal kernel mappings: */
+pteval_t __default_kernel_pte_mask __read_mostly = DEFAULT_PTE_MASK;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
+/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
+EXPORT_SYMBOL(__default_kernel_pte_mask);
/* user-defined highmem size */
static unsigned int highmem_pages = -1;
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 66de40e45f58..0a400606dea0 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -65,8 +65,13 @@
* around without checking the pgd every time.
*/
+/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = ~0;
+/* Bits allowed in normal kernel mappings: */
+pteval_t __default_kernel_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
+/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
+EXPORT_SYMBOL(__default_kernel_pte_mask);
int force_personality32;
@@ -1286,6 +1291,12 @@ void mark_rodata_ro(void)
(unsigned long) __va(__pa_symbol(_sdata)));
debug_checkwx();
+
+ /*
+ * Do this after all of the manipulation of the
+ * kernel text page tables are complete.
+ */
+ pti_clone_kernel_text();
}
int kern_addr_valid(unsigned long addr)
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index ada98b39b8ad..b3294d36769d 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -44,6 +44,9 @@ int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
return ret;
*prot = __pgprot(__PAGE_KERNEL | cachemode2protval(pcm));
+ /* Filter out unsupported __PAGE_KERNEL* bits: */
+ pgprot_val(*prot) &= __default_kernel_pte_mask;
+
return 0;
}
EXPORT_SYMBOL_GPL(iomap_create_wc);
@@ -88,6 +91,9 @@ iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
prot = __pgprot(__PAGE_KERNEL |
cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
+ /* Filter out unsupported __PAGE_KERNEL* bits: */
+ pgprot_val(prot) &= __default_kernel_pte_mask;
+
return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index e2db83bebc3b..c63a545ec199 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -816,6 +816,9 @@ void __init __early_set_fixmap(enum fixed_addresses idx,
}
pte = early_ioremap_pte(addr);
+ /* Sanitize 'prot' against any unsupported bits: */
+ pgprot_val(flags) &= __default_kernel_pte_mask;
+
if (pgprot_val(flags))
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
else
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index d8ff013ea9d0..980dbebd0ca7 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -269,6 +269,12 @@ void __init kasan_early_init(void)
pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;
+ /* Mask out unsupported __PAGE_KERNEL bits: */
+ pte_val &= __default_kernel_pte_mask;
+ pmd_val &= __default_kernel_pte_mask;
+ pud_val &= __default_kernel_pte_mask;
+ p4d_val &= __default_kernel_pte_mask;
+
for (i = 0; i < PTRS_PER_PTE; i++)
kasan_zero_pte[i] = __pte(pte_val);
@@ -371,7 +377,13 @@ void __init kasan_init(void)
*/
memset(kasan_zero_page, 0, PAGE_SIZE);
for (i = 0; i < PTRS_PER_PTE; i++) {
- pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO | _PAGE_ENC);
+ pte_t pte;
+ pgprot_t prot;
+
+ prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
+ pgprot_val(prot) &= __default_kernel_pte_mask;
+
+ pte = __pte(__pa(kasan_zero_page) | pgprot_val(prot));
set_pte(&kasan_zero_pte[i], pte);
}
/* Flush TLBs again to be sure that write protection applied. */
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 85cf12219dea..0f3d50f4c48c 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -298,9 +298,11 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
/*
* The .rodata section needs to be read-only. Using the pfn
- * catches all aliases.
+ * catches all aliases. This also includes __ro_after_init,
+ * so do not enforce until kernel_set_to_readonly is true.
*/
- if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
+ if (kernel_set_to_readonly &&
+ within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
__pa_symbol(__end_rodata) >> PAGE_SHIFT))
pgprot_val(forbidden) |= _PAGE_RW;
@@ -512,6 +514,23 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
#endif
}
+static pgprot_t pgprot_clear_protnone_bits(pgprot_t prot)
+{
+ /*
+ * _PAGE_GLOBAL means "global page" for present PTEs.
+ * But, it is also used to indicate _PAGE_PROTNONE
+ * for non-present PTEs.
+ *
+ * This ensures that a _PAGE_GLOBAL PTE going from
+ * present to non-present is not confused as
+ * _PAGE_PROTNONE.
+ */
+ if (!(pgprot_val(prot) & _PAGE_PRESENT))
+ pgprot_val(prot) &= ~_PAGE_GLOBAL;
+
+ return prot;
+}
+
static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
struct cpa_data *cpa)
@@ -566,6 +585,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
* up accordingly.
*/
old_pte = *kpte;
+ /* Clear PSE (aka _PAGE_PAT) and move PAT bit to correct position */
req_prot = pgprot_large_2_4k(old_prot);
pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
@@ -577,19 +597,9 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
* different bit positions in the two formats.
*/
req_prot = pgprot_4k_2_large(req_prot);
-
- /*
- * Set the PSE and GLOBAL flags only if the PRESENT flag is
- * set otherwise pmd_present/pmd_huge will return true even on
- * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
- * for the ancient hardware that doesn't support it.
- */
+ req_prot = pgprot_clear_protnone_bits(req_prot);
if (pgprot_val(req_prot) & _PAGE_PRESENT)
- pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
- else
- pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
-
- req_prot = canon_pgprot(req_prot);
+ pgprot_val(req_prot) |= _PAGE_PSE;
/*
* old_pfn points to the large page base pfn. So we need
@@ -674,8 +684,12 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
switch (level) {
case PG_LEVEL_2M:
ref_prot = pmd_pgprot(*(pmd_t *)kpte);
- /* clear PSE and promote PAT bit to correct position */
+ /*
+ * Clear PSE (aka _PAGE_PAT) and move
+ * PAT bit to correct position.
+ */
ref_prot = pgprot_large_2_4k(ref_prot);
+
ref_pfn = pmd_pfn(*(pmd_t *)kpte);
break;
@@ -698,23 +712,14 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
return 1;
}
- /*
- * Set the GLOBAL flags only if the PRESENT flag is set
- * otherwise pmd/pte_present will return true even on a non
- * present pmd/pte. The canon_pgprot will clear _PAGE_GLOBAL
- * for the ancient hardware that doesn't support it.
- */
- if (pgprot_val(ref_prot) & _PAGE_PRESENT)
- pgprot_val(ref_prot) |= _PAGE_GLOBAL;
- else
- pgprot_val(ref_prot) &= ~_PAGE_GLOBAL;
+ ref_prot = pgprot_clear_protnone_bits(ref_prot);
/*
* Get the target pfn from the original entry:
*/
pfn = ref_pfn;
for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
- set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot)));
+ set_pte(&pbase[i], pfn_pte(pfn, ref_prot));
if (virt_addr_valid(address)) {
unsigned long pfn = PFN_DOWN(__pa(address));
@@ -930,19 +935,7 @@ static void populate_pte(struct cpa_data *cpa,
pte = pte_offset_kernel(pmd, start);
- /*
- * Set the GLOBAL flags only if the PRESENT flag is
- * set otherwise pte_present will return true even on
- * a non present pte. The canon_pgprot will clear
- * _PAGE_GLOBAL for the ancient hardware that doesn't
- * support it.
- */
- if (pgprot_val(pgprot) & _PAGE_PRESENT)
- pgprot_val(pgprot) |= _PAGE_GLOBAL;
- else
- pgprot_val(pgprot) &= ~_PAGE_GLOBAL;
-
- pgprot = canon_pgprot(pgprot);
+ pgprot = pgprot_clear_protnone_bits(pgprot);
while (num_pages-- && start < end) {
set_pte(pte, pfn_pte(cpa->pfn, pgprot));
@@ -1234,24 +1227,14 @@ repeat:
new_prot = static_protections(new_prot, address, pfn);
- /*
- * Set the GLOBAL flags only if the PRESENT flag is
- * set otherwise pte_present will return true even on
- * a non present pte. The canon_pgprot will clear
- * _PAGE_GLOBAL for the ancient hardware that doesn't
- * support it.
- */
- if (pgprot_val(new_prot) & _PAGE_PRESENT)
- pgprot_val(new_prot) |= _PAGE_GLOBAL;
- else
- pgprot_val(new_prot) &= ~_PAGE_GLOBAL;
+ new_prot = pgprot_clear_protnone_bits(new_prot);
/*
* We need to keep the pfn from the existing PTE,
* after all we're only going to change it's attributes
* not the memory it points to
*/
- new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
+ new_pte = pfn_pte(pfn, new_prot);
cpa->pfn = pfn;
/*
* Do we really change anything ?
@@ -1428,11 +1411,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
memset(&cpa, 0, sizeof(cpa));
/*
- * Check, if we are requested to change a not supported
- * feature:
+	 * Check if we are requested to set an unsupported
+	 * feature. Clearing unsupported features is OK.
*/
mask_set = canon_pgprot(mask_set);
- mask_clr = canon_pgprot(mask_clr);
+
if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
return 0;
@@ -1775,6 +1758,12 @@ int set_memory_4k(unsigned long addr, int numpages)
__pgprot(0), 1, 0, NULL);
}
+int set_memory_nonglobal(unsigned long addr, int numpages)
+{
+ return change_page_attr_clear(&addr, numpages,
+ __pgprot(_PAGE_GLOBAL), 0);
+}
+
static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
{
struct cpa_data cpa;
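pgprot_clear_protnone_bits() above exists because the Global bit doubles as the PROT_NONE marker on non-present entries, so a present, global PTE that is made non-present must lose the bit or it will read back as PROT_NONE. A standalone sketch of that hazard (bit positions illustrative):

        #include <stdio.h>

        #define FAKE_PAGE_PRESENT  (1UL << 0)
        #define FAKE_PAGE_GLOBAL   (1UL << 8)
        /* On non-present entries the same bit is interpreted as PROT_NONE. */
        #define FAKE_PAGE_PROTNONE FAKE_PAGE_GLOBAL

        static unsigned long clear_protnone_bits(unsigned long prot)
        {
                if (!(prot & FAKE_PAGE_PRESENT))
                        prot &= ~FAKE_PAGE_GLOBAL;
                return prot;
        }

        int main(void)
        {
                /* A formerly present+global protection with PRESENT cleared: */
                unsigned long prot = FAKE_PAGE_GLOBAL;

                printf("naive protnone=%d, filtered protnone=%d\n",
                       !!(prot & FAKE_PAGE_PROTNONE),
                       !!(clear_protnone_bits(prot) & FAKE_PAGE_PROTNONE));
                return 0;
        }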
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 34cda7e0551b..ffc8c13c50e4 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/gfp.h>
+#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
@@ -583,6 +584,9 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
pgprot_t flags)
{
+ /* Sanitize 'prot' against any unsupported bits: */
+ pgprot_val(flags) &= __default_kernel_pte_mask;
+
__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}
@@ -636,6 +640,10 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
(mtrr != MTRR_TYPE_WRBACK))
return 0;
+	/* Bail out if we are on a populated non-leaf entry: */
+ if (pud_present(*pud) && !pud_huge(*pud))
+ return 0;
+
prot = pgprot_4k_2_large(prot);
set_pte((pte_t *)pud, pfn_pte(
@@ -664,6 +672,10 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
return 0;
}
+	/* Bail out if we are on a populated non-leaf entry: */
+ if (pmd_present(*pmd) && !pmd_huge(*pmd))
+ return 0;
+
prot = pgprot_4k_2_large(prot);
set_pte((pte_t *)pmd, pfn_pte(
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 631507f0c198..f1fd52f449e0 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -66,12 +66,22 @@ static void __init pti_print_if_secure(const char *reason)
pr_info("%s\n", reason);
}
+enum pti_mode {
+ PTI_AUTO = 0,
+ PTI_FORCE_OFF,
+ PTI_FORCE_ON
+} pti_mode;
+
void __init pti_check_boottime_disable(void)
{
char arg[5];
int ret;
+ /* Assume mode is auto unless overridden. */
+ pti_mode = PTI_AUTO;
+
if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
+ pti_mode = PTI_FORCE_OFF;
pti_print_if_insecure("disabled on XEN PV.");
return;
}
@@ -79,18 +89,23 @@ void __init pti_check_boottime_disable(void)
ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
if (ret > 0) {
if (ret == 3 && !strncmp(arg, "off", 3)) {
+ pti_mode = PTI_FORCE_OFF;
pti_print_if_insecure("disabled on command line.");
return;
}
if (ret == 2 && !strncmp(arg, "on", 2)) {
+ pti_mode = PTI_FORCE_ON;
pti_print_if_secure("force enabled on command line.");
goto enable;
}
- if (ret == 4 && !strncmp(arg, "auto", 4))
+ if (ret == 4 && !strncmp(arg, "auto", 4)) {
+ pti_mode = PTI_AUTO;
goto autosel;
+ }
}
if (cmdline_find_option_bool(boot_command_line, "nopti")) {
+ pti_mode = PTI_FORCE_OFF;
pti_print_if_insecure("disabled on command line.");
return;
}
@@ -149,7 +164,7 @@ pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
*
* Returns a pointer to a P4D on success, or NULL on failure.
*/
-static __init p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
+static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
@@ -177,7 +192,7 @@ static __init p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
*
* Returns a pointer to a PMD on success, or NULL on failure.
*/
-static __init pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
+static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
p4d_t *p4d = pti_user_pagetable_walk_p4d(address);
@@ -267,7 +282,7 @@ static void __init pti_setup_vsyscall(void)
static void __init pti_setup_vsyscall(void) { }
#endif
-static void __init
+static void
pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
{
unsigned long addr;
@@ -300,6 +315,27 @@ pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
return;
/*
+ * Only clone present PMDs. This ensures only setting
+ * _PAGE_GLOBAL on present PMDs. This should only be
+ * called on well-known addresses anyway, so a non-
+ * present PMD would be a surprise.
+ */
+ if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
+ return;
+
+ /*
+ * Setting 'target_pmd' below creates a mapping in both
+ * the user and kernel page tables. It is effectively
+ * global, so set it as global in both copies. Note:
+ * the X86_FEATURE_PGE check is not _required_ because
+ * the CPU ignores _PAGE_GLOBAL when PGE is not
+	 * supported. The check keeps consistency with
+	 * code that only sets this bit when supported.
+ */
+ if (boot_cpu_has(X86_FEATURE_PGE))
+ *pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);
+
+ /*
* Copy the PMD. That is, the kernelmode and usermode
* tables will share the last-level page tables of this
* address range
@@ -348,7 +384,83 @@ static void __init pti_clone_entry_text(void)
{
pti_clone_pmds((unsigned long) __entry_text_start,
(unsigned long) __irqentry_text_end,
- _PAGE_RW | _PAGE_GLOBAL);
+ _PAGE_RW);
+}
+
+/*
+ * Global pages and PCIDs are both ways to make kernel TLB entries
+ * live longer, reduce TLB misses and improve kernel performance.
+ * But, leaving all kernel text Global makes it potentially accessible
+ * to Meltdown-style attacks which make it trivial to find gadgets or
+ * defeat KASLR.
+ *
+ * Only use global pages when it is really worth it.
+ */
+static inline bool pti_kernel_image_global_ok(void)
+{
+ /*
+	 * Systems with PCIDs get little benefit from global
+ * kernel text and are not worth the downsides.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_PCID))
+ return false;
+
+ /*
+ * Only do global kernel image for pti=auto. Do the most
+ * secure thing (not global) if pti=on specified.
+ */
+ if (pti_mode != PTI_AUTO)
+ return false;
+
+ /*
+ * K8 may not tolerate the cleared _PAGE_RW on the userspace
+ * global kernel image pages. Do the safe thing (disable
+ * global kernel image). This is unlikely to ever be
+ * noticed because PTI is disabled by default on AMD CPUs.
+ */
+ if (boot_cpu_has(X86_FEATURE_K8))
+ return false;
+
+ return true;
+}
+
+/*
+ * For some configurations, map all of kernel text into the user page
+ * tables. This reduces TLB misses, especially on non-PCID systems.
+ */
+void pti_clone_kernel_text(void)
+{
+ unsigned long start = PFN_ALIGN(_text);
+ unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);
+
+ if (!pti_kernel_image_global_ok())
+ return;
+
+ pti_clone_pmds(start, end, _PAGE_RW);
+}
+
+/*
+ * This is the only user for it and it is not arch-generic like
+ * the other set_memory.h functions. Just extern it.
+ */
+extern int set_memory_nonglobal(unsigned long addr, int numpages);
+void pti_set_kernel_image_nonglobal(void)
+{
+ /*
+ * The identity map is created with PMDs, regardless of the
+ * actual length of the kernel. We need to clear
+ * _PAGE_GLOBAL up to a PMD boundary, not just to the end
+ * of the image.
+ */
+ unsigned long start = PFN_ALIGN(_text);
+ unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);
+
+ if (pti_kernel_image_global_ok())
+ return;
+
+ pr_debug("set kernel image non-global\n");
+
+ set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}
/*
@@ -362,6 +474,10 @@ void __init pti_init(void)
pr_info("enabled\n");
pti_clone_user_shared();
+
+ /* Undo all global bits from the init pagetables in head_64.S: */
+ pti_set_kernel_image_nonglobal();
+ /* Replace some of the global bits just for shared entry text: */
pti_clone_entry_text();
pti_setup_espfix64();
pti_setup_vsyscall();
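Both pti_clone_kernel_text() and pti_set_kernel_image_nonglobal() above round the image end up to a PMD boundary because the identity map is built from 2M PMDs regardless of where _end falls. A small standalone check of that arithmetic (addresses and constants are just example values):

        #include <stdio.h>

        #define EX_PMD_SIZE    (2UL << 20)          /* 2 MiB, as on x86-64 */
        #define EX_PAGE_SHIFT  12
        #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

        int main(void)
        {
                unsigned long text = 0xffffffff81000000UL;  /* example _text */
                unsigned long end  = 0xffffffff82a5d000UL;  /* example _end  */
                unsigned long pmd_end = ALIGN_UP(end, EX_PMD_SIZE);

                /* Clearing stops at the PMD boundary, not at _end itself. */
                printf("pages to make non-global: %lu\n",
                       (pmd_end - text) >> EX_PAGE_SHIFT);
                return 0;
        }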
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 74a532989308..48b14b534897 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -51,6 +51,12 @@ static int set_up_temporary_text_mapping(pgd_t *pgd)
pmd_t *pmd;
pud_t *pud;
p4d_t *p4d = NULL;
+ pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
+ pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);
+
+ /* Filter out unsupported __PAGE_KERNEL* bits: */
+ pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
+ pgprot_val(pgtable_prot) &= __default_kernel_pte_mask;
/*
* The new mapping only has to cover the page containing the image
@@ -81,15 +87,19 @@ static int set_up_temporary_text_mapping(pgd_t *pgd)
return -ENOMEM;
set_pmd(pmd + pmd_index(restore_jump_address),
- __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
+ __pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
set_pud(pud + pud_index(restore_jump_address),
- __pud(__pa(pmd) | _KERNPG_TABLE));
+ __pud(__pa(pmd) | pgprot_val(pgtable_prot)));
if (p4d) {
- set_p4d(p4d + p4d_index(restore_jump_address), __p4d(__pa(pud) | _KERNPG_TABLE));
- set_pgd(pgd + pgd_index(restore_jump_address), __pgd(__pa(p4d) | _KERNPG_TABLE));
+ p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
+ pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));
+
+ set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
+ set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
} else {
/* No p4d for 4-level paging: point the pgd to the pud page table */
- set_pgd(pgd + pgd_index(restore_jump_address), __pgd(__pa(pud) | _KERNPG_TABLE));
+		pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));
+ set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
}
return 0;
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index d70c15de417b..2e9ee023e6bc 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -6,6 +6,9 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string
targets += $(purgatory-y)
PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
+$(obj)/sha256.o: $(srctree)/lib/sha256.c
+ $(call if_changed_rule,cc_o_c)
+
LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
targets += purgatory.ro
diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c
index 470edad96bb9..025c34ac0d84 100644
--- a/arch/x86/purgatory/purgatory.c
+++ b/arch/x86/purgatory/purgatory.c
@@ -11,9 +11,9 @@
*/
#include <linux/bug.h>
+#include <linux/sha256.h>
#include <asm/purgatory.h>
-#include "sha256.h"
#include "../boot/string.h"
unsigned long purgatory_backup_dest __section(.kexec-purgatory);
diff --git a/arch/x86/purgatory/string.c b/arch/x86/purgatory/string.c
index d886b1fa36f0..795ca4f2cb3c 100644
--- a/arch/x86/purgatory/string.c
+++ b/arch/x86/purgatory/string.c
@@ -10,4 +10,16 @@
* Version 2. See the file COPYING for more details.
*/
+#include <linux/types.h>
+
#include "../boot/string.c"
+
+void *memcpy(void *dst, const void *src, size_t len)
+{
+ return __builtin_memcpy(dst, src, len);
+}
+
+void *memset(void *dst, int c, size_t len)
+{
+ return __builtin_memset(dst, c, len);
+}
diff --git a/arch/x86/um/stub_segv.c b/arch/x86/um/stub_segv.c
index 1518d2805ae8..27361cbb7ca9 100644
--- a/arch/x86/um/stub_segv.c
+++ b/arch/x86/um/stub_segv.c
@@ -6,11 +6,12 @@
#include <sysdep/stub.h>
#include <sysdep/faultinfo.h>
#include <sysdep/mcontext.h>
+#include <sys/ucontext.h>
void __attribute__ ((__section__ (".__syscall_stub")))
stub_segv_handler(int sig, siginfo_t *info, void *p)
{
- struct ucontext *uc = p;
+ ucontext_t *uc = p;
GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA),
&uc->uc_mcontext);
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
index 2163888497d3..5e53bfbe5823 100644
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -112,7 +112,7 @@ static int xen_madt_oem_check(char *oem_id, char *oem_table_id)
return xen_pv_domain();
}
-static int xen_id_always_valid(int apicid)
+static int xen_id_always_valid(u32 apicid)
{
return 1;
}
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 3c2c2530737e..c36d23aa6c35 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -1259,10 +1259,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
*/
__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
- /* Work out if we support NX */
- get_cpu_cap(&boot_cpu_data);
- x86_configure_nx();
-
/* Get mfn list */
xen_build_dynamic_phys_to_machine();
@@ -1272,6 +1268,10 @@ asmlinkage __visible void __init xen_start_kernel(void)
*/
xen_setup_gdt(0);
+ /* Work out if we support NX */
+ get_cpu_cap(&boot_cpu_data);
+ x86_configure_nx();
+
xen_init_irq_ops();
/* Let's presume PV guests always boot on vCPU with id 0. */
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index c0c756c76afe..2e20ae2fa2d6 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -425,6 +425,7 @@ static void xen_pv_play_dead(void) /* used only with HOTPLUG_CPU */
* data back is to call:
*/
tick_nohz_idle_enter();
+ tick_nohz_idle_stop_tick_protected();
cpuhp_online_idle(CPUHP_AP_ONLINE_IDLE);
}
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 96f26e026783..5077ead5e59c 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -89,7 +89,9 @@ END(hypercall_page)
ELFNOTE(Xen, XEN_ELFNOTE_FEATURES,
.ascii "!writable_page_tables|pae_pgdir_above_4gb")
ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES,
- .long (1 << XENFEAT_writable_page_tables) | (1 << XENFEAT_dom0))
+ .long (1 << XENFEAT_writable_page_tables) | \
+ (1 << XENFEAT_dom0) | \
+ (1 << XENFEAT_linux_rsdp_unrestricted))
ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz "yes")
ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic")
ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID,
diff --git a/block/blk-core.c b/block/blk-core.c
index abcb8684ba67..806ce2442819 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2385,8 +2385,20 @@ blk_qc_t generic_make_request(struct bio *bio)
* yet.
*/
struct bio_list bio_list_on_stack[2];
+ blk_mq_req_flags_t flags = 0;
+ struct request_queue *q = bio->bi_disk->queue;
blk_qc_t ret = BLK_QC_T_NONE;
+ if (bio->bi_opf & REQ_NOWAIT)
+ flags = BLK_MQ_REQ_NOWAIT;
+ if (blk_queue_enter(q, flags) < 0) {
+ if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
+ bio_wouldblock_error(bio);
+ else
+ bio_io_error(bio);
+ return ret;
+ }
+
if (!generic_make_request_checks(bio))
goto out;
@@ -2423,11 +2435,22 @@ blk_qc_t generic_make_request(struct bio *bio)
bio_list_init(&bio_list_on_stack[0]);
current->bio_list = bio_list_on_stack;
do {
- struct request_queue *q = bio->bi_disk->queue;
- blk_mq_req_flags_t flags = bio->bi_opf & REQ_NOWAIT ?
- BLK_MQ_REQ_NOWAIT : 0;
+ bool enter_succeeded = true;
+
+ if (unlikely(q != bio->bi_disk->queue)) {
+ if (q)
+ blk_queue_exit(q);
+ q = bio->bi_disk->queue;
+ flags = 0;
+ if (bio->bi_opf & REQ_NOWAIT)
+ flags = BLK_MQ_REQ_NOWAIT;
+ if (blk_queue_enter(q, flags) < 0) {
+ enter_succeeded = false;
+ q = NULL;
+ }
+ }
- if (likely(blk_queue_enter(q, flags) == 0)) {
+ if (enter_succeeded) {
struct bio_list lower, same;
/* Create a fresh bio_list for all subordinate requests */
@@ -2435,8 +2458,6 @@ blk_qc_t generic_make_request(struct bio *bio)
bio_list_init(&bio_list_on_stack[0]);
ret = q->make_request_fn(q, bio);
- blk_queue_exit(q);
-
/* sort new bios into those for a lower level
* and those for the same level
*/
@@ -2463,6 +2484,8 @@ blk_qc_t generic_make_request(struct bio *bio)
current->bio_list = NULL; /* deactivate */
out:
+ if (q)
+ blk_queue_exit(q);
return ret;
}
EXPORT_SYMBOL(generic_make_request);
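The restructuring above changes when the queue reference is taken in generic_make_request(): it is now acquired once up front, swapped only when a bio targets a different queue, and released at the end instead of around each ->make_request_fn call. A standalone sketch of that acquire/swap/release pattern (illustrative names, not the block-layer API):

        #include <stdbool.h>
        #include <stdio.h>

        struct queue { int refs; const char *name; };

        static bool q_enter(struct queue *q) { q->refs++; return true; }
        static void q_exit(struct queue *q)  { q->refs--; }

        static void submit_all(struct queue **target, int n)
        {
                struct queue *q = NULL;
                int i;

                for (i = 0; i < n; i++) {
                        /* Re-enter only when the target queue changes. */
                        if (target[i] != q) {
                                if (q)
                                        q_exit(q);
                                q = target[i];
                                if (!q_enter(q))
                                        q = NULL;
                        }
                        if (q)
                                printf("dispatch to %s\n", q->name);
                }
                if (q)
                        q_exit(q);
        }

        int main(void)
        {
                struct queue a = { 0, "qa" }, b = { 0, "qb" };
                struct queue *bios[] = { &a, &a, &b, &a };

                submit_all(bios, 4);
                printf("refs after: %d %d\n", a.refs, b.refs);  /* 0 0 */
                return 0;
        }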
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 9f8cffc8a701..3eb169f15842 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -16,11 +16,6 @@
static int cpu_to_queue_index(unsigned int nr_queues, const int cpu)
{
- /*
- * Non present CPU will be mapped to queue index 0.
- */
- if (!cpu_present(cpu))
- return 0;
return cpu % nr_queues;
}
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 58b3b79cbe83..3080e18cb859 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -235,7 +235,6 @@ static const char *const hctx_state_name[] = {
HCTX_STATE_NAME(STOPPED),
HCTX_STATE_NAME(TAG_ACTIVE),
HCTX_STATE_NAME(SCHED_RESTART),
- HCTX_STATE_NAME(START_ON_RUN),
};
#undef HCTX_STATE_NAME
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f5c7dbcb954f..0dc9e341c2a7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1180,7 +1180,12 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
struct blk_mq_queue_data bd;
rq = list_first_entry(list, struct request, queuelist);
- if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
+
+ hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
+ if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
+ break;
+
+ if (!blk_mq_get_driver_tag(rq, NULL, false)) {
/*
* The initial allocation attempt failed, so we need to
* rerun the hardware queue when a tag is freed. The
@@ -1189,8 +1194,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
* we'll re-run it below.
*/
if (!blk_mq_mark_tag_wait(&hctx, rq)) {
- if (got_budget)
- blk_mq_put_dispatch_budget(hctx);
+ blk_mq_put_dispatch_budget(hctx);
/*
* For non-shared tags, the RESTART check
* will suffice.
@@ -1201,11 +1205,6 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
}
}
- if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) {
- blk_mq_put_driver_tag(rq);
- break;
- }
-
list_del_init(&rq->queuelist);
bd.rq = rq;
@@ -1336,6 +1335,15 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
hctx_unlock(hctx, srcu_idx);
}
+static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
+{
+ int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
+
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first(hctx->cpumask);
+ return cpu;
+}
+
/*
* It'd be great if the workqueue API had a way to pass
* in a mask and had some smarts for more clever placement.
@@ -1345,26 +1353,17 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
bool tried = false;
+ int next_cpu = hctx->next_cpu;
if (hctx->queue->nr_hw_queues == 1)
return WORK_CPU_UNBOUND;
if (--hctx->next_cpu_batch <= 0) {
- int next_cpu;
select_cpu:
- next_cpu = cpumask_next_and(hctx->next_cpu, hctx->cpumask,
+ next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
cpu_online_mask);
if (next_cpu >= nr_cpu_ids)
- next_cpu = cpumask_first_and(hctx->cpumask,cpu_online_mask);
-
- /*
- * No online CPU is found, so have to make sure hctx->next_cpu
- * is set correctly for not breaking workqueue.
- */
- if (next_cpu >= nr_cpu_ids)
- hctx->next_cpu = cpumask_first(hctx->cpumask);
- else
- hctx->next_cpu = next_cpu;
+ next_cpu = blk_mq_first_mapped_cpu(hctx);
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
}
@@ -1372,7 +1371,7 @@ select_cpu:
* Do unbound schedule if we can't find an online CPU for this hctx,
* and it should only happen in the path of handling CPU DEAD.
*/
- if (!cpu_online(hctx->next_cpu)) {
+ if (!cpu_online(next_cpu)) {
if (!tried) {
tried = true;
goto select_cpu;
@@ -1382,18 +1381,18 @@ select_cpu:
* Make sure to re-select CPU next time once after CPUs
* in hctx->cpumask become online again.
*/
+ hctx->next_cpu = next_cpu;
hctx->next_cpu_batch = 1;
return WORK_CPU_UNBOUND;
}
- return hctx->next_cpu;
+
+ hctx->next_cpu = next_cpu;
+ return next_cpu;
}
static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
unsigned long msecs)
{
- if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
- return;
-
if (unlikely(blk_mq_hctx_stopped(hctx)))
return;
@@ -1560,40 +1559,14 @@ static void blk_mq_run_work_fn(struct work_struct *work)
hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
/*
- * If we are stopped, don't run the queue. The exception is if
- * BLK_MQ_S_START_ON_RUN is set. For that case, we auto-clear
- * the STOPPED bit and run it.
+ * If we are stopped, don't run the queue.
*/
- if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) {
- if (!test_bit(BLK_MQ_S_START_ON_RUN, &hctx->state))
- return;
-
- clear_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
+ if (test_bit(BLK_MQ_S_STOPPED, &hctx->state))
clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
- }
__blk_mq_run_hw_queue(hctx);
}
-
-void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
-{
- if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
- return;
-
- /*
- * Stop the hw queue, then modify currently delayed work.
- * This should prevent us from running the queue prematurely.
- * Mark the queue as auto-clearing STOPPED when it runs.
- */
- blk_mq_stop_hw_queue(hctx);
- set_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
- kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
- &hctx->run_work,
- msecs_to_jiffies(msecs));
-}
-EXPORT_SYMBOL(blk_mq_delay_queue);
-
static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
struct request *rq,
bool at_head)
@@ -1804,11 +1777,11 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
if (q->elevator && !bypass_insert)
goto insert;
- if (!blk_mq_get_driver_tag(rq, NULL, false))
+ if (!blk_mq_get_dispatch_budget(hctx))
goto insert;
- if (!blk_mq_get_dispatch_budget(hctx)) {
- blk_mq_put_driver_tag(rq);
+ if (!blk_mq_get_driver_tag(rq, NULL, false)) {
+ blk_mq_put_dispatch_budget(hctx);
goto insert;
}
@@ -2356,7 +2329,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
static void blk_mq_map_swqueue(struct request_queue *q)
{
- unsigned int i, hctx_idx;
+ unsigned int i;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
struct blk_mq_tag_set *set = q->tag_set;
@@ -2373,23 +2346,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
/*
* Map software to hardware queues.
- *
- * If the cpu isn't present, the cpu is mapped to first hctx.
*/
for_each_possible_cpu(i) {
- hctx_idx = q->mq_map[i];
- /* unmapped hw queue can be remapped after CPU topo changed */
- if (!set->tags[hctx_idx] &&
- !__blk_mq_alloc_rq_map(set, hctx_idx)) {
- /*
- * If tags initialization fail for some hctx,
- * that hctx won't be brought online. In this
- * case, remap the current ctx to hctx[0] which
- * is guaranteed to always have tags allocated
- */
- q->mq_map[i] = 0;
- }
-
ctx = per_cpu_ptr(q->queue_ctx, i);
hctx = blk_mq_map_queue(q, i);
@@ -2401,21 +2359,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
mutex_unlock(&q->sysfs_lock);
queue_for_each_hw_ctx(q, hctx, i) {
- /*
- * If no software queues are mapped to this hardware queue,
- * disable it and free the request entries.
- */
- if (!hctx->nr_ctx) {
- /* Never unmap queue 0. We need it as a
- * fallback in case of a new remap fails
- * allocation
- */
- if (i && set->tags[i])
- blk_mq_free_map_and_requests(set, i);
-
- hctx->tags = NULL;
- continue;
- }
+ /* every hctx should get mapped by at least one CPU */
+ WARN_ON(!hctx->nr_ctx);
hctx->tags = set->tags[i];
WARN_ON(!hctx->tags);
@@ -2430,8 +2375,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
/*
* Initialize batch roundrobin counts
*/
- hctx->next_cpu = cpumask_first_and(hctx->cpumask,
- cpu_online_mask);
+ hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
}
}
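
Note: blk_mq_first_mapped_cpu(), introduced above, prefers the first CPU that is both in the hctx mask and online and only falls back to the first CPU in the hctx mask when none of them are online. A minimal user-space illustration of that selection, with plain bitmasks standing in for cpumask_t (illustrative only):

#include <stdio.h>

#define NR_CPUS 8

static int first_set(unsigned mask)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask & (1u << cpu))
			return cpu;
	return NR_CPUS;			/* "nr_cpu_ids": nothing set */
}

static int first_mapped_cpu(unsigned hctx_mask, unsigned online_mask)
{
	int cpu = first_set(hctx_mask & online_mask);

	if (cpu >= NR_CPUS)		/* no mapped CPU currently online */
		cpu = first_set(hctx_mask);
	return cpu;
}

int main(void)
{
	/* hctx serves CPUs 2 and 3; only CPU 3 is online */
	printf("%d\n", first_mapped_cpu(0x0c, 0x08));	/* -> 3 */
	/* hctx serves CPUs 2 and 3; none online: fall back to CPU 2 */
	printf("%d\n", first_mapped_cpu(0x0c, 0x00));	/* -> 2 */
	return 0;
}
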
diff --git a/crypto/.gitignore b/crypto/.gitignore
deleted file mode 100644
index ee328374dba8..000000000000
--- a/crypto/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*-asn1.[ch]
diff --git a/crypto/Makefile b/crypto/Makefile
index 4fc69fe94e6a..3a5f01616f74 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -35,14 +35,12 @@ dh_generic-y := dh.o
dh_generic-y += dh_helper.o
obj-$(CONFIG_CRYPTO_DH) += dh_generic.o
-$(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
-$(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h
-$(obj)/rsa_helper.o: $(obj)/rsapubkey-asn1.h $(obj)/rsaprivkey-asn1.h
-clean-files += rsapubkey-asn1.c rsapubkey-asn1.h
-clean-files += rsaprivkey-asn1.c rsaprivkey-asn1.h
-
-rsa_generic-y := rsapubkey-asn1.o
-rsa_generic-y += rsaprivkey-asn1.o
+$(obj)/rsapubkey.asn1.o: $(obj)/rsapubkey.asn1.c $(obj)/rsapubkey.asn1.h
+$(obj)/rsaprivkey.asn1.o: $(obj)/rsaprivkey.asn1.c $(obj)/rsaprivkey.asn1.h
+$(obj)/rsa_helper.o: $(obj)/rsapubkey.asn1.h $(obj)/rsaprivkey.asn1.h
+
+rsa_generic-y := rsapubkey.asn1.o
+rsa_generic-y += rsaprivkey.asn1.o
rsa_generic-y += rsa.o
rsa_generic-y += rsa_helper.o
rsa_generic-y += rsa-pkcs1pad.o
diff --git a/crypto/asymmetric_keys/.gitignore b/crypto/asymmetric_keys/.gitignore
deleted file mode 100644
index ee328374dba8..000000000000
--- a/crypto/asymmetric_keys/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*-asn1.[ch]
diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile
index 4719aad5dec0..d4b2e1b2dc65 100644
--- a/crypto/asymmetric_keys/Makefile
+++ b/crypto/asymmetric_keys/Makefile
@@ -17,35 +17,30 @@ obj-$(CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE) += public_key.o
#
obj-$(CONFIG_X509_CERTIFICATE_PARSER) += x509_key_parser.o
x509_key_parser-y := \
- x509-asn1.o \
- x509_akid-asn1.o \
+ x509.asn1.o \
+ x509_akid.asn1.o \
x509_cert_parser.o \
x509_public_key.o
$(obj)/x509_cert_parser.o: \
- $(obj)/x509-asn1.h \
- $(obj)/x509_akid-asn1.h
+ $(obj)/x509.asn1.h \
+ $(obj)/x509_akid.asn1.h
-$(obj)/x509-asn1.o: $(obj)/x509-asn1.c $(obj)/x509-asn1.h
-$(obj)/x509_akid-asn1.o: $(obj)/x509_akid-asn1.c $(obj)/x509_akid-asn1.h
-
-clean-files += x509-asn1.c x509-asn1.h
-clean-files += x509_akid-asn1.c x509_akid-asn1.h
+$(obj)/x509.asn1.o: $(obj)/x509.asn1.c $(obj)/x509.asn1.h
+$(obj)/x509_akid.asn1.o: $(obj)/x509_akid.asn1.c $(obj)/x509_akid.asn1.h
#
# PKCS#7 message handling
#
obj-$(CONFIG_PKCS7_MESSAGE_PARSER) += pkcs7_message.o
pkcs7_message-y := \
- pkcs7-asn1.o \
+ pkcs7.asn1.o \
pkcs7_parser.o \
pkcs7_trust.o \
pkcs7_verify.o
-$(obj)/pkcs7_parser.o: $(obj)/pkcs7-asn1.h
-$(obj)/pkcs7-asn1.o: $(obj)/pkcs7-asn1.c $(obj)/pkcs7-asn1.h
-
-clean-files += pkcs7-asn1.c pkcs7-asn1.h
+$(obj)/pkcs7_parser.o: $(obj)/pkcs7.asn1.h
+$(obj)/pkcs7.asn1.o: $(obj)/pkcs7.asn1.c $(obj)/pkcs7.asn1.h
#
# PKCS#7 parser testing key
@@ -62,9 +57,7 @@ obj-$(CONFIG_SIGNED_PE_FILE_VERIFICATION) += verify_signed_pefile.o
verify_signed_pefile-y := \
verify_pefile.o \
mscode_parser.o \
- mscode-asn1.o
-
-$(obj)/mscode_parser.o: $(obj)/mscode-asn1.h $(obj)/mscode-asn1.h
-$(obj)/mscode-asn1.o: $(obj)/mscode-asn1.c $(obj)/mscode-asn1.h
+ mscode.asn1.o
-clean-files += mscode-asn1.c mscode-asn1.h
+$(obj)/mscode_parser.o: $(obj)/mscode.asn1.h $(obj)/mscode.asn1.h
+$(obj)/mscode.asn1.o: $(obj)/mscode.asn1.c $(obj)/mscode.asn1.h
diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c
index 9492e1c22d38..83d2e9b38101 100644
--- a/crypto/asymmetric_keys/mscode_parser.c
+++ b/crypto/asymmetric_keys/mscode_parser.c
@@ -16,7 +16,7 @@
#include <linux/oid_registry.h>
#include <crypto/pkcs7.h>
#include "verify_pefile.h"
-#include "mscode-asn1.h"
+#include "mscode.asn1.h"
/*
* Parse a Microsoft Individual Code Signing blob
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index a6dcaa659aa8..0f134162cef4 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -18,7 +18,7 @@
#include <linux/oid_registry.h>
#include <crypto/public_key.h>
#include "pkcs7_parser.h"
-#include "pkcs7-asn1.h"
+#include "pkcs7.asn1.h"
MODULE_DESCRIPTION("PKCS#7 parser");
MODULE_AUTHOR("Red Hat, Inc.");
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index ce2df8c9c583..7d81e6bb461a 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -17,8 +17,8 @@
#include <linux/oid_registry.h>
#include <crypto/public_key.h>
#include "x509_parser.h"
-#include "x509-asn1.h"
-#include "x509_akid-asn1.h"
+#include "x509.asn1.h"
+#include "x509_akid.asn1.h"
struct x509_parse_context {
struct x509_certificate *cert; /* Certificate being constructed */
diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c
index cad395d70d78..efc78fe7ae2e 100644
--- a/crypto/rsa_helper.c
+++ b/crypto/rsa_helper.c
@@ -15,8 +15,8 @@
#include <linux/err.h>
#include <linux/fips.h>
#include <crypto/internal/rsa.h>
-#include "rsapubkey-asn1.h"
-#include "rsaprivkey-asn1.h"
+#include "rsapubkey.asn1.h"
+#include "rsaprivkey.asn1.h"
int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 9e702bc4960f..7a3a541046ed 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -34,6 +34,7 @@
struct iort_its_msi_chip {
struct list_head list;
struct fwnode_handle *fw_node;
+ phys_addr_t base_addr;
u32 translation_id;
};
@@ -156,14 +157,16 @@ static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);
/**
- * iort_register_domain_token() - register domain token and related ITS ID
- * to the list from where we can get it back later on.
+ * iort_register_domain_token() - register domain token along with related
+ * ITS ID and base address to the list from where we can get it back later on.
* @trans_id: ITS ID.
+ * @base: ITS base address.
* @fw_node: Domain token.
*
* Returns: 0 on success, -ENOMEM if no memory when allocating list element
*/
-int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node)
+int iort_register_domain_token(int trans_id, phys_addr_t base,
+ struct fwnode_handle *fw_node)
{
struct iort_its_msi_chip *its_msi_chip;
@@ -173,6 +176,7 @@ int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node)
its_msi_chip->fw_node = fw_node;
its_msi_chip->translation_id = trans_id;
+ its_msi_chip->base_addr = base;
spin_lock(&iort_msi_chip_lock);
list_add(&its_msi_chip->list, &iort_msi_chip_list);
@@ -569,6 +573,24 @@ int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
return -ENODEV;
}
+static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
+{
+ struct iort_its_msi_chip *its_msi_chip;
+ int ret = -ENODEV;
+
+ spin_lock(&iort_msi_chip_lock);
+ list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
+ if (its_msi_chip->translation_id == its_id) {
+ *base = its_msi_chip->base_addr;
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock(&iort_msi_chip_lock);
+
+ return ret;
+}
+
/**
* iort_dev_find_its_id() - Find the ITS identifier for a device
* @dev: The device.
@@ -754,6 +776,24 @@ static inline bool iort_iommu_driver_enabled(u8 type)
}
#ifdef CONFIG_IOMMU_API
+static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
+{
+ struct acpi_iort_node *iommu;
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+ iommu = iort_get_iort_node(fwspec->iommu_fwnode);
+
+ if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
+ struct acpi_iort_smmu_v3 *smmu;
+
+ smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
+ if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
+ return iommu;
+ }
+
+ return NULL;
+}
+
static inline const struct iommu_ops *iort_fwspec_iommu_ops(
struct iommu_fwspec *fwspec)
{
@@ -770,6 +810,69 @@ static inline int iort_add_device_replay(const struct iommu_ops *ops,
return err;
}
+
+/**
+ * iort_iommu_msi_get_resv_regions - Reserved region driver helper
+ * @dev: Device from iommu_get_resv_regions()
+ * @head: Reserved region list from iommu_get_resv_regions()
+ *
+ * Returns: Number of msi reserved regions on success (0 if platform
+ * doesn't require the reservation or no associated msi regions),
+ * appropriate error value otherwise. The ITS interrupt translation
+ * spaces (ITS_base + SZ_64K, SZ_64K) associated with the device
+ * are the msi reserved regions.
+ */
+int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
+{
+ struct acpi_iort_its_group *its;
+ struct acpi_iort_node *iommu_node, *its_node = NULL;
+ int i, resv = 0;
+
+ iommu_node = iort_get_msi_resv_iommu(dev);
+ if (!iommu_node)
+ return 0;
+
+ /*
+ * Current logic to reserve ITS regions relies on HW topologies
+ * where a given PCI or named component maps its IDs to only one
+ * ITS group; if a PCI or named component can map its IDs to
+ * different ITS groups through IORT mappings this function has
+ * to be reworked to ensure we reserve regions for all ITS groups
+ * a given PCI or named component may map IDs to.
+ */
+
+ for (i = 0; i < dev->iommu_fwspec->num_ids; i++) {
+ its_node = iort_node_map_id(iommu_node,
+ dev->iommu_fwspec->ids[i],
+ NULL, IORT_MSI_TYPE);
+ if (its_node)
+ break;
+ }
+
+ if (!its_node)
+ return 0;
+
+ /* Move to ITS specific data */
+ its = (struct acpi_iort_its_group *)its_node->node_data;
+
+ for (i = 0; i < its->its_count; i++) {
+ phys_addr_t base;
+
+ if (!iort_find_its_base(its->identifiers[i], &base)) {
+ int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+ struct iommu_resv_region *region;
+
+ region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
+ prot, IOMMU_RESV_MSI);
+ if (region) {
+ list_add_tail(&region->list, head);
+ resv++;
+ }
+ }
+ }
+
+ return (resv == its->its_count) ? resv : -ENODEV;
+}
#else
static inline const struct iommu_ops *iort_fwspec_iommu_ops(
struct iommu_fwspec *fwspec)
@@ -777,6 +880,8 @@ static inline const struct iommu_ops *iort_fwspec_iommu_ops(
static inline int iort_add_device_replay(const struct iommu_ops *ops,
struct device *dev)
{ return 0; }
+int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
+{ return 0; }
#endif
static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index c7cf48ad5cb9..a651ab3490d8 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -533,7 +533,7 @@ int acpi_processor_notify_smm(struct module *calling_module)
EXPORT_SYMBOL(acpi_processor_notify_smm);
-static int acpi_processor_get_psd(struct acpi_processor *pr)
+int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain)
{
int result = 0;
acpi_status status = AE_OK;
@@ -541,9 +541,8 @@ static int acpi_processor_get_psd(struct acpi_processor *pr)
struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
struct acpi_buffer state = {0, NULL};
union acpi_object *psd = NULL;
- struct acpi_psd_package *pdomain;
- status = acpi_evaluate_object(pr->handle, "_PSD", NULL, &buffer);
+ status = acpi_evaluate_object(handle, "_PSD", NULL, &buffer);
if (ACPI_FAILURE(status)) {
return -ENODEV;
}
@@ -561,8 +560,6 @@ static int acpi_processor_get_psd(struct acpi_processor *pr)
goto end;
}
- pdomain = &(pr->performance->domain_info);
-
state.length = sizeof(struct acpi_psd_package);
state.pointer = pdomain;
@@ -597,6 +594,7 @@ end:
kfree(buffer.pointer);
return result;
}
+EXPORT_SYMBOL(acpi_processor_get_psd);
int acpi_processor_preregister_performance(
struct acpi_processor_performance __percpu *performance)
@@ -645,7 +643,8 @@ int acpi_processor_preregister_performance(
pr->performance = per_cpu_ptr(performance, i);
cpumask_set_cpu(i, pr->performance->shared_cpu_map);
- if (acpi_processor_get_psd(pr)) {
+ pdomain = &(pr->performance->domain_info);
+ if (acpi_processor_get_psd(pr->handle, pdomain)) {
retval = -EINVAL;
continue;
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 264abaaff662..c9d04497a415 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1103,11 +1103,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (info->lo_encrypt_type) {
unsigned int type = info->lo_encrypt_type;
- if (type >= MAX_LO_CRYPT)
- return -EINVAL;
+ if (type >= MAX_LO_CRYPT) {
+ err = -EINVAL;
+ goto exit;
+ }
xfer = xfer_funcs[type];
- if (xfer == NULL)
- return -EINVAL;
+ if (xfer == NULL) {
+ err = -EINVAL;
+ goto exit;
+ }
} else
xfer = NULL;
@@ -1283,12 +1287,13 @@ static int
loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
struct loop_info info;
struct loop_info64 info64;
- int err = 0;
+ int err;
- if (!arg)
- err = -EINVAL;
- if (!err)
- err = loop_get_status(lo, &info64);
+ if (!arg) {
+ mutex_unlock(&lo->lo_ctl_mutex);
+ return -EINVAL;
+ }
+ err = loop_get_status(lo, &info64);
if (!err)
err = loop_info64_to_old(&info64, &info);
if (!err && copy_to_user(arg, &info, sizeof(info)))
@@ -1300,12 +1305,13 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
static int
loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
struct loop_info64 info64;
- int err = 0;
+ int err;
- if (!arg)
- err = -EINVAL;
- if (!err)
- err = loop_get_status(lo, &info64);
+ if (!arg) {
+ mutex_unlock(&lo->lo_ctl_mutex);
+ return -EINVAL;
+ }
+ err = loop_get_status(lo, &info64);
if (!err && copy_to_user(arg, &info64, sizeof(info64)))
err = -EFAULT;
@@ -1529,12 +1535,13 @@ loop_get_status_compat(struct loop_device *lo,
struct compat_loop_info __user *arg)
{
struct loop_info64 info64;
- int err = 0;
+ int err;
- if (!arg)
- err = -EINVAL;
- if (!err)
- err = loop_get_status(lo, &info64);
+ if (!arg) {
+ mutex_unlock(&lo->lo_ctl_mutex);
+ return -EINVAL;
+ }
+ err = loop_get_status(lo, &info64);
if (!err)
err = loop_info64_to_compat(&info64, arg);
return err;
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 7ae23b25b406..41492e980ef4 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -55,8 +55,10 @@ config COMMON_CLK_RK808
by control register.
config COMMON_CLK_HI655X
- tristate "Clock driver for Hi655x"
- depends on MFD_HI655X_PMIC || COMPILE_TEST
+ tristate "Clock driver for Hi655x" if EXPERT
+ depends on (MFD_HI655X_PMIC || COMPILE_TEST)
+ depends on REGMAP
+ default MFD_HI655X_PMIC
---help---
This driver supports the hi655x PMIC clock. This
multi-function device has one fixed-rate oscillator, clocked
@@ -101,6 +103,15 @@ config COMMON_CLK_SI514
This driver supports the Silicon Labs 514 programmable clock
generator.
+config COMMON_CLK_SI544
+ tristate "Clock driver for SiLabs 544 devices"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This driver supports the Silicon Labs 544 programmable clock
+ generator.
+
config COMMON_CLK_SI570
tristate "Clock driver for SiLabs 570 and compatible devices"
depends on I2C
@@ -248,6 +259,26 @@ config COMMON_CLK_VC5
This driver supports the IDT VersaClock 5 and VersaClock 6
programmable clock generators.
+config COMMON_CLK_STM32MP157
+ def_bool COMMON_CLK && MACH_STM32MP157
+ help
+ Support for stm32mp157 SoC family clocks
+
+config COMMON_CLK_STM32F
+ bool "Clock driver for stm32f4 and stm32f7 SoC families"
+ depends on MACH_STM32F429 || MACH_STM32F469 || MACH_STM32F746
+ help
+ Support for stm32f4 and stm32f7 SoC families clocks
+
+config COMMON_CLK_STM32H7
+ bool "Clock driver for stm32h7 SoC family"
+ depends on MACH_STM32H743
+ help
+ Support for stm32h7 SoC family clocks
+
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
source "drivers/clk/imgtec/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 6605513eaa94..de6d06ac790b 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -45,9 +45,11 @@ obj-$(CONFIG_COMMON_CLK_SCMI) += clk-scmi.o
obj-$(CONFIG_COMMON_CLK_SCPI) += clk-scpi.o
obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
obj-$(CONFIG_COMMON_CLK_SI514) += clk-si514.o
+obj-$(CONFIG_COMMON_CLK_SI544) += clk-si544.o
obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
-obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o
-obj-$(CONFIG_ARCH_STM32) += clk-stm32h7.o
+obj-$(CONFIG_COMMON_CLK_STM32F) += clk-stm32f4.o
+obj-$(CONFIG_COMMON_CLK_STM32H7) += clk-stm32h7.o
+obj-$(CONFIG_COMMON_CLK_STM32MP157) += clk-stm32mp1.o
obj-$(CONFIG_ARCH_TANGO) += clk-tango4.o
obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
obj-$(CONFIG_ARCH_U300) += clk-u300.o
@@ -62,6 +64,7 @@ obj-$(CONFIG_ARCH_ARTPEC) += axis/
obj-$(CONFIG_ARC_PLAT_AXS10X) += axs10x/
obj-y += bcm/
obj-$(CONFIG_ARCH_BERLIN) += berlin/
+obj-$(CONFIG_ARCH_DAVINCI) += davinci/
obj-$(CONFIG_H8300) += h8300/
obj-$(CONFIG_ARCH_HISI) += hisilicon/
obj-y += imgtec/
@@ -89,6 +92,7 @@ obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-$(CONFIG_ARCH_SPRD) += sprd/
obj-$(CONFIG_ARCH_STI) += st/
+obj-$(CONFIG_ARCH_STRATIX10) += socfpga/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-$(CONFIG_ARCH_SUNXI) += sunxi-ng/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index a07f6451694a..fa0d5c8611a0 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -602,9 +602,7 @@ static void bcm2835_pll_off(struct clk_hw *hw)
const struct bcm2835_pll_data *data = pll->data;
spin_lock(&cprman->regs_lock);
- cprman_write(cprman, data->cm_ctrl_reg,
- cprman_read(cprman, data->cm_ctrl_reg) |
- CM_PLL_ANARST);
+ cprman_write(cprman, data->cm_ctrl_reg, CM_PLL_ANARST);
cprman_write(cprman, data->a2w_ctrl_reg,
cprman_read(cprman, data->a2w_ctrl_reg) |
A2W_PLL_CTRL_PWRDN);
@@ -640,6 +638,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)
cpu_relax();
}
+ cprman_write(cprman, data->a2w_ctrl_reg,
+ cprman_read(cprman, data->a2w_ctrl_reg) |
+ A2W_PLL_CTRL_PRST_DISABLE);
+
return 0;
}
diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c
index e8ea81c30f0c..c58019750b7e 100644
--- a/drivers/clk/clk-cs2000-cp.c
+++ b/drivers/clk/clk-cs2000-cp.c
@@ -549,7 +549,7 @@ static int cs2000_resume(struct device *dev)
}
static const struct dev_pm_ops cs2000_pm_ops = {
- .resume_early = cs2000_resume,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, cs2000_resume)
};
static struct i2c_driver cs2000_driver = {
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index b49942b9fe50..b6234a5da12d 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -28,12 +28,10 @@
* parent - fixed parent. No clk_set_parent support
*/
-#define div_mask(width) ((1 << (width)) - 1)
-
static unsigned int _get_table_maxdiv(const struct clk_div_table *table,
u8 width)
{
- unsigned int maxdiv = 0, mask = div_mask(width);
+ unsigned int maxdiv = 0, mask = clk_div_mask(width);
const struct clk_div_table *clkt;
for (clkt = table; clkt->div; clkt++)
@@ -57,12 +55,12 @@ static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width,
unsigned long flags)
{
if (flags & CLK_DIVIDER_ONE_BASED)
- return div_mask(width);
+ return clk_div_mask(width);
if (flags & CLK_DIVIDER_POWER_OF_TWO)
- return 1 << div_mask(width);
+ return 1 << clk_div_mask(width);
if (table)
return _get_table_maxdiv(table, width);
- return div_mask(width) + 1;
+ return clk_div_mask(width) + 1;
}
static unsigned int _get_table_div(const struct clk_div_table *table,
@@ -84,7 +82,7 @@ static unsigned int _get_div(const struct clk_div_table *table,
if (flags & CLK_DIVIDER_POWER_OF_TWO)
return 1 << val;
if (flags & CLK_DIVIDER_MAX_AT_ZERO)
- return val ? val : div_mask(width) + 1;
+ return val ? val : clk_div_mask(width) + 1;
if (table)
return _get_table_div(table, val);
return val + 1;
@@ -109,7 +107,7 @@ static unsigned int _get_val(const struct clk_div_table *table,
if (flags & CLK_DIVIDER_POWER_OF_TWO)
return __ffs(div);
if (flags & CLK_DIVIDER_MAX_AT_ZERO)
- return (div == div_mask(width) + 1) ? 0 : div;
+ return (div == clk_div_mask(width) + 1) ? 0 : div;
if (table)
return _get_table_val(table, div);
return div - 1;
@@ -141,7 +139,7 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
unsigned int val;
val = clk_readl(divider->reg) >> divider->shift;
- val &= div_mask(divider->width);
+ val &= clk_div_mask(divider->width);
return divider_recalc_rate(hw, parent_rate, val, divider->table,
divider->flags, divider->width);
@@ -344,19 +342,43 @@ long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
}
EXPORT_SYMBOL_GPL(divider_round_rate_parent);
+long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
+ unsigned long rate, unsigned long *prate,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags, unsigned int val)
+{
+ int div;
+
+ div = _get_div(table, val, flags, width);
+
+ /* Even a read-only clock can propagate a rate change */
+ if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
+ if (!parent)
+ return -EINVAL;
+
+ *prate = clk_hw_round_rate(parent, rate * div);
+ }
+
+ return DIV_ROUND_UP_ULL((u64)*prate, div);
+}
+EXPORT_SYMBOL_GPL(divider_ro_round_rate_parent);
+
+
static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_divider *divider = to_clk_divider(hw);
- int bestdiv;
/* if read only, just return current value */
if (divider->flags & CLK_DIVIDER_READ_ONLY) {
- bestdiv = clk_readl(divider->reg) >> divider->shift;
- bestdiv &= div_mask(divider->width);
- bestdiv = _get_div(divider->table, bestdiv, divider->flags,
- divider->width);
- return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
+ u32 val;
+
+ val = clk_readl(divider->reg) >> divider->shift;
+ val &= clk_div_mask(divider->width);
+
+ return divider_ro_round_rate(hw, rate, prate, divider->table,
+ divider->width, divider->flags,
+ val);
}
return divider_round_rate(hw, rate, prate, divider->table,
@@ -376,7 +398,7 @@ int divider_get_val(unsigned long rate, unsigned long parent_rate,
value = _get_val(table, div, flags, width);
- return min_t(unsigned int, value, div_mask(width));
+ return min_t(unsigned int, value, clk_div_mask(width));
}
EXPORT_SYMBOL_GPL(divider_get_val);
@@ -399,10 +421,10 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
__acquire(divider->lock);
if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
- val = div_mask(divider->width) << (divider->shift + 16);
+ val = clk_div_mask(divider->width) << (divider->shift + 16);
} else {
val = clk_readl(divider->reg);
- val &= ~(div_mask(divider->width) << divider->shift);
+ val &= ~(clk_div_mask(divider->width) << divider->shift);
}
val |= (u32)value << divider->shift;
clk_writel(val, divider->reg);
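
Note: the clk-divider.c hunks above only swap the private div_mask() macro for the shared clk_div_mask() helper; the value-to-divider decoding itself is unchanged. A quick user-space sketch of that decoding under the common divider flags (flag names mirror the kernel ones, but this is illustrative code, not the clk API):

#include <stdio.h>

#define DIV_ONE_BASED		(1u << 0)
#define DIV_POWER_OF_TWO	(1u << 1)
#define DIV_MAX_AT_ZERO		(1u << 2)

static unsigned div_mask(unsigned width) { return (1u << width) - 1; }

static unsigned get_div(unsigned val, unsigned width, unsigned flags)
{
	if (flags & DIV_ONE_BASED)
		return val;			/* field stores the divider itself */
	if (flags & DIV_POWER_OF_TWO)
		return 1u << val;		/* field stores log2(divider) */
	if (flags & DIV_MAX_AT_ZERO)
		return val ? val : div_mask(width) + 1;
	return val + 1;				/* default: field stores divider - 1 */
}

int main(void)
{
	unsigned width = 3;			/* 3-bit field, mask 0x7 */

	printf("val=4 default       -> /%u\n", get_div(4, width, 0));
	printf("val=4 one-based     -> /%u\n", get_div(4, width, DIV_ONE_BASED));
	printf("val=4 power-of-two  -> /%u\n", get_div(4, width, DIV_POWER_OF_TWO));
	printf("val=0 max-at-zero   -> /%u\n", get_div(0, width, DIV_MAX_AT_ZERO));
	return 0;
}
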
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 151513c655c3..40af4fbab4d2 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -73,14 +73,14 @@ static u8 clk_gpio_mux_get_parent(struct clk_hw *hw)
{
struct clk_gpio *clk = to_clk_gpio(hw);
- return gpiod_get_value(clk->gpiod);
+ return gpiod_get_value_cansleep(clk->gpiod);
}
static int clk_gpio_mux_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_gpio *clk = to_clk_gpio(hw);
- gpiod_set_value(clk->gpiod, index);
+ gpiod_set_value_cansleep(clk->gpiod, index);
return 0;
}
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 39cabe157163..ac4a042f8658 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -26,35 +26,24 @@
* parent - parent is adjustable through clk_set_parent
*/
-static u8 clk_mux_get_parent(struct clk_hw *hw)
+int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
+ unsigned int val)
{
- struct clk_mux *mux = to_clk_mux(hw);
int num_parents = clk_hw_get_num_parents(hw);
- u32 val;
- /*
- * FIXME need a mux-specific flag to determine if val is bitwise or numeric
- * e.g. sys_clkin_ck's clksel field is 3 bits wide, but ranges from 0x1
- * to 0x7 (index starts at one)
- * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
- * val = 0x4 really means "bit 2, index starts at bit 0"
- */
- val = clk_readl(mux->reg) >> mux->shift;
- val &= mux->mask;
-
- if (mux->table) {
+ if (table) {
int i;
for (i = 0; i < num_parents; i++)
- if (mux->table[i] == val)
+ if (table[i] == val)
return i;
return -EINVAL;
}
- if (val && (mux->flags & CLK_MUX_INDEX_BIT))
+ if (val && (flags & CLK_MUX_INDEX_BIT))
val = ffs(val) - 1;
- if (val && (mux->flags & CLK_MUX_INDEX_ONE))
+ if (val && (flags & CLK_MUX_INDEX_ONE))
val--;
if (val >= num_parents)
@@ -62,36 +51,58 @@ static u8 clk_mux_get_parent(struct clk_hw *hw)
return val;
}
+EXPORT_SYMBOL_GPL(clk_mux_val_to_index);
-static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
+unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index)
{
- struct clk_mux *mux = to_clk_mux(hw);
- u32 val;
- unsigned long flags = 0;
+ unsigned int val = index;
- if (mux->table) {
- index = mux->table[index];
+ if (table) {
+ val = table[index];
} else {
- if (mux->flags & CLK_MUX_INDEX_BIT)
- index = 1 << index;
+ if (flags & CLK_MUX_INDEX_BIT)
+ val = 1 << index;
- if (mux->flags & CLK_MUX_INDEX_ONE)
- index++;
+ if (flags & CLK_MUX_INDEX_ONE)
+ val++;
}
+ return val;
+}
+EXPORT_SYMBOL_GPL(clk_mux_index_to_val);
+
+static u8 clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val;
+
+ val = clk_readl(mux->reg) >> mux->shift;
+ val &= mux->mask;
+
+ return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
+}
+
+static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
+ unsigned long flags = 0;
+ u32 reg;
+
if (mux->lock)
spin_lock_irqsave(mux->lock, flags);
else
__acquire(mux->lock);
if (mux->flags & CLK_MUX_HIWORD_MASK) {
- val = mux->mask << (mux->shift + 16);
+ reg = mux->mask << (mux->shift + 16);
} else {
- val = clk_readl(mux->reg);
- val &= ~(mux->mask << mux->shift);
+ reg = clk_readl(mux->reg);
+ reg &= ~(mux->mask << mux->shift);
}
- val |= index << mux->shift;
- clk_writel(val, mux->reg);
+ val = val << mux->shift;
+ reg |= val;
+ clk_writel(reg, mux->reg);
if (mux->lock)
spin_unlock_irqrestore(mux->lock, flags);
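
Note: the clk-mux.c change above factors the register-value/parent-index conversion into clk_mux_val_to_index() and clk_mux_index_to_val(): with a table the value is looked up directly, otherwise CLK_MUX_INDEX_BIT and CLK_MUX_INDEX_ONE describe how the index is encoded. A user-space sketch of the same mapping (illustration only, not the clk framework API):

#include <stdio.h>
#include <strings.h>		/* ffs() */

#define MUX_INDEX_BIT	(1u << 0)	/* one register bit per parent */
#define MUX_INDEX_ONE	(1u << 1)	/* register value starts at 1 */

static int val_to_index(const unsigned *table, int num_parents,
			unsigned flags, unsigned val)
{
	if (table) {
		for (int i = 0; i < num_parents; i++)
			if (table[i] == val)
				return i;
		return -1;			/* value not in the table */
	}
	if (val && (flags & MUX_INDEX_BIT))
		val = ffs(val) - 1;
	if (val && (flags & MUX_INDEX_ONE))
		val--;
	return (int)val < num_parents ? (int)val : -1;
}

static unsigned index_to_val(const unsigned *table, unsigned flags, unsigned index)
{
	unsigned val = index;

	if (table)
		return table[index];
	if (flags & MUX_INDEX_BIT)
		val = 1u << index;
	if (flags & MUX_INDEX_ONE)
		val++;
	return val;
}

int main(void)
{
	static const unsigned table[] = { 0x0, 0x4, 0x5, 0x7 };

	printf("table:     index 2 -> val 0x%x, val 0x7 -> index %d\n",
	       index_to_val(table, 0, 2), val_to_index(table, 4, 0, 0x7));
	printf("index-bit: index 2 -> val 0x%x, val 0x4 -> index %d\n",
	       index_to_val(NULL, MUX_INDEX_BIT, 2),
	       val_to_index(NULL, 4, MUX_INDEX_BIT, 0x4));
	return 0;
}
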
diff --git a/drivers/clk/clk-si544.c b/drivers/clk/clk-si544.c
new file mode 100644
index 000000000000..1c96a9f6c022
--- /dev/null
+++ b/drivers/clk/clk-si544.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Silicon Labs Si544 Programmable Oscillator
+ * Copyright (C) 2018 Topic Embedded Products
+ * Author: Mike Looijmans <mike.looijmans@topic.nl>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* I2C registers (decimal as in datasheet) */
+#define SI544_REG_CONTROL 7
+#define SI544_REG_OE_STATE 17
+#define SI544_REG_HS_DIV 23
+#define SI544_REG_LS_HS_DIV 24
+#define SI544_REG_FBDIV0 26
+#define SI544_REG_FBDIV8 27
+#define SI544_REG_FBDIV16 28
+#define SI544_REG_FBDIV24 29
+#define SI544_REG_FBDIV32 30
+#define SI544_REG_FBDIV40 31
+#define SI544_REG_FCAL_OVR 69
+#define SI544_REG_ADPLL_DELTA_M0 231
+#define SI544_REG_ADPLL_DELTA_M8 232
+#define SI544_REG_ADPLL_DELTA_M16 233
+#define SI544_REG_PAGE_SELECT 255
+
+/* Register values */
+#define SI544_CONTROL_RESET BIT(7)
+#define SI544_CONTROL_MS_ICAL2 BIT(3)
+
+#define SI544_OE_STATE_ODC_OE BIT(0)
+
+/* Max freq depends on speed grade */
+#define SI544_MIN_FREQ 200000U
+
+/* Si544 Internal oscillator runs at 55.05 MHz */
+#define FXO 55050000U
+
+/* VCO range is 10.8 .. 12.1 GHz, max depends on speed grade */
+#define FVCO_MIN 10800000000ULL
+
+#define HS_DIV_MAX 2046
+#define HS_DIV_MAX_ODD 33
+
+/* Lowest frequency synthesizeable using only the HS divider */
+#define MIN_HSDIV_FREQ (FVCO_MIN / HS_DIV_MAX)
+
+enum si544_speed_grade {
+ si544a,
+ si544b,
+ si544c,
+};
+
+struct clk_si544 {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ struct i2c_client *i2c_client;
+ enum si544_speed_grade speed_grade;
+};
+#define to_clk_si544(_hw) container_of(_hw, struct clk_si544, hw)
+
+/**
+ * struct clk_si544_muldiv - Multiplier/divider settings
+ * @fb_div_frac: fractional part of feedback divider (32 bits)
+ * @fb_div_int: integer part of feedback divider (11 bits)
+ * @hs_div: 1st divider, 5..2046, must be even when >33
+ * @ls_div_bits: 2nd divider, as 2^x, range 0..5
+ * If ls_div_bits is non-zero, hs_div must be even
+ */
+struct clk_si544_muldiv {
+ u32 fb_div_frac;
+ u16 fb_div_int;
+ u16 hs_div;
+ u8 ls_div_bits;
+};
+
+/* Enables or disables the output driver */
+static int si544_enable_output(struct clk_si544 *data, bool enable)
+{
+ return regmap_update_bits(data->regmap, SI544_REG_OE_STATE,
+ SI544_OE_STATE_ODC_OE, enable ? SI544_OE_STATE_ODC_OE : 0);
+}
+
+/* Retrieve clock multiplier and dividers from hardware */
+static int si544_get_muldiv(struct clk_si544 *data,
+ struct clk_si544_muldiv *settings)
+{
+ int err;
+ u8 reg[6];
+
+ err = regmap_bulk_read(data->regmap, SI544_REG_HS_DIV, reg, 2);
+ if (err)
+ return err;
+
+ settings->ls_div_bits = (reg[1] >> 4) & 0x07;
+ settings->hs_div = (reg[1] & 0x07) << 8 | reg[0];
+
+ err = regmap_bulk_read(data->regmap, SI544_REG_FBDIV0, reg, 6);
+ if (err)
+ return err;
+
+ settings->fb_div_int = reg[4] | (reg[5] & 0x07) << 8;
+ settings->fb_div_frac = reg[0] | reg[1] << 8 | reg[2] << 16 |
+ reg[3] << 24;
+ return 0;
+}
+
+static int si544_set_muldiv(struct clk_si544 *data,
+ struct clk_si544_muldiv *settings)
+{
+ int err;
+ u8 reg[6];
+
+ reg[0] = settings->hs_div;
+ reg[1] = settings->hs_div >> 8 | settings->ls_div_bits << 4;
+
+ err = regmap_bulk_write(data->regmap, SI544_REG_HS_DIV, reg, 2);
+ if (err < 0)
+ return err;
+
+ reg[0] = settings->fb_div_frac;
+ reg[1] = settings->fb_div_frac >> 8;
+ reg[2] = settings->fb_div_frac >> 16;
+ reg[3] = settings->fb_div_frac >> 24;
+ reg[4] = settings->fb_div_int;
+ reg[5] = settings->fb_div_int >> 8;
+
+ /*
+ * Writing to SI544_REG_FBDIV40 triggers the clock change, so that
+ * must be written last
+ */
+ return regmap_bulk_write(data->regmap, SI544_REG_FBDIV0, reg, 6);
+}
+
+static bool is_valid_frequency(const struct clk_si544 *data,
+ unsigned long frequency)
+{
+ unsigned long max_freq = 0;
+
+ if (frequency < SI544_MIN_FREQ)
+ return false;
+
+ switch (data->speed_grade) {
+ case si544a:
+ max_freq = 1500000000;
+ break;
+ case si544b:
+ max_freq = 800000000;
+ break;
+ case si544c:
+ max_freq = 350000000;
+ break;
+ }
+
+ return frequency <= max_freq;
+}
+
+/* Calculate divider settings for a given frequency */
+static int si544_calc_muldiv(struct clk_si544_muldiv *settings,
+ unsigned long frequency)
+{
+ u64 vco;
+ u32 ls_freq;
+ u32 tmp;
+ u8 res;
+
+ /* Determine the minimum value of LS_DIV and resulting target freq. */
+ ls_freq = frequency;
+ settings->ls_div_bits = 0;
+
+ if (frequency >= MIN_HSDIV_FREQ) {
+ settings->ls_div_bits = 0;
+ } else {
+ res = 1;
+ tmp = 2 * HS_DIV_MAX;
+ while (tmp <= (HS_DIV_MAX * 32)) {
+ if (((u64)frequency * tmp) >= FVCO_MIN)
+ break;
+ ++res;
+ tmp <<= 1;
+ }
+ settings->ls_div_bits = res;
+ ls_freq = frequency << res;
+ }
+
+ /* Determine minimum HS_DIV by rounding up */
+ vco = FVCO_MIN + ls_freq - 1;
+ do_div(vco, ls_freq);
+ settings->hs_div = vco;
+
+ /* round up to even number when required */
+ if ((settings->hs_div & 1) &&
+ (settings->hs_div > HS_DIV_MAX_ODD || settings->ls_div_bits))
+ ++settings->hs_div;
+
+ /* Calculate VCO frequency (in 10..12GHz range) */
+ vco = (u64)ls_freq * settings->hs_div;
+
+ /* Calculate the integer part of the feedback divider */
+ tmp = do_div(vco, FXO);
+ settings->fb_div_int = vco;
+
+ /* And the fractional bits using the remainder */
+ vco = (u64)tmp << 32;
+ do_div(vco, FXO);
+ settings->fb_div_frac = vco;
+
+ return 0;
+}
+
+/* Calculate resulting frequency given the register settings */
+static unsigned long si544_calc_rate(struct clk_si544_muldiv *settings)
+{
+ u32 d = settings->hs_div * BIT(settings->ls_div_bits);
+ u64 vco;
+
+ /* Calculate VCO from the fractional part */
+ vco = (u64)settings->fb_div_frac * FXO;
+ vco += (FXO / 2);
+ vco >>= 32;
+
+ /* Add the integer part of the VCO frequency */
+ vco += (u64)settings->fb_div_int * FXO;
+
+ /* Apply divider to obtain the generated frequency */
+ do_div(vco, d);
+
+ return vco;
+}
+
+static unsigned long si544_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_si544 *data = to_clk_si544(hw);
+ struct clk_si544_muldiv settings;
+ int err;
+
+ err = si544_get_muldiv(data, &settings);
+ if (err)
+ return 0;
+
+ return si544_calc_rate(&settings);
+}
+
+static long si544_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_si544 *data = to_clk_si544(hw);
+ struct clk_si544_muldiv settings;
+ int err;
+
+ if (!is_valid_frequency(data, rate))
+ return -EINVAL;
+
+ err = si544_calc_muldiv(&settings, rate);
+ if (err)
+ return err;
+
+ return si544_calc_rate(&settings);
+}
+
+/*
+ * Update output frequency for "big" frequency changes
+ */
+static int si544_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_si544 *data = to_clk_si544(hw);
+ struct clk_si544_muldiv settings;
+ int err;
+
+ if (!is_valid_frequency(data, rate))
+ return -EINVAL;
+
+ err = si544_calc_muldiv(&settings, rate);
+ if (err)
+ return err;
+
+ si544_enable_output(data, false);
+
+ /* Allow FCAL for this frequency update */
+ err = regmap_write(data->regmap, SI544_REG_FCAL_OVR, 0);
+ if (err < 0)
+ return err;
+
+
+ err = si544_set_muldiv(data, &settings);
+ if (err < 0)
+ return err; /* Undefined state now, best to leave disabled */
+
+ /* Trigger calibration */
+ err = regmap_write(data->regmap, SI544_REG_CONTROL,
+ SI544_CONTROL_MS_ICAL2);
+ if (err < 0)
+ return err;
+
+ /* Applying a new frequency can take up to 10ms */
+ usleep_range(10000, 12000);
+
+ si544_enable_output(data, true);
+
+ return err;
+}
+
+static const struct clk_ops si544_clk_ops = {
+ .recalc_rate = si544_recalc_rate,
+ .round_rate = si544_round_rate,
+ .set_rate = si544_set_rate,
+};
+
+static bool si544_regmap_is_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SI544_REG_CONTROL:
+ case SI544_REG_FCAL_OVR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config si544_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = SI544_REG_PAGE_SELECT,
+ .volatile_reg = si544_regmap_is_volatile,
+};
+
+static int si544_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct clk_si544 *data;
+ struct clk_init_data init;
+ int err;
+
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ init.ops = &si544_clk_ops;
+ init.flags = 0;
+ init.num_parents = 0;
+ data->hw.init = &init;
+ data->i2c_client = client;
+ data->speed_grade = id->driver_data;
+
+ if (of_property_read_string(client->dev.of_node, "clock-output-names",
+ &init.name))
+ init.name = client->dev.of_node->name;
+
+ data->regmap = devm_regmap_init_i2c(client, &si544_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ i2c_set_clientdata(client, data);
+
+ /* Select page 0, just to be sure, there appear to be no more */
+ err = regmap_write(data->regmap, SI544_REG_PAGE_SELECT, 0);
+ if (err < 0)
+ return err;
+
+ err = devm_clk_hw_register(&client->dev, &data->hw);
+ if (err) {
+ dev_err(&client->dev, "clock registration failed\n");
+ return err;
+ }
+ err = devm_of_clk_add_hw_provider(&client->dev, of_clk_hw_simple_get,
+ &data->hw);
+ if (err) {
+ dev_err(&client->dev, "unable to add clk provider\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id si544_id[] = {
+ { "si544a", si544a },
+ { "si544b", si544b },
+ { "si544c", si544c },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si544_id);
+
+static const struct of_device_id clk_si544_of_match[] = {
+ { .compatible = "silabs,si544a" },
+ { .compatible = "silabs,si544b" },
+ { .compatible = "silabs,si544c" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, clk_si544_of_match);
+
+static struct i2c_driver si544_driver = {
+ .driver = {
+ .name = "si544",
+ .of_match_table = clk_si544_of_match,
+ },
+ .probe = si544_probe,
+ .id_table = si544_id,
+};
+module_i2c_driver(si544_driver);
+
+MODULE_AUTHOR("Mike Looijmans <mike.looijmans@topic.nl>");
+MODULE_DESCRIPTION("Si544 driver");
+MODULE_LICENSE("GPL");
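
Note: a stand-alone re-run of the Si544 divider arithmetic implemented by si544_calc_muldiv()/si544_calc_rate() in the new driver above, using plain 64-bit integer math instead of do_div(). It is handy for checking which HS_DIV, LS_DIV and feedback divider a target frequency produces; treat it as a sketch of the same algorithm, not a second implementation to trust over the driver (the 148.5 MHz target is just a sample value):

#include <stdio.h>
#include <stdint.h>

#define FXO		55050000ULL		/* internal oscillator, Hz */
#define FVCO_MIN	10800000000ULL		/* lowest VCO frequency, Hz */
#define HS_DIV_MAX	2046
#define HS_DIV_MAX_ODD	33
#define MIN_HSDIV_FREQ	(FVCO_MIN / HS_DIV_MAX)

struct muldiv {
	uint32_t fb_div_frac;
	uint16_t fb_div_int, hs_div;
	uint8_t ls_div_bits;
};

static void calc_muldiv(struct muldiv *s, unsigned long freq)
{
	uint64_t vco, ls_freq = freq;
	unsigned tmp, res;

	s->ls_div_bits = 0;
	if (freq < MIN_HSDIV_FREQ) {		/* need the 2^x post divider */
		res = 1;
		tmp = 2 * HS_DIV_MAX;
		while (tmp <= HS_DIV_MAX * 32) {
			if ((uint64_t)freq * tmp >= FVCO_MIN)
				break;
			++res;
			tmp <<= 1;
		}
		s->ls_div_bits = res;
		ls_freq = (uint64_t)freq << res;
	}

	s->hs_div = (FVCO_MIN + ls_freq - 1) / ls_freq;	/* round up */
	if ((s->hs_div & 1) &&
	    (s->hs_div > HS_DIV_MAX_ODD || s->ls_div_bits))
		++s->hs_div;				/* must be even here */

	vco = ls_freq * s->hs_div;			/* VCO frequency */
	s->fb_div_int = vco / FXO;			/* integer part */
	s->fb_div_frac = ((vco % FXO) << 32) / FXO;	/* fractional part */
}

static unsigned long calc_rate(const struct muldiv *s)
{
	uint64_t vco = ((uint64_t)s->fb_div_frac * FXO + (FXO / 2)) >> 32;

	vco += (uint64_t)s->fb_div_int * FXO;
	return vco / (s->hs_div * (1u << s->ls_div_bits));
}

int main(void)
{
	struct muldiv s;
	unsigned long target = 148500000;	/* sample target frequency */

	calc_muldiv(&s, target);
	printf("hs_div=%u ls_div=2^%u fbdiv=%u + %u/2^32 -> %lu Hz\n",
	       (unsigned)s.hs_div, (unsigned)s.ls_div_bits,
	       (unsigned)s.fb_div_int, s.fb_div_frac, calc_rate(&s));
	return 0;
}
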
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index da44f8dc1d29..294850bdc195 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -282,6 +282,7 @@ static const struct stm32f4_gate_data stm32f746_gates[] __initconst = {
{ STM32F4_RCC_APB2ENR, 0, "tim1", "apb2_mul" },
{ STM32F4_RCC_APB2ENR, 1, "tim8", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 7, "sdmmc2", "sdmux" },
{ STM32F4_RCC_APB2ENR, 8, "adc1", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 9, "adc2", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 10, "adc3", "apb2_div" },
@@ -315,7 +316,7 @@ static const u64 stm32f46xx_gate_map[MAX_GATE_MAP] = { 0x000000f17ef417ffull,
static const u64 stm32f746_gate_map[MAX_GATE_MAP] = { 0x000000f17ef417ffull,
0x0000000000000003ull,
- 0x04f77f033e01c9ffull };
+ 0x04f77f833e01c9ffull };
static const u64 *stm32f4_gate_map;
@@ -521,7 +522,7 @@ static const struct stm32f4_pll_data stm32f429_pll[MAX_PLL_DIV] = {
};
static const struct stm32f4_pll_data stm32f469_pll[MAX_PLL_DIV] = {
- { PLL, 50, { "pll", "pll-q", NULL } },
+ { PLL, 50, { "pll", "pll-q", "pll-r" } },
{ PLL_I2S, 50, { "plli2s-p", "plli2s-q", "plli2s-r" } },
{ PLL_SAI, 50, { "pllsai-p", "pllsai-q", "pllsai-r" } },
};
@@ -1047,6 +1048,8 @@ static const char *rtc_parents[4] = {
"no-clock", "lse", "lsi", "hse-rtc"
};
+static const char *dsi_parent[2] = { NULL, "pll-r" };
+
static const char *lcd_parent[1] = { "pllsai-r-div" };
static const char *i2s_parents[2] = { "plli2s-r", NULL };
@@ -1156,6 +1159,12 @@ static const struct stm32_aux_clk stm32f469_aux_clk[] = {
NO_GATE, 0,
0
},
+ {
+ CLK_F469_DSI, "dsi", dsi_parent, ARRAY_SIZE(dsi_parent),
+ STM32F4_RCC_DCKCFGR, 29, 1,
+ STM32F4_RCC_APB2ENR, 27,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT
+ },
};
static const struct stm32_aux_clk stm32f746_aux_clk[] = {
@@ -1450,6 +1459,7 @@ static void __init stm32f4_rcc_init(struct device_node *np)
stm32f4_gate_map = data->gates_map;
hse_clk = of_clk_get_parent_name(np, 0);
+ dsi_parent[0] = hse_clk;
i2s_in_clk = of_clk_get_parent_name(np, 1);
diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c
new file mode 100644
index 000000000000..f1d5967b4b39
--- /dev/null
+++ b/drivers/clk/clk-stm32mp1.c
@@ -0,0 +1,2117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Author: Olivier Bideau <olivier.bideau@st.com> for STMicroelectronics.
+ * Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/stm32mp1-clks.h>
+
+static DEFINE_SPINLOCK(rlock);
+
+#define RCC_OCENSETR 0x0C
+#define RCC_HSICFGR 0x18
+#define RCC_RDLSICR 0x144
+#define RCC_PLL1CR 0x80
+#define RCC_PLL1CFGR1 0x84
+#define RCC_PLL1CFGR2 0x88
+#define RCC_PLL2CR 0x94
+#define RCC_PLL2CFGR1 0x98
+#define RCC_PLL2CFGR2 0x9C
+#define RCC_PLL3CR 0x880
+#define RCC_PLL3CFGR1 0x884
+#define RCC_PLL3CFGR2 0x888
+#define RCC_PLL4CR 0x894
+#define RCC_PLL4CFGR1 0x898
+#define RCC_PLL4CFGR2 0x89C
+#define RCC_APB1ENSETR 0xA00
+#define RCC_APB2ENSETR 0xA08
+#define RCC_APB3ENSETR 0xA10
+#define RCC_APB4ENSETR 0x200
+#define RCC_APB5ENSETR 0x208
+#define RCC_AHB2ENSETR 0xA18
+#define RCC_AHB3ENSETR 0xA20
+#define RCC_AHB4ENSETR 0xA28
+#define RCC_AHB5ENSETR 0x210
+#define RCC_AHB6ENSETR 0x218
+#define RCC_AHB6LPENSETR 0x318
+#define RCC_RCK12SELR 0x28
+#define RCC_RCK3SELR 0x820
+#define RCC_RCK4SELR 0x824
+#define RCC_MPCKSELR 0x20
+#define RCC_ASSCKSELR 0x24
+#define RCC_MSSCKSELR 0x48
+#define RCC_SPI6CKSELR 0xC4
+#define RCC_SDMMC12CKSELR 0x8F4
+#define RCC_SDMMC3CKSELR 0x8F8
+#define RCC_FMCCKSELR 0x904
+#define RCC_I2C46CKSELR 0xC0
+#define RCC_I2C12CKSELR 0x8C0
+#define RCC_I2C35CKSELR 0x8C4
+#define RCC_UART1CKSELR 0xC8
+#define RCC_QSPICKSELR 0x900
+#define RCC_ETHCKSELR 0x8FC
+#define RCC_RNG1CKSELR 0xCC
+#define RCC_RNG2CKSELR 0x920
+#define RCC_GPUCKSELR 0x938
+#define RCC_USBCKSELR 0x91C
+#define RCC_STGENCKSELR 0xD4
+#define RCC_SPDIFCKSELR 0x914
+#define RCC_SPI2S1CKSELR 0x8D8
+#define RCC_SPI2S23CKSELR 0x8DC
+#define RCC_SPI2S45CKSELR 0x8E0
+#define RCC_CECCKSELR 0x918
+#define RCC_LPTIM1CKSELR 0x934
+#define RCC_LPTIM23CKSELR 0x930
+#define RCC_LPTIM45CKSELR 0x92C
+#define RCC_UART24CKSELR 0x8E8
+#define RCC_UART35CKSELR 0x8EC
+#define RCC_UART6CKSELR 0x8E4
+#define RCC_UART78CKSELR 0x8F0
+#define RCC_FDCANCKSELR 0x90C
+#define RCC_SAI1CKSELR 0x8C8
+#define RCC_SAI2CKSELR 0x8CC
+#define RCC_SAI3CKSELR 0x8D0
+#define RCC_SAI4CKSELR 0x8D4
+#define RCC_ADCCKSELR 0x928
+#define RCC_MPCKDIVR 0x2C
+#define RCC_DSICKSELR 0x924
+#define RCC_CPERCKSELR 0xD0
+#define RCC_MCO1CFGR 0x800
+#define RCC_MCO2CFGR 0x804
+#define RCC_BDCR 0x140
+#define RCC_AXIDIVR 0x30
+#define RCC_MCUDIVR 0x830
+#define RCC_APB1DIVR 0x834
+#define RCC_APB2DIVR 0x838
+#define RCC_APB3DIVR 0x83C
+#define RCC_APB4DIVR 0x3C
+#define RCC_APB5DIVR 0x40
+#define RCC_TIMG1PRER 0x828
+#define RCC_TIMG2PRER 0x82C
+#define RCC_RTCDIVR 0x44
+#define RCC_DBGCFGR 0x80C
+
+#define RCC_CLR 0x4
+
+static const char * const ref12_parents[] = {
+ "ck_hsi", "ck_hse"
+};
+
+static const char * const ref3_parents[] = {
+ "ck_hsi", "ck_hse", "ck_csi"
+};
+
+static const char * const ref4_parents[] = {
+ "ck_hsi", "ck_hse", "ck_csi"
+};
+
+static const char * const cpu_src[] = {
+ "ck_hsi", "ck_hse", "pll1_p"
+};
+
+static const char * const axi_src[] = {
+ "ck_hsi", "ck_hse", "pll2_p", "pll3_p"
+};
+
+static const char * const per_src[] = {
+ "ck_hsi", "ck_csi", "ck_hse"
+};
+
+static const char * const mcu_src[] = {
+ "ck_hsi", "ck_hse", "ck_csi", "pll3_p"
+};
+
+static const char * const sdmmc12_src[] = {
+ "ck_axi", "pll3_r", "pll4_p", "ck_hsi"
+};
+
+static const char * const sdmmc3_src[] = {
+ "ck_mcu", "pll3_r", "pll4_p", "ck_hsi"
+};
+
+static const char * const fmc_src[] = {
+ "ck_axi", "pll3_r", "pll4_p", "ck_per"
+};
+
+static const char * const qspi_src[] = {
+ "ck_axi", "pll3_r", "pll4_p", "ck_per"
+};
+
+static const char * const eth_src[] = {
+ "pll4_p", "pll3_q"
+};
+
+static const char * const rng_src[] = {
+ "ck_csi", "pll4_r", "ck_lse", "ck_lsi"
+};
+
+static const char * const usbphy_src[] = {
+ "ck_hse", "pll4_r", "clk-hse-div2"
+};
+
+static const char * const usbo_src[] = {
+ "pll4_r", "ck_usbo_48m"
+};
+
+static const char * const stgen_src[] = {
+ "ck_hsi", "ck_hse"
+};
+
+static const char * const spdif_src[] = {
+ "pll4_p", "pll3_q", "ck_hsi"
+};
+
+static const char * const spi123_src[] = {
+ "pll4_p", "pll3_q", "i2s_ckin", "ck_per", "pll3_r"
+};
+
+static const char * const spi45_src[] = {
+ "pclk2", "pll4_q", "ck_hsi", "ck_csi", "ck_hse"
+};
+
+static const char * const spi6_src[] = {
+ "pclk5", "pll4_q", "ck_hsi", "ck_csi", "ck_hse", "pll3_q"
+};
+
+static const char * const cec_src[] = {
+ "ck_lse", "ck_lsi", "ck_csi"
+};
+
+static const char * const i2c12_src[] = {
+ "pclk1", "pll4_r", "ck_hsi", "ck_csi"
+};
+
+static const char * const i2c35_src[] = {
+ "pclk1", "pll4_r", "ck_hsi", "ck_csi"
+};
+
+static const char * const i2c46_src[] = {
+ "pclk5", "pll3_q", "ck_hsi", "ck_csi"
+};
+
+static const char * const lptim1_src[] = {
+ "pclk1", "pll4_p", "pll3_q", "ck_lse", "ck_lsi", "ck_per"
+};
+
+static const char * const lptim23_src[] = {
+ "pclk3", "pll4_q", "ck_per", "ck_lse", "ck_lsi"
+};
+
+static const char * const lptim45_src[] = {
+ "pclk3", "pll4_p", "pll3_q", "ck_lse", "ck_lsi", "ck_per"
+};
+
+static const char * const usart1_src[] = {
+ "pclk5", "pll3_q", "ck_hsi", "ck_csi", "pll4_q", "ck_hse"
+};
+
+static const char * const usart234578_src[] = {
+ "pclk1", "pll4_q", "ck_hsi", "ck_csi", "ck_hse"
+};
+
+static const char * const usart6_src[] = {
+ "pclk2", "pll4_q", "ck_hsi", "ck_csi", "ck_hse"
+};
+
+static const char * const dfsdm_src[] = {
+ "pclk2", "ck_mcu"
+};
+
+static const char * const fdcan_src[] = {
+ "ck_hse", "pll3_q", "pll4_q"
+};
+
+static const char * const sai_src[] = {
+ "pll4_q", "pll3_q", "i2s_ckin", "ck_per"
+};
+
+static const char * const sai2_src[] = {
+ "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "spdif_ck_symb"
+};
+
+static const char * const adc12_src[] = {
+ "pll4_q", "ck_per"
+};
+
+static const char * const dsi_src[] = {
+ "ck_dsi_phy", "pll4_p"
+};
+
+static const char * const rtc_src[] = {
+ "off", "ck_lse", "ck_lsi", "ck_hse_rtc"
+};
+
+static const char * const mco1_src[] = {
+ "ck_hsi", "ck_hse", "ck_csi", "ck_lsi", "ck_lse"
+};
+
+static const char * const mco2_src[] = {
+ "ck_mpu", "ck_axi", "ck_mcu", "pll4_p", "ck_hse", "ck_hsi"
+};
+
+static const char * const ck_trace_src[] = {
+ "ck_axi"
+};
+
+static const struct clk_div_table axi_div_table[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
+ { 4, 4 }, { 5, 4 }, { 6, 4 }, { 7, 4 },
+ { 0 },
+};
+
+static const struct clk_div_table mcu_div_table[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 },
+ { 4, 16 }, { 5, 32 }, { 6, 64 }, { 7, 128 },
+ { 8, 512 }, { 9, 512 }, { 10, 512}, { 11, 512 },
+ { 12, 512 }, { 13, 512 }, { 14, 512}, { 15, 512 },
+ { 0 },
+};
+
+static const struct clk_div_table apb_div_table[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 },
+ { 4, 16 }, { 5, 16 }, { 6, 16 }, { 7, 16 },
+ { 0 },
+};
+
+static const struct clk_div_table ck_trace_div_table[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 },
+ { 4, 16 }, { 5, 16 }, { 6, 16 }, { 7, 16 },
+ { 0 },
+};
+
+#define MAX_MUX_CLK 2
+
+struct stm32_mmux {
+ u8 nbr_clk;
+ struct clk_hw *hws[MAX_MUX_CLK];
+};
+
+struct stm32_clk_mmux {
+ struct clk_mux mux;
+ struct stm32_mmux *mmux;
+};
+
+struct stm32_mgate {
+ u8 nbr_clk;
+ u32 flag;
+};
+
+struct stm32_clk_mgate {
+ struct clk_gate gate;
+ struct stm32_mgate *mgate;
+ u32 mask;
+};
+
+struct clock_config {
+ u32 id;
+ const char *name;
+ union {
+ const char *parent_name;
+ const char * const *parent_names;
+ };
+ int num_parents;
+ unsigned long flags;
+ void *cfg;
+ struct clk_hw * (*func)(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg);
+};
+
+#define NO_ID ~0
+
+struct gate_cfg {
+ u32 reg_off;
+ u8 bit_idx;
+ u8 gate_flags;
+};
+
+struct fixed_factor_cfg {
+ unsigned int mult;
+ unsigned int div;
+};
+
+struct div_cfg {
+ u32 reg_off;
+ u8 shift;
+ u8 width;
+ u8 div_flags;
+ const struct clk_div_table *table;
+};
+
+struct mux_cfg {
+ u32 reg_off;
+ u8 shift;
+ u8 width;
+ u8 mux_flags;
+ u32 *table;
+};
+
+struct stm32_gate_cfg {
+ struct gate_cfg *gate;
+ struct stm32_mgate *mgate;
+ const struct clk_ops *ops;
+};
+
+struct stm32_div_cfg {
+ struct div_cfg *div;
+ const struct clk_ops *ops;
+};
+
+struct stm32_mux_cfg {
+ struct mux_cfg *mux;
+ struct stm32_mmux *mmux;
+ const struct clk_ops *ops;
+};
+
+/* STM32 Composite clock */
+struct stm32_composite_cfg {
+ const struct stm32_gate_cfg *gate;
+ const struct stm32_div_cfg *div;
+ const struct stm32_mux_cfg *mux;
+};
+
+static struct clk_hw *
+_clk_hw_register_gate(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct gate_cfg *gate_cfg = cfg->cfg;
+
+ return clk_hw_register_gate(dev,
+ cfg->name,
+ cfg->parent_name,
+ cfg->flags,
+ gate_cfg->reg_off + base,
+ gate_cfg->bit_idx,
+ gate_cfg->gate_flags,
+ lock);
+}
+
+static struct clk_hw *
+_clk_hw_register_fixed_factor(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct fixed_factor_cfg *ff_cfg = cfg->cfg;
+
+ return clk_hw_register_fixed_factor(dev, cfg->name, cfg->parent_name,
+ cfg->flags, ff_cfg->mult,
+ ff_cfg->div);
+}
+
+static struct clk_hw *
+_clk_hw_register_divider_table(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct div_cfg *div_cfg = cfg->cfg;
+
+ return clk_hw_register_divider_table(dev,
+ cfg->name,
+ cfg->parent_name,
+ cfg->flags,
+ div_cfg->reg_off + base,
+ div_cfg->shift,
+ div_cfg->width,
+ div_cfg->div_flags,
+ div_cfg->table,
+ lock);
+}
+
+static struct clk_hw *
+_clk_hw_register_mux(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct mux_cfg *mux_cfg = cfg->cfg;
+
+ return clk_hw_register_mux(dev, cfg->name, cfg->parent_names,
+ cfg->num_parents, cfg->flags,
+ mux_cfg->reg_off + base, mux_cfg->shift,
+ mux_cfg->width, mux_cfg->mux_flags, lock);
+}
+
+/* MP1 Gate clock with set & clear registers */
+
+static int mp1_gate_clk_enable(struct clk_hw *hw)
+{
+ if (!clk_gate_ops.is_enabled(hw))
+ clk_gate_ops.enable(hw);
+
+ return 0;
+}
+
+static void mp1_gate_clk_disable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ unsigned long flags = 0;
+
+ if (clk_gate_ops.is_enabled(hw)) {
+ spin_lock_irqsave(gate->lock, flags);
+ writel_relaxed(BIT(gate->bit_idx), gate->reg + RCC_CLR);
+ spin_unlock_irqrestore(gate->lock, flags);
+ }
+}
+
+const struct clk_ops mp1_gate_clk_ops = {
+ .enable = mp1_gate_clk_enable,
+ .disable = mp1_gate_clk_disable,
+ .is_enabled = clk_gate_is_enabled,
+};
+
+static struct clk_hw *_get_stm32_mux(void __iomem *base,
+ const struct stm32_mux_cfg *cfg,
+ spinlock_t *lock)
+{
+ struct stm32_clk_mmux *mmux;
+ struct clk_mux *mux;
+ struct clk_hw *mux_hw;
+
+ if (cfg->mmux) {
+ mmux = kzalloc(sizeof(*mmux), GFP_KERNEL);
+ if (!mmux)
+ return ERR_PTR(-ENOMEM);
+
+ mmux->mux.reg = cfg->mux->reg_off + base;
+ mmux->mux.shift = cfg->mux->shift;
+ mmux->mux.mask = (1 << cfg->mux->width) - 1;
+ mmux->mux.flags = cfg->mux->mux_flags;
+ mmux->mux.table = cfg->mux->table;
+ mmux->mux.lock = lock;
+ mmux->mmux = cfg->mmux;
+ mux_hw = &mmux->mux.hw;
+ cfg->mmux->hws[cfg->mmux->nbr_clk++] = mux_hw;
+
+ } else {
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux->reg = cfg->mux->reg_off + base;
+ mux->shift = cfg->mux->shift;
+ mux->mask = (1 << cfg->mux->width) - 1;
+ mux->flags = cfg->mux->mux_flags;
+ mux->table = cfg->mux->table;
+ mux->lock = lock;
+ mux_hw = &mux->hw;
+ }
+
+ return mux_hw;
+}
+
+static struct clk_hw *_get_stm32_div(void __iomem *base,
+ const struct stm32_div_cfg *cfg,
+ spinlock_t *lock)
+{
+ struct clk_divider *div;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ div->reg = cfg->div->reg_off + base;
+ div->shift = cfg->div->shift;
+ div->width = cfg->div->width;
+ div->flags = cfg->div->div_flags;
+ div->table = cfg->div->table;
+ div->lock = lock;
+
+ return &div->hw;
+}
+
+static struct clk_hw *
+_get_stm32_gate(void __iomem *base,
+ const struct stm32_gate_cfg *cfg, spinlock_t *lock)
+{
+ struct stm32_clk_mgate *mgate;
+ struct clk_gate *gate;
+ struct clk_hw *gate_hw;
+
+ if (cfg->mgate) {
+ mgate = kzalloc(sizeof(*mgate), GFP_KERNEL);
+ if (!mgate)
+ return ERR_PTR(-ENOMEM);
+
+ mgate->gate.reg = cfg->gate->reg_off + base;
+ mgate->gate.bit_idx = cfg->gate->bit_idx;
+ mgate->gate.flags = cfg->gate->gate_flags;
+ mgate->gate.lock = lock;
+ mgate->mask = BIT(cfg->mgate->nbr_clk++);
+
+ mgate->mgate = cfg->mgate;
+
+ gate_hw = &mgate->gate.hw;
+
+ } else {
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ gate->reg = cfg->gate->reg_off + base;
+ gate->bit_idx = cfg->gate->bit_idx;
+ gate->flags = cfg->gate->gate_flags;
+ gate->lock = lock;
+
+ gate_hw = &gate->hw;
+ }
+
+ return gate_hw;
+}
+
+static struct clk_hw *
+clk_stm32_register_gate_ops(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ unsigned long flags,
+ void __iomem *base,
+ const struct stm32_gate_cfg *cfg,
+ spinlock_t *lock)
+{
+ struct clk_init_data init = { NULL };
+ struct clk_hw *hw;
+ int ret;
+
+ init.name = name;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = flags;
+
+ init.ops = &clk_gate_ops;
+
+ if (cfg->ops)
+ init.ops = cfg->ops;
+
+ hw = _get_stm32_gate(base, cfg, lock);
+ if (IS_ERR(hw))
+ return hw;
+
+ hw->init = &init;
+
+ ret = clk_hw_register(dev, hw);
+ if (ret)
+ hw = ERR_PTR(ret);
+
+ return hw;
+}
+
+static struct clk_hw *
+clk_stm32_register_composite(struct device *dev,
+ const char *name, const char * const *parent_names,
+ int num_parents, void __iomem *base,
+ const struct stm32_composite_cfg *cfg,
+ unsigned long flags, spinlock_t *lock)
+{
+ const struct clk_ops *mux_ops, *div_ops, *gate_ops;
+ struct clk_hw *mux_hw, *div_hw, *gate_hw;
+
+ mux_hw = NULL;
+ div_hw = NULL;
+ gate_hw = NULL;
+ mux_ops = NULL;
+ div_ops = NULL;
+ gate_ops = NULL;
+
+ if (cfg->mux) {
+ mux_hw = _get_stm32_mux(base, cfg->mux, lock);
+
+ if (!IS_ERR(mux_hw)) {
+ mux_ops = &clk_mux_ops;
+
+ if (cfg->mux->ops)
+ mux_ops = cfg->mux->ops;
+ }
+ }
+
+ if (cfg->div) {
+ div_hw = _get_stm32_div(base, cfg->div, lock);
+
+ if (!IS_ERR(div_hw)) {
+ div_ops = &clk_divider_ops;
+
+ if (cfg->div->ops)
+ div_ops = cfg->div->ops;
+ }
+ }
+
+ if (cfg->gate) {
+ gate_hw = _get_stm32_gate(base, cfg->gate, lock);
+
+ if (!IS_ERR(gate_hw)) {
+ gate_ops = &clk_gate_ops;
+
+ if (cfg->gate->ops)
+ gate_ops = cfg->gate->ops;
+ }
+ }
+
+ return clk_hw_register_composite(dev, name, parent_names, num_parents,
+ mux_hw, mux_ops, div_hw, div_ops,
+ gate_hw, gate_ops, flags);
+}
+
+#define to_clk_mgate(_gate) container_of(_gate, struct stm32_clk_mgate, gate)
+
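+/*
+ * Multi-gates: several kernel clocks share a single RCC enable bit. Each
+ * user sets its own bit in the shared mgate->flag mask on enable; the
+ * hardware gate is only switched off once the last user has cleared its bit.
+ */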
+static int mp1_mgate_clk_enable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32_clk_mgate *clk_mgate = to_clk_mgate(gate);
+
+ clk_mgate->mgate->flag |= clk_mgate->mask;
+
+ mp1_gate_clk_enable(hw);
+
+ return 0;
+}
+
+static void mp1_mgate_clk_disable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32_clk_mgate *clk_mgate = to_clk_mgate(gate);
+
+ clk_mgate->mgate->flag &= ~clk_mgate->mask;
+
+ if (clk_mgate->mgate->flag == 0)
+ mp1_gate_clk_disable(hw);
+}
+
+const struct clk_ops mp1_mgate_clk_ops = {
+ .enable = mp1_mgate_clk_enable,
+ .disable = mp1_mgate_clk_disable,
+ .is_enabled = clk_gate_is_enabled,
+
+};
+
+#define to_clk_mmux(_mux) container_of(_mux, struct stm32_clk_mmux, mux)
+
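+/*
+ * Multi-mux: kernel clocks that share one mux register. When one of them
+ * changes parent, all sibling clk_hw's registered in the same stm32_clk_mmux
+ * are reparented so the clock tree stays consistent.
+ */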
+static u8 clk_mmux_get_parent(struct clk_hw *hw)
+{
+ return clk_mux_ops.get_parent(hw);
+}
+
+static int clk_mmux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ struct stm32_clk_mmux *clk_mmux = to_clk_mmux(mux);
+ struct clk_hw *hwp;
+ int ret, n;
+
+ ret = clk_mux_ops.set_parent(hw, index);
+ if (ret)
+ return ret;
+
+ hwp = clk_hw_get_parent(hw);
+
+ for (n = 0; n < clk_mmux->mmux->nbr_clk; n++)
+ if (clk_mmux->mmux->hws[n] != hw)
+ clk_hw_reparent(clk_mmux->mmux->hws[n], hwp);
+
+ return 0;
+}
+
+const struct clk_ops clk_mmux_ops = {
+ .get_parent = clk_mmux_get_parent,
+ .set_parent = clk_mmux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
+
+/* STM32 PLL */
+struct stm32_pll_obj {
+ /* lock pll enable/disable registers */
+ spinlock_t *lock;
+ void __iomem *reg;
+ struct clk_hw hw;
+};
+
+#define to_pll(_hw) container_of(_hw, struct stm32_pll_obj, hw)
+
+#define PLL_ON BIT(0)
+#define PLL_RDY BIT(1)
+#define DIVN_MASK 0x1FF
+#define DIVM_MASK 0x3F
+#define DIVM_SHIFT 16
+#define DIVN_SHIFT 0
+#define FRAC_OFFSET 0xC
+#define FRAC_MASK 0x1FFF
+#define FRAC_SHIFT 3
+#define FRACLE BIT(16)
+
+static int __pll_is_enabled(struct clk_hw *hw)
+{
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
+
+ return readl_relaxed(clk_elem->reg) & PLL_ON;
+}
+
+#define TIMEOUT 5
+
+static int pll_enable(struct clk_hw *hw)
+{
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
+ u32 reg;
+ unsigned long flags = 0;
+ unsigned int timeout = TIMEOUT;
+ int bit_status = 0;
+
+ spin_lock_irqsave(clk_elem->lock, flags);
+
+ if (__pll_is_enabled(hw))
+ goto unlock;
+
+ reg = readl_relaxed(clk_elem->reg);
+ reg |= PLL_ON;
+ writel_relaxed(reg, clk_elem->reg);
+
+ /* We can't use readl_poll_timeout() here because it may be called
+ * before the clocksource is switched, when only the jiffies counter
+ * is available. Jiffies only advance via interrupts, and the enable
+ * op runs with interrupts disabled, so busy-wait with udelay()
+ * instead.
+ */
+ do {
+ bit_status = !(readl_relaxed(clk_elem->reg) & PLL_RDY);
+
+ if (bit_status)
+ udelay(120);
+
+ } while (bit_status && --timeout);
+
+unlock:
+ spin_unlock_irqrestore(clk_elem->lock, flags);
+
+ return bit_status;
+}
+
+static void pll_disable(struct clk_hw *hw)
+{
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
+ u32 reg;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(clk_elem->lock, flags);
+
+ reg = readl_relaxed(clk_elem->reg);
+ reg &= ~PLL_ON;
+ writel_relaxed(reg, clk_elem->reg);
+
+ spin_unlock_irqrestore(clk_elem->lock, flags);
+}
+
+static u32 pll_frac_val(struct clk_hw *hw)
+{
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
+ u32 reg, frac = 0;
+
+ reg = readl_relaxed(clk_elem->reg + FRAC_OFFSET);
+ if (reg & FRACLE)
+ frac = (reg >> FRAC_SHIFT) & FRAC_MASK;
+
+ return frac;
+}
+
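+/*
+ * Fout = Fref * (DIVN + 1) / (DIVM + 1), plus, when the fractional latch
+ * (FRACLE) is set, a fractional contribution of
+ * Fref * FRACV / ((DIVM + 1) * 2^13).
+ */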
+static unsigned long pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
+ u32 reg;
+ u32 frac, divm, divn;
+ u64 rate, rate_frac = 0;
+
+ reg = readl_relaxed(clk_elem->reg + 4);
+
+ divm = ((reg >> DIVM_SHIFT) & DIVM_MASK) + 1;
+ divn = ((reg >> DIVN_SHIFT) & DIVN_MASK) + 1;
+ rate = (u64)parent_rate * divn;
+
+ do_div(rate, divm);
+
+ frac = pll_frac_val(hw);
+ if (frac) {
+ rate_frac = (u64)parent_rate * (u64)frac;
+ do_div(rate_frac, (divm * 8192));
+ }
+
+ return rate + rate_frac;
+}
+
+static int pll_is_enabled(struct clk_hw *hw)
+{
+ struct stm32_pll_obj *clk_elem = to_pll(hw);
+ unsigned long flags = 0;
+ int ret;
+
+ spin_lock_irqsave(clk_elem->lock, flags);
+ ret = __pll_is_enabled(hw);
+ spin_unlock_irqrestore(clk_elem->lock, flags);
+
+ return ret;
+}
+
+static const struct clk_ops pll_ops = {
+ .enable = pll_enable,
+ .disable = pll_disable,
+ .recalc_rate = pll_recalc_rate,
+ .is_enabled = pll_is_enabled,
+};
+
+static struct clk_hw *clk_register_pll(struct device *dev, const char *name,
+ const char *parent_name,
+ void __iomem *reg,
+ unsigned long flags,
+ spinlock_t *lock)
+{
+ struct stm32_pll_obj *element;
+ struct clk_init_data init;
+ struct clk_hw *hw;
+ int err;
+
+ element = kzalloc(sizeof(*element), GFP_KERNEL);
+ if (!element)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &pll_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ element->hw.init = &init;
+ element->reg = reg;
+ element->lock = lock;
+
+ hw = &element->hw;
+ err = clk_hw_register(dev, hw);
+
+ if (err) {
+ kfree(element);
+ return ERR_PTR(err);
+ }
+
+ return hw;
+}
+
+/* Kernel Timer */
+struct timer_cker {
+ /* lock the kernel output divider register */
+ spinlock_t *lock;
+ void __iomem *apbdiv;
+ void __iomem *timpre;
+ struct clk_hw hw;
+};
+
+#define to_timer_cker(_hw) container_of(_hw, struct timer_cker, hw)
+
+#define APB_DIV_MASK 0x07
+#define TIM_PRE_MASK 0x01
+
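+/*
+ * When the APB prescaler is bypassed, the timer kernel clock simply follows
+ * the bus clock; otherwise it runs at 2x or 4x the bus clock, selected by
+ * the TIMPRE bit.
+ */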
+static unsigned long __bestmult(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct timer_cker *tim_ker = to_timer_cker(hw);
+ u32 prescaler;
+ unsigned int mult = 0;
+
+ prescaler = readl_relaxed(tim_ker->apbdiv) & APB_DIV_MASK;
+ if (prescaler < 2)
+ return 1;
+
+ mult = 2;
+
+ if (rate / parent_rate >= 4)
+ mult = 4;
+
+ return mult;
+}
+
+static long timer_ker_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long factor = __bestmult(hw, rate, *parent_rate);
+
+ return *parent_rate * factor;
+}
+
+static int timer_ker_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct timer_cker *tim_ker = to_timer_cker(hw);
+ unsigned long flags = 0;
+ unsigned long factor = __bestmult(hw, rate, parent_rate);
+ int ret = 0;
+
+ spin_lock_irqsave(tim_ker->lock, flags);
+
+ switch (factor) {
+ case 1:
+ break;
+ case 2:
+ writel_relaxed(0, tim_ker->timpre);
+ break;
+ case 4:
+ writel_relaxed(1, tim_ker->timpre);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ spin_unlock_irqrestore(tim_ker->lock, flags);
+
+ return ret;
+}
+
+static unsigned long timer_ker_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct timer_cker *tim_ker = to_timer_cker(hw);
+ u32 prescaler, timpre;
+ u32 mul;
+
+ prescaler = readl_relaxed(tim_ker->apbdiv) & APB_DIV_MASK;
+
+ timpre = readl_relaxed(tim_ker->timpre) & TIM_PRE_MASK;
+
+ if (!prescaler)
+ return parent_rate;
+
+ mul = (timpre + 1) * 2;
+
+ return parent_rate * mul;
+}
+
+static const struct clk_ops timer_ker_ops = {
+ .recalc_rate = timer_ker_recalc_rate,
+ .round_rate = timer_ker_round_rate,
+ .set_rate = timer_ker_set_rate,
+
+};
+
+static struct clk_hw *clk_register_cktim(struct device *dev, const char *name,
+ const char *parent_name,
+ unsigned long flags,
+ void __iomem *apbdiv,
+ void __iomem *timpre,
+ spinlock_t *lock)
+{
+ struct timer_cker *tim_ker;
+ struct clk_init_data init;
+ struct clk_hw *hw;
+ int err;
+
+ tim_ker = kzalloc(sizeof(*tim_ker), GFP_KERNEL);
+ if (!tim_ker)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &timer_ker_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ tim_ker->hw.init = &init;
+ tim_ker->lock = lock;
+ tim_ker->apbdiv = apbdiv;
+ tim_ker->timpre = timpre;
+
+ hw = &tim_ker->hw;
+ err = clk_hw_register(dev, hw);
+
+ if (err) {
+ kfree(tim_ker);
+ return ERR_PTR(err);
+ }
+
+ return hw;
+}
+
+struct stm32_pll_cfg {
+ u32 offset;
+};
+
+struct clk_hw *_clk_register_pll(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct stm32_pll_cfg *stm_pll_cfg = cfg->cfg;
+
+ return clk_register_pll(dev, cfg->name, cfg->parent_name,
+ base + stm_pll_cfg->offset, cfg->flags, lock);
+}
+
+struct stm32_cktim_cfg {
+ u32 offset_apbdiv;
+ u32 offset_timpre;
+};
+
+static struct clk_hw *_clk_register_cktim(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct stm32_cktim_cfg *cktim_cfg = cfg->cfg;
+
+ return clk_register_cktim(dev, cfg->name, cfg->parent_name, cfg->flags,
+ cktim_cfg->offset_apbdiv + base,
+ cktim_cfg->offset_timpre + base, lock);
+}
+
+static struct clk_hw *
+_clk_stm32_register_gate(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ return clk_stm32_register_gate_ops(dev,
+ cfg->name,
+ cfg->parent_name,
+ cfg->flags,
+ base,
+ cfg->cfg,
+ lock);
+}
+
+static struct clk_hw *
+_clk_stm32_register_composite(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ return clk_stm32_register_composite(dev, cfg->name, cfg->parent_names,
+ cfg->num_parents, base, cfg->cfg,
+ cfg->flags, lock);
+}
+
+#define GATE(_id, _name, _parent, _flags, _offset, _bit_idx, _gate_flags)\
+{\
+ .id = _id,\
+ .name = _name,\
+ .parent_name = _parent,\
+ .flags = _flags,\
+ .cfg = &(struct gate_cfg) {\
+ .reg_off = _offset,\
+ .bit_idx = _bit_idx,\
+ .gate_flags = _gate_flags,\
+ },\
+ .func = _clk_hw_register_gate,\
+}
+
+#define FIXED_FACTOR(_id, _name, _parent, _flags, _mult, _div)\
+{\
+ .id = _id,\
+ .name = _name,\
+ .parent_name = _parent,\
+ .flags = _flags,\
+ .cfg = &(struct fixed_factor_cfg) {\
+ .mult = _mult,\
+ .div = _div,\
+ },\
+ .func = _clk_hw_register_fixed_factor,\
+}
+
+#define DIV_TABLE(_id, _name, _parent, _flags, _offset, _shift, _width,\
+ _div_flags, _div_table)\
+{\
+ .id = _id,\
+ .name = _name,\
+ .parent_name = _parent,\
+ .flags = _flags,\
+ .cfg = &(struct div_cfg) {\
+ .reg_off = _offset,\
+ .shift = _shift,\
+ .width = _width,\
+ .div_flags = _div_flags,\
+ .table = _div_table,\
+ },\
+ .func = _clk_hw_register_divider_table,\
+}
+
+#define DIV(_id, _name, _parent, _flags, _offset, _shift, _width, _div_flags)\
+ DIV_TABLE(_id, _name, _parent, _flags, _offset, _shift, _width,\
+ _div_flags, NULL)
+
+#define MUX(_id, _name, _parents, _flags, _offset, _shift, _width, _mux_flags)\
+{\
+ .id = _id,\
+ .name = _name,\
+ .parent_names = _parents,\
+ .num_parents = ARRAY_SIZE(_parents),\
+ .flags = _flags,\
+ .cfg = &(struct mux_cfg) {\
+ .reg_off = _offset,\
+ .shift = _shift,\
+ .width = _width,\
+ .mux_flags = _mux_flags,\
+ },\
+ .func = _clk_hw_register_mux,\
+}
+
+#define PLL(_id, _name, _parent, _flags, _offset)\
+{\
+ .id = _id,\
+ .name = _name,\
+ .parent_name = _parent,\
+ .flags = _flags,\
+ .cfg = &(struct stm32_pll_cfg) {\
+ .offset = _offset,\
+ },\
+ .func = _clk_register_pll,\
+}
+
+#define STM32_CKTIM(_name, _parent, _flags, _offset_apbdiv, _offset_timpre)\
+{\
+ .id = NO_ID,\
+ .name = _name,\
+ .parent_name = _parent,\
+ .flags = _flags,\
+ .cfg = &(struct stm32_cktim_cfg) {\
+ .offset_apbdiv = _offset_apbdiv,\
+ .offset_timpre = _offset_timpre,\
+ },\
+ .func = _clk_register_cktim,\
+}
+
+#define STM32_TIM(_id, _name, _parent, _offset_set, _bit_idx)\
+ GATE_MP1(_id, _name, _parent, CLK_SET_RATE_PARENT,\
+ _offset_set, _bit_idx, 0)
+
+/* STM32 GATE */
+#define STM32_GATE(_id, _name, _parent, _flags, _gate)\
+{\
+ .id = _id,\
+ .name = _name,\
+ .parent_name = _parent,\
+ .flags = _flags,\
+ .cfg = (struct stm32_gate_cfg *) {_gate},\
+ .func = _clk_stm32_register_gate,\
+}
+
+#define _STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags, _mgate, _ops)\
+ (&(struct stm32_gate_cfg) {\
+ &(struct gate_cfg) {\
+ .reg_off = _gate_offset,\
+ .bit_idx = _gate_bit_idx,\
+ .gate_flags = _gate_flags,\
+ },\
+ .mgate = _mgate,\
+ .ops = _ops,\
+ })
+
+#define _STM32_MGATE(_mgate)\
+ (&per_gate_cfg[_mgate])
+
+#define _GATE(_gate_offset, _gate_bit_idx, _gate_flags)\
+ _STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags,\
+ NULL, NULL)\
+
+#define _GATE_MP1(_gate_offset, _gate_bit_idx, _gate_flags)\
+ _STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags,\
+ NULL, &mp1_gate_clk_ops)\
+
+#define _MGATE_MP1(_mgate)\
+ .gate = &per_gate_cfg[_mgate]
+
+#define GATE_MP1(_id, _name, _parent, _flags, _offset, _bit_idx, _gate_flags)\
+ STM32_GATE(_id, _name, _parent, _flags,\
+ _GATE_MP1(_offset, _bit_idx, _gate_flags))
+
+#define MGATE_MP1(_id, _name, _parent, _flags, _mgate)\
+ STM32_GATE(_id, _name, _parent, _flags,\
+ _STM32_MGATE(_mgate))
+
+#define _STM32_DIV(_div_offset, _div_shift, _div_width,\
+ _div_flags, _div_table, _ops)\
+ .div = &(struct stm32_div_cfg) {\
+ &(struct div_cfg) {\
+ .reg_off = _div_offset,\
+ .shift = _div_shift,\
+ .width = _div_width,\
+ .div_flags = _div_flags,\
+ .table = _div_table,\
+ },\
+ .ops = _ops,\
+ }
+
+#define _DIV(_div_offset, _div_shift, _div_width, _div_flags, _div_table)\
+ _STM32_DIV(_div_offset, _div_shift, _div_width,\
+ _div_flags, _div_table, NULL)\
+
+#define _STM32_MUX(_offset, _shift, _width, _mux_flags, _mmux, _ops)\
+ .mux = &(struct stm32_mux_cfg) {\
+ &(struct mux_cfg) {\
+ .reg_off = _offset,\
+ .shift = _shift,\
+ .width = _width,\
+ .mux_flags = _mux_flags,\
+ .table = NULL,\
+ },\
+ .mmux = _mmux,\
+ .ops = _ops,\
+ }
+
+#define _MUX(_offset, _shift, _width, _mux_flags)\
+ _STM32_MUX(_offset, _shift, _width, _mux_flags, NULL, NULL)\
+
+#define _MMUX(_mmux) .mux = &ker_mux_cfg[_mmux]
+
+#define PARENT(_parent) ((const char *[]) { _parent})
+
+#define _NO_MUX .mux = NULL
+#define _NO_DIV .div = NULL
+#define _NO_GATE .gate = NULL
+
+#define COMPOSITE(_id, _name, _parents, _flags, _gate, _mux, _div)\
+{\
+ .id = _id,\
+ .name = _name,\
+ .parent_names = _parents,\
+ .num_parents = ARRAY_SIZE(_parents),\
+ .flags = _flags,\
+ .cfg = &(struct stm32_composite_cfg) {\
+ _gate,\
+ _mux,\
+ _div,\
+ },\
+ .func = _clk_stm32_register_composite,\
+}
+
+#define PCLK(_id, _name, _parent, _flags, _mgate)\
+ MGATE_MP1(_id, _name, _parent, _flags, _mgate)
+
+#define KCLK(_id, _name, _parents, _flags, _mgate, _mmux)\
+ COMPOSITE(_id, _name, _parents, CLK_OPS_PARENT_ENABLE | _flags,\
+ _MGATE_MP1(_mgate),\
+ _MMUX(_mmux),\
+ _NO_DIV)
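+/*
+ * Example: KCLK(SDMMC1_K, "sdmmc1_k", sdmmc12_src, 0, G_SDMMC1, M_SDMMC12)
+ * builds a composite kernel clock from the shared SDMMC1/2 source mux and
+ * the SDMMC1 multi-gate, with no divider, and forces CLK_OPS_PARENT_ENABLE
+ * so parent clocks are enabled around clock operations.
+ */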
+
+enum {
+ G_SAI1,
+ G_SAI2,
+ G_SAI3,
+ G_SAI4,
+ G_SPI1,
+ G_SPI2,
+ G_SPI3,
+ G_SPI4,
+ G_SPI5,
+ G_SPI6,
+ G_SPDIF,
+ G_I2C1,
+ G_I2C2,
+ G_I2C3,
+ G_I2C4,
+ G_I2C5,
+ G_I2C6,
+ G_USART2,
+ G_UART4,
+ G_USART3,
+ G_UART5,
+ G_USART1,
+ G_USART6,
+ G_UART7,
+ G_UART8,
+ G_LPTIM1,
+ G_LPTIM2,
+ G_LPTIM3,
+ G_LPTIM4,
+ G_LPTIM5,
+ G_LTDC,
+ G_DSI,
+ G_QSPI,
+ G_FMC,
+ G_SDMMC1,
+ G_SDMMC2,
+ G_SDMMC3,
+ G_USBO,
+ G_USBPHY,
+ G_RNG1,
+ G_RNG2,
+ G_FDCAN,
+ G_DAC12,
+ G_CEC,
+ G_ADC12,
+ G_GPU,
+ G_STGEN,
+ G_DFSDM,
+ G_ADFSDM,
+ G_TIM2,
+ G_TIM3,
+ G_TIM4,
+ G_TIM5,
+ G_TIM6,
+ G_TIM7,
+ G_TIM12,
+ G_TIM13,
+ G_TIM14,
+ G_MDIO,
+ G_TIM1,
+ G_TIM8,
+ G_TIM15,
+ G_TIM16,
+ G_TIM17,
+ G_SYSCFG,
+ G_VREF,
+ G_TMPSENS,
+ G_PMBCTRL,
+ G_HDP,
+ G_IWDG2,
+ G_STGENRO,
+ G_DMA1,
+ G_DMA2,
+ G_DMAMUX,
+ G_DCMI,
+ G_CRYP2,
+ G_HASH2,
+ G_CRC2,
+ G_HSEM,
+ G_IPCC,
+ G_GPIOA,
+ G_GPIOB,
+ G_GPIOC,
+ G_GPIOD,
+ G_GPIOE,
+ G_GPIOF,
+ G_GPIOG,
+ G_GPIOH,
+ G_GPIOI,
+ G_GPIOJ,
+ G_GPIOK,
+ G_MDMA,
+ G_ETHCK,
+ G_ETHTX,
+ G_ETHRX,
+ G_ETHMAC,
+ G_CRC1,
+ G_USBH,
+ G_ETHSTP,
+ G_RTCAPB,
+ G_TZC,
+ G_TZPC,
+ G_IWDG1,
+ G_BSEC,
+ G_GPIOZ,
+ G_CRYP1,
+ G_HASH1,
+ G_BKPSRAM,
+
+ G_LAST
+};
+
+struct stm32_mgate mp1_mgate[G_LAST];
+
+#define _K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
+ _mgate, _ops)\
+ [_id] = {\
+ &(struct gate_cfg) {\
+ .reg_off = _gate_offset,\
+ .bit_idx = _gate_bit_idx,\
+ .gate_flags = _gate_flags,\
+ },\
+ .mgate = _mgate,\
+ .ops = _ops,\
+ }
+
+#define K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags)\
+ _K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
+ NULL, &mp1_gate_clk_ops)
+
+#define K_MGATE(_id, _gate_offset, _gate_bit_idx, _gate_flags)\
+ _K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
+ &mp1_mgate[_id], &mp1_mgate_clk_ops)
+
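+/*
+ * K_GATE entries use the plain MP1 set/clear gate ops; K_MGATE entries also
+ * point at a shared mp1_mgate slot so several kernel clocks can
+ * reference-count the same enable bit.
+ */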
+/* Peripheral gates */
+struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
+ /* Multi gates */
+ K_GATE(G_MDIO, RCC_APB1ENSETR, 31, 0),
+ K_MGATE(G_DAC12, RCC_APB1ENSETR, 29, 0),
+ K_MGATE(G_CEC, RCC_APB1ENSETR, 27, 0),
+ K_MGATE(G_SPDIF, RCC_APB1ENSETR, 26, 0),
+ K_MGATE(G_I2C5, RCC_APB1ENSETR, 24, 0),
+ K_MGATE(G_I2C3, RCC_APB1ENSETR, 23, 0),
+ K_MGATE(G_I2C2, RCC_APB1ENSETR, 22, 0),
+ K_MGATE(G_I2C1, RCC_APB1ENSETR, 21, 0),
+ K_MGATE(G_UART8, RCC_APB1ENSETR, 19, 0),
+ K_MGATE(G_UART7, RCC_APB1ENSETR, 18, 0),
+ K_MGATE(G_UART5, RCC_APB1ENSETR, 17, 0),
+ K_MGATE(G_UART4, RCC_APB1ENSETR, 16, 0),
+ K_MGATE(G_USART3, RCC_APB1ENSETR, 15, 0),
+ K_MGATE(G_USART2, RCC_APB1ENSETR, 14, 0),
+ K_MGATE(G_SPI3, RCC_APB1ENSETR, 12, 0),
+ K_MGATE(G_SPI2, RCC_APB1ENSETR, 11, 0),
+ K_MGATE(G_LPTIM1, RCC_APB1ENSETR, 9, 0),
+ K_GATE(G_TIM14, RCC_APB1ENSETR, 8, 0),
+ K_GATE(G_TIM13, RCC_APB1ENSETR, 7, 0),
+ K_GATE(G_TIM12, RCC_APB1ENSETR, 6, 0),
+ K_GATE(G_TIM7, RCC_APB1ENSETR, 5, 0),
+ K_GATE(G_TIM6, RCC_APB1ENSETR, 4, 0),
+ K_GATE(G_TIM5, RCC_APB1ENSETR, 3, 0),
+ K_GATE(G_TIM4, RCC_APB1ENSETR, 2, 0),
+ K_GATE(G_TIM3, RCC_APB1ENSETR, 1, 0),
+ K_GATE(G_TIM2, RCC_APB1ENSETR, 0, 0),
+
+ K_MGATE(G_FDCAN, RCC_APB2ENSETR, 24, 0),
+ K_GATE(G_ADFSDM, RCC_APB2ENSETR, 21, 0),
+ K_GATE(G_DFSDM, RCC_APB2ENSETR, 20, 0),
+ K_MGATE(G_SAI3, RCC_APB2ENSETR, 18, 0),
+ K_MGATE(G_SAI2, RCC_APB2ENSETR, 17, 0),
+ K_MGATE(G_SAI1, RCC_APB2ENSETR, 16, 0),
+ K_MGATE(G_USART6, RCC_APB2ENSETR, 13, 0),
+ K_MGATE(G_SPI5, RCC_APB2ENSETR, 10, 0),
+ K_MGATE(G_SPI4, RCC_APB2ENSETR, 9, 0),
+ K_MGATE(G_SPI1, RCC_APB2ENSETR, 8, 0),
+ K_GATE(G_TIM17, RCC_APB2ENSETR, 4, 0),
+ K_GATE(G_TIM16, RCC_APB2ENSETR, 3, 0),
+ K_GATE(G_TIM15, RCC_APB2ENSETR, 2, 0),
+ K_GATE(G_TIM8, RCC_APB2ENSETR, 1, 0),
+ K_GATE(G_TIM1, RCC_APB2ENSETR, 0, 0),
+
+ K_GATE(G_HDP, RCC_APB3ENSETR, 20, 0),
+ K_GATE(G_PMBCTRL, RCC_APB3ENSETR, 17, 0),
+ K_GATE(G_TMPSENS, RCC_APB3ENSETR, 16, 0),
+ K_GATE(G_VREF, RCC_APB3ENSETR, 13, 0),
+ K_GATE(G_SYSCFG, RCC_APB3ENSETR, 11, 0),
+ K_MGATE(G_SAI4, RCC_APB3ENSETR, 8, 0),
+ K_MGATE(G_LPTIM5, RCC_APB3ENSETR, 3, 0),
+ K_MGATE(G_LPTIM4, RCC_APB3ENSETR, 2, 0),
+ K_MGATE(G_LPTIM3, RCC_APB3ENSETR, 1, 0),
+ K_MGATE(G_LPTIM2, RCC_APB3ENSETR, 0, 0),
+
+ K_GATE(G_STGENRO, RCC_APB4ENSETR, 20, 0),
+ K_MGATE(G_USBPHY, RCC_APB4ENSETR, 16, 0),
+ K_GATE(G_IWDG2, RCC_APB4ENSETR, 15, 0),
+ K_MGATE(G_DSI, RCC_APB4ENSETR, 4, 0),
+ K_MGATE(G_LTDC, RCC_APB4ENSETR, 0, 0),
+
+ K_GATE(G_STGEN, RCC_APB5ENSETR, 20, 0),
+ K_GATE(G_BSEC, RCC_APB5ENSETR, 16, 0),
+ K_GATE(G_IWDG1, RCC_APB5ENSETR, 15, 0),
+ K_GATE(G_TZPC, RCC_APB5ENSETR, 13, 0),
+ K_GATE(G_TZC, RCC_APB5ENSETR, 12, 0),
+ K_GATE(G_RTCAPB, RCC_APB5ENSETR, 8, 0),
+ K_MGATE(G_USART1, RCC_APB5ENSETR, 4, 0),
+ K_MGATE(G_I2C6, RCC_APB5ENSETR, 3, 0),
+ K_MGATE(G_I2C4, RCC_APB5ENSETR, 2, 0),
+ K_MGATE(G_SPI6, RCC_APB5ENSETR, 0, 0),
+
+ K_MGATE(G_SDMMC3, RCC_AHB2ENSETR, 16, 0),
+ K_MGATE(G_USBO, RCC_AHB2ENSETR, 8, 0),
+ K_MGATE(G_ADC12, RCC_AHB2ENSETR, 5, 0),
+ K_GATE(G_DMAMUX, RCC_AHB2ENSETR, 2, 0),
+ K_GATE(G_DMA2, RCC_AHB2ENSETR, 1, 0),
+ K_GATE(G_DMA1, RCC_AHB2ENSETR, 0, 0),
+
+ K_GATE(G_IPCC, RCC_AHB3ENSETR, 12, 0),
+ K_GATE(G_HSEM, RCC_AHB3ENSETR, 11, 0),
+ K_GATE(G_CRC2, RCC_AHB3ENSETR, 7, 0),
+ K_MGATE(G_RNG2, RCC_AHB3ENSETR, 6, 0),
+ K_GATE(G_HASH2, RCC_AHB3ENSETR, 5, 0),
+ K_GATE(G_CRYP2, RCC_AHB3ENSETR, 4, 0),
+ K_GATE(G_DCMI, RCC_AHB3ENSETR, 0, 0),
+
+ K_GATE(G_GPIOK, RCC_AHB4ENSETR, 10, 0),
+ K_GATE(G_GPIOJ, RCC_AHB4ENSETR, 9, 0),
+ K_GATE(G_GPIOI, RCC_AHB4ENSETR, 8, 0),
+ K_GATE(G_GPIOH, RCC_AHB4ENSETR, 7, 0),
+ K_GATE(G_GPIOG, RCC_AHB4ENSETR, 6, 0),
+ K_GATE(G_GPIOF, RCC_AHB4ENSETR, 5, 0),
+ K_GATE(G_GPIOE, RCC_AHB4ENSETR, 4, 0),
+ K_GATE(G_GPIOD, RCC_AHB4ENSETR, 3, 0),
+ K_GATE(G_GPIOC, RCC_AHB4ENSETR, 2, 0),
+ K_GATE(G_GPIOB, RCC_AHB4ENSETR, 1, 0),
+ K_GATE(G_GPIOA, RCC_AHB4ENSETR, 0, 0),
+
+ K_GATE(G_BKPSRAM, RCC_AHB5ENSETR, 8, 0),
+ K_MGATE(G_RNG1, RCC_AHB5ENSETR, 6, 0),
+ K_GATE(G_HASH1, RCC_AHB5ENSETR, 5, 0),
+ K_GATE(G_CRYP1, RCC_AHB5ENSETR, 4, 0),
+ K_GATE(G_GPIOZ, RCC_AHB5ENSETR, 0, 0),
+
+ K_GATE(G_USBH, RCC_AHB6ENSETR, 24, 0),
+ K_GATE(G_CRC1, RCC_AHB6ENSETR, 20, 0),
+ K_MGATE(G_SDMMC2, RCC_AHB6ENSETR, 17, 0),
+ K_MGATE(G_SDMMC1, RCC_AHB6ENSETR, 16, 0),
+ K_MGATE(G_QSPI, RCC_AHB6ENSETR, 14, 0),
+ K_MGATE(G_FMC, RCC_AHB6ENSETR, 12, 0),
+ K_GATE(G_ETHMAC, RCC_AHB6ENSETR, 10, 0),
+ K_GATE(G_ETHRX, RCC_AHB6ENSETR, 9, 0),
+ K_GATE(G_ETHTX, RCC_AHB6ENSETR, 8, 0),
+ K_GATE(G_ETHCK, RCC_AHB6ENSETR, 7, 0),
+ K_MGATE(G_GPU, RCC_AHB6ENSETR, 5, 0),
+ K_GATE(G_MDMA, RCC_AHB6ENSETR, 0, 0),
+ K_GATE(G_ETHSTP, RCC_AHB6LPENSETR, 11, 0),
+};
+
+enum {
+ M_SDMMC12,
+ M_SDMMC3,
+ M_FMC,
+ M_QSPI,
+ M_RNG1,
+ M_RNG2,
+ M_USBPHY,
+ M_USBO,
+ M_STGEN,
+ M_SPDIF,
+ M_SPI1,
+ M_SPI23,
+ M_SPI45,
+ M_SPI6,
+ M_CEC,
+ M_I2C12,
+ M_I2C35,
+ M_I2C46,
+ M_LPTIM1,
+ M_LPTIM23,
+ M_LPTIM45,
+ M_USART1,
+ M_UART24,
+ M_UART35,
+ M_USART6,
+ M_UART78,
+ M_SAI1,
+ M_SAI2,
+ M_SAI3,
+ M_SAI4,
+ M_DSI,
+ M_FDCAN,
+ M_ADC12,
+ M_ETHCK,
+ M_CKPER,
+ M_LAST
+};
+
+struct stm32_mmux ker_mux[M_LAST];
+
+#define _K_MUX(_id, _offset, _shift, _width, _mux_flags, _mmux, _ops)\
+ [_id] = {\
+ &(struct mux_cfg) {\
+ .reg_off = _offset,\
+ .shift = _shift,\
+ .width = _width,\
+ .mux_flags = _mux_flags,\
+ .table = NULL,\
+ },\
+ .mmux = _mmux,\
+ .ops = _ops,\
+ }
+
+#define K_MUX(_id, _offset, _shift, _width, _mux_flags)\
+ _K_MUX(_id, _offset, _shift, _width, _mux_flags,\
+ NULL, NULL)
+
+#define K_MMUX(_id, _offset, _shift, _width, _mux_flags)\
+ _K_MUX(_id, _offset, _shift, _width, _mux_flags,\
+ &ker_mux[_id], &clk_mmux_ops)
+
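+/*
+ * K_MUX entries are simple selectors; K_MMUX entries also register in
+ * ker_mux[] so sibling kernel clocks sharing the selector are reparented
+ * together through clk_mmux_ops.
+ */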
+const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = {
+ /* Kernel multi mux */
+ K_MMUX(M_SDMMC12, RCC_SDMMC12CKSELR, 0, 3, 0),
+ K_MMUX(M_SPI23, RCC_SPI2S23CKSELR, 0, 3, 0),
+ K_MMUX(M_SPI45, RCC_SPI2S45CKSELR, 0, 3, 0),
+ K_MMUX(M_I2C12, RCC_I2C12CKSELR, 0, 3, 0),
+ K_MMUX(M_I2C35, RCC_I2C35CKSELR, 0, 3, 0),
+ K_MMUX(M_LPTIM23, RCC_LPTIM23CKSELR, 0, 3, 0),
+ K_MMUX(M_LPTIM45, RCC_LPTIM45CKSELR, 0, 3, 0),
+ K_MMUX(M_UART24, RCC_UART24CKSELR, 0, 3, 0),
+ K_MMUX(M_UART35, RCC_UART35CKSELR, 0, 3, 0),
+ K_MMUX(M_UART78, RCC_UART78CKSELR, 0, 3, 0),
+ K_MMUX(M_SAI1, RCC_SAI1CKSELR, 0, 3, 0),
+ K_MMUX(M_ETHCK, RCC_ETHCKSELR, 0, 2, 0),
+ K_MMUX(M_I2C46, RCC_I2C46CKSELR, 0, 3, 0),
+
+ /* Kernel simple mux */
+ K_MUX(M_RNG2, RCC_RNG2CKSELR, 0, 2, 0),
+ K_MUX(M_SDMMC3, RCC_SDMMC3CKSELR, 0, 3, 0),
+ K_MUX(M_FMC, RCC_FMCCKSELR, 0, 2, 0),
+ K_MUX(M_QSPI, RCC_QSPICKSELR, 0, 2, 0),
+ K_MUX(M_USBPHY, RCC_USBCKSELR, 0, 2, 0),
+ K_MUX(M_USBO, RCC_USBCKSELR, 4, 1, 0),
+ K_MUX(M_SPDIF, RCC_SPDIFCKSELR, 0, 2, 0),
+ K_MUX(M_SPI1, RCC_SPI2S1CKSELR, 0, 3, 0),
+ K_MUX(M_CEC, RCC_CECCKSELR, 0, 2, 0),
+ K_MUX(M_LPTIM1, RCC_LPTIM1CKSELR, 0, 3, 0),
+ K_MUX(M_USART6, RCC_UART6CKSELR, 0, 3, 0),
+ K_MUX(M_FDCAN, RCC_FDCANCKSELR, 0, 2, 0),
+ K_MUX(M_SAI2, RCC_SAI2CKSELR, 0, 3, 0),
+ K_MUX(M_SAI3, RCC_SAI3CKSELR, 0, 3, 0),
+ K_MUX(M_SAI4, RCC_SAI4CKSELR, 0, 3, 0),
+ K_MUX(M_ADC12, RCC_ADCCKSELR, 0, 2, 0),
+ K_MUX(M_DSI, RCC_DSICKSELR, 0, 1, 0),
+ K_MUX(M_CKPER, RCC_CPERCKSELR, 0, 2, 0),
+ K_MUX(M_RNG1, RCC_RNG1CKSELR, 0, 2, 0),
+ K_MUX(M_STGEN, RCC_STGENCKSELR, 0, 2, 0),
+ K_MUX(M_USART1, RCC_UART1CKSELR, 0, 3, 0),
+ K_MUX(M_SPI6, RCC_SPI6CKSELR, 0, 3, 0),
+};
+
+static const struct clock_config stm32mp1_clock_cfg[] = {
+ /* Oscillator divider */
+ DIV(NO_ID, "clk-hsi-div", "clk-hsi", 0, RCC_HSICFGR, 0, 2,
+ CLK_DIVIDER_READ_ONLY),
+
+ /* External / Internal Oscillators */
+ GATE_MP1(CK_HSE, "ck_hse", "clk-hse", 0, RCC_OCENSETR, 8, 0),
+ GATE_MP1(CK_CSI, "ck_csi", "clk-csi", 0, RCC_OCENSETR, 4, 0),
+ GATE_MP1(CK_HSI, "ck_hsi", "clk-hsi-div", 0, RCC_OCENSETR, 0, 0),
+ GATE(CK_LSI, "ck_lsi", "clk-lsi", 0, RCC_RDLSICR, 0, 0),
+ GATE(CK_LSE, "ck_lse", "clk-lse", 0, RCC_BDCR, 0, 0),
+
+ FIXED_FACTOR(CK_HSE_DIV2, "clk-hse-div2", "ck_hse", 0, 1, 2),
+
+ /* ref clock pll */
+ MUX(NO_ID, "ref1", ref12_parents, CLK_OPS_PARENT_ENABLE, RCC_RCK12SELR,
+ 0, 2, CLK_MUX_READ_ONLY),
+
+ MUX(NO_ID, "ref3", ref3_parents, CLK_OPS_PARENT_ENABLE, RCC_RCK3SELR,
+ 0, 2, CLK_MUX_READ_ONLY),
+
+ MUX(NO_ID, "ref4", ref4_parents, CLK_OPS_PARENT_ENABLE, RCC_RCK4SELR,
+ 0, 2, CLK_MUX_READ_ONLY),
+
+ /* PLLs */
+ PLL(PLL1, "pll1", "ref1", CLK_IGNORE_UNUSED, RCC_PLL1CR),
+ PLL(PLL2, "pll2", "ref1", CLK_IGNORE_UNUSED, RCC_PLL2CR),
+ PLL(PLL3, "pll3", "ref3", CLK_IGNORE_UNUSED, RCC_PLL3CR),
+ PLL(PLL4, "pll4", "ref4", CLK_IGNORE_UNUSED, RCC_PLL4CR),
+
+ /* ODF */
+ COMPOSITE(PLL1_P, "pll1_p", PARENT("pll1"), 0,
+ _GATE(RCC_PLL1CR, 4, 0),
+ _NO_MUX,
+ _DIV(RCC_PLL1CFGR2, 0, 7, 0, NULL)),
+
+ COMPOSITE(PLL2_P, "pll2_p", PARENT("pll2"), 0,
+ _GATE(RCC_PLL2CR, 4, 0),
+ _NO_MUX,
+ _DIV(RCC_PLL2CFGR2, 0, 7, 0, NULL)),
+
+ COMPOSITE(PLL2_Q, "pll2_q", PARENT("pll2"), 0,
+ _GATE(RCC_PLL2CR, 5, 0),
+ _NO_MUX,
+ _DIV(RCC_PLL2CFGR2, 8, 7, 0, NULL)),
+
+ COMPOSITE(PLL2_R, "pll2_r", PARENT("pll2"), CLK_IS_CRITICAL,
+ _GATE(RCC_PLL2CR, 6, 0),
+ _NO_MUX,
+ _DIV(RCC_PLL2CFGR2, 16, 7, 0, NULL)),
+
+ COMPOSITE(PLL3_P, "pll3_p", PARENT("pll3"), 0,
+ _GATE(RCC_PLL3CR, 4, 0),
+ _NO_MUX,
+ _DIV(RCC_PLL3CFGR2, 0, 7, 0, NULL)),
+
+ COMPOSITE(PLL3_Q, "pll3_q", PARENT("pll3"), 0,
+ _GATE(RCC_PLL3CR, 5, 0),
+ _NO_MUX,
+ _DIV(RCC_PLL3CFGR2, 8, 7, 0, NULL)),
+
+ COMPOSITE(PLL3_R, "pll3_r", PARENT("pll3"), 0,
+ _GATE(RCC_PLL3CR, 6, 0),
+ _NO_MUX,
+ _DIV(RCC_PLL3CFGR2, 16, 7, 0, NULL)),
+
+ COMPOSITE(PLL4_P, "pll4_p", PARENT("pll4"), 0,
+ _GATE(RCC_PLL4CR, 4, 0),
+ _NO_MUX,
+ _DIV(RCC_PLL4CFGR2, 0, 7, 0, NULL)),
+
+ COMPOSITE(PLL4_Q, "pll4_q", PARENT("pll4"), 0,
+ _GATE(RCC_PLL4CR, 5, 0),
+ _NO_MUX,
+ _DIV(RCC_PLL4CFGR2, 8, 7, 0, NULL)),
+
+ COMPOSITE(PLL4_R, "pll4_r", PARENT("pll4"), 0,
+ _GATE(RCC_PLL4CR, 6, 0),
+ _NO_MUX,
+ _DIV(RCC_PLL4CFGR2, 16, 7, 0, NULL)),
+
+ /* MUX system clocks */
+ MUX(CK_PER, "ck_per", per_src, CLK_OPS_PARENT_ENABLE,
+ RCC_CPERCKSELR, 0, 2, 0),
+
+ MUX(CK_MPU, "ck_mpu", cpu_src, CLK_OPS_PARENT_ENABLE |
+ CLK_IS_CRITICAL, RCC_MPCKSELR, 0, 2, 0),
+
+ COMPOSITE(CK_AXI, "ck_axi", axi_src, CLK_IS_CRITICAL |
+ CLK_OPS_PARENT_ENABLE,
+ _NO_GATE,
+ _MUX(RCC_ASSCKSELR, 0, 2, 0),
+ _DIV(RCC_AXIDIVR, 0, 3, 0, axi_div_table)),
+
+ COMPOSITE(CK_MCU, "ck_mcu", mcu_src, CLK_IS_CRITICAL |
+ CLK_OPS_PARENT_ENABLE,
+ _NO_GATE,
+ _MUX(RCC_MSSCKSELR, 0, 2, 0),
+ _DIV(RCC_MCUDIVR, 0, 4, 0, mcu_div_table)),
+
+ DIV_TABLE(NO_ID, "pclk1", "ck_mcu", CLK_IGNORE_UNUSED, RCC_APB1DIVR, 0,
+ 3, CLK_DIVIDER_READ_ONLY, apb_div_table),
+
+ DIV_TABLE(NO_ID, "pclk2", "ck_mcu", CLK_IGNORE_UNUSED, RCC_APB2DIVR, 0,
+ 3, CLK_DIVIDER_READ_ONLY, apb_div_table),
+
+ DIV_TABLE(NO_ID, "pclk3", "ck_mcu", CLK_IGNORE_UNUSED, RCC_APB3DIVR, 0,
+ 3, CLK_DIVIDER_READ_ONLY, apb_div_table),
+
+ DIV_TABLE(NO_ID, "pclk4", "ck_axi", CLK_IGNORE_UNUSED, RCC_APB4DIVR, 0,
+ 3, CLK_DIVIDER_READ_ONLY, apb_div_table),
+
+ DIV_TABLE(NO_ID, "pclk5", "ck_axi", CLK_IGNORE_UNUSED, RCC_APB5DIVR, 0,
+ 3, CLK_DIVIDER_READ_ONLY, apb_div_table),
+
+ /* Kernel Timers */
+ STM32_CKTIM("ck1_tim", "pclk1", 0, RCC_APB1DIVR, RCC_TIMG1PRER),
+ STM32_CKTIM("ck2_tim", "pclk2", 0, RCC_APB2DIVR, RCC_TIMG2PRER),
+
+ STM32_TIM(TIM2_K, "tim2_k", "ck1_tim", RCC_APB1ENSETR, 0),
+ STM32_TIM(TIM3_K, "tim3_k", "ck1_tim", RCC_APB1ENSETR, 1),
+ STM32_TIM(TIM4_K, "tim4_k", "ck1_tim", RCC_APB1ENSETR, 2),
+ STM32_TIM(TIM5_K, "tim5_k", "ck1_tim", RCC_APB1ENSETR, 3),
+ STM32_TIM(TIM6_K, "tim6_k", "ck1_tim", RCC_APB1ENSETR, 4),
+ STM32_TIM(TIM7_K, "tim7_k", "ck1_tim", RCC_APB1ENSETR, 5),
+ STM32_TIM(TIM12_K, "tim12_k", "ck1_tim", RCC_APB1ENSETR, 6),
+ STM32_TIM(TIM13_K, "tim13_k", "ck1_tim", RCC_APB1ENSETR, 7),
+ STM32_TIM(TIM14_K, "tim14_k", "ck1_tim", RCC_APB1ENSETR, 8),
+ STM32_TIM(TIM1_K, "tim1_k", "ck2_tim", RCC_APB2ENSETR, 0),
+ STM32_TIM(TIM8_K, "tim8_k", "ck2_tim", RCC_APB2ENSETR, 1),
+ STM32_TIM(TIM15_K, "tim15_k", "ck2_tim", RCC_APB2ENSETR, 2),
+ STM32_TIM(TIM16_K, "tim16_k", "ck2_tim", RCC_APB2ENSETR, 3),
+ STM32_TIM(TIM17_K, "tim17_k", "ck2_tim", RCC_APB2ENSETR, 4),
+
+ /* Peripheral clocks */
+ PCLK(TIM2, "tim2", "pclk1", CLK_IGNORE_UNUSED, G_TIM2),
+ PCLK(TIM3, "tim3", "pclk1", CLK_IGNORE_UNUSED, G_TIM3),
+ PCLK(TIM4, "tim4", "pclk1", CLK_IGNORE_UNUSED, G_TIM4),
+ PCLK(TIM5, "tim5", "pclk1", CLK_IGNORE_UNUSED, G_TIM5),
+ PCLK(TIM6, "tim6", "pclk1", CLK_IGNORE_UNUSED, G_TIM6),
+ PCLK(TIM7, "tim7", "pclk1", CLK_IGNORE_UNUSED, G_TIM7),
+ PCLK(TIM12, "tim12", "pclk1", CLK_IGNORE_UNUSED, G_TIM12),
+ PCLK(TIM13, "tim13", "pclk1", CLK_IGNORE_UNUSED, G_TIM13),
+ PCLK(TIM14, "tim14", "pclk1", CLK_IGNORE_UNUSED, G_TIM14),
+ PCLK(LPTIM1, "lptim1", "pclk1", 0, G_LPTIM1),
+ PCLK(SPI2, "spi2", "pclk1", 0, G_SPI2),
+ PCLK(SPI3, "spi3", "pclk1", 0, G_SPI3),
+ PCLK(USART2, "usart2", "pclk1", 0, G_USART2),
+ PCLK(USART3, "usart3", "pclk1", 0, G_USART3),
+ PCLK(UART4, "uart4", "pclk1", 0, G_UART4),
+ PCLK(UART5, "uart5", "pclk1", 0, G_UART5),
+ PCLK(UART7, "uart7", "pclk1", 0, G_UART7),
+ PCLK(UART8, "uart8", "pclk1", 0, G_UART8),
+ PCLK(I2C1, "i2c1", "pclk1", 0, G_I2C1),
+ PCLK(I2C2, "i2c2", "pclk1", 0, G_I2C2),
+ PCLK(I2C3, "i2c3", "pclk1", 0, G_I2C3),
+ PCLK(I2C5, "i2c5", "pclk1", 0, G_I2C5),
+ PCLK(SPDIF, "spdif", "pclk1", 0, G_SPDIF),
+ PCLK(CEC, "cec", "pclk1", 0, G_CEC),
+ PCLK(DAC12, "dac12", "pclk1", 0, G_DAC12),
+ PCLK(MDIO, "mdio", "pclk1", 0, G_MDIO),
+ PCLK(TIM1, "tim1", "pclk2", CLK_IGNORE_UNUSED, G_TIM1),
+ PCLK(TIM8, "tim8", "pclk2", CLK_IGNORE_UNUSED, G_TIM8),
+ PCLK(TIM15, "tim15", "pclk2", CLK_IGNORE_UNUSED, G_TIM15),
+ PCLK(TIM16, "tim16", "pclk2", CLK_IGNORE_UNUSED, G_TIM16),
+ PCLK(TIM17, "tim17", "pclk2", CLK_IGNORE_UNUSED, G_TIM17),
+ PCLK(SPI1, "spi1", "pclk2", 0, G_SPI1),
+ PCLK(SPI4, "spi4", "pclk2", 0, G_SPI4),
+ PCLK(SPI5, "spi5", "pclk2", 0, G_SPI5),
+ PCLK(USART6, "usart6", "pclk2", 0, G_USART6),
+ PCLK(SAI1, "sai1", "pclk2", 0, G_SAI1),
+ PCLK(SAI2, "sai2", "pclk2", 0, G_SAI2),
+ PCLK(SAI3, "sai3", "pclk2", 0, G_SAI3),
+ PCLK(DFSDM, "dfsdm", "pclk2", 0, G_DFSDM),
+ PCLK(FDCAN, "fdcan", "pclk2", 0, G_FDCAN),
+ PCLK(LPTIM2, "lptim2", "pclk3", 0, G_LPTIM2),
+ PCLK(LPTIM3, "lptim3", "pclk3", 0, G_LPTIM3),
+ PCLK(LPTIM4, "lptim4", "pclk3", 0, G_LPTIM4),
+ PCLK(LPTIM5, "lptim5", "pclk3", 0, G_LPTIM5),
+ PCLK(SAI4, "sai4", "pclk3", 0, G_SAI4),
+ PCLK(SYSCFG, "syscfg", "pclk3", 0, G_SYSCFG),
+ PCLK(VREF, "vref", "pclk3", 0, G_VREF),
+ PCLK(TMPSENS, "tmpsens", "pclk3", 0, G_TMPSENS),
+ PCLK(PMBCTRL, "pmbctrl", "pclk3", 0, G_PMBCTRL),
+ PCLK(HDP, "hdp", "pclk3", 0, G_HDP),
+ PCLK(LTDC, "ltdc", "pclk4", 0, G_LTDC),
+ PCLK(DSI, "dsi", "pclk4", 0, G_DSI),
+ PCLK(IWDG2, "iwdg2", "pclk4", 0, G_IWDG2),
+ PCLK(USBPHY, "usbphy", "pclk4", 0, G_USBPHY),
+ PCLK(STGENRO, "stgenro", "pclk4", 0, G_STGENRO),
+ PCLK(SPI6, "spi6", "pclk5", 0, G_SPI6),
+ PCLK(I2C4, "i2c4", "pclk5", 0, G_I2C4),
+ PCLK(I2C6, "i2c6", "pclk5", 0, G_I2C6),
+ PCLK(USART1, "usart1", "pclk5", 0, G_USART1),
+ PCLK(RTCAPB, "rtcapb", "pclk5", CLK_IGNORE_UNUSED |
+ CLK_IS_CRITICAL, G_RTCAPB),
+ PCLK(TZC, "tzc", "pclk5", CLK_IGNORE_UNUSED, G_TZC),
+ PCLK(TZPC, "tzpc", "pclk5", CLK_IGNORE_UNUSED, G_TZPC),
+ PCLK(IWDG1, "iwdg1", "pclk5", 0, G_IWDG1),
+ PCLK(BSEC, "bsec", "pclk5", CLK_IGNORE_UNUSED, G_BSEC),
+ PCLK(STGEN, "stgen", "pclk5", CLK_IGNORE_UNUSED, G_STGEN),
+ PCLK(DMA1, "dma1", "ck_mcu", 0, G_DMA1),
+ PCLK(DMA2, "dma2", "ck_mcu", 0, G_DMA2),
+ PCLK(DMAMUX, "dmamux", "ck_mcu", 0, G_DMAMUX),
+ PCLK(ADC12, "adc12", "ck_mcu", 0, G_ADC12),
+ PCLK(USBO, "usbo", "ck_mcu", 0, G_USBO),
+ PCLK(SDMMC3, "sdmmc3", "ck_mcu", 0, G_SDMMC3),
+ PCLK(DCMI, "dcmi", "ck_mcu", 0, G_DCMI),
+ PCLK(CRYP2, "cryp2", "ck_mcu", 0, G_CRYP2),
+ PCLK(HASH2, "hash2", "ck_mcu", 0, G_HASH2),
+ PCLK(RNG2, "rng2", "ck_mcu", 0, G_RNG2),
+ PCLK(CRC2, "crc2", "ck_mcu", 0, G_CRC2),
+ PCLK(HSEM, "hsem", "ck_mcu", 0, G_HSEM),
+ PCLK(IPCC, "ipcc", "ck_mcu", 0, G_IPCC),
+ PCLK(GPIOA, "gpioa", "ck_mcu", 0, G_GPIOA),
+ PCLK(GPIOB, "gpiob", "ck_mcu", 0, G_GPIOB),
+ PCLK(GPIOC, "gpioc", "ck_mcu", 0, G_GPIOC),
+ PCLK(GPIOD, "gpiod", "ck_mcu", 0, G_GPIOD),
+ PCLK(GPIOE, "gpioe", "ck_mcu", 0, G_GPIOE),
+ PCLK(GPIOF, "gpiof", "ck_mcu", 0, G_GPIOF),
+ PCLK(GPIOG, "gpiog", "ck_mcu", 0, G_GPIOG),
+ PCLK(GPIOH, "gpioh", "ck_mcu", 0, G_GPIOH),
+ PCLK(GPIOI, "gpioi", "ck_mcu", 0, G_GPIOI),
+ PCLK(GPIOJ, "gpioj", "ck_mcu", 0, G_GPIOJ),
+ PCLK(GPIOK, "gpiok", "ck_mcu", 0, G_GPIOK),
+ PCLK(GPIOZ, "gpioz", "ck_axi", CLK_IGNORE_UNUSED, G_GPIOZ),
+ PCLK(CRYP1, "cryp1", "ck_axi", CLK_IGNORE_UNUSED, G_CRYP1),
+ PCLK(HASH1, "hash1", "ck_axi", CLK_IGNORE_UNUSED, G_HASH1),
+ PCLK(RNG1, "rng1", "ck_axi", 0, G_RNG1),
+ PCLK(BKPSRAM, "bkpsram", "ck_axi", CLK_IGNORE_UNUSED, G_BKPSRAM),
+ PCLK(MDMA, "mdma", "ck_axi", 0, G_MDMA),
+ PCLK(GPU, "gpu", "ck_axi", 0, G_GPU),
+ PCLK(ETHTX, "ethtx", "ck_axi", 0, G_ETHTX),
+ PCLK(ETHRX, "ethrx", "ck_axi", 0, G_ETHRX),
+ PCLK(ETHMAC, "ethmac", "ck_axi", 0, G_ETHMAC),
+ PCLK(FMC, "fmc", "ck_axi", CLK_IGNORE_UNUSED, G_FMC),
+ PCLK(QSPI, "qspi", "ck_axi", CLK_IGNORE_UNUSED, G_QSPI),
+ PCLK(SDMMC1, "sdmmc1", "ck_axi", 0, G_SDMMC1),
+ PCLK(SDMMC2, "sdmmc2", "ck_axi", 0, G_SDMMC2),
+ PCLK(CRC1, "crc1", "ck_axi", 0, G_CRC1),
+ PCLK(USBH, "usbh", "ck_axi", 0, G_USBH),
+ PCLK(ETHSTP, "ethstp", "ck_axi", 0, G_ETHSTP),
+
+ /* Kernel clocks */
+ KCLK(SDMMC1_K, "sdmmc1_k", sdmmc12_src, 0, G_SDMMC1, M_SDMMC12),
+ KCLK(SDMMC2_K, "sdmmc2_k", sdmmc12_src, 0, G_SDMMC2, M_SDMMC12),
+ KCLK(SDMMC3_K, "sdmmc3_k", sdmmc3_src, 0, G_SDMMC3, M_SDMMC3),
+ KCLK(FMC_K, "fmc_k", fmc_src, 0, G_FMC, M_FMC),
+ KCLK(QSPI_K, "qspi_k", qspi_src, 0, G_QSPI, M_QSPI),
+ KCLK(RNG1_K, "rng1_k", rng_src, 0, G_RNG1, M_RNG1),
+ KCLK(RNG2_K, "rng2_k", rng_src, 0, G_RNG2, M_RNG2),
+ KCLK(USBPHY_K, "usbphy_k", usbphy_src, 0, G_USBPHY, M_USBPHY),
+ KCLK(STGEN_K, "stgen_k", stgen_src, CLK_IGNORE_UNUSED,
+ G_STGEN, M_STGEN),
+ KCLK(SPDIF_K, "spdif_k", spdif_src, 0, G_SPDIF, M_SPDIF),
+ KCLK(SPI1_K, "spi1_k", spi123_src, 0, G_SPI1, M_SPI1),
+ KCLK(SPI2_K, "spi2_k", spi123_src, 0, G_SPI2, M_SPI23),
+ KCLK(SPI3_K, "spi3_k", spi123_src, 0, G_SPI3, M_SPI23),
+ KCLK(SPI4_K, "spi4_k", spi45_src, 0, G_SPI4, M_SPI45),
+ KCLK(SPI5_K, "spi5_k", spi45_src, 0, G_SPI5, M_SPI45),
+ KCLK(SPI6_K, "spi6_k", spi6_src, 0, G_SPI6, M_SPI6),
+ KCLK(CEC_K, "cec_k", cec_src, 0, G_CEC, M_CEC),
+ KCLK(I2C1_K, "i2c1_k", i2c12_src, 0, G_I2C1, M_I2C12),
+ KCLK(I2C2_K, "i2c2_k", i2c12_src, 0, G_I2C2, M_I2C12),
+ KCLK(I2C3_K, "i2c3_k", i2c35_src, 0, G_I2C3, M_I2C35),
+ KCLK(I2C5_K, "i2c5_k", i2c35_src, 0, G_I2C5, M_I2C35),
+ KCLK(I2C4_K, "i2c4_k", i2c46_src, 0, G_I2C4, M_I2C46),
+ KCLK(I2C6_K, "i2c6_k", i2c46_src, 0, G_I2C6, M_I2C46),
+ KCLK(LPTIM1_K, "lptim1_k", lptim1_src, 0, G_LPTIM1, M_LPTIM1),
+ KCLK(LPTIM2_K, "lptim2_k", lptim23_src, 0, G_LPTIM2, M_LPTIM23),
+ KCLK(LPTIM3_K, "lptim3_k", lptim23_src, 0, G_LPTIM3, M_LPTIM23),
+ KCLK(LPTIM4_K, "lptim4_k", lptim45_src, 0, G_LPTIM4, M_LPTIM45),
+ KCLK(LPTIM5_K, "lptim5_k", lptim45_src, 0, G_LPTIM5, M_LPTIM45),
+ KCLK(USART1_K, "usart1_k", usart1_src, 0, G_USART1, M_USART1),
+ KCLK(USART2_K, "usart2_k", usart234578_src, 0, G_USART2, M_UART24),
+ KCLK(USART3_K, "usart3_k", usart234578_src, 0, G_USART3, M_UART35),
+ KCLK(UART4_K, "uart4_k", usart234578_src, 0, G_UART4, M_UART24),
+ KCLK(UART5_K, "uart5_k", usart234578_src, 0, G_UART5, M_UART35),
+ KCLK(USART6_K, "uart6_k", usart6_src, 0, G_USART6, M_USART6),
+ KCLK(UART7_K, "uart7_k", usart234578_src, 0, G_UART7, M_UART78),
+ KCLK(UART8_K, "uart8_k", usart234578_src, 0, G_UART8, M_UART78),
+ KCLK(FDCAN_K, "fdcan_k", fdcan_src, 0, G_FDCAN, M_FDCAN),
+ KCLK(SAI1_K, "sai1_k", sai_src, 0, G_SAI1, M_SAI1),
+ KCLK(SAI2_K, "sai2_k", sai2_src, 0, G_SAI2, M_SAI2),
+ KCLK(SAI3_K, "sai3_k", sai_src, 0, G_SAI3, M_SAI3),
+ KCLK(SAI4_K, "sai4_k", sai_src, 0, G_SAI4, M_SAI4),
+ KCLK(ADC12_K, "adc12_k", adc12_src, 0, G_ADC12, M_ADC12),
+ KCLK(DSI_K, "dsi_k", dsi_src, 0, G_DSI, M_DSI),
+ KCLK(ADFSDM_K, "adfsdm_k", sai_src, 0, G_ADFSDM, M_SAI1),
+ KCLK(USBO_K, "usbo_k", usbo_src, 0, G_USBO, M_USBO),
+ KCLK(ETHCK_K, "ethck_k", eth_src, 0, G_ETHCK, M_ETHCK),
+
+ /* Particular kernel clocks (no mux or no gate) */
+ MGATE_MP1(DFSDM_K, "dfsdm_k", "ck_mcu", 0, G_DFSDM),
+ MGATE_MP1(DSI_PX, "dsi_px", "pll4_q", CLK_SET_RATE_PARENT, G_DSI),
+ MGATE_MP1(LTDC_PX, "ltdc_px", "pll4_q", CLK_SET_RATE_PARENT, G_LTDC),
+ MGATE_MP1(GPU_K, "gpu_k", "pll2_q", 0, G_GPU),
+ MGATE_MP1(DAC12_K, "dac12_k", "ck_lsi", 0, G_DAC12),
+
+ COMPOSITE(ETHPTP_K, "ethptp_k", eth_src, CLK_OPS_PARENT_ENABLE,
+ _NO_GATE,
+ _MMUX(M_ETHCK),
+ _DIV(RCC_ETHCKSELR, 4, 4, CLK_DIVIDER_ALLOW_ZERO, NULL)),
+
+ /* RTC clock */
+ DIV(NO_ID, "ck_hse_rtc", "ck_hse", 0, RCC_RTCDIVR, 0, 7,
+ CLK_DIVIDER_ALLOW_ZERO),
+
+ COMPOSITE(RTC, "ck_rtc", rtc_src, CLK_OPS_PARENT_ENABLE |
+ CLK_SET_RATE_PARENT,
+ _GATE(RCC_BDCR, 20, 0),
+ _MUX(RCC_BDCR, 16, 2, 0),
+ _NO_DIV),
+
+ /* MCO clocks */
+ COMPOSITE(CK_MCO1, "ck_mco1", mco1_src, CLK_OPS_PARENT_ENABLE |
+ CLK_SET_RATE_NO_REPARENT,
+ _GATE(RCC_MCO1CFGR, 12, 0),
+ _MUX(RCC_MCO1CFGR, 0, 3, 0),
+ _DIV(RCC_MCO1CFGR, 4, 4, 0, NULL)),
+
+ COMPOSITE(CK_MCO2, "ck_mco2", mco2_src, CLK_OPS_PARENT_ENABLE |
+ CLK_SET_RATE_NO_REPARENT,
+ _GATE(RCC_MCO2CFGR, 12, 0),
+ _MUX(RCC_MCO2CFGR, 0, 3, 0),
+ _DIV(RCC_MCO2CFGR, 4, 4, 0, NULL)),
+
+ /* Debug clocks */
+ FIXED_FACTOR(NO_ID, "ck_axi_div2", "ck_axi", 0, 1, 2),
+
+ GATE(DBG, "ck_apb_dbg", "ck_axi_div2", 0, RCC_DBGCFGR, 8, 0),
+
+ GATE(CK_DBG, "ck_sys_dbg", "ck_axi", 0, RCC_DBGCFGR, 8, 0),
+
+ COMPOSITE(CK_TRACE, "ck_trace", ck_trace_src, CLK_OPS_PARENT_ENABLE,
+ _GATE(RCC_DBGCFGR, 9, 0),
+ _NO_MUX,
+ _DIV(RCC_DBGCFGR, 0, 3, 0, ck_trace_div_table)),
+};
+
+struct stm32_clock_match_data {
+ const struct clock_config *cfg;
+ unsigned int num;
+ unsigned int maxbinding;
+};
+
+static struct stm32_clock_match_data stm32mp1_data = {
+ .cfg = stm32mp1_clock_cfg,
+ .num = ARRAY_SIZE(stm32mp1_clock_cfg),
+ .maxbinding = STM32MP1_LAST_CLK,
+};
+
+static const struct of_device_id stm32mp1_match_data[] = {
+ {
+ .compatible = "st,stm32mp1-rcc",
+ .data = &stm32mp1_data,
+ },
+ { }
+};
+
+static int stm32_register_hw_clk(struct device *dev,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, spinlock_t *lock,
+ const struct clock_config *cfg)
+{
+ struct clk_hw **hws;
+ struct clk_hw *hw = ERR_PTR(-ENOENT);
+
+ hws = clk_data->hws;
+
+ if (cfg->func)
+ hw = (*cfg->func)(dev, clk_data, base, lock, cfg);
+
+ if (IS_ERR(hw)) {
+ pr_err("Unable to register %s\n", cfg->name);
+ return PTR_ERR(hw);
+ }
+
+ if (cfg->id != NO_ID)
+ hws[cfg->id] = hw;
+
+ return 0;
+}
+
+static int stm32_rcc_init(struct device_node *np,
+ void __iomem *base,
+ const struct of_device_id *match_data)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **hws;
+ const struct of_device_id *match;
+ const struct stm32_clock_match_data *data;
+ int err, n, max_binding;
+
+ match = of_match_node(match_data, np);
+ if (!match) {
+ pr_err("%s: match data not found\n", __func__);
+ return -ENODEV;
+ }
+
+ data = match->data;
+
+ max_binding = data->maxbinding;
+
+ clk_data = kzalloc(sizeof(*clk_data) +
+ sizeof(*clk_data->hws) * max_binding,
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = max_binding;
+
+ hws = clk_data->hws;
+
+ for (n = 0; n < max_binding; n++)
+ hws[n] = ERR_PTR(-ENOENT);
+
+ for (n = 0; n < data->num; n++) {
+ err = stm32_register_hw_clk(NULL, clk_data, base, &rlock,
+ &data->cfg[n]);
+ if (err) {
+ pr_err("%s: can't register %s\n", __func__,
+ data->cfg[n].name);
+
+ kfree(clk_data);
+
+ return err;
+ }
+ }
+
+ return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
+}
+
+static void stm32mp1_rcc_init(struct device_node *np)
+{
+ void __iomem *base;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_err("%s: unable to map resource", np->name);
+ of_node_put(np);
+ return;
+ }
+
+ if (stm32_rcc_init(np, base, stm32mp1_match_data)) {
+ iounmap(base);
+ of_node_put(np);
+ }
+}
+
+CLK_OF_DECLARE_DRIVER(stm32mp1_rcc, "st,stm32mp1-rcc", stm32mp1_rcc_init);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 076d4244d672..ea67ac81c6f9 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2375,6 +2375,9 @@ static int clk_core_get_phase(struct clk_core *core)
int ret;
clk_prepare_lock();
+ /* Always try to update cached phase if possible */
+ if (core->ops->get_phase)
+ core->phase = core->ops->get_phase(core->hw);
ret = core->phase;
clk_prepare_unlock();
@@ -2491,19 +2494,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
return 0;
}
-
-
-static int clk_summary_open(struct inode *inode, struct file *file)
-{
- return single_open(file, clk_summary_show, inode->i_private);
-}
-
-static const struct file_operations clk_summary_fops = {
- .open = clk_summary_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(clk_summary);
static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
{
@@ -2537,7 +2528,7 @@ static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
seq_putc(s, '}');
}
-static int clk_dump(struct seq_file *s, void *data)
+static int clk_dump_show(struct seq_file *s, void *data)
{
struct clk_core *c;
bool first_node = true;
@@ -2560,19 +2551,7 @@ static int clk_dump(struct seq_file *s, void *data)
seq_puts(s, "}\n");
return 0;
}
-
-
-static int clk_dump_open(struct inode *inode, struct file *file)
-{
- return single_open(file, clk_dump, inode->i_private);
-}
-
-static const struct file_operations clk_dump_fops = {
- .open = clk_dump_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(clk_dump);
static const struct {
unsigned long flag;
@@ -2594,7 +2573,7 @@ static const struct {
#undef ENTRY
};
-static int clk_flags_dump(struct seq_file *s, void *data)
+static int clk_flags_show(struct seq_file *s, void *data)
{
struct clk_core *core = s->private;
unsigned long flags = core->flags;
@@ -2613,20 +2592,9 @@ static int clk_flags_dump(struct seq_file *s, void *data)
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(clk_flags);
-static int clk_flags_open(struct inode *inode, struct file *file)
-{
- return single_open(file, clk_flags_dump, inode->i_private);
-}
-
-static const struct file_operations clk_flags_fops = {
- .open = clk_flags_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int possible_parents_dump(struct seq_file *s, void *data)
+static int possible_parents_show(struct seq_file *s, void *data)
{
struct clk_core *core = s->private;
int i;
@@ -2638,18 +2606,7 @@ static int possible_parents_dump(struct seq_file *s, void *data)
return 0;
}
-
-static int possible_parents_open(struct inode *inode, struct file *file)
-{
- return single_open(file, possible_parents_dump, inode->i_private);
-}
-
-static const struct file_operations possible_parents_fops = {
- .open = possible_parents_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(possible_parents);
static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
{
@@ -2933,6 +2890,17 @@ static int __clk_core_init(struct clk_core *core)
}
/*
+ * optional platform-specific magic
+ *
+ * The .init callback is not used by any of the basic clock types, but
+ * exists for weird hardware that must perform initialization magic.
+ * Please consider other ways of solving initialization problems before
+ * using this callback, as its use is discouraged.
+ */
+ if (core->ops->init)
+ core->ops->init(core->hw);
+
+ /*
* Set clk's accuracy. The preferred method is to use
* .recalc_accuracy. For simple clocks and lazy developers the default
* fallback is to use the parent's accuracy. If a clock doesn't have a
@@ -3009,17 +2977,6 @@ static int __clk_core_init(struct clk_core *core)
}
}
- /*
- * optional platform-specific magic
- *
- * The .init callback is not used by any of the basic clock types, but
- * exists for weird hardware that must perform initialization magic.
- * Please consider other ways of solving initialization problems before
- * using this callback, as its use is discouraged.
- */
- if (core->ops->init)
- core->ops->init(core->hw);
-
kref_init(&core->ref);
out:
clk_pm_runtime_put(core);
diff --git a/drivers/clk/davinci/Makefile b/drivers/clk/davinci/Makefile
new file mode 100644
index 000000000000..11178b79b483
--- /dev/null
+++ b/drivers/clk/davinci/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ifeq ($(CONFIG_COMMON_CLK), y)
+obj-$(CONFIG_ARCH_DAVINCI_DA8XX) += da8xx-cfgchip.o
+
+obj-y += pll.o
+obj-$(CONFIG_ARCH_DAVINCI_DA830) += pll-da830.o
+obj-$(CONFIG_ARCH_DAVINCI_DA850) += pll-da850.o
+obj-$(CONFIG_ARCH_DAVINCI_DM355) += pll-dm355.o
+obj-$(CONFIG_ARCH_DAVINCI_DM365) += pll-dm365.o
+obj-$(CONFIG_ARCH_DAVINCI_DM644x) += pll-dm644x.o
+obj-$(CONFIG_ARCH_DAVINCI_DM646x) += pll-dm646x.o
+
+obj-y += psc.o
+obj-$(CONFIG_ARCH_DAVINCI_DA830) += psc-da830.o
+obj-$(CONFIG_ARCH_DAVINCI_DA850) += psc-da850.o
+obj-$(CONFIG_ARCH_DAVINCI_DM355) += psc-dm355.o
+obj-$(CONFIG_ARCH_DAVINCI_DM365) += psc-dm365.o
+obj-$(CONFIG_ARCH_DAVINCI_DM644x) += psc-dm644x.o
+obj-$(CONFIG_ARCH_DAVINCI_DM646x) += psc-dm646x.o
+endif
diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c
new file mode 100644
index 000000000000..c971111d2601
--- /dev/null
+++ b/drivers/clk/davinci/da8xx-cfgchip.c
@@ -0,0 +1,790 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Clock driver for DA8xx/AM17xx/AM18xx/OMAP-L13x CFGCHIP
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/mfd/da8xx-cfgchip.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_data/clk-da8xx-cfgchip.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* --- Gate clocks --- */
+
+#define DA8XX_GATE_CLOCK_IS_DIV4P5 BIT(1)
+
+struct da8xx_cfgchip_gate_clk_info {
+ const char *name;
+ u32 cfgchip;
+ u32 bit;
+ u32 flags;
+};
+
+struct da8xx_cfgchip_gate_clk {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u32 reg;
+ u32 mask;
+};
+
+#define to_da8xx_cfgchip_gate_clk(_hw) \
+ container_of((_hw), struct da8xx_cfgchip_gate_clk, hw)
+
+static int da8xx_cfgchip_gate_clk_enable(struct clk_hw *hw)
+{
+ struct da8xx_cfgchip_gate_clk *clk = to_da8xx_cfgchip_gate_clk(hw);
+
+ return regmap_write_bits(clk->regmap, clk->reg, clk->mask, clk->mask);
+}
+
+static void da8xx_cfgchip_gate_clk_disable(struct clk_hw *hw)
+{
+ struct da8xx_cfgchip_gate_clk *clk = to_da8xx_cfgchip_gate_clk(hw);
+
+ regmap_write_bits(clk->regmap, clk->reg, clk->mask, 0);
+}
+
+static int da8xx_cfgchip_gate_clk_is_enabled(struct clk_hw *hw)
+{
+ struct da8xx_cfgchip_gate_clk *clk = to_da8xx_cfgchip_gate_clk(hw);
+ unsigned int val;
+
+ regmap_read(clk->regmap, clk->reg, &val);
+
+ return !!(val & clk->mask);
+}
+
+static unsigned long da8xx_cfgchip_div4p5_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ /* this clock divides by 4.5 */
+ return parent_rate * 2 / 9;
+}
+
+static const struct clk_ops da8xx_cfgchip_gate_clk_ops = {
+ .enable = da8xx_cfgchip_gate_clk_enable,
+ .disable = da8xx_cfgchip_gate_clk_disable,
+ .is_enabled = da8xx_cfgchip_gate_clk_is_enabled,
+};
+
+static const struct clk_ops da8xx_cfgchip_div4p5_clk_ops = {
+ .enable = da8xx_cfgchip_gate_clk_enable,
+ .disable = da8xx_cfgchip_gate_clk_disable,
+ .is_enabled = da8xx_cfgchip_gate_clk_is_enabled,
+ .recalc_rate = da8xx_cfgchip_div4p5_recalc_rate,
+};
+
+static struct da8xx_cfgchip_gate_clk * __init
+da8xx_cfgchip_gate_clk_register(struct device *dev,
+ const struct da8xx_cfgchip_gate_clk_info *info,
+ struct regmap *regmap)
+{
+ struct clk *parent;
+ const char *parent_name;
+ struct da8xx_cfgchip_gate_clk *gate;
+ struct clk_init_data init;
+ int ret;
+
+ parent = devm_clk_get(dev, NULL);
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ parent_name = __clk_get_name(parent);
+
+ gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = info->name;
+ if (info->flags & DA8XX_GATE_CLOCK_IS_DIV4P5)
+ init.ops = &da8xx_cfgchip_div4p5_clk_ops;
+ else
+ init.ops = &da8xx_cfgchip_gate_clk_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+
+ gate->hw.init = &init;
+ gate->regmap = regmap;
+ gate->reg = info->cfgchip;
+ gate->mask = info->bit;
+
+ ret = devm_clk_hw_register(dev, &gate->hw);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return gate;
+}
+
+static const struct da8xx_cfgchip_gate_clk_info da8xx_tbclksync_info __initconst = {
+ .name = "ehrpwm_tbclk",
+ .cfgchip = CFGCHIP(1),
+ .bit = CFGCHIP1_TBCLKSYNC,
+};
+
+static int __init da8xx_cfgchip_register_tbclk(struct device *dev,
+ struct regmap *regmap)
+{
+ struct da8xx_cfgchip_gate_clk *gate;
+
+ gate = da8xx_cfgchip_gate_clk_register(dev, &da8xx_tbclksync_info,
+ regmap);
+ if (IS_ERR(gate))
+ return PTR_ERR(gate);
+
+ clk_hw_register_clkdev(&gate->hw, "tbclk", "ehrpwm.0");
+ clk_hw_register_clkdev(&gate->hw, "tbclk", "ehrpwm.1");
+
+ return 0;
+}
+
+static const struct da8xx_cfgchip_gate_clk_info da8xx_div4p5ena_info __initconst = {
+ .name = "div4.5",
+ .cfgchip = CFGCHIP(3),
+ .bit = CFGCHIP3_DIV45PENA,
+ .flags = DA8XX_GATE_CLOCK_IS_DIV4P5,
+};
+
+static int __init da8xx_cfgchip_register_div4p5(struct device *dev,
+ struct regmap *regmap)
+{
+ struct da8xx_cfgchip_gate_clk *gate;
+
+ gate = da8xx_cfgchip_gate_clk_register(dev, &da8xx_div4p5ena_info, regmap);
+ if (IS_ERR(gate))
+ return PTR_ERR(gate);
+
+ return 0;
+}
+
+static int __init
+of_da8xx_cfgchip_gate_clk_init(struct device *dev,
+ const struct da8xx_cfgchip_gate_clk_info *info,
+ struct regmap *regmap)
+{
+ struct da8xx_cfgchip_gate_clk *gate;
+
+ gate = da8xx_cfgchip_gate_clk_register(dev, info, regmap);
+ if (IS_ERR(gate))
+ return PTR_ERR(gate);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, gate);
+}
+
+static int __init of_da8xx_tbclksync_init(struct device *dev,
+ struct regmap *regmap)
+{
+ return of_da8xx_cfgchip_gate_clk_init(dev, &da8xx_tbclksync_info, regmap);
+}
+
+static int __init of_da8xx_div4p5ena_init(struct device *dev,
+ struct regmap *regmap)
+{
+ return of_da8xx_cfgchip_gate_clk_init(dev, &da8xx_div4p5ena_info, regmap);
+}
+
+/* --- MUX clocks --- */
+
+struct da8xx_cfgchip_mux_clk_info {
+ const char *name;
+ const char *parent0;
+ const char *parent1;
+ u32 cfgchip;
+ u32 bit;
+};
+
+struct da8xx_cfgchip_mux_clk {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u32 reg;
+ u32 mask;
+};
+
+#define to_da8xx_cfgchip_mux_clk(_hw) \
+ container_of((_hw), struct da8xx_cfgchip_mux_clk, hw)
+
+static int da8xx_cfgchip_mux_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct da8xx_cfgchip_mux_clk *clk = to_da8xx_cfgchip_mux_clk(hw);
+ unsigned int val = index ? clk->mask : 0;
+
+ return regmap_write_bits(clk->regmap, clk->reg, clk->mask, val);
+}
+
+static u8 da8xx_cfgchip_mux_clk_get_parent(struct clk_hw *hw)
+{
+ struct da8xx_cfgchip_mux_clk *clk = to_da8xx_cfgchip_mux_clk(hw);
+ unsigned int val;
+
+ regmap_read(clk->regmap, clk->reg, &val);
+
+ return (val & clk->mask) ? 1 : 0;
+}
+
+static const struct clk_ops da8xx_cfgchip_mux_clk_ops = {
+ .set_parent = da8xx_cfgchip_mux_clk_set_parent,
+ .get_parent = da8xx_cfgchip_mux_clk_get_parent,
+};
+
+static struct da8xx_cfgchip_mux_clk * __init
+da8xx_cfgchip_mux_clk_register(struct device *dev,
+ const struct da8xx_cfgchip_mux_clk_info *info,
+ struct regmap *regmap)
+{
+ const char * const parent_names[] = { info->parent0, info->parent1 };
+ struct da8xx_cfgchip_mux_clk *mux;
+ struct clk_init_data init;
+ int ret;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = info->name;
+ init.ops = &da8xx_cfgchip_mux_clk_ops;
+ init.parent_names = parent_names;
+ init.num_parents = 2;
+ init.flags = 0;
+
+ mux->hw.init = &init;
+ mux->regmap = regmap;
+ mux->reg = info->cfgchip;
+ mux->mask = info->bit;
+
+ ret = devm_clk_hw_register(dev, &mux->hw);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return mux;
+}
+
+static const struct da8xx_cfgchip_mux_clk_info da850_async1_info __initconst = {
+ .name = "async1",
+ .parent0 = "pll0_sysclk3",
+ .parent1 = "div4.5",
+ .cfgchip = CFGCHIP(3),
+ .bit = CFGCHIP3_EMA_CLKSRC,
+};
+
+static int __init da8xx_cfgchip_register_async1(struct device *dev,
+ struct regmap *regmap)
+{
+ struct da8xx_cfgchip_mux_clk *mux;
+
+ mux = da8xx_cfgchip_mux_clk_register(dev, &da850_async1_info, regmap);
+ if (IS_ERR(mux))
+ return PTR_ERR(mux);
+
+ clk_hw_register_clkdev(&mux->hw, "async1", "da850-psc0");
+
+ return 0;
+}
+
+static const struct da8xx_cfgchip_mux_clk_info da850_async3_info __initconst = {
+ .name = "async3",
+ .parent0 = "pll0_sysclk2",
+ .parent1 = "pll1_sysclk2",
+ .cfgchip = CFGCHIP(3),
+ .bit = CFGCHIP3_ASYNC3_CLKSRC,
+};
+
+static int __init da850_cfgchip_register_async3(struct device *dev,
+ struct regmap *regmap)
+{
+ struct da8xx_cfgchip_mux_clk *mux;
+ struct clk_hw *parent;
+
+ mux = da8xx_cfgchip_mux_clk_register(dev, &da850_async3_info, regmap);
+ if (IS_ERR(mux))
+ return PTR_ERR(mux);
+
+ clk_hw_register_clkdev(&mux->hw, "async3", "da850-psc1");
+
+ /* pll1_sysclk2 is not affected by CPU scaling, so use it for async3 */
+ parent = clk_hw_get_parent_by_index(&mux->hw, 1);
+ if (parent)
+ clk_set_parent(mux->hw.clk, parent->clk);
+ else
+ dev_warn(dev, "Failed to find async3 parent clock\n");
+
+ return 0;
+}
+
+static int __init
+of_da8xx_cfgchip_init_mux_clock(struct device *dev,
+ const struct da8xx_cfgchip_mux_clk_info *info,
+ struct regmap *regmap)
+{
+ struct da8xx_cfgchip_mux_clk *mux;
+
+ mux = da8xx_cfgchip_mux_clk_register(dev, info, regmap);
+ if (IS_ERR(mux))
+ return PTR_ERR(mux);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &mux->hw);
+}
+
+static int __init of_da850_async1_init(struct device *dev, struct regmap *regmap)
+{
+ return of_da8xx_cfgchip_init_mux_clock(dev, &da850_async1_info, regmap);
+}
+
+static int __init of_da850_async3_init(struct device *dev, struct regmap *regmap)
+{
+ return of_da8xx_cfgchip_init_mux_clock(dev, &da850_async3_info, regmap);
+}
+
+/* --- USB 2.0 PHY clock --- */
+
+struct da8xx_usb0_clk48 {
+ struct clk_hw hw;
+ struct clk *fck;
+ struct regmap *regmap;
+};
+
+#define to_da8xx_usb0_clk48(_hw) \
+ container_of((_hw), struct da8xx_usb0_clk48, hw)
+
+static int da8xx_usb0_clk48_prepare(struct clk_hw *hw)
+{
+ struct da8xx_usb0_clk48 *usb0 = to_da8xx_usb0_clk48(hw);
+
+ /* The USB 2.0 PSC clock is only needed temporarily during the USB 2.0
+ * PHY clock enable, but since clk_prepare() can't be called in an
+ * atomic context (i.e. in clk_enable()), we have to prepare it here.
+ */
+ return clk_prepare(usb0->fck);
+}
+
+static void da8xx_usb0_clk48_unprepare(struct clk_hw *hw)
+{
+ struct da8xx_usb0_clk48 *usb0 = to_da8xx_usb0_clk48(hw);
+
+ clk_unprepare(usb0->fck);
+}
+
+static int da8xx_usb0_clk48_enable(struct clk_hw *hw)
+{
+ struct da8xx_usb0_clk48 *usb0 = to_da8xx_usb0_clk48(hw);
+ unsigned int mask, val;
+ int ret;
+
+ /* Locking the USB 2.0 PLL requires that the USB 2.0 PSC is enabled
+ * temporarily. It can be turned back off once the PLL is locked.
+ */
+ clk_enable(usb0->fck);
+
+ /* Turn on the USB 2.0 PHY, but just the PLL, and not OTG. The USB 1.1
+ * PHY may use the USB 2.0 PLL clock without USB 2.0 OTG being used.
+ */
+ mask = CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN | CFGCHIP2_PHY_PLLON;
+ val = CFGCHIP2_PHY_PLLON;
+
+ regmap_write_bits(usb0->regmap, CFGCHIP(2), mask, val);
+ ret = regmap_read_poll_timeout(usb0->regmap, CFGCHIP(2), val,
+ val & CFGCHIP2_PHYCLKGD, 0, 500000);
+
+ clk_disable(usb0->fck);
+
+ return ret;
+}
+
+static void da8xx_usb0_clk48_disable(struct clk_hw *hw)
+{
+ struct da8xx_usb0_clk48 *usb0 = to_da8xx_usb0_clk48(hw);
+ unsigned int val;
+
+ val = CFGCHIP2_PHYPWRDN;
+ regmap_write_bits(usb0->regmap, CFGCHIP(2), val, val);
+}
+
+static int da8xx_usb0_clk48_is_enabled(struct clk_hw *hw)
+{
+ struct da8xx_usb0_clk48 *usb0 = to_da8xx_usb0_clk48(hw);
+ unsigned int val;
+
+ regmap_read(usb0->regmap, CFGCHIP(2), &val);
+
+ return !!(val & CFGCHIP2_PHYCLKGD);
+}
+
+static unsigned long da8xx_usb0_clk48_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct da8xx_usb0_clk48 *usb0 = to_da8xx_usb0_clk48(hw);
+ unsigned int mask, val;
+
+ /* The parent clock rate must be one of the following */
+ mask = CFGCHIP2_REFFREQ_MASK;
+ switch (parent_rate) {
+ case 12000000:
+ val = CFGCHIP2_REFFREQ_12MHZ;
+ break;
+ case 13000000:
+ val = CFGCHIP2_REFFREQ_13MHZ;
+ break;
+ case 19200000:
+ val = CFGCHIP2_REFFREQ_19_2MHZ;
+ break;
+ case 20000000:
+ val = CFGCHIP2_REFFREQ_20MHZ;
+ break;
+ case 24000000:
+ val = CFGCHIP2_REFFREQ_24MHZ;
+ break;
+ case 26000000:
+ val = CFGCHIP2_REFFREQ_26MHZ;
+ break;
+ case 38400000:
+ val = CFGCHIP2_REFFREQ_38_4MHZ;
+ break;
+ case 40000000:
+ val = CFGCHIP2_REFFREQ_40MHZ;
+ break;
+ case 48000000:
+ val = CFGCHIP2_REFFREQ_48MHZ;
+ break;
+ default:
+ return 0;
+ }
+
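+ /* Program the PHY reference frequency field to match the parent rate */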
+ regmap_write_bits(usb0->regmap, CFGCHIP(2), mask, val);
+
+ /* USB 2.0 PLL always supplies 48MHz */
+ return 48000000;
+}
+
+static long da8xx_usb0_clk48_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return 48000000;
+}
+
+static int da8xx_usb0_clk48_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct da8xx_usb0_clk48 *usb0 = to_da8xx_usb0_clk48(hw);
+
+ return regmap_write_bits(usb0->regmap, CFGCHIP(2),
+ CFGCHIP2_USB2PHYCLKMUX,
+ index ? CFGCHIP2_USB2PHYCLKMUX : 0);
+}
+
+static u8 da8xx_usb0_clk48_get_parent(struct clk_hw *hw)
+{
+ struct da8xx_usb0_clk48 *usb0 = to_da8xx_usb0_clk48(hw);
+ unsigned int val;
+
+ regmap_read(usb0->regmap, CFGCHIP(2), &val);
+
+ return (val & CFGCHIP2_USB2PHYCLKMUX) ? 1 : 0;
+}
+
+static const struct clk_ops da8xx_usb0_clk48_ops = {
+ .prepare = da8xx_usb0_clk48_prepare,
+ .unprepare = da8xx_usb0_clk48_unprepare,
+ .enable = da8xx_usb0_clk48_enable,
+ .disable = da8xx_usb0_clk48_disable,
+ .is_enabled = da8xx_usb0_clk48_is_enabled,
+ .recalc_rate = da8xx_usb0_clk48_recalc_rate,
+ .round_rate = da8xx_usb0_clk48_round_rate,
+ .set_parent = da8xx_usb0_clk48_set_parent,
+ .get_parent = da8xx_usb0_clk48_get_parent,
+};
+
+static struct da8xx_usb0_clk48 *
+da8xx_cfgchip_register_usb0_clk48(struct device *dev,
+ struct regmap *regmap)
+{
+ const char * const parent_names[] = { "usb_refclkin", "pll0_auxclk" };
+ struct clk *fck_clk;
+ struct da8xx_usb0_clk48 *usb0;
+ struct clk_init_data init;
+ int ret;
+
+ fck_clk = devm_clk_get(dev, "fck");
+ if (IS_ERR(fck_clk)) {
+ if (PTR_ERR(fck_clk) != -EPROBE_DEFER)
+ dev_err(dev, "Missing fck clock\n");
+ return ERR_CAST(fck_clk);
+ }
+
+ usb0 = devm_kzalloc(dev, sizeof(*usb0), GFP_KERNEL);
+ if (!usb0)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = "usb0_clk48";
+ init.ops = &da8xx_usb0_clk48_ops;
+ init.parent_names = parent_names;
+ init.num_parents = 2;
+
+ usb0->hw.init = &init;
+ usb0->fck = fck_clk;
+ usb0->regmap = regmap;
+
+ ret = devm_clk_hw_register(dev, &usb0->hw);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return usb0;
+}
+
+/* --- USB 1.1 PHY clock --- */
+
+struct da8xx_usb1_clk48 {
+ struct clk_hw hw;
+ struct regmap *regmap;
+};
+
+#define to_da8xx_usb1_clk48(_hw) \
+ container_of((_hw), struct da8xx_usb1_clk48, hw)
+
+static int da8xx_usb1_clk48_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct da8xx_usb1_clk48 *usb1 = to_da8xx_usb1_clk48(hw);
+
+ return regmap_write_bits(usb1->regmap, CFGCHIP(2),
+ CFGCHIP2_USB1PHYCLKMUX,
+ index ? CFGCHIP2_USB1PHYCLKMUX : 0);
+}
+
+static u8 da8xx_usb1_clk48_get_parent(struct clk_hw *hw)
+{
+ struct da8xx_usb1_clk48 *usb1 = to_da8xx_usb1_clk48(hw);
+ unsigned int val;
+
+ regmap_read(usb1->regmap, CFGCHIP(2), &val);
+
+ return (val & CFGCHIP2_USB1PHYCLKMUX) ? 1 : 0;
+}
+
+static const struct clk_ops da8xx_usb1_clk48_ops = {
+ .set_parent = da8xx_usb1_clk48_set_parent,
+ .get_parent = da8xx_usb1_clk48_get_parent,
+};
+
+/**
+ * da8xx_cfgchip_register_usb1_clk48 - Register a new USB 1.1 PHY clock
+ * @regmap: The CFGCHIP regmap
+ */
+static struct da8xx_usb1_clk48 *
+da8xx_cfgchip_register_usb1_clk48(struct device *dev,
+ struct regmap *regmap)
+{
+ const char * const parent_names[] = { "usb0_clk48", "usb_refclkin" };
+ struct da8xx_usb1_clk48 *usb1;
+ struct clk_init_data init;
+ int ret;
+
+ usb1 = devm_kzalloc(dev, sizeof(*usb1), GFP_KERNEL);
+ if (!usb1)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = "usb1_clk48";
+ init.ops = &da8xx_usb1_clk48_ops;
+ init.parent_names = parent_names;
+ init.num_parents = 2;
+
+ usb1->hw.init = &init;
+ usb1->regmap = regmap;
+
+ ret = devm_clk_hw_register(dev, &usb1->hw);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return usb1;
+}
+
+static int da8xx_cfgchip_register_usb_phy_clk(struct device *dev,
+ struct regmap *regmap)
+{
+ struct da8xx_usb0_clk48 *usb0;
+ struct da8xx_usb1_clk48 *usb1;
+ struct clk_hw *parent;
+
+ usb0 = da8xx_cfgchip_register_usb0_clk48(dev, regmap);
+ if (IS_ERR(usb0))
+ return PTR_ERR(usb0);
+
+ /*
+ * All existing boards use pll0_auxclk as the parent, and new boards
+ * should use device tree, so the parent index (1) is hard-coded here.
+ */
+ parent = clk_hw_get_parent_by_index(&usb0->hw, 1);
+ if (parent)
+ clk_set_parent(usb0->hw.clk, parent->clk);
+ else
+ dev_warn(dev, "Failed to find usb0 parent clock\n");
+
+ usb1 = da8xx_cfgchip_register_usb1_clk48(dev, regmap);
+ if (IS_ERR(usb1))
+ return PTR_ERR(usb1);
+
+ /*
+ * All existing boards use usb0_clk48 as the parent, and new boards
+ * should use device tree, so the parent index (0) is hard-coded here.
+ */
+ parent = clk_hw_get_parent_by_index(&usb1->hw, 0);
+ if (parent)
+ clk_set_parent(usb1->hw.clk, parent->clk);
+ else
+ dev_warn(dev, "Failed to find usb1 parent clock\n");
+
+ clk_hw_register_clkdev(&usb0->hw, "usb0_clk48", "da8xx-usb-phy");
+ clk_hw_register_clkdev(&usb1->hw, "usb1_clk48", "da8xx-usb-phy");
+
+ return 0;
+}
+
+static int of_da8xx_usb_phy_clk_init(struct device *dev, struct regmap *regmap)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct da8xx_usb0_clk48 *usb0;
+ struct da8xx_usb1_clk48 *usb1;
+
+ clk_data = devm_kzalloc(dev, sizeof(*clk_data) + 2 *
+ sizeof(*clk_data->hws), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = 2;
+
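+ /*
+ * Register both PHY clocks; if one fails for a reason other than probe
+ * deferral, its provider slot is left as -ENOENT instead of failing the
+ * whole probe.
+ */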
+ usb0 = da8xx_cfgchip_register_usb0_clk48(dev, regmap);
+ if (IS_ERR(usb0)) {
+ if (PTR_ERR(usb0) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ dev_warn(dev, "Failed to register usb0_clk48 (%ld)\n",
+ PTR_ERR(usb0));
+
+ clk_data->hws[0] = ERR_PTR(-ENOENT);
+ } else {
+ clk_data->hws[0] = &usb0->hw;
+ }
+
+ usb1 = da8xx_cfgchip_register_usb1_clk48(dev, regmap);
+ if (IS_ERR(usb1)) {
+ if (PTR_ERR(usb1) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ dev_warn(dev, "Failed to register usb1_clk48 (%ld)\n",
+ PTR_ERR(usb1));
+
+ clk_data->hws[1] = ERR_PTR(-ENOENT);
+ } else {
+ clk_data->hws[1] = &usb1->hw;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+}
+
+/* --- platform device --- */
+
+static const struct of_device_id da8xx_cfgchip_of_match[] = {
+ {
+ .compatible = "ti,da830-tbclksync",
+ .data = of_da8xx_tbclksync_init,
+ },
+ {
+ .compatible = "ti,da830-div4p5ena",
+ .data = of_da8xx_div4p5ena_init,
+ },
+ {
+ .compatible = "ti,da850-async1-clksrc",
+ .data = of_da850_async1_init,
+ },
+ {
+ .compatible = "ti,da850-async3-clksrc",
+ .data = of_da850_async3_init,
+ },
+ {
+ .compatible = "ti,da830-usb-phy-clocks",
+ .data = of_da8xx_usb_phy_clk_init,
+ },
+ { }
+};
+
+static const struct platform_device_id da8xx_cfgchip_id_table[] = {
+ {
+ .name = "da830-tbclksync",
+ .driver_data = (kernel_ulong_t)da8xx_cfgchip_register_tbclk,
+ },
+ {
+ .name = "da830-div4p5ena",
+ .driver_data = (kernel_ulong_t)da8xx_cfgchip_register_div4p5,
+ },
+ {
+ .name = "da850-async1-clksrc",
+ .driver_data = (kernel_ulong_t)da8xx_cfgchip_register_async1,
+ },
+ {
+ .name = "da850-async3-clksrc",
+ .driver_data = (kernel_ulong_t)da850_cfgchip_register_async3,
+ },
+ {
+ .name = "da830-usb-phy-clks",
+ .driver_data = (kernel_ulong_t)da8xx_cfgchip_register_usb_phy_clk,
+ },
+ { }
+};
+
+typedef int (*da8xx_cfgchip_init)(struct device *dev, struct regmap *regmap);
+
+static int da8xx_cfgchip_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct da8xx_cfgchip_clk_platform_data *pdata = dev->platform_data;
+ const struct of_device_id *of_id;
+ da8xx_cfgchip_init clk_init = NULL;
+ struct regmap *regmap = NULL;
+
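+ /*
+ * With device tree, the CFGCHIP syscon regmap comes from the parent
+ * node; legacy board files pass it in via platform data instead.
+ */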
+ of_id = of_match_device(da8xx_cfgchip_of_match, dev);
+ if (of_id) {
+ struct device_node *parent;
+
+ clk_init = of_id->data;
+ parent = of_get_parent(dev->of_node);
+ regmap = syscon_node_to_regmap(parent);
+ of_node_put(parent);
+ } else if (pdev->id_entry && pdata) {
+ clk_init = (void *)pdev->id_entry->driver_data;
+ regmap = pdata->cfgchip;
+ }
+
+ if (!clk_init) {
+ dev_err(dev, "unable to find driver data\n");
+ return -EINVAL;
+ }
+
+ if (IS_ERR_OR_NULL(regmap)) {
+ dev_err(dev, "no regmap for CFGCHIP syscon\n");
+ return regmap ? PTR_ERR(regmap) : -ENOENT;
+ }
+
+ return clk_init(dev, regmap);
+}
+
+static struct platform_driver da8xx_cfgchip_driver = {
+ .probe = da8xx_cfgchip_probe,
+ .driver = {
+ .name = "da8xx-cfgchip-clk",
+ .of_match_table = da8xx_cfgchip_of_match,
+ },
+ .id_table = da8xx_cfgchip_id_table,
+};
+
+static int __init da8xx_cfgchip_driver_init(void)
+{
+ return platform_driver_register(&da8xx_cfgchip_driver);
+}
+
+/* has to be postcore_initcall because PSC devices depend on the async3 clock */
+postcore_initcall(da8xx_cfgchip_driver_init);
diff --git a/drivers/clk/davinci/pll-da830.c b/drivers/clk/davinci/pll-da830.c
new file mode 100644
index 000000000000..929a3d3a9adb
--- /dev/null
+++ b/drivers/clk/davinci/pll-da830.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLL clock descriptions for TI DA830/OMAP-L137/AM17XX
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/clkdev.h>
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include "pll.h"
+
+static const struct davinci_pll_clk_info da830_pll_info = {
+ .name = "pll0",
+ .pllm_mask = GENMASK(4, 0),
+ .pllm_min = 4,
+ .pllm_max = 32,
+ .pllout_min_rate = 300000000,
+ .pllout_max_rate = 600000000,
+ .flags = PLL_HAS_CLKMODE | PLL_HAS_PREDIV | PLL_HAS_POSTDIV,
+};
+
+/*
+ * NB: Technically, the clocks flagged as SYSCLK_FIXED_DIV are "fixed ratio",
+ * meaning that we could change the divider as long as we keep the correct
+ * ratio between all of the clocks, but we don't support that because there
+ * is currently no need for it.
+ */
+
+SYSCLK(2, pll0_sysclk2, pll0_pllen, 5, SYSCLK_FIXED_DIV);
+SYSCLK(3, pll0_sysclk3, pll0_pllen, 5, 0);
+SYSCLK(4, pll0_sysclk4, pll0_pllen, 5, SYSCLK_FIXED_DIV);
+SYSCLK(5, pll0_sysclk5, pll0_pllen, 5, 0);
+SYSCLK(6, pll0_sysclk6, pll0_pllen, 5, SYSCLK_FIXED_DIV);
+SYSCLK(7, pll0_sysclk7, pll0_pllen, 5, 0);
+
+int da830_pll_init(struct device *dev, void __iomem *base)
+{
+ struct clk *clk;
+
+ davinci_pll_clk_register(dev, &da830_pll_info, "ref_clk", base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk2, base);
+ clk_register_clkdev(clk, "pll0_sysclk2", "da830-psc0");
+ clk_register_clkdev(clk, "pll0_sysclk2", "da830-psc1");
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk3, base);
+ clk_register_clkdev(clk, "pll0_sysclk3", "da830-psc0");
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk4, base);
+ clk_register_clkdev(clk, "pll0_sysclk4", "da830-psc0");
+ clk_register_clkdev(clk, "pll0_sysclk4", "da830-psc1");
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk5, base);
+ clk_register_clkdev(clk, "pll0_sysclk5", "da830-psc1");
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk6, base);
+ clk_register_clkdev(clk, "pll0_sysclk6", "da830-psc0");
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk7, base);
+
+ clk = davinci_pll_auxclk_register(dev, "pll0_auxclk", base);
+ clk_register_clkdev(clk, NULL, "i2c_davinci.1");
+ clk_register_clkdev(clk, "timer0", NULL);
+ clk_register_clkdev(clk, NULL, "davinci-wdt");
+
+ return 0;
+}
diff --git a/drivers/clk/davinci/pll-da850.c b/drivers/clk/davinci/pll-da850.c
new file mode 100644
index 000000000000..2a038b7908cc
--- /dev/null
+++ b/drivers/clk/davinci/pll-da850.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLL clock descriptions for TI DA850/OMAP-L138/AM18XX
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/da8xx-cfgchip.h>
+#include <linux/of.h>
+#include <linux/types.h>
+
+#include "pll.h"
+
+#define OCSEL_OCSRC_OSCIN 0x14
+#define OCSEL_OCSRC_PLL0_SYSCLK(n) (0x16 + (n))
+#define OCSEL_OCSRC_PLL1_OBSCLK 0x1e
+#define OCSEL_OCSRC_PLL1_SYSCLK(n) (0x16 + (n))
+
+static const struct davinci_pll_clk_info da850_pll0_info = {
+ .name = "pll0",
+ .unlock_reg = CFGCHIP(0),
+ .unlock_mask = CFGCHIP0_PLL_MASTER_LOCK,
+ .pllm_mask = GENMASK(4, 0),
+ .pllm_min = 4,
+ .pllm_max = 32,
+ .pllout_min_rate = 300000000,
+ .pllout_max_rate = 600000000,
+ .flags = PLL_HAS_CLKMODE | PLL_HAS_PREDIV | PLL_HAS_POSTDIV |
+ PLL_HAS_EXTCLKSRC,
+};
+
+/*
+ * NB: Technically, the clocks flagged as SYSCLK_FIXED_DIV are "fixed ratio",
+ * meaning that we could change the divider as long as we keep the correct
+ * ratio between all of the clocks, but we don't support that because there
+ * is currently no need for it.
+ */
+
+SYSCLK(1, pll0_sysclk1, pll0_pllen, 5, SYSCLK_FIXED_DIV);
+SYSCLK(2, pll0_sysclk2, pll0_pllen, 5, SYSCLK_FIXED_DIV);
+SYSCLK(3, pll0_sysclk3, pll0_pllen, 5, 0);
+SYSCLK(4, pll0_sysclk4, pll0_pllen, 5, SYSCLK_FIXED_DIV);
+SYSCLK(5, pll0_sysclk5, pll0_pllen, 5, 0);
+SYSCLK(6, pll0_sysclk6, pll0_pllen, 5, SYSCLK_ARM_RATE | SYSCLK_FIXED_DIV);
+SYSCLK(7, pll0_sysclk7, pll0_pllen, 5, 0);
+
+static const char * const da850_pll0_obsclk_parent_names[] = {
+ "oscin",
+ "pll0_sysclk1",
+ "pll0_sysclk2",
+ "pll0_sysclk3",
+ "pll0_sysclk4",
+ "pll0_sysclk5",
+ "pll0_sysclk6",
+ "pll0_sysclk7",
+ "pll1_obsclk",
+};
+
+static u32 da850_pll0_obsclk_table[] = {
+ OCSEL_OCSRC_OSCIN,
+ OCSEL_OCSRC_PLL0_SYSCLK(1),
+ OCSEL_OCSRC_PLL0_SYSCLK(2),
+ OCSEL_OCSRC_PLL0_SYSCLK(3),
+ OCSEL_OCSRC_PLL0_SYSCLK(4),
+ OCSEL_OCSRC_PLL0_SYSCLK(5),
+ OCSEL_OCSRC_PLL0_SYSCLK(6),
+ OCSEL_OCSRC_PLL0_SYSCLK(7),
+ OCSEL_OCSRC_PLL1_OBSCLK,
+};
+
+static const struct davinci_pll_obsclk_info da850_pll0_obsclk_info = {
+ .name = "pll0_obsclk",
+ .parent_names = da850_pll0_obsclk_parent_names,
+ .num_parents = ARRAY_SIZE(da850_pll0_obsclk_parent_names),
+ .table = da850_pll0_obsclk_table,
+ .ocsrc_mask = GENMASK(4, 0),
+};
+
+int da850_pll0_init(struct device *dev, void __iomem *base)
+{
+ struct clk *clk;
+
+ davinci_pll_clk_register(dev, &da850_pll0_info, "ref_clk", base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk1, base);
+ clk_register_clkdev(clk, "pll0_sysclk1", "da850-psc0");
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk2, base);
+ clk_register_clkdev(clk, "pll0_sysclk2", "da850-psc0");
+ clk_register_clkdev(clk, "pll0_sysclk2", "da850-psc1");
+ clk_register_clkdev(clk, "pll0_sysclk2", "da850-async3-clksrc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk3, base);
+ clk_register_clkdev(clk, "pll0_sysclk3", "da850-async1-clksrc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk4, base);
+ clk_register_clkdev(clk, "pll0_sysclk4", "da850-psc0");
+ clk_register_clkdev(clk, "pll0_sysclk4", "da850-psc1");
+
+ davinci_pll_sysclk_register(dev, &pll0_sysclk5, base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll0_sysclk6, base);
+ clk_register_clkdev(clk, "pll0_sysclk6", "da850-psc0");
+
+ davinci_pll_sysclk_register(dev, &pll0_sysclk7, base);
+
+ davinci_pll_auxclk_register(dev, "pll0_auxclk", base);
+
+ clk = clk_register_fixed_factor(dev, "async2", "pll0_auxclk",
+ CLK_IS_CRITICAL, 1, 1);
+
+ clk_register_clkdev(clk, NULL, "i2c_davinci.1");
+ clk_register_clkdev(clk, "timer0", NULL);
+ clk_register_clkdev(clk, NULL, "davinci-wdt");
+
+ davinci_pll_obsclk_register(dev, &da850_pll0_obsclk_info, base);
+
+ return 0;
+}
+
+static const struct davinci_pll_sysclk_info *da850_pll0_sysclk_info[] = {
+ &pll0_sysclk1,
+ &pll0_sysclk2,
+ &pll0_sysclk3,
+ &pll0_sysclk4,
+ &pll0_sysclk5,
+ &pll0_sysclk6,
+ &pll0_sysclk7,
+ NULL
+};
+
+int of_da850_pll0_init(struct device *dev, void __iomem *base)
+{
+ return of_davinci_pll_init(dev, &da850_pll0_info,
+ &da850_pll0_obsclk_info,
+ da850_pll0_sysclk_info, 7, base);
+}
+
+static const struct davinci_pll_clk_info da850_pll1_info = {
+ .name = "pll1",
+ .unlock_reg = CFGCHIP(3),
+ .unlock_mask = CFGCHIP3_PLL1_MASTER_LOCK,
+ .pllm_mask = GENMASK(4, 0),
+ .pllm_min = 4,
+ .pllm_max = 32,
+ .pllout_min_rate = 300000000,
+ .pllout_max_rate = 600000000,
+ .flags = PLL_HAS_POSTDIV,
+};
+
+SYSCLK(1, pll1_sysclk1, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(2, pll1_sysclk2, pll1_pllen, 5, 0);
+SYSCLK(3, pll1_sysclk3, pll1_pllen, 5, 0);
+
+static const char * const da850_pll1_obsclk_parent_names[] = {
+ "oscin",
+ "pll1_sysclk1",
+ "pll1_sysclk2",
+ "pll1_sysclk3",
+};
+
+static u32 da850_pll1_obsclk_table[] = {
+ OCSEL_OCSRC_OSCIN,
+ OCSEL_OCSRC_PLL1_SYSCLK(1),
+ OCSEL_OCSRC_PLL1_SYSCLK(2),
+ OCSEL_OCSRC_PLL1_SYSCLK(3),
+};
+
+static const struct davinci_pll_obsclk_info da850_pll1_obsclk_info = {
+ .name = "pll1_obsclk",
+ .parent_names = da850_pll1_obsclk_parent_names,
+ .num_parents = ARRAY_SIZE(da850_pll1_obsclk_parent_names),
+ .table = da850_pll1_obsclk_table,
+ .ocsrc_mask = GENMASK(4, 0),
+};
+
+int da850_pll1_init(struct device *dev, void __iomem *base)
+{
+ struct clk *clk;
+
+ davinci_pll_clk_register(dev, &da850_pll1_info, "oscin", base);
+
+ davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk2, base);
+ clk_register_clkdev(clk, "pll1_sysclk2", "da850-async3-clksrc");
+
+ davinci_pll_sysclk_register(dev, &pll1_sysclk3, base);
+
+ davinci_pll_obsclk_register(dev, &da850_pll1_obsclk_info, base);
+
+ return 0;
+}
+
+static const struct davinci_pll_sysclk_info *da850_pll1_sysclk_info[] = {
+ &pll1_sysclk1,
+ &pll1_sysclk2,
+ &pll1_sysclk3,
+ NULL
+};
+
+int of_da850_pll1_init(struct device *dev, void __iomem *base)
+{
+ return of_davinci_pll_init(dev, &da850_pll1_info,
+ &da850_pll1_obsclk_info,
+ da850_pll1_sysclk_info, 3, base);
+}
diff --git a/drivers/clk/davinci/pll-dm355.c b/drivers/clk/davinci/pll-dm355.c
new file mode 100644
index 000000000000..5345f8286c50
--- /dev/null
+++ b/drivers/clk/davinci/pll-dm355.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLL clock descriptions for TI DM355
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include "pll.h"
+
+static const struct davinci_pll_clk_info dm355_pll1_info = {
+ .name = "pll1",
+ .pllm_mask = GENMASK(7, 0),
+ .pllm_min = 92,
+ .pllm_max = 184,
+ .flags = PLL_HAS_CLKMODE | PLL_HAS_PREDIV | PLL_PREDIV_ALWAYS_ENABLED |
+ PLL_PREDIV_FIXED8 | PLL_HAS_POSTDIV |
+ PLL_POSTDIV_ALWAYS_ENABLED | PLL_POSTDIV_FIXED_DIV,
+};
+
+SYSCLK(1, pll1_sysclk1, pll1, 5, SYSCLK_FIXED_DIV | SYSCLK_ALWAYS_ENABLED);
+SYSCLK(2, pll1_sysclk2, pll1, 5, SYSCLK_FIXED_DIV | SYSCLK_ALWAYS_ENABLED);
+SYSCLK(3, pll1_sysclk3, pll1, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(4, pll1_sysclk4, pll1, 5, SYSCLK_ALWAYS_ENABLED);
+
+int dm355_pll1_init(struct device *dev, void __iomem *base)
+{
+ struct clk *clk;
+
+ davinci_pll_clk_register(dev, &dm355_pll1_info, "ref_clk", base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
+ clk_register_clkdev(clk, "pll1_sysclk1", "dm355-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk2, base);
+ clk_register_clkdev(clk, "pll1_sysclk2", "dm355-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk3, base);
+ clk_register_clkdev(clk, "pll1_sysclk3", "dm355-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk4, base);
+ clk_register_clkdev(clk, "pll1_sysclk4", "dm355-psc");
+
+ clk = davinci_pll_auxclk_register(dev, "pll1_auxclk", base);
+ clk_register_clkdev(clk, "pll1_auxclk", "dm355-psc");
+
+ davinci_pll_sysclkbp_clk_register(dev, "pll1_sysclkbp", base);
+
+ return 0;
+}
+
+static const struct davinci_pll_clk_info dm355_pll2_info = {
+ .name = "pll2",
+ .pllm_mask = GENMASK(7, 0),
+ .pllm_min = 92,
+ .pllm_max = 184,
+ .flags = PLL_HAS_PREDIV | PLL_PREDIV_ALWAYS_ENABLED | PLL_HAS_POSTDIV |
+ PLL_POSTDIV_ALWAYS_ENABLED | PLL_POSTDIV_FIXED_DIV,
+};
+
+SYSCLK(1, pll2_sysclk1, pll2, 5, SYSCLK_FIXED_DIV);
+SYSCLK(2, pll2_sysclk2, pll2, 5, SYSCLK_FIXED_DIV | SYSCLK_ALWAYS_ENABLED);
+
+int dm355_pll2_init(struct device *dev, void __iomem *base)
+{
+ davinci_pll_clk_register(dev, &dm355_pll2_info, "oscin", base);
+
+ davinci_pll_sysclk_register(dev, &pll2_sysclk1, base);
+
+ davinci_pll_sysclk_register(dev, &pll2_sysclk2, base);
+
+ davinci_pll_sysclkbp_clk_register(dev, "pll2_sysclkbp", base);
+
+ return 0;
+}
diff --git a/drivers/clk/davinci/pll-dm365.c b/drivers/clk/davinci/pll-dm365.c
new file mode 100644
index 000000000000..5f8d9f42d0f3
--- /dev/null
+++ b/drivers/clk/davinci/pll-dm365.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLL clock descriptions for TI DM365
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "pll.h"
+
+#define OCSEL_OCSRC_ENABLE 0
+
+static const struct davinci_pll_clk_info dm365_pll1_info = {
+ .name = "pll1",
+ .pllm_mask = GENMASK(9, 0),
+ .pllm_min = 1,
+ .pllm_max = 1023,
+ .flags = PLL_HAS_CLKMODE | PLL_HAS_PREDIV | PLL_HAS_POSTDIV |
+ PLL_POSTDIV_ALWAYS_ENABLED | PLL_PLLM_2X,
+};
+
+SYSCLK(1, pll1_sysclk1, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(2, pll1_sysclk2, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(3, pll1_sysclk3, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(4, pll1_sysclk4, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(5, pll1_sysclk5, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(6, pll1_sysclk6, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(7, pll1_sysclk7, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(8, pll1_sysclk8, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(9, pll1_sysclk9, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+
+/*
+ * This is a bit of a hack to make OCSEL[OCSRC] on DM365 look like OCSEL[OCSRC]
+ * on DA850. On DM365, OCSEL[OCSRC] is just an enable/disable bit instead of a
+ * multiplexer. By modeling it as a single parent mux clock, the clock code will
+ * still do the right thing in this case.
+ */
+static const char * const dm365_pll_obsclk_parent_names[] = {
+ "oscin",
+};
+
+static u32 dm365_pll_obsclk_table[] = {
+ OCSEL_OCSRC_ENABLE,
+};
+
+static const struct davinci_pll_obsclk_info dm365_pll1_obsclk_info = {
+ .name = "pll1_obsclk",
+ .parent_names = dm365_pll_obsclk_parent_names,
+ .num_parents = ARRAY_SIZE(dm365_pll_obsclk_parent_names),
+ .table = dm365_pll_obsclk_table,
+ .ocsrc_mask = BIT(4),
+};
+
+int dm365_pll1_init(struct device *dev, void __iomem *base)
+{
+ struct clk *clk;
+
+ davinci_pll_clk_register(dev, &dm365_pll1_info, "ref_clk", base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
+ clk_register_clkdev(clk, "pll1_sysclk1", "dm365-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk2, base);
+ clk_register_clkdev(clk, "pll1_sysclk2", "dm365-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk3, base);
+ clk_register_clkdev(clk, "pll1_sysclk3", "dm365-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk4, base);
+ clk_register_clkdev(clk, "pll1_sysclk4", "dm365-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk5, base);
+ clk_register_clkdev(clk, "pll1_sysclk5", "dm365-psc");
+
+ davinci_pll_sysclk_register(dev, &pll1_sysclk6, base);
+
+ davinci_pll_sysclk_register(dev, &pll1_sysclk7, base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk8, base);
+ clk_register_clkdev(clk, "pll1_sysclk8", "dm365-psc");
+
+ davinci_pll_sysclk_register(dev, &pll1_sysclk9, base);
+
+ clk = davinci_pll_auxclk_register(dev, "pll1_auxclk", base);
+ clk_register_clkdev(clk, "pll1_auxclk", "dm355-psc");
+
+ davinci_pll_sysclkbp_clk_register(dev, "pll1_sysclkbp", base);
+
+ davinci_pll_obsclk_register(dev, &dm365_pll1_obsclk_info, base);
+
+ return 0;
+}
+
+static const struct davinci_pll_clk_info dm365_pll2_info = {
+ .name = "pll2",
+ .pllm_mask = GENMASK(9, 0),
+ .pllm_min = 1,
+ .pllm_max = 1023,
+ .flags = PLL_HAS_PREDIV | PLL_HAS_POSTDIV | PLL_POSTDIV_ALWAYS_ENABLED |
+ PLL_PLLM_2X,
+};
+
+SYSCLK(1, pll2_sysclk1, pll2_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(2, pll2_sysclk2, pll2_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(3, pll2_sysclk3, pll2_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(4, pll2_sysclk4, pll2_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+SYSCLK(5, pll2_sysclk5, pll2_pllen, 5, SYSCLK_ALWAYS_ENABLED);
+
+static const struct davinci_pll_obsclk_info dm365_pll2_obsclk_info = {
+ .name = "pll2_obsclk",
+ .parent_names = dm365_pll_obsclk_parent_names,
+ .num_parents = ARRAY_SIZE(dm365_pll_obsclk_parent_names),
+ .table = dm365_pll_obsclk_table,
+ .ocsrc_mask = BIT(4),
+};
+
+int dm365_pll2_init(struct device *dev, void __iomem *base)
+{
+ struct clk *clk;
+
+ davinci_pll_clk_register(dev, &dm365_pll2_info, "oscin", base);
+
+ davinci_pll_sysclk_register(dev, &pll2_sysclk1, base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll2_sysclk2, base);
+ clk_register_clkdev(clk, "pll1_sysclk2", "dm365-psc");
+
+ davinci_pll_sysclk_register(dev, &pll2_sysclk3, base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll2_sysclk4, base);
+ clk_register_clkdev(clk, "pll1_sysclk4", "dm365-psc");
+
+ davinci_pll_sysclk_register(dev, &pll2_sysclk5, base);
+
+ davinci_pll_auxclk_register(dev, "pll2_auxclk", base);
+
+ davinci_pll_obsclk_register(dev, &dm365_pll2_obsclk_info, base);
+
+ return 0;
+}
diff --git a/drivers/clk/davinci/pll-dm644x.c b/drivers/clk/davinci/pll-dm644x.c
new file mode 100644
index 000000000000..69bf785377cf
--- /dev/null
+++ b/drivers/clk/davinci/pll-dm644x.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLL clock descriptions for TI DM644X
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include "pll.h"
+
+static const struct davinci_pll_clk_info dm644x_pll1_info = {
+ .name = "pll1",
+ .pllm_mask = GENMASK(4, 0),
+ .pllm_min = 1,
+ .pllm_max = 32,
+ .pllout_min_rate = 400000000,
+ .pllout_max_rate = 600000000, /* 810MHz @ 1.3V, -810 only */
+ .flags = PLL_HAS_CLKMODE | PLL_HAS_POSTDIV,
+};
+
+SYSCLK(1, pll1_sysclk1, pll1_pllen, 4, SYSCLK_FIXED_DIV);
+SYSCLK(2, pll1_sysclk2, pll1_pllen, 4, SYSCLK_FIXED_DIV);
+SYSCLK(3, pll1_sysclk3, pll1_pllen, 4, SYSCLK_FIXED_DIV);
+SYSCLK(5, pll1_sysclk5, pll1_pllen, 4, SYSCLK_FIXED_DIV);
+
+int dm644x_pll1_init(struct device *dev, void __iomem *base)
+{
+ struct clk *clk;
+
+ davinci_pll_clk_register(dev, &dm644x_pll1_info, "ref_clk", base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
+ clk_register_clkdev(clk, "pll1_sysclk1", "dm644x-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk2, base);
+ clk_register_clkdev(clk, "pll1_sysclk2", "dm644x-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk3, base);
+ clk_register_clkdev(clk, "pll1_sysclk3", "dm644x-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk5, base);
+ clk_register_clkdev(clk, "pll1_sysclk5", "dm644x-psc");
+
+ clk = davinci_pll_auxclk_register(dev, "pll1_auxclk", base);
+ clk_register_clkdev(clk, "pll1_auxclk", "dm644x-psc");
+
+ davinci_pll_sysclkbp_clk_register(dev, "pll1_sysclkbp", base);
+
+ return 0;
+}
+
+static const struct davinci_pll_clk_info dm644x_pll2_info = {
+ .name = "pll2",
+ .pllm_mask = GENMASK(4, 0),
+ .pllm_min = 1,
+ .pllm_max = 32,
+ .pllout_min_rate = 400000000,
+ .pllout_max_rate = 900000000,
+ .flags = PLL_HAS_POSTDIV | PLL_POSTDIV_FIXED_DIV,
+};
+
+SYSCLK(1, pll2_sysclk1, pll2_pllen, 4, 0);
+SYSCLK(2, pll2_sysclk2, pll2_pllen, 4, 0);
+
+int dm644x_pll2_init(struct device *dev, void __iomem *base)
+{
+ davinci_pll_clk_register(dev, &dm644x_pll2_info, "oscin", base);
+
+ davinci_pll_sysclk_register(dev, &pll2_sysclk1, base);
+
+ davinci_pll_sysclk_register(dev, &pll2_sysclk2, base);
+
+ davinci_pll_sysclkbp_clk_register(dev, "pll2_sysclkbp", base);
+
+ return 0;
+}
diff --git a/drivers/clk/davinci/pll-dm646x.c b/drivers/clk/davinci/pll-dm646x.c
new file mode 100644
index 000000000000..a61cc3256418
--- /dev/null
+++ b/drivers/clk/davinci/pll-dm646x.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLL clock descriptions for TI DM646X
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include "pll.h"
+
+static const struct davinci_pll_clk_info dm646x_pll1_info = {
+ .name = "pll1",
+ .pllm_mask = GENMASK(4, 0),
+ .pllm_min = 14,
+ .pllm_max = 32,
+ .flags = PLL_HAS_CLKMODE,
+};
+
+SYSCLK(1, pll1_sysclk1, pll1_pllen, 4, SYSCLK_FIXED_DIV);
+SYSCLK(2, pll1_sysclk2, pll1_pllen, 4, SYSCLK_FIXED_DIV);
+SYSCLK(3, pll1_sysclk3, pll1_pllen, 4, SYSCLK_FIXED_DIV);
+SYSCLK(4, pll1_sysclk4, pll1_pllen, 4, 0);
+SYSCLK(5, pll1_sysclk5, pll1_pllen, 4, 0);
+SYSCLK(6, pll1_sysclk6, pll1_pllen, 4, 0);
+SYSCLK(8, pll1_sysclk8, pll1_pllen, 4, 0);
+SYSCLK(9, pll1_sysclk9, pll1_pllen, 4, 0);
+
+int dm646x_pll1_init(struct device *dev, void __iomem *base)
+{
+ struct clk *clk;
+
+ davinci_pll_clk_register(dev, &dm646x_pll1_info, "ref_clk", base);
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
+ clk_register_clkdev(clk, "pll1_sysclk1", "dm646x-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk2, base);
+ clk_register_clkdev(clk, "pll1_sysclk2", "dm646x-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk3, base);
+ clk_register_clkdev(clk, "pll1_sysclk3", "dm646x-psc");
+ clk_register_clkdev(clk, NULL, "davinci-wdt");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk4, base);
+ clk_register_clkdev(clk, "pll1_sysclk4", "dm646x-psc");
+
+ clk = davinci_pll_sysclk_register(dev, &pll1_sysclk5, base);
+ clk_register_clkdev(clk, "pll1_sysclk5", "dm646x-psc");
+
+ davinci_pll_sysclk_register(dev, &pll1_sysclk6, base);
+
+ davinci_pll_sysclk_register(dev, &pll1_sysclk8, base);
+
+ davinci_pll_sysclk_register(dev, &pll1_sysclk9, base);
+
+ davinci_pll_sysclkbp_clk_register(dev, "pll1_sysclkbp", base);
+
+ davinci_pll_auxclk_register(dev, "pll1_auxclk", base);
+
+ return 0;
+}
+
+static const struct davinci_pll_clk_info dm646x_pll2_info = {
+ .name = "pll2",
+ .pllm_mask = GENMASK(4, 0),
+ .pllm_min = 14,
+ .pllm_max = 32,
+ .flags = 0,
+};
+
+SYSCLK(1, pll2_sysclk1, pll2_pllen, 4, 0);
+
+int dm646x_pll2_init(struct device *dev, void __iomem *base)
+{
+ davinci_pll_clk_register(dev, &dm646x_pll2_info, "oscin", base);
+
+ davinci_pll_sysclk_register(dev, &pll2_sysclk1, base);
+
+ return 0;
+}
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
new file mode 100644
index 000000000000..23a24c944f1d
--- /dev/null
+++ b/drivers/clk/davinci/pll.c
@@ -0,0 +1,899 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLL clock driver for TI Davinci SoCs
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ *
+ * Based on arch/arm/mach-davinci/clock.c
+ * Copyright (C) 2006-2007 Texas Instruments.
+ * Copyright (C) 2008-2009 Deep Root Systems, LLC
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/notifier.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_data/clk-davinci-pll.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "pll.h"
+
+#define MAX_NAME_SIZE 20
+#define OSCIN_CLK_NAME "oscin"
+
+#define REVID 0x000
+#define PLLCTL 0x100
+#define OCSEL 0x104
+#define PLLSECCTL 0x108
+#define PLLM 0x110
+#define PREDIV 0x114
+#define PLLDIV1 0x118
+#define PLLDIV2 0x11c
+#define PLLDIV3 0x120
+#define OSCDIV 0x124
+#define POSTDIV 0x128
+#define BPDIV 0x12c
+#define PLLCMD 0x138
+#define PLLSTAT 0x13c
+#define ALNCTL 0x140
+#define DCHANGE 0x144
+#define CKEN 0x148
+#define CKSTAT 0x14c
+#define SYSTAT 0x150
+#define PLLDIV4 0x160
+#define PLLDIV5 0x164
+#define PLLDIV6 0x168
+#define PLLDIV7 0x16c
+#define PLLDIV8 0x170
+#define PLLDIV9 0x174
+
+#define PLLCTL_PLLEN BIT(0)
+#define PLLCTL_PLLPWRDN BIT(1)
+#define PLLCTL_PLLRST BIT(3)
+#define PLLCTL_PLLDIS BIT(4)
+#define PLLCTL_PLLENSRC BIT(5)
+#define PLLCTL_CLKMODE BIT(8)
+
+/* shared by most *DIV registers */
+#define DIV_RATIO_SHIFT 0
+#define DIV_RATIO_WIDTH 5
+#define DIV_ENABLE_SHIFT 15
+
+#define PLLCMD_GOSET BIT(0)
+#define PLLSTAT_GOSTAT BIT(0)
+
+#define CKEN_OBSCLK_SHIFT 1
+#define CKEN_AUXEN_SHIFT 0
+
+/*
+ * OMAP-L138 system reference guide recommends a wait for 4 OSCIN/CLKIN
+ * cycles to ensure that the PLLC has switched to bypass mode. Delay of 1us
+ * ensures we are good for all > 4MHz OSCIN/CLKIN inputs. Typically the input
+ * is ~25MHz. Units are microseconds.
+ */
+#define PLL_BYPASS_TIME 1
+
+/* From OMAP-L138 datasheet table 6-4. Units are microseconds */
+#define PLL_RESET_TIME 1
+
+/*
+ * From OMAP-L138 datasheet table 6-4; assuming prediv = 1, sqrt(pllm) = 4
+ * Units are microseconds.
+ */
+#define PLL_LOCK_TIME 20
+
+/**
+ * struct davinci_pll_clk - Main PLL clock (aka PLLOUT)
+ * @hw: clk_hw for the pll
+ * @base: Base memory address
+ * @pllm_min: The minimum allowable PLLM[PLLM] value
+ * @pllm_max: The maximum allowable PLLM[PLLM] value
+ * @pllm_mask: Bitmask for PLLM[PLLM] value
+ */
+struct davinci_pll_clk {
+ struct clk_hw hw;
+ void __iomem *base;
+ u32 pllm_min;
+ u32 pllm_max;
+ u32 pllm_mask;
+};
+
+#define to_davinci_pll_clk(_hw) \
+ container_of((_hw), struct davinci_pll_clk, hw)
+
+static unsigned long davinci_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct davinci_pll_clk *pll = to_davinci_pll_clk(hw);
+ unsigned long rate = parent_rate;
+ u32 mult;
+
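+ /* PLLM[PLLM] holds the multiplier minus one */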
+ mult = readl(pll->base + PLLM) & pll->pllm_mask;
+ rate *= mult + 1;
+
+ return rate;
+}
+
+static int davinci_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct davinci_pll_clk *pll = to_davinci_pll_clk(hw);
+ struct clk_hw *parent = req->best_parent_hw;
+ unsigned long parent_rate = req->best_parent_rate;
+ unsigned long rate = req->rate;
+ unsigned long best_rate, r;
+ u32 mult;
+
+ /* there is a limited range of valid outputs (see datasheet) */
+ if (rate < req->min_rate)
+ return -EINVAL;
+
+ rate = min(rate, req->max_rate);
+ mult = rate / parent_rate;
+ best_rate = parent_rate * mult;
+
+ /* easy case when there is no PREDIV */
+ if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+ if (best_rate < req->min_rate)
+ return -EINVAL;
+
+ if (mult < pll->pllm_min || mult > pll->pllm_max)
+ return -EINVAL;
+
+ req->rate = best_rate;
+
+ return 0;
+ }
+
+ /* see if the PREDIV clock can help us */
+ best_rate = 0;
+
+ for (mult = pll->pllm_min; mult <= pll->pllm_max; mult++) {
+ parent_rate = clk_hw_round_rate(parent, rate / mult);
+ r = parent_rate * mult;
+ if (r < req->min_rate)
+ continue;
+ if (r > rate || r > req->max_rate)
+ break;
+ if (r > best_rate) {
+ best_rate = r;
+ req->rate = best_rate;
+ req->best_parent_rate = parent_rate;
+ if (best_rate == rate)
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int davinci_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct davinci_pll_clk *pll = to_davinci_pll_clk(hw);
+ u32 mult;
+
+ mult = rate / parent_rate;
+ writel(mult - 1, pll->base + PLLM);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int davinci_pll_debug_init(struct clk_hw *hw, struct dentry *dentry);
+#else
+#define davinci_pll_debug_init NULL
+#endif
+
+static const struct clk_ops davinci_pll_ops = {
+ .recalc_rate = davinci_pll_recalc_rate,
+ .determine_rate = davinci_pll_determine_rate,
+ .set_rate = davinci_pll_set_rate,
+ .debug_init = davinci_pll_debug_init,
+};
+
+/* PLLM works differently on DM365 */
+static unsigned long dm365_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct davinci_pll_clk *pll = to_davinci_pll_clk(hw);
+ unsigned long rate = parent_rate;
+ u32 mult;
+
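+ /* On DM365, the effective multiplier is twice the PLLM[PLLM] value */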
+ mult = readl(pll->base + PLLM) & pll->pllm_mask;
+ rate *= mult * 2;
+
+ return rate;
+}
+
+static const struct clk_ops dm365_pll_ops = {
+ .recalc_rate = dm365_pll_recalc_rate,
+ .debug_init = davinci_pll_debug_init,
+};
+
+/**
+ * davinci_pll_div_register - common *DIV clock implementation
+ * @name: the clock name
+ * @parent_name: the parent clock name
+ * @reg: the *DIV register
+ * @fixed: if true, the divider is a fixed value
+ * @flags: bitmap of CLK_* flags from clock-provider.h
+ */
+static struct clk *davinci_pll_div_register(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ void __iomem *reg,
+ bool fixed, u32 flags)
+{
+ const char * const *parent_names = parent_name ? &parent_name : NULL;
+ int num_parents = parent_name ? 1 : 0;
+ const struct clk_ops *divider_ops = &clk_divider_ops;
+ struct clk_gate *gate;
+ struct clk_divider *divider;
+
+ gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ gate->reg = reg;
+ gate->bit_idx = DIV_ENABLE_SHIFT;
+
+ divider = devm_kzalloc(dev, sizeof(*divider), GFP_KERNEL);
+ if (!divider)
+ return ERR_PTR(-ENOMEM);
+
+ divider->reg = reg;
+ divider->shift = DIV_RATIO_SHIFT;
+ divider->width = DIV_RATIO_WIDTH;
+
+ if (fixed) {
+ divider->flags |= CLK_DIVIDER_READ_ONLY;
+ divider_ops = &clk_divider_ro_ops;
+ }
+
+ return clk_register_composite(dev, name, parent_names, num_parents,
+ NULL, NULL, &divider->hw, divider_ops,
+ &gate->hw, &clk_gate_ops, flags);
+}
+
+struct davinci_pllen_clk {
+ struct clk_hw hw;
+ void __iomem *base;
+};
+
+#define to_davinci_pllen_clk(_hw) \
+ container_of((_hw), struct davinci_pllen_clk, hw)
+
+static const struct clk_ops davinci_pllen_ops = {
+ /* this clock just uses the clock notification feature */
+};
+
+/*
+ * The PLL has to be switched into bypass mode while we are changing the rate,
+ * so we do that on the PLLEN clock since it is the end of the line. This will
+ * switch to bypass before any of the parent clocks (PREDIV, PLL, POSTDIV) are
+ * changed and will switch back to the PLL after the changes have been made.
+ */
+static int davinci_pllen_rate_change(struct notifier_block *nb,
+ unsigned long flags, void *data)
+{
+ struct clk_notifier_data *cnd = data;
+ struct clk_hw *hw = __clk_get_hw(cnd->clk);
+ struct davinci_pllen_clk *pll = to_davinci_pllen_clk(hw);
+ u32 ctrl;
+
+ ctrl = readl(pll->base + PLLCTL);
+
+ if (flags == PRE_RATE_CHANGE) {
+ /* Switch the PLL to bypass mode */
+ ctrl &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
+ writel(ctrl, pll->base + PLLCTL);
+
+ udelay(PLL_BYPASS_TIME);
+
+ /* Reset and enable PLL */
+ ctrl &= ~(PLLCTL_PLLRST | PLLCTL_PLLDIS);
+ writel(ctrl, pll->base + PLLCTL);
+ } else {
+ udelay(PLL_RESET_TIME);
+
+ /* Bring PLL out of reset */
+ ctrl |= PLLCTL_PLLRST;
+ writel(ctrl, pll->base + PLLCTL);
+
+ udelay(PLL_LOCK_TIME);
+
+ /* Remove PLL from bypass mode */
+ ctrl |= PLLCTL_PLLEN;
+ writel(ctrl, pll->base + PLLCTL);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct davinci_pll_platform_data *davinci_pll_get_pdata(struct device *dev)
+{
+ struct davinci_pll_platform_data *pdata = dev_get_platdata(dev);
+
+ /*
+ * Platform data is optional, so allocate a new struct if one was not
+ * provided. For device tree, this will always be the case.
+ */
+ if (!pdata)
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ /* for device tree, we need to fill in the struct */
+ if (dev->of_node)
+ pdata->cfgchip =
+ syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
+
+ return pdata;
+}
+
+static struct notifier_block davinci_pllen_notifier = {
+ .notifier_call = davinci_pllen_rate_change,
+};
+
+/**
+ * davinci_pll_clk_register - Register a PLL clock
+ * @info: The device-specific clock info
+ * @parent_name: The parent clock name
+ * @base: The PLL's memory region
+ *
+ * This creates a series of clocks that represent the PLL.
+ *
+ * OSCIN > [PREDIV >] PLL > [POSTDIV >] PLLEN
+ *
+ * - OSCIN is the parent clock (on secondary PLL, may come from primary PLL)
+ * - PREDIV and POSTDIV are optional (depends on the PLL controller)
+ * - PLL is the PLL output (aka PLLOUT)
+ * - PLLEN is the bypass multiplexer
+ *
+ * Returns: The PLLOUT clock or a negative error code.
+ */
+struct clk *davinci_pll_clk_register(struct device *dev,
+ const struct davinci_pll_clk_info *info,
+ const char *parent_name,
+ void __iomem *base)
+{
+ struct davinci_pll_platform_data *pdata;
+ char prediv_name[MAX_NAME_SIZE];
+ char pllout_name[MAX_NAME_SIZE];
+ char postdiv_name[MAX_NAME_SIZE];
+ char pllen_name[MAX_NAME_SIZE];
+ struct clk_init_data init;
+ struct davinci_pll_clk *pllout;
+ struct davinci_pllen_clk *pllen;
+ struct clk *pllout_clk, *clk;
+
+ pdata = davinci_pll_get_pdata(dev);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ if (info->flags & PLL_HAS_CLKMODE) {
+ /*
+ * If a PLL has PLLCTL[CLKMODE], then it is the primary PLL.
+ * We register a clock named "oscin" that serves as the internal
+ * "input clock" domain shared by both PLLs (if there are 2)
+ * and will be the parent clock to the AUXCLK, SYSCLKBP and
+ * OBSCLK domains. NB: The various TRMs use "OSCIN" to mean
+ * a number of different things. In this driver we use it to
+ * mean the signal after the PLLCTL[CLKMODE] switch.
+ */
+ clk = clk_register_fixed_factor(dev, OSCIN_CLK_NAME,
+ parent_name, 0, 1, 1);
+ if (IS_ERR(clk))
+ return clk;
+
+ parent_name = OSCIN_CLK_NAME;
+ }
+
+ if (info->flags & PLL_HAS_PREDIV) {
+ bool fixed = info->flags & PLL_PREDIV_FIXED_DIV;
+ u32 flags = 0;
+
+ snprintf(prediv_name, MAX_NAME_SIZE, "%s_prediv", info->name);
+
+ if (info->flags & PLL_PREDIV_ALWAYS_ENABLED)
+ flags |= CLK_IS_CRITICAL;
+
+ /* Some? DM355 chips don't correctly report the PREDIV value */
+ if (info->flags & PLL_PREDIV_FIXED8)
+ clk = clk_register_fixed_factor(dev, prediv_name,
+ parent_name, flags, 1, 8);
+ else
+ clk = davinci_pll_div_register(dev, prediv_name,
+ parent_name, base + PREDIV, fixed, flags);
+ if (IS_ERR(clk))
+ return clk;
+
+ parent_name = prediv_name;
+ }
+
+ /* Unlock writing to PLL registers */
+ if (info->unlock_reg) {
+ if (IS_ERR_OR_NULL(pdata->cfgchip))
+ dev_warn(dev, "Failed to get CFGCHIP (%ld)\n",
+ PTR_ERR(pdata->cfgchip));
+ else
+ regmap_write_bits(pdata->cfgchip, info->unlock_reg,
+ info->unlock_mask, 0);
+ }
+
+ pllout = devm_kzalloc(dev, sizeof(*pllout), GFP_KERNEL);
+ if (!pllout)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(pllout_name, MAX_NAME_SIZE, "%s_pllout", info->name);
+
+ init.name = pllout_name;
+ if (info->flags & PLL_PLLM_2X)
+ init.ops = &dm365_pll_ops;
+ else
+ init.ops = &davinci_pll_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+
+ if (info->flags & PLL_HAS_PREDIV)
+ init.flags |= CLK_SET_RATE_PARENT;
+
+ pllout->hw.init = &init;
+ pllout->base = base;
+ pllout->pllm_mask = info->pllm_mask;
+ pllout->pllm_min = info->pllm_min;
+ pllout->pllm_max = info->pllm_max;
+
+ pllout_clk = devm_clk_register(dev, &pllout->hw);
+ if (IS_ERR(pllout_clk))
+ return pllout_clk;
+
+ clk_hw_set_rate_range(&pllout->hw, info->pllout_min_rate,
+ info->pllout_max_rate);
+
+ parent_name = pllout_name;
+
+ if (info->flags & PLL_HAS_POSTDIV) {
+ bool fixed = info->flags & PLL_POSTDIV_FIXED_DIV;
+ u32 flags = CLK_SET_RATE_PARENT;
+
+ snprintf(postdiv_name, MAX_NAME_SIZE, "%s_postdiv", info->name);
+
+ if (info->flags & PLL_POSTDIV_ALWAYS_ENABLED)
+ flags |= CLK_IS_CRITICAL;
+
+ clk = davinci_pll_div_register(dev, postdiv_name, parent_name,
+ base + POSTDIV, fixed, flags);
+ if (IS_ERR(clk))
+ return clk;
+
+ parent_name = postdiv_name;
+ }
+
+ pllen = devm_kzalloc(dev, sizeof(*pllen), GFP_KERNEL);
+ if (!pllen)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(pllen_name, MAX_NAME_SIZE, "%s_pllen", info->name);
+
+ init.name = pllen_name;
+ init.ops = &davinci_pllen_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = CLK_SET_RATE_PARENT;
+
+ pllen->hw.init = &init;
+ pllen->base = base;
+
+ clk = devm_clk_register(dev, &pllen->hw);
+ if (IS_ERR(clk))
+ return clk;
+
+ clk_notifier_register(clk, &davinci_pllen_notifier);
+
+ return pllout_clk;
+}
+
+/**
+ * davinci_pll_auxclk_register - Register bypass clock (AUXCLK)
+ * @name: The clock name
+ * @base: The PLL memory region
+ */
+struct clk *davinci_pll_auxclk_register(struct device *dev,
+ const char *name,
+ void __iomem *base)
+{
+ return clk_register_gate(dev, name, OSCIN_CLK_NAME, 0, base + CKEN,
+ CKEN_AUXEN_SHIFT, 0, NULL);
+}
+
+/**
+ * davinci_pll_sysclkbp_clk_register - Register bypass divider clock (SYSCLKBP)
+ * @name: The clock name
+ * @base: The PLL memory region
+ */
+struct clk *davinci_pll_sysclkbp_clk_register(struct device *dev,
+ const char *name,
+ void __iomem *base)
+{
+ return clk_register_divider(dev, name, OSCIN_CLK_NAME, 0, base + BPDIV,
+ DIV_RATIO_SHIFT, DIV_RATIO_WIDTH,
+ CLK_DIVIDER_READ_ONLY, NULL);
+}
+
+/**
+ * davinci_pll_obsclk_register - Register oscillator divider clock (OBSCLK)
+ * @info: The clock info
+ * @base: The PLL memory region
+ */
+struct clk *
+davinci_pll_obsclk_register(struct device *dev,
+ const struct davinci_pll_obsclk_info *info,
+ void __iomem *base)
+{
+ struct clk_mux *mux;
+ struct clk_gate *gate;
+ struct clk_divider *divider;
+ u32 oscdiv;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux->reg = base + OCSEL;
+ mux->table = info->table;
+ mux->mask = info->ocsrc_mask;
+
+ gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ gate->reg = base + CKEN;
+ gate->bit_idx = CKEN_OBSCLK_SHIFT;
+
+ divider = devm_kzalloc(dev, sizeof(*divider), GFP_KERNEL);
+ if (!divider)
+ return ERR_PTR(-ENOMEM);
+
+ divider->reg = base + OSCDIV;
+ divider->shift = DIV_RATIO_SHIFT;
+ divider->width = DIV_RATIO_WIDTH;
+
+ /* make sure the divider is enabled in case the bootloader disabled it */
+ oscdiv = readl(base + OSCDIV);
+ oscdiv |= BIT(DIV_ENABLE_SHIFT);
+ writel(oscdiv, base + OSCDIV);
+
+ return clk_register_composite(dev, info->name, info->parent_names,
+ info->num_parents,
+ &mux->hw, &clk_mux_ops,
+ &divider->hw, &clk_divider_ops,
+ &gate->hw, &clk_gate_ops, 0);
+}
+
+/* The PLL SYSCLKn clocks have a mechanism for synchronizing rate changes. */
+static int davinci_pll_sysclk_rate_change(struct notifier_block *nb,
+ unsigned long flags, void *data)
+{
+ struct clk_notifier_data *cnd = data;
+ struct clk_hw *hw = __clk_get_hw(clk_get_parent(cnd->clk));
+ struct davinci_pllen_clk *pll = to_davinci_pllen_clk(hw);
+ u32 pllcmd, pllstat;
+
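+ /*
+ * Divider changes only take effect after PLLCMD[GOSET] is written;
+ * PLLSTAT[GOSTAT] stays set while a transition is still in progress.
+ */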
+ switch (flags) {
+ case POST_RATE_CHANGE:
+ /* apply the changes */
+ pllcmd = readl(pll->base + PLLCMD);
+ pllcmd |= PLLCMD_GOSET;
+ writel(pllcmd, pll->base + PLLCMD);
+ /* fallthrough */
+ case PRE_RATE_CHANGE:
+ /* Wait for any outstanding changes to take effect */
+ do {
+ pllstat = readl(pll->base + PLLSTAT);
+ } while (pllstat & PLLSTAT_GOSTAT);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block davinci_pll_sysclk_notifier = {
+ .notifier_call = davinci_pll_sysclk_rate_change,
+};
+
+/**
+ * davinci_pll_sysclk_register - Register divider clocks (SYSCLKn)
+ * @info: The clock info
+ * @base: The PLL memory region
+ */
+struct clk *
+davinci_pll_sysclk_register(struct device *dev,
+ const struct davinci_pll_sysclk_info *info,
+ void __iomem *base)
+{
+ const struct clk_ops *divider_ops = &clk_divider_ops;
+ struct clk_gate *gate;
+ struct clk_divider *divider;
+ struct clk *clk;
+ u32 reg;
+ u32 flags = 0;
+
+ /* PLLDIVn registers are not entirely consecutive */
+ if (info->id < 4)
+ reg = PLLDIV1 + 4 * (info->id - 1);
+ else
+ reg = PLLDIV4 + 4 * (info->id - 4);
+
+ gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ gate->reg = base + reg;
+ gate->bit_idx = DIV_ENABLE_SHIFT;
+
+ divider = devm_kzalloc(dev, sizeof(*divider), GFP_KERNEL);
+ if (!divider)
+ return ERR_PTR(-ENOMEM);
+
+ divider->reg = base + reg;
+ divider->shift = DIV_RATIO_SHIFT;
+ divider->width = info->ratio_width;
+ divider->flags = 0;
+
+ if (info->flags & SYSCLK_FIXED_DIV) {
+ divider->flags |= CLK_DIVIDER_READ_ONLY;
+ divider_ops = &clk_divider_ro_ops;
+ }
+
+ /* Only the ARM clock can change the parent PLL rate */
+ if (info->flags & SYSCLK_ARM_RATE)
+ flags |= CLK_SET_RATE_PARENT;
+
+ if (info->flags & SYSCLK_ALWAYS_ENABLED)
+ flags |= CLK_IS_CRITICAL;
+
+ clk = clk_register_composite(dev, info->name, &info->parent_name, 1,
+ NULL, NULL, &divider->hw, divider_ops,
+ &gate->hw, &clk_gate_ops, flags);
+ if (IS_ERR(clk))
+ return clk;
+
+ clk_notifier_register(clk, &davinci_pll_sysclk_notifier);
+
+ return clk;
+}
+
+int of_davinci_pll_init(struct device *dev,
+ const struct davinci_pll_clk_info *info,
+ const struct davinci_pll_obsclk_info *obsclk_info,
+ const struct davinci_pll_sysclk_info **div_info,
+ u8 max_sysclk_id,
+ void __iomem *base)
+{
+ struct device_node *node = dev->of_node;
+ struct device_node *child;
+ const char *parent_name;
+ struct clk *clk;
+
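+ /*
+ * Only the primary PLL (the one with PLLCTL[CLKMODE]) takes its
+ * reference clock from the device tree; a secondary PLL is always fed
+ * by "oscin".
+ */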
+ if (info->flags & PLL_HAS_CLKMODE)
+ parent_name = of_clk_get_parent_name(node, 0);
+ else
+ parent_name = OSCIN_CLK_NAME;
+
+ clk = davinci_pll_clk_register(dev, info, parent_name, base);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to register %s\n", info->name);
+ return PTR_ERR(clk);
+ }
+
+ child = of_get_child_by_name(node, "pllout");
+ if (of_device_is_available(child))
+ of_clk_add_provider(child, of_clk_src_simple_get, clk);
+ of_node_put(child);
+
+ child = of_get_child_by_name(node, "sysclk");
+ if (of_device_is_available(child)) {
+ struct clk_onecell_data *clk_data;
+ struct clk **clks;
+ int n_clks = max_sysclk_id + 1;
+ int i;
+
+ clk_data = devm_kzalloc(dev, sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clks = devm_kmalloc_array(dev, n_clks, sizeof(*clks), GFP_KERNEL);
+ if (!clks)
+ return -ENOMEM;
+
+ clk_data->clks = clks;
+ clk_data->clk_num = n_clks;
+
+ for (i = 0; i < n_clks; i++)
+ clks[i] = ERR_PTR(-ENOENT);
+
+ for (; *div_info; div_info++) {
+ clk = davinci_pll_sysclk_register(dev, *div_info, base);
+ if (IS_ERR(clk))
+ dev_warn(dev, "failed to register %s (%ld)\n",
+ (*div_info)->name, PTR_ERR(clk));
+ else
+ clks[(*div_info)->id] = clk;
+ }
+ of_clk_add_provider(child, of_clk_src_onecell_get, clk_data);
+ }
+ of_node_put(child);
+
+ child = of_get_child_by_name(node, "auxclk");
+ if (of_device_is_available(child)) {
+ char child_name[MAX_NAME_SIZE];
+
+ snprintf(child_name, MAX_NAME_SIZE, "%s_auxclk", info->name);
+
+ clk = davinci_pll_auxclk_register(dev, child_name, base);
+ if (IS_ERR(clk))
+ dev_warn(dev, "failed to register %s (%ld)\n",
+ child_name, PTR_ERR(clk));
+ else
+ of_clk_add_provider(child, of_clk_src_simple_get, clk);
+ }
+ of_node_put(child);
+
+ child = of_get_child_by_name(node, "obsclk");
+ if (of_device_is_available(child)) {
+ if (obsclk_info)
+ clk = davinci_pll_obsclk_register(dev, obsclk_info, base);
+ else
+ clk = ERR_PTR(-EINVAL);
+
+ if (IS_ERR(clk))
+ dev_warn(dev, "failed to register obsclk (%ld)\n",
+ PTR_ERR(clk));
+ else
+ of_clk_add_provider(child, of_clk_src_simple_get, clk);
+ }
+ of_node_put(child);
+
+ return 0;
+}
+
+static const struct of_device_id davinci_pll_of_match[] = {
+ { .compatible = "ti,da850-pll0", .data = of_da850_pll0_init },
+ { .compatible = "ti,da850-pll1", .data = of_da850_pll1_init },
+ { }
+};
+
+static const struct platform_device_id davinci_pll_id_table[] = {
+ { .name = "da830-pll", .driver_data = (kernel_ulong_t)da830_pll_init },
+ { .name = "da850-pll0", .driver_data = (kernel_ulong_t)da850_pll0_init },
+ { .name = "da850-pll1", .driver_data = (kernel_ulong_t)da850_pll1_init },
+ { .name = "dm355-pll1", .driver_data = (kernel_ulong_t)dm355_pll1_init },
+ { .name = "dm355-pll2", .driver_data = (kernel_ulong_t)dm355_pll2_init },
+ { .name = "dm365-pll1", .driver_data = (kernel_ulong_t)dm365_pll1_init },
+ { .name = "dm365-pll2", .driver_data = (kernel_ulong_t)dm365_pll2_init },
+ { .name = "dm644x-pll1", .driver_data = (kernel_ulong_t)dm644x_pll1_init },
+ { .name = "dm644x-pll2", .driver_data = (kernel_ulong_t)dm644x_pll2_init },
+ { .name = "dm646x-pll1", .driver_data = (kernel_ulong_t)dm646x_pll1_init },
+ { .name = "dm646x-pll2", .driver_data = (kernel_ulong_t)dm646x_pll2_init },
+ { }
+};
+
+typedef int (*davinci_pll_init)(struct device *dev, void __iomem *base);
+
+static int davinci_pll_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+ davinci_pll_init pll_init = NULL;
+ struct resource *res;
+ void __iomem *base;
+
+ of_id = of_match_device(davinci_pll_of_match, dev);
+ if (of_id)
+ pll_init = of_id->data;
+ else if (pdev->id_entry)
+ pll_init = (void *)pdev->id_entry->driver_data;
+
+ if (!pll_init) {
+ dev_err(dev, "unable to find driver data\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ return pll_init(dev, base);
+}
+
+static struct platform_driver davinci_pll_driver = {
+ .probe = davinci_pll_probe,
+ .driver = {
+ .name = "davinci-pll-clk",
+ .of_match_table = davinci_pll_of_match,
+ },
+ .id_table = davinci_pll_id_table,
+};
+
+static int __init davinci_pll_driver_init(void)
+{
+ return platform_driver_register(&davinci_pll_driver);
+}
+
+/* has to be postcore_initcall because PSC devices depend on PLL parent clocks */
+postcore_initcall(davinci_pll_driver_init);
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+#define DEBUG_REG(n) \
+{ \
+ .name = #n, \
+ .offset = n, \
+}
+
+static const struct debugfs_reg32 davinci_pll_regs[] = {
+ DEBUG_REG(REVID),
+ DEBUG_REG(PLLCTL),
+ DEBUG_REG(OCSEL),
+ DEBUG_REG(PLLSECCTL),
+ DEBUG_REG(PLLM),
+ DEBUG_REG(PREDIV),
+ DEBUG_REG(PLLDIV1),
+ DEBUG_REG(PLLDIV2),
+ DEBUG_REG(PLLDIV3),
+ DEBUG_REG(OSCDIV),
+ DEBUG_REG(POSTDIV),
+ DEBUG_REG(BPDIV),
+ DEBUG_REG(PLLCMD),
+ DEBUG_REG(PLLSTAT),
+ DEBUG_REG(ALNCTL),
+ DEBUG_REG(DCHANGE),
+ DEBUG_REG(CKEN),
+ DEBUG_REG(CKSTAT),
+ DEBUG_REG(SYSTAT),
+ DEBUG_REG(PLLDIV4),
+ DEBUG_REG(PLLDIV5),
+ DEBUG_REG(PLLDIV6),
+ DEBUG_REG(PLLDIV7),
+ DEBUG_REG(PLLDIV8),
+ DEBUG_REG(PLLDIV9),
+};
+
+static int davinci_pll_debug_init(struct clk_hw *hw, struct dentry *dentry)
+{
+ struct davinci_pll_clk *pll = to_davinci_pll_clk(hw);
+ struct debugfs_regset32 *regset;
+ struct dentry *d;
+
+ regset = kzalloc(sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return -ENOMEM;
+
+ regset->regs = davinci_pll_regs;
+ regset->nregs = ARRAY_SIZE(davinci_pll_regs);
+ regset->base = pll->base;
+
+ d = debugfs_create_regset32("registers", 0400, dentry, regset);
+ if (IS_ERR(d)) {
+ kfree(regset);
+ return PTR_ERR(d);
+ }
+
+ return 0;
+}
+#endif
diff --git a/drivers/clk/davinci/pll.h b/drivers/clk/davinci/pll.h
new file mode 100644
index 000000000000..b1b6fb23f972
--- /dev/null
+++ b/drivers/clk/davinci/pll.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Clock driver for TI Davinci PLL controllers
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#ifndef __CLK_DAVINCI_PLL_H__
+#define __CLK_DAVINCI_PLL_H__
+
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/types.h>
+
+#define PLL_HAS_CLKMODE BIT(0) /* PLL has PLLCTL[CLKMODE] */
+#define PLL_HAS_PREDIV BIT(1) /* has prediv before PLL */
+#define PLL_PREDIV_ALWAYS_ENABLED BIT(2) /* don't clear DEN bit */
+#define PLL_PREDIV_FIXED_DIV BIT(3) /* fixed divider value */
+#define PLL_HAS_POSTDIV BIT(4) /* has postdiv after PLL */
+#define PLL_POSTDIV_ALWAYS_ENABLED BIT(5) /* don't clear DEN bit */
+#define PLL_POSTDIV_FIXED_DIV BIT(6) /* fixed divider value */
+#define PLL_HAS_EXTCLKSRC BIT(7) /* has selectable bypass */
+#define PLL_PLLM_2X BIT(8) /* PLLM value is 2x (DM365) */
+#define PLL_PREDIV_FIXED8 BIT(9) /* DM355 quirk */
+
+/**
+ * struct davinci_pll_clk_info - controller-specific PLL info
+ * @name: The name of the PLL
+ * @unlock_reg: Optional CFGCHIP register for unlocking PLL
+ * @unlock_mask: Bitmask used with @unlock_reg
+ * @pllm_mask: Bitmask for PLLM[PLLM] value
+ * @pllm_min: Minimum allowable value for PLLM[PLLM]
+ * @pllm_max: Maximum allowable value for PLLM[PLLM]
+ * @pllout_min_rate: Minimum allowable rate for PLLOUT
+ * @pllout_max_rate: Maximum allowable rate for PLLOUT
+ * @flags: Bitmap of PLL_* flags.
+ */
+struct davinci_pll_clk_info {
+ const char *name;
+ u32 unlock_reg;
+ u32 unlock_mask;
+ u32 pllm_mask;
+ u32 pllm_min;
+ u32 pllm_max;
+ unsigned long pllout_min_rate;
+ unsigned long pllout_max_rate;
+ u32 flags;
+};
+
+#define SYSCLK_ARM_RATE BIT(0) /* Controls ARM rate */
+#define SYSCLK_ALWAYS_ENABLED BIT(1) /* Or bad things happen */
+#define SYSCLK_FIXED_DIV BIT(2) /* Fixed divider */
+
+/**
+ * struct davinci_pll_sysclk_info - SYSCLKn-specific info
+ * @name: The name of the clock
+ * @parent_name: The name of the parent clock
+ * @id: "n" in "SYSCLKn"
+ * @ratio_width: Width (in bits) of RATIO in PLLDIVn register
+ * @flags: Bitmap of SYSCLK_* flags.
+ */
+struct davinci_pll_sysclk_info {
+ const char *name;
+ const char *parent_name;
+ u32 id;
+ u32 ratio_width;
+ u32 flags;
+};
+
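+/* Convenience macro for declaring a SYSCLKn davinci_pll_sysclk_info struct */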
+#define SYSCLK(i, n, p, w, f) \
+static const struct davinci_pll_sysclk_info n = { \
+ .name = #n, \
+ .parent_name = #p, \
+ .id = (i), \
+ .ratio_width = (w), \
+ .flags = (f), \
+}
+
+/**
+ * struct davinci_pll_obsclk_info - OBSCLK-specific info
+ * @name: The name of the clock
+ * @parent_names: Array of names of the parent clocks
+ * @num_parents: Length of @parent_names
+ * @table: Array of values to write to OCSEL[OCSRC] corresponding to
+ * @parent_names
+ * @ocsrc_mask: Bitmask for OCSEL[OCSRC]
+ */
+struct davinci_pll_obsclk_info {
+ const char *name;
+ const char * const *parent_names;
+ u8 num_parents;
+ u32 *table;
+ u32 ocsrc_mask;
+};
+
+struct clk *davinci_pll_clk_register(struct device *dev,
+ const struct davinci_pll_clk_info *info,
+ const char *parent_name,
+ void __iomem *base);
+struct clk *davinci_pll_auxclk_register(struct device *dev,
+ const char *name,
+ void __iomem *base);
+struct clk *davinci_pll_sysclkbp_clk_register(struct device *dev,
+ const char *name,
+ void __iomem *base);
+struct clk *
+davinci_pll_obsclk_register(struct device *dev,
+ const struct davinci_pll_obsclk_info *info,
+ void __iomem *base);
+struct clk *
+davinci_pll_sysclk_register(struct device *dev,
+ const struct davinci_pll_sysclk_info *info,
+ void __iomem *base);
+
+int of_davinci_pll_init(struct device *dev,
+ const struct davinci_pll_clk_info *info,
+ const struct davinci_pll_obsclk_info *obsclk_info,
+ const struct davinci_pll_sysclk_info **div_info,
+ u8 max_sysclk_id,
+ void __iomem *base);
+
+/* Platform-specific callbacks */
+
+int da830_pll_init(struct device *dev, void __iomem *base);
+
+int da850_pll0_init(struct device *dev, void __iomem *base);
+int da850_pll1_init(struct device *dev, void __iomem *base);
+int of_da850_pll0_init(struct device *dev, void __iomem *base);
+int of_da850_pll1_init(struct device *dev, void __iomem *base);
+
+int dm355_pll1_init(struct device *dev, void __iomem *base);
+int dm355_pll2_init(struct device *dev, void __iomem *base);
+
+int dm365_pll1_init(struct device *dev, void __iomem *base);
+int dm365_pll2_init(struct device *dev, void __iomem *base);
+
+int dm644x_pll1_init(struct device *dev, void __iomem *base);
+int dm644x_pll2_init(struct device *dev, void __iomem *base);
+
+int dm646x_pll1_init(struct device *dev, void __iomem *base);
+int dm646x_pll2_init(struct device *dev, void __iomem *base);
+
+#endif /* __CLK_DAVINCI_PLL_H__ */
diff --git a/drivers/clk/davinci/psc-da830.c b/drivers/clk/davinci/psc-da830.c
new file mode 100644
index 000000000000..f61abf5632ff
--- /dev/null
+++ b/drivers/clk/davinci/psc-da830.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PSC clock descriptions for TI DA830/OMAP-L137/AM17XX
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "psc.h"
+
+LPSC_CLKDEV1(spi0_clkdev, NULL, "spi_davinci.0");
+LPSC_CLKDEV1(mmcsd_clkdev, NULL, "da830-mmc.0");
+LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
+
+static const struct davinci_lpsc_clk_info da830_psc0_info[] = {
+ LPSC(0, 0, tpcc, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(1, 0, tptc0, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(2, 0, tptc1, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(3, 0, aemif, pll0_sysclk3, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(4, 0, spi0, pll0_sysclk2, spi0_clkdev, 0),
+ LPSC(5, 0, mmcsd, pll0_sysclk2, mmcsd_clkdev, 0),
+ LPSC(6, 0, aintc, pll0_sysclk4, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(7, 0, arm_rom, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(8, 0, secu_mgr, pll0_sysclk4, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(9, 0, uart0, pll0_sysclk2, uart0_clkdev, 0),
+ LPSC(10, 0, scr0_ss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(11, 0, scr1_ss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(12, 0, scr2_ss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(13, 0, pruss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(14, 0, arm, pll0_sysclk6, NULL, LPSC_ALWAYS_ENABLED),
+ { }
+};
+
+static int da830_psc0_init(struct device *dev, void __iomem *base)
+{
+ return davinci_psc_register_clocks(dev, da830_psc0_info, 16, base);
+}
+
+static struct clk_bulk_data da830_psc0_parent_clks[] = {
+ { .id = "pll0_sysclk2" },
+ { .id = "pll0_sysclk3" },
+ { .id = "pll0_sysclk4" },
+ { .id = "pll0_sysclk6" },
+};
+
+const struct davinci_psc_init_data da830_psc0_init_data = {
+ .parent_clks = da830_psc0_parent_clks,
+ .num_parent_clks = ARRAY_SIZE(da830_psc0_parent_clks),
+ .psc_init = &da830_psc0_init,
+};
+
+LPSC_CLKDEV2(usb0_clkdev, NULL, "musb-da8xx",
+ NULL, "cppi41-dmaengine");
+LPSC_CLKDEV1(usb1_clkdev, NULL, "ohci-da8xx");
+/* REVISIT: gpio-davinci.c should be modified to drop con_id */
+LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
+LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
+ "fck", "davinci_mdio.0");
+LPSC_CLKDEV1(mcasp0_clkdev, NULL, "davinci-mcasp.0");
+LPSC_CLKDEV1(mcasp1_clkdev, NULL, "davinci-mcasp.1");
+LPSC_CLKDEV1(mcasp2_clkdev, NULL, "davinci-mcasp.2");
+LPSC_CLKDEV1(spi1_clkdev, NULL, "spi_davinci.1");
+LPSC_CLKDEV1(i2c1_clkdev, NULL, "i2c_davinci.2");
+LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
+LPSC_CLKDEV1(uart2_clkdev, NULL, "serial8250.2");
+LPSC_CLKDEV1(lcdc_clkdev, "fck", "da8xx_lcdc.0");
+LPSC_CLKDEV2(pwm_clkdev, "fck", "ehrpwm.0",
+ "fck", "ehrpwm.1");
+LPSC_CLKDEV3(ecap_clkdev, "fck", "ecap.0",
+ "fck", "ecap.1",
+ "fck", "ecap.2");
+LPSC_CLKDEV2(eqep_clkdev, NULL, "eqep.0",
+ NULL, "eqep.1");
+
+static const struct davinci_lpsc_clk_info da830_psc1_info[] = {
+ LPSC(1, 0, usb0, pll0_sysclk2, usb0_clkdev, 0),
+ LPSC(2, 0, usb1, pll0_sysclk4, usb1_clkdev, 0),
+ LPSC(3, 0, gpio, pll0_sysclk4, gpio_clkdev, 0),
+ LPSC(5, 0, emac, pll0_sysclk4, emac_clkdev, 0),
+ LPSC(6, 0, emif3, pll0_sysclk5, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(7, 0, mcasp0, pll0_sysclk2, mcasp0_clkdev, 0),
+ LPSC(8, 0, mcasp1, pll0_sysclk2, mcasp1_clkdev, 0),
+ LPSC(9, 0, mcasp2, pll0_sysclk2, mcasp2_clkdev, 0),
+ LPSC(10, 0, spi1, pll0_sysclk2, spi1_clkdev, 0),
+ LPSC(11, 0, i2c1, pll0_sysclk4, i2c1_clkdev, 0),
+ LPSC(12, 0, uart1, pll0_sysclk2, uart1_clkdev, 0),
+ LPSC(13, 0, uart2, pll0_sysclk2, uart2_clkdev, 0),
+ LPSC(16, 0, lcdc, pll0_sysclk2, lcdc_clkdev, 0),
+ LPSC(17, 0, pwm, pll0_sysclk2, pwm_clkdev, 0),
+ LPSC(20, 0, ecap, pll0_sysclk2, ecap_clkdev, 0),
+ LPSC(21, 0, eqep, pll0_sysclk2, eqep_clkdev, 0),
+ { }
+};
+
+static int da830_psc1_init(struct device *dev, void __iomem *base)
+{
+ return davinci_psc_register_clocks(dev, da830_psc1_info, 32, base);
+}
+
+static struct clk_bulk_data da830_psc1_parent_clks[] = {
+ { .id = "pll0_sysclk2" },
+ { .id = "pll0_sysclk4" },
+ { .id = "pll0_sysclk5" },
+};
+
+const struct davinci_psc_init_data da830_psc1_init_data = {
+ .parent_clks = da830_psc1_parent_clks,
+ .num_parent_clks = ARRAY_SIZE(da830_psc1_parent_clks),
+ .psc_init = &da830_psc1_init,
+};
diff --git a/drivers/clk/davinci/psc-da850.c b/drivers/clk/davinci/psc-da850.c
new file mode 100644
index 000000000000..d196dcbed560
--- /dev/null
+++ b/drivers/clk/davinci/psc-da850.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PSC clock descriptions for TI DA850/OMAP-L138/AM18XX
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/reset-controller.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/types.h>
+
+#include "psc.h"
+
+LPSC_CLKDEV2(emifa_clkdev, NULL, "ti-aemif",
+ "aemif", "davinci_nand.0");
+LPSC_CLKDEV1(spi0_clkdev, NULL, "spi_davinci.0");
+LPSC_CLKDEV1(mmcsd0_clkdev, NULL, "da830-mmc.0");
+LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
+/* REVISIT: used dev_id instead of con_id */
+LPSC_CLKDEV1(arm_clkdev, "arm", NULL);
+LPSC_CLKDEV1(dsp_clkdev, NULL, "davinci-rproc.0");
+
+static const struct davinci_lpsc_clk_info da850_psc0_info[] = {
+ LPSC(0, 0, tpcc0, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(1, 0, tptc0, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(2, 0, tptc1, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(3, 0, emifa, async1, emifa_clkdev, 0),
+ LPSC(4, 0, spi0, pll0_sysclk2, spi0_clkdev, 0),
+ LPSC(5, 0, mmcsd0, pll0_sysclk2, mmcsd0_clkdev, 0),
+ LPSC(6, 0, aintc, pll0_sysclk4, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(7, 0, arm_rom, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(9, 0, uart0, pll0_sysclk2, uart0_clkdev, 0),
+ LPSC(13, 0, pruss, pll0_sysclk2, NULL, 0),
+ LPSC(14, 0, arm, pll0_sysclk6, arm_clkdev, LPSC_ALWAYS_ENABLED | LPSC_SET_RATE_PARENT),
+ LPSC(15, 1, dsp, pll0_sysclk1, dsp_clkdev, LPSC_FORCE | LPSC_LOCAL_RESET),
+ { }
+};
+
+LPSC_CLKDEV3(usb0_clkdev, "fck", "da830-usb-phy-clks",
+ NULL, "musb-da8xx",
+ NULL, "cppi41-dmaengine");
+LPSC_CLKDEV1(usb1_clkdev, NULL, "ohci-da8xx");
+/* REVISIT: gpio-davinci.c should be modified to drop con_id */
+LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
+LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
+ "fck", "davinci_mdio.0");
+LPSC_CLKDEV1(mcasp0_clkdev, NULL, "davinci-mcasp.0");
+LPSC_CLKDEV1(sata_clkdev, "fck", "ahci_da850");
+LPSC_CLKDEV1(vpif_clkdev, NULL, "vpif");
+LPSC_CLKDEV1(spi1_clkdev, NULL, "spi_davinci.1");
+LPSC_CLKDEV1(i2c1_clkdev, NULL, "i2c_davinci.2");
+LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
+LPSC_CLKDEV1(uart2_clkdev, NULL, "serial8250.2");
+LPSC_CLKDEV1(mcbsp0_clkdev, NULL, "davinci-mcbsp.0");
+LPSC_CLKDEV1(mcbsp1_clkdev, NULL, "davinci-mcbsp.1");
+LPSC_CLKDEV1(lcdc_clkdev, "fck", "da8xx_lcdc.0");
+LPSC_CLKDEV3(ehrpwm_clkdev, "fck", "ehrpwm.0",
+ "fck", "ehrpwm.1",
+ NULL, "da830-tbclksync");
+LPSC_CLKDEV1(mmcsd1_clkdev, NULL, "da830-mmc.1");
+LPSC_CLKDEV3(ecap_clkdev, "fck", "ecap.0",
+ "fck", "ecap.1",
+ "fck", "ecap.2");
+
+static struct reset_control_lookup da850_psc0_reset_lookup_table[] = {
+ RESET_LOOKUP("da850-psc0", 15, "davinci-rproc.0", NULL),
+};
+
+static int da850_psc0_init(struct device *dev, void __iomem *base)
+{
+ reset_controller_add_lookup(da850_psc0_reset_lookup_table,
+ ARRAY_SIZE(da850_psc0_reset_lookup_table));
+ return davinci_psc_register_clocks(dev, da850_psc0_info, 16, base);
+}
+
+static int of_da850_psc0_init(struct device *dev, void __iomem *base)
+{
+ return of_davinci_psc_clk_init(dev, da850_psc0_info, 16, base);
+}
+
+static struct clk_bulk_data da850_psc0_parent_clks[] = {
+ { .id = "pll0_sysclk1" },
+ { .id = "pll0_sysclk2" },
+ { .id = "pll0_sysclk4" },
+ { .id = "pll0_sysclk6" },
+ { .id = "async1" },
+};
+
+const struct davinci_psc_init_data da850_psc0_init_data = {
+ .parent_clks = da850_psc0_parent_clks,
+ .num_parent_clks = ARRAY_SIZE(da850_psc0_parent_clks),
+ .psc_init = &da850_psc0_init,
+};
+
+const struct davinci_psc_init_data of_da850_psc0_init_data = {
+ .parent_clks = da850_psc0_parent_clks,
+ .num_parent_clks = ARRAY_SIZE(da850_psc0_parent_clks),
+ .psc_init = &of_da850_psc0_init,
+};
+
+static const struct davinci_lpsc_clk_info da850_psc1_info[] = {
+ LPSC(0, 0, tpcc1, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(1, 0, usb0, pll0_sysclk2, usb0_clkdev, 0),
+ LPSC(2, 0, usb1, pll0_sysclk4, usb1_clkdev, 0),
+ LPSC(3, 0, gpio, pll0_sysclk4, gpio_clkdev, 0),
+ LPSC(5, 0, emac, pll0_sysclk4, emac_clkdev, 0),
+ LPSC(6, 0, ddr, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(7, 0, mcasp0, async3, mcasp0_clkdev, 0),
+ LPSC(8, 0, sata, pll0_sysclk2, sata_clkdev, LPSC_FORCE),
+ LPSC(9, 0, vpif, pll0_sysclk2, vpif_clkdev, 0),
+ LPSC(10, 0, spi1, async3, spi1_clkdev, 0),
+ LPSC(11, 0, i2c1, pll0_sysclk4, i2c1_clkdev, 0),
+ LPSC(12, 0, uart1, async3, uart1_clkdev, 0),
+ LPSC(13, 0, uart2, async3, uart2_clkdev, 0),
+ LPSC(14, 0, mcbsp0, async3, mcbsp0_clkdev, 0),
+ LPSC(15, 0, mcbsp1, async3, mcbsp1_clkdev, 0),
+ LPSC(16, 0, lcdc, pll0_sysclk2, lcdc_clkdev, 0),
+ LPSC(17, 0, ehrpwm, async3, ehrpwm_clkdev, 0),
+ LPSC(18, 0, mmcsd1, pll0_sysclk2, mmcsd1_clkdev, 0),
+ LPSC(20, 0, ecap, async3, ecap_clkdev, 0),
+ LPSC(21, 0, tptc2, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ { }
+};
+
+static int da850_psc1_init(struct device *dev, void __iomem *base)
+{
+ return davinci_psc_register_clocks(dev, da850_psc1_info, 32, base);
+}
+
+static int of_da850_psc1_init(struct device *dev, void __iomem *base)
+{
+ return of_davinci_psc_clk_init(dev, da850_psc1_info, 32, base);
+}
+
+static struct clk_bulk_data da850_psc1_parent_clks[] = {
+ { .id = "pll0_sysclk2" },
+ { .id = "pll0_sysclk4" },
+ { .id = "async3" },
+};
+
+const struct davinci_psc_init_data da850_psc1_init_data = {
+ .parent_clks = da850_psc1_parent_clks,
+ .num_parent_clks = ARRAY_SIZE(da850_psc1_parent_clks),
+ .psc_init = &da850_psc1_init,
+};
+
+const struct davinci_psc_init_data of_da850_psc1_init_data = {
+ .parent_clks = da850_psc1_parent_clks,
+ .num_parent_clks = ARRAY_SIZE(da850_psc1_parent_clks),
+ .psc_init = &of_da850_psc1_init,
+};
diff --git a/drivers/clk/davinci/psc-dm355.c b/drivers/clk/davinci/psc-dm355.c
new file mode 100644
index 000000000000..6995ecea2677
--- /dev/null
+++ b/drivers/clk/davinci/psc-dm355.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PSC clock descriptions for TI DaVinci DM355
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "psc.h"
+
+LPSC_CLKDEV1(vpss_master_clkdev, "master", "vpss");
+LPSC_CLKDEV1(vpss_slave_clkdev, "slave", "vpss");
+LPSC_CLKDEV1(spi1_clkdev, NULL, "spi_davinci.1");
+LPSC_CLKDEV1(mmcsd1_clkdev, NULL, "dm6441-mmc.1");
+LPSC_CLKDEV1(mcbsp1_clkdev, NULL, "davinci-mcbsp.1");
+LPSC_CLKDEV1(usb_clkdev, "usb", NULL);
+LPSC_CLKDEV1(spi2_clkdev, NULL, "spi_davinci.2");
+LPSC_CLKDEV1(aemif_clkdev, "aemif", NULL);
+LPSC_CLKDEV1(mmcsd0_clkdev, NULL, "dm6441-mmc.0");
+LPSC_CLKDEV1(mcbsp0_clkdev, NULL, "davinci-mcbsp.0");
+LPSC_CLKDEV1(i2c_clkdev, NULL, "i2c_davinci.1");
+LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
+LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
+LPSC_CLKDEV1(uart2_clkdev, NULL, "serial8250.2");
+LPSC_CLKDEV1(spi0_clkdev, NULL, "spi_davinci.0");
+/* REVISIT: gpio-davinci.c should be modified to drop con_id */
+LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
+LPSC_CLKDEV1(timer0_clkdev, "timer0", NULL);
+LPSC_CLKDEV1(timer2_clkdev, NULL, "davinci-wdt");
+LPSC_CLKDEV1(vpss_dac_clkdev, "vpss_dac", NULL);
+
+static const struct davinci_lpsc_clk_info dm355_psc_info[] = {
+ LPSC(0, 0, vpss_master, pll1_sysclk4, vpss_master_clkdev, 0),
+ LPSC(1, 0, vpss_slave, pll1_sysclk4, vpss_slave_clkdev, 0),
+ LPSC(5, 0, timer3, pll1_auxclk, NULL, 0),
+ LPSC(6, 0, spi1, pll1_sysclk2, spi1_clkdev, 0),
+ LPSC(7, 0, mmcsd1, pll1_sysclk2, mmcsd1_clkdev, 0),
+ LPSC(8, 0, asp1, pll1_sysclk2, NULL, 0),
+ LPSC(9, 0, usb, pll1_sysclk2, usb_clkdev, 0),
+ LPSC(10, 0, pwm3, pll1_auxclk, NULL, 0),
+ LPSC(11, 0, spi2, pll1_sysclk2, spi2_clkdev, 0),
+ LPSC(12, 0, rto, pll1_auxclk, NULL, 0),
+ LPSC(14, 0, aemif, pll1_sysclk2, aemif_clkdev, 0),
+ LPSC(15, 0, mmcsd0, pll1_sysclk2, mmcsd0_clkdev, 0),
+ LPSC(17, 0, asp0, pll1_sysclk2, NULL, 0),
+ LPSC(18, 0, i2c, pll1_auxclk, i2c_clkdev, 0),
+ LPSC(19, 0, uart0, pll1_auxclk, uart0_clkdev, 0),
+ LPSC(20, 0, uart1, pll1_auxclk, uart1_clkdev, 0),
+ LPSC(21, 0, uart2, pll1_sysclk2, uart2_clkdev, 0),
+ LPSC(22, 0, spi0, pll1_sysclk2, spi0_clkdev, 0),
+ LPSC(23, 0, pwm0, pll1_auxclk, NULL, 0),
+ LPSC(24, 0, pwm1, pll1_auxclk, NULL, 0),
+ LPSC(25, 0, pwm2, pll1_auxclk, NULL, 0),
+ LPSC(26, 0, gpio, pll1_sysclk2, gpio_clkdev, 0),
+ LPSC(27, 0, timer0, pll1_auxclk, timer0_clkdev, LPSC_ALWAYS_ENABLED),
+ LPSC(28, 0, timer1, pll1_auxclk, NULL, 0),
+ /* REVISIT: why can't this be disabled? */
+ LPSC(29, 0, timer2, pll1_auxclk, timer2_clkdev, LPSC_ALWAYS_ENABLED),
+ LPSC(31, 0, arm, pll1_sysclk1, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(40, 0, mjcp, pll1_sysclk1, NULL, 0),
+ LPSC(41, 0, vpss_dac, pll1_sysclk3, vpss_dac_clkdev, 0),
+ { }
+};
+
+static int dm355_psc_init(struct device *dev, void __iomem *base)
+{
+ return davinci_psc_register_clocks(dev, dm355_psc_info, 42, base);
+}
+
+static struct clk_bulk_data dm355_psc_parent_clks[] = {
+ { .id = "pll1_sysclk1" },
+ { .id = "pll1_sysclk2" },
+ { .id = "pll1_sysclk3" },
+ { .id = "pll1_sysclk4" },
+ { .id = "pll1_auxclk" },
+};
+
+const struct davinci_psc_init_data dm355_psc_init_data = {
+ .parent_clks = dm355_psc_parent_clks,
+ .num_parent_clks = ARRAY_SIZE(dm355_psc_parent_clks),
+ .psc_init = &dm355_psc_init,
+};
diff --git a/drivers/clk/davinci/psc-dm365.c b/drivers/clk/davinci/psc-dm365.c
new file mode 100644
index 000000000000..3ad915f37376
--- /dev/null
+++ b/drivers/clk/davinci/psc-dm365.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PSC clock descriptions for TI DaVinci DM365
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "psc.h"
+
+LPSC_CLKDEV1(vpss_slave_clkdev, "slave", "vpss");
+LPSC_CLKDEV1(spi1_clkdev, NULL, "spi_davinci.1");
+LPSC_CLKDEV1(mmcsd1_clkdev, NULL, "da830-mmc.1");
+LPSC_CLKDEV1(asp0_clkdev, NULL, "davinci-mcbsp");
+LPSC_CLKDEV1(usb_clkdev, "usb", NULL);
+LPSC_CLKDEV1(spi2_clkdev, NULL, "spi_davinci.2");
+LPSC_CLKDEV1(aemif_clkdev, "aemif", NULL);
+LPSC_CLKDEV1(mmcsd0_clkdev, NULL, "da830-mmc.0");
+LPSC_CLKDEV1(i2c_clkdev, NULL, "i2c_davinci.1");
+LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
+LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
+LPSC_CLKDEV1(spi0_clkdev, NULL, "spi_davinci.0");
+/* REVISIT: gpio-davinci.c should be modified to drop con_id */
+LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
+LPSC_CLKDEV1(timer0_clkdev, "timer0", NULL);
+LPSC_CLKDEV1(timer2_clkdev, NULL, "davinci-wdt");
+LPSC_CLKDEV1(spi3_clkdev, NULL, "spi_davinci.3");
+LPSC_CLKDEV1(spi4_clkdev, NULL, "spi_davinci.4");
+LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
+ "fck", "davinci_mdio.0");
+LPSC_CLKDEV1(voice_codec_clkdev, NULL, "davinci_voicecodec");
+LPSC_CLKDEV1(vpss_dac_clkdev, "vpss_dac", NULL);
+LPSC_CLKDEV1(vpss_master_clkdev, "master", "vpss");
+
+static const struct davinci_lpsc_clk_info dm365_psc_info[] = {
+ LPSC(1, 0, vpss_slave, pll1_sysclk5, vpss_slave_clkdev, 0),
+ LPSC(5, 0, timer3, pll1_auxclk, NULL, 0),
+ LPSC(6, 0, spi1, pll1_sysclk4, spi1_clkdev, 0),
+ LPSC(7, 0, mmcsd1, pll1_sysclk4, mmcsd1_clkdev, 0),
+ LPSC(8, 0, asp0, pll1_sysclk4, asp0_clkdev, 0),
+ LPSC(9, 0, usb, pll1_auxclk, usb_clkdev, 0),
+ LPSC(10, 0, pwm3, pll1_auxclk, NULL, 0),
+ LPSC(11, 0, spi2, pll1_sysclk4, spi2_clkdev, 0),
+ LPSC(12, 0, rto, pll1_sysclk4, NULL, 0),
+ LPSC(14, 0, aemif, pll1_sysclk4, aemif_clkdev, 0),
+ LPSC(15, 0, mmcsd0, pll1_sysclk8, mmcsd0_clkdev, 0),
+ LPSC(18, 0, i2c, pll1_auxclk, i2c_clkdev, 0),
+ LPSC(19, 0, uart0, pll1_auxclk, uart0_clkdev, 0),
+ LPSC(20, 0, uart1, pll1_sysclk4, uart1_clkdev, 0),
+ LPSC(22, 0, spi0, pll1_sysclk4, spi0_clkdev, 0),
+ LPSC(23, 0, pwm0, pll1_auxclk, NULL, 0),
+ LPSC(24, 0, pwm1, pll1_auxclk, NULL, 0),
+ LPSC(25, 0, pwm2, pll1_auxclk, NULL, 0),
+ LPSC(26, 0, gpio, pll1_sysclk4, gpio_clkdev, 0),
+ LPSC(27, 0, timer0, pll1_auxclk, timer0_clkdev, LPSC_ALWAYS_ENABLED),
+ LPSC(28, 0, timer1, pll1_auxclk, NULL, 0),
+ /* REVISIT: why can't this be disabled? */
+ LPSC(29, 0, timer2, pll1_auxclk, timer2_clkdev, LPSC_ALWAYS_ENABLED),
+ LPSC(31, 0, arm, pll2_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(38, 0, spi3, pll1_sysclk4, spi3_clkdev, 0),
+ LPSC(39, 0, spi4, pll1_auxclk, spi4_clkdev, 0),
+ LPSC(40, 0, emac, pll2_sysclk4, emac_clkdev, 0),
+ LPSC(44, 1, voice_codec, pll1_sysclk3, voice_codec_clkdev, 0),
+ LPSC(46, 1, vpss_dac, pll1_sysclk3, vpss_dac_clkdev, 0),
+ LPSC(47, 0, vpss_master, pll1_sysclk5, vpss_master_clkdev, 0),
+ LPSC(50, 0, mjcp, pll1_sysclk3, NULL, 0),
+ { }
+};
+
+static int dm365_psc_init(struct device *dev, void __iomem *base)
+{
+ return davinci_psc_register_clocks(dev, dm365_psc_info, 52, base);
+}
+
+static struct clk_bulk_data dm365_psc_parent_clks[] = {
+ { .id = "pll1_sysclk1" },
+ { .id = "pll1_sysclk3" },
+ { .id = "pll1_sysclk4" },
+ { .id = "pll1_sysclk5" },
+ { .id = "pll1_sysclk8" },
+ { .id = "pll2_sysclk2" },
+ { .id = "pll2_sysclk4" },
+ { .id = "pll1_auxclk" },
+};
+
+const struct davinci_psc_init_data dm365_psc_init_data = {
+ .parent_clks = dm365_psc_parent_clks,
+ .num_parent_clks = ARRAY_SIZE(dm365_psc_parent_clks),
+ .psc_init = &dm365_psc_init,
+};
diff --git a/drivers/clk/davinci/psc-dm644x.c b/drivers/clk/davinci/psc-dm644x.c
new file mode 100644
index 000000000000..c22367baa46f
--- /dev/null
+++ b/drivers/clk/davinci/psc-dm644x.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PSC clock descriptions for TI DaVinci DM644x
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "psc.h"
+
+LPSC_CLKDEV1(vpss_master_clkdev, "master", "vpss");
+LPSC_CLKDEV1(vpss_slave_clkdev, "slave", "vpss");
+LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
+ "fck", "davinci_mdio.0");
+LPSC_CLKDEV1(usb_clkdev, "usb", NULL);
+LPSC_CLKDEV1(ide_clkdev, NULL, "palm_bk3710");
+LPSC_CLKDEV1(aemif_clkdev, "aemif", NULL);
+LPSC_CLKDEV1(mmcsd_clkdev, NULL, "dm6441-mmc.0");
+LPSC_CLKDEV1(asp0_clkdev, NULL, "davinci-mcbsp");
+LPSC_CLKDEV1(i2c_clkdev, NULL, "i2c_davinci.1");
+LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
+LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
+LPSC_CLKDEV1(uart2_clkdev, NULL, "serial8250.2");
+/* REVISIT: gpio-davinci.c should be modified to drop con_id */
+LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
+LPSC_CLKDEV1(timer0_clkdev, "timer0", NULL);
+LPSC_CLKDEV1(timer2_clkdev, NULL, "davinci-wdt");
+
+static const struct davinci_lpsc_clk_info dm644x_psc_info[] = {
+ LPSC(0, 0, vpss_master, pll1_sysclk3, vpss_master_clkdev, 0),
+ LPSC(1, 0, vpss_slave, pll1_sysclk3, vpss_slave_clkdev, 0),
+ LPSC(6, 0, emac, pll1_sysclk5, emac_clkdev, 0),
+ LPSC(9, 0, usb, pll1_sysclk5, usb_clkdev, 0),
+ LPSC(10, 0, ide, pll1_sysclk5, ide_clkdev, 0),
+ LPSC(11, 0, vlynq, pll1_sysclk5, NULL, 0),
+ LPSC(14, 0, aemif, pll1_sysclk5, aemif_clkdev, 0),
+ LPSC(15, 0, mmcsd, pll1_sysclk5, mmcsd_clkdev, 0),
+ LPSC(17, 0, asp0, pll1_sysclk5, asp0_clkdev, 0),
+ LPSC(18, 0, i2c, pll1_auxclk, i2c_clkdev, 0),
+ LPSC(19, 0, uart0, pll1_auxclk, uart0_clkdev, 0),
+ LPSC(20, 0, uart1, pll1_auxclk, uart1_clkdev, 0),
+ LPSC(21, 0, uart2, pll1_auxclk, uart2_clkdev, 0),
+ LPSC(22, 0, spi, pll1_sysclk5, NULL, 0),
+ LPSC(23, 0, pwm0, pll1_auxclk, NULL, 0),
+ LPSC(24, 0, pwm1, pll1_auxclk, NULL, 0),
+ LPSC(25, 0, pwm2, pll1_auxclk, NULL, 0),
+ LPSC(26, 0, gpio, pll1_sysclk5, gpio_clkdev, 0),
+ LPSC(27, 0, timer0, pll1_auxclk, timer0_clkdev, LPSC_ALWAYS_ENABLED),
+ LPSC(28, 0, timer1, pll1_auxclk, NULL, 0),
+ /* REVISIT: why can't this be disabled? */
+ LPSC(29, 0, timer2, pll1_auxclk, timer2_clkdev, LPSC_ALWAYS_ENABLED),
+ LPSC(31, 0, arm, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ /* REVISIT how to disable? */
+ LPSC(39, 1, dsp, pll1_sysclk1, NULL, LPSC_ALWAYS_ENABLED),
+ /* REVISIT how to disable? */
+ LPSC(40, 1, vicp, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ { }
+};
+
+static int dm644x_psc_init(struct device *dev, void __iomem *base)
+{
+ return davinci_psc_register_clocks(dev, dm644x_psc_info, 41, base);
+}
+
+static struct clk_bulk_data dm644x_psc_parent_clks[] = {
+ { .id = "pll1_sysclk1" },
+ { .id = "pll1_sysclk2" },
+ { .id = "pll1_sysclk3" },
+ { .id = "pll1_sysclk5" },
+ { .id = "pll1_auxclk" },
+};
+
+const struct davinci_psc_init_data dm644x_psc_init_data = {
+ .parent_clks = dm644x_psc_parent_clks,
+ .num_parent_clks = ARRAY_SIZE(dm644x_psc_parent_clks),
+ .psc_init = &dm644x_psc_init,
+};
diff --git a/drivers/clk/davinci/psc-dm646x.c b/drivers/clk/davinci/psc-dm646x.c
new file mode 100644
index 000000000000..468ef86ea40b
--- /dev/null
+++ b/drivers/clk/davinci/psc-dm646x.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PSC clock descriptions for TI DaVinci DM646x
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "psc.h"
+
+LPSC_CLKDEV1(ide_clkdev, NULL, "palm_bk3710");
+LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
+ "fck", "davinci_mdio.0");
+LPSC_CLKDEV1(aemif_clkdev, "aemif", NULL);
+LPSC_CLKDEV1(mcasp0_clkdev, NULL, "davinci-mcasp.0");
+LPSC_CLKDEV1(mcasp1_clkdev, NULL, "davinci-mcasp.1");
+LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
+LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
+LPSC_CLKDEV1(uart2_clkdev, NULL, "serial8250.2");
+LPSC_CLKDEV1(i2c_clkdev, NULL, "i2c_davinci.1");
+/* REVISIT: gpio-davinci.c should be modified to drop con_id */
+LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
+LPSC_CLKDEV1(timer0_clkdev, "timer0", NULL);
+
+static const struct davinci_lpsc_clk_info dm646x_psc_info[] = {
+ LPSC(0, 0, arm, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ /* REVISIT how to disable? */
+ LPSC(1, 0, dsp, pll1_sysclk1, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(4, 0, edma_cc, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(5, 0, edma_tc0, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(6, 0, edma_tc1, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(7, 0, edma_tc2, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(8, 0, edma_tc3, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(10, 0, ide, pll1_sysclk4, ide_clkdev, 0),
+ LPSC(14, 0, emac, pll1_sysclk3, emac_clkdev, 0),
+ LPSC(16, 0, vpif0, ref_clk, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(17, 0, vpif1, ref_clk, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(21, 0, aemif, pll1_sysclk3, aemif_clkdev, LPSC_ALWAYS_ENABLED),
+ LPSC(22, 0, mcasp0, pll1_sysclk3, mcasp0_clkdev, 0),
+ LPSC(23, 0, mcasp1, pll1_sysclk3, mcasp1_clkdev, 0),
+ LPSC(26, 0, uart0, aux_clkin, uart0_clkdev, 0),
+ LPSC(27, 0, uart1, aux_clkin, uart1_clkdev, 0),
+ LPSC(28, 0, uart2, aux_clkin, uart2_clkdev, 0),
+ /* REVISIT: disabling hangs system */
+ LPSC(29, 0, pwm0, pll1_sysclk3, NULL, LPSC_ALWAYS_ENABLED),
+ /* REVISIT: disabling hangs system */
+ LPSC(30, 0, pwm1, pll1_sysclk3, NULL, LPSC_ALWAYS_ENABLED),
+ LPSC(31, 0, i2c, pll1_sysclk3, i2c_clkdev, 0),
+ LPSC(33, 0, gpio, pll1_sysclk3, gpio_clkdev, 0),
+ LPSC(34, 0, timer0, pll1_sysclk3, timer0_clkdev, LPSC_ALWAYS_ENABLED),
+ LPSC(35, 0, timer1, pll1_sysclk3, NULL, 0),
+ { }
+};
+
+static int dm646x_psc_init(struct device *dev, void __iomem *base)
+{
+ return davinci_psc_register_clocks(dev, dm646x_psc_info, 46, base);
+}
+
+static struct clk_bulk_data dm646x_psc_parent_clks[] = {
+ { .id = "ref_clk" },
+ { .id = "aux_clkin" },
+ { .id = "pll1_sysclk1" },
+ { .id = "pll1_sysclk2" },
+ { .id = "pll1_sysclk3" },
+ { .id = "pll1_sysclk4" },
+ { .id = "pll1_sysclk5" },
+};
+
+const struct davinci_psc_init_data dm646x_psc_init_data = {
+ .parent_clks = dm646x_psc_parent_clks,
+ .num_parent_clks = ARRAY_SIZE(dm646x_psc_parent_clks),
+ .psc_init = &dm646x_psc_init,
+};
diff --git a/drivers/clk/davinci/psc.c b/drivers/clk/davinci/psc.c
new file mode 100644
index 000000000000..ce170e600f09
--- /dev/null
+++ b/drivers/clk/davinci/psc.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Clock driver for TI Davinci PSC controllers
+ *
+ * Copyright (C) 2017 David Lechner <david@lechnology.com>
+ *
+ * Based on: drivers/clk/keystone/gate.c
+ * Copyright (C) 2013 Texas Instruments.
+ * Murali Karicheri <m-karicheri2@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * And: arch/arm/mach-davinci/psc.c
+ * Copyright (C) 2006 Texas Instruments.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "psc.h"
+
+/* PSC register offsets */
+#define EPCPR 0x070
+#define PTCMD 0x120
+#define PTSTAT 0x128
+#define PDSTAT(n) (0x200 + 4 * (n))
+#define PDCTL(n) (0x300 + 4 * (n))
+#define MDSTAT(n) (0x800 + 4 * (n))
+#define MDCTL(n) (0xa00 + 4 * (n))
+
+/* PSC module states */
+enum davinci_lpsc_state {
+ LPSC_STATE_SWRSTDISABLE = 0,
+ LPSC_STATE_SYNCRST = 1,
+ LPSC_STATE_DISABLE = 2,
+ LPSC_STATE_ENABLE = 3,
+};
+
+#define MDSTAT_STATE_MASK GENMASK(5, 0)
+#define MDSTAT_MCKOUT BIT(12)
+#define PDSTAT_STATE_MASK GENMASK(4, 0)
+#define MDCTL_FORCE BIT(31)
+#define MDCTL_LRESET BIT(8)
+#define PDCTL_EPCGOOD BIT(8)
+#define PDCTL_NEXT BIT(0)
+
+struct davinci_psc_data {
+ struct clk_onecell_data clk_data;
+ struct genpd_onecell_data pm_data;
+ struct reset_controller_dev rcdev;
+};
+
+/**
+ * struct davinci_lpsc_clk - LPSC clock structure
+ * @dev: the device that provides this LPSC
+ * @hw: clk_hw for the LPSC
+ * @pm_domain: power domain for the LPSC
+ * @genpd_clk: clock reference owned by @pm_domain
+ * @regmap: PSC MMIO region
+ * @md: Module domain (LPSC module id)
+ * @pd: Power domain
+ * @flags: LPSC_* quirk flags
+ */
+struct davinci_lpsc_clk {
+ struct device *dev;
+ struct clk_hw hw;
+ struct generic_pm_domain pm_domain;
+ struct clk *genpd_clk;
+ struct regmap *regmap;
+ u32 md;
+ u32 pd;
+ u32 flags;
+};
+
+#define to_davinci_psc_data(x) container_of(x, struct davinci_psc_data, x)
+#define to_davinci_lpsc_clk(x) container_of(x, struct davinci_lpsc_clk, x)
+
+/**
+ * best_dev_name - get the "best" device name.
+ * @dev: the device
+ *
+ * Returns the device tree compatible name if the device has a DT node,
+ * otherwise return the device name. This is mainly needed because clkdev
+ * lookups are limited to 20 chars for dev_id and when using device tree,
+ * dev_name(dev) is much longer than that.
+ */
+static inline const char *best_dev_name(struct device *dev)
+{
+ const char *compatible;
+
+ if (!of_property_read_string(dev->of_node, "compatible", &compatible))
+ return compatible;
+
+ return dev_name(dev);
+}
+
+static void davinci_lpsc_config(struct davinci_lpsc_clk *lpsc,
+ enum davinci_lpsc_state next_state)
+{
+ u32 epcpr, pdstat, mdstat, ptstat;
+
+ regmap_write_bits(lpsc->regmap, MDCTL(lpsc->md), MDSTAT_STATE_MASK,
+ next_state);
+
+ if (lpsc->flags & LPSC_FORCE)
+ regmap_write_bits(lpsc->regmap, MDCTL(lpsc->md), MDCTL_FORCE,
+ MDCTL_FORCE);
+
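+ /*
+ * A powered-down domain must be brought up via the external power
+ * control (EPC) handshake before the module state transition completes.
+ */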
+ regmap_read(lpsc->regmap, PDSTAT(lpsc->pd), &pdstat);
+ if ((pdstat & PDSTAT_STATE_MASK) == 0) {
+ regmap_write_bits(lpsc->regmap, PDCTL(lpsc->pd), PDCTL_NEXT,
+ PDCTL_NEXT);
+
+ regmap_write(lpsc->regmap, PTCMD, BIT(lpsc->pd));
+
+ regmap_read_poll_timeout(lpsc->regmap, EPCPR, epcpr,
+ epcpr & BIT(lpsc->pd), 0, 0);
+
+ regmap_write_bits(lpsc->regmap, PDCTL(lpsc->pd), PDCTL_EPCGOOD,
+ PDCTL_EPCGOOD);
+ } else {
+ regmap_write(lpsc->regmap, PTCMD, BIT(lpsc->pd));
+ }
+
+ regmap_read_poll_timeout(lpsc->regmap, PTSTAT, ptstat,
+ !(ptstat & BIT(lpsc->pd)), 0, 0);
+
+ regmap_read_poll_timeout(lpsc->regmap, MDSTAT(lpsc->md), mdstat,
+ (mdstat & MDSTAT_STATE_MASK) == next_state,
+ 0, 0);
+}
+
+static int davinci_lpsc_clk_enable(struct clk_hw *hw)
+{
+ struct davinci_lpsc_clk *lpsc = to_davinci_lpsc_clk(hw);
+
+ davinci_lpsc_config(lpsc, LPSC_STATE_ENABLE);
+
+ return 0;
+}
+
+static void davinci_lpsc_clk_disable(struct clk_hw *hw)
+{
+ struct davinci_lpsc_clk *lpsc = to_davinci_lpsc_clk(hw);
+
+ davinci_lpsc_config(lpsc, LPSC_STATE_DISABLE);
+}
+
+static int davinci_lpsc_clk_is_enabled(struct clk_hw *hw)
+{
+ struct davinci_lpsc_clk *lpsc = to_davinci_lpsc_clk(hw);
+ u32 mdstat;
+
+ regmap_read(lpsc->regmap, MDSTAT(lpsc->md), &mdstat);
+
+ return (mdstat & MDSTAT_MCKOUT) ? 1 : 0;
+}
+
+static const struct clk_ops davinci_lpsc_clk_ops = {
+ .enable = davinci_lpsc_clk_enable,
+ .disable = davinci_lpsc_clk_disable,
+ .is_enabled = davinci_lpsc_clk_is_enabled,
+};
+
+static int davinci_psc_genpd_attach_dev(struct generic_pm_domain *pm_domain,
+ struct device *dev)
+{
+ struct davinci_lpsc_clk *lpsc = to_davinci_lpsc_clk(pm_domain);
+ struct clk *clk;
+ int ret;
+
+ /*
+ * pm_clk_remove_clk() will call clk_put(), so we have to use clk_get()
+ * to get the clock instead of using lpsc->hw.clk directly.
+ */
+ clk = clk_get_sys(best_dev_name(lpsc->dev), clk_hw_get_name(&lpsc->hw));
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = pm_clk_create(dev);
+ if (ret < 0)
+ goto fail_clk_put;
+
+ ret = pm_clk_add_clk(dev, clk);
+ if (ret < 0)
+ goto fail_pm_clk_destroy;
+
+ lpsc->genpd_clk = clk;
+
+ return 0;
+
+fail_pm_clk_destroy:
+ pm_clk_destroy(dev);
+fail_clk_put:
+ clk_put(clk);
+
+ return ret;
+}
+
+static void davinci_psc_genpd_detach_dev(struct generic_pm_domain *pm_domain,
+ struct device *dev)
+{
+ struct davinci_lpsc_clk *lpsc = to_davinci_lpsc_clk(pm_domain);
+
+ pm_clk_remove_clk(dev, lpsc->genpd_clk);
+ pm_clk_destroy(dev);
+
+ lpsc->genpd_clk = NULL;
+}
+
+/**
+ * davinci_lpsc_clk_register - register LPSC clock
+ * @dev: the device that provides this LPSC
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @regmap: PSC MMIO region
+ * @md: local PSC number
+ * @pd: power domain
+ * @flags: LPSC_* flags
+ */
+static struct davinci_lpsc_clk *
+davinci_lpsc_clk_register(struct device *dev, const char *name,
+ const char *parent_name, struct regmap *regmap,
+ u32 md, u32 pd, u32 flags)
+{
+ struct clk_init_data init;
+ struct davinci_lpsc_clk *lpsc;
+ int ret;
+ bool is_on;
+
+ lpsc = devm_kzalloc(dev, sizeof(*lpsc), GFP_KERNEL);
+ if (!lpsc)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &davinci_lpsc_clk_ops;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+ init.flags = 0;
+
+ if (flags & LPSC_ALWAYS_ENABLED)
+ init.flags |= CLK_IS_CRITICAL;
+
+ if (flags & LPSC_SET_RATE_PARENT)
+ init.flags |= CLK_SET_RATE_PARENT;
+
+ lpsc->dev = dev;
+ lpsc->regmap = regmap;
+ lpsc->hw.init = &init;
+ lpsc->md = md;
+ lpsc->pd = pd;
+ lpsc->flags = flags;
+
+ ret = devm_clk_hw_register(dev, &lpsc->hw);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* genpd attach needs a way to look up this clock */
+ ret = clk_hw_register_clkdev(&lpsc->hw, name, best_dev_name(dev));
+
+ lpsc->pm_domain.name = devm_kasprintf(dev, GFP_KERNEL, "%s: %s",
+ best_dev_name(dev), name);
+ lpsc->pm_domain.attach_dev = davinci_psc_genpd_attach_dev;
+ lpsc->pm_domain.detach_dev = davinci_psc_genpd_detach_dev;
+ lpsc->pm_domain.flags = GENPD_FLAG_PM_CLK;
+
+ is_on = davinci_lpsc_clk_is_enabled(&lpsc->hw);
+ pm_genpd_init(&lpsc->pm_domain, NULL, is_on);
+
+ return lpsc;
+}
+
+static int davinci_lpsc_clk_reset(struct clk *clk, bool reset)
+{
+ struct clk_hw *hw = __clk_get_hw(clk);
+ struct davinci_lpsc_clk *lpsc = to_davinci_lpsc_clk(hw);
+ u32 mdctl;
+
+ if (IS_ERR_OR_NULL(lpsc))
+ return -EINVAL;
+
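+ /* MDCTL[LRESET] is active low: clearing the bit asserts the local reset */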
+ mdctl = reset ? 0 : MDCTL_LRESET;
+ regmap_write_bits(lpsc->regmap, MDCTL(lpsc->md), MDCTL_LRESET, mdctl);
+
+ return 0;
+}
+
+/*
+ * REVISIT: These exported functions can be removed after a non-DT lookup is
+ * added to the reset controller framework and the davinci-rproc driver is
+ * updated to use the generic reset controller framework.
+ */
+
+int davinci_clk_reset_assert(struct clk *clk)
+{
+ return davinci_lpsc_clk_reset(clk, true);
+}
+EXPORT_SYMBOL(davinci_clk_reset_assert);
+
+int davinci_clk_reset_deassert(struct clk *clk)
+{
+ return davinci_lpsc_clk_reset(clk, false);
+}
+EXPORT_SYMBOL(davinci_clk_reset_deassert);
+
+static int davinci_psc_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct davinci_psc_data *psc = to_davinci_psc_data(rcdev);
+ struct clk *clk = psc->clk_data.clks[id];
+
+ return davinci_lpsc_clk_reset(clk, true);
+}
+
+static int davinci_psc_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct davinci_psc_data *psc = to_davinci_psc_data(rcdev);
+ struct clk *clk = psc->clk_data.clks[id];
+
+ return davinci_lpsc_clk_reset(clk, false);
+}
+
+static const struct reset_control_ops davinci_psc_reset_ops = {
+ .assert = davinci_psc_reset_assert,
+ .deassert = davinci_psc_reset_deassert,
+};
+
+static int davinci_psc_reset_of_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ struct of_phandle_args clkspec = *reset_spec; /* discard const qualifier */
+ struct clk *clk;
+ struct clk_hw *hw;
+ struct davinci_lpsc_clk *lpsc;
+
+ /* the clock node is the same as the reset node */
+ clk = of_clk_get_from_provider(&clkspec);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ hw = __clk_get_hw(clk);
+ lpsc = to_davinci_lpsc_clk(hw);
+ clk_put(clk);
+
+ /* not all modules support local reset */
+ if (!(lpsc->flags & LPSC_LOCAL_RESET))
+ return -EINVAL;
+
+ return lpsc->md;
+}
+
+static const struct regmap_config davinci_psc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+};
+
+static struct davinci_psc_data *
+__davinci_psc_register_clocks(struct device *dev,
+ const struct davinci_lpsc_clk_info *info,
+ int num_clks,
+ void __iomem *base)
+{
+ struct davinci_psc_data *psc;
+ struct clk **clks;
+ struct generic_pm_domain **pm_domains;
+ struct regmap *regmap;
+ int i, ret;
+
+ psc = devm_kzalloc(dev, sizeof(*psc), GFP_KERNEL);
+ if (!psc)
+ return ERR_PTR(-ENOMEM);
+
+ clks = devm_kmalloc_array(dev, num_clks, sizeof(*clks), GFP_KERNEL);
+ if (!clks)
+ return ERR_PTR(-ENOMEM);
+
+ psc->clk_data.clks = clks;
+ psc->clk_data.clk_num = num_clks;
+
+ /*
+ * init array with error so that of_clk_src_onecell_get() doesn't
+ * return NULL for gaps in the sparse array
+ */
+ for (i = 0; i < num_clks; i++)
+ clks[i] = ERR_PTR(-ENOENT);
+
+ pm_domains = devm_kcalloc(dev, num_clks, sizeof(*pm_domains), GFP_KERNEL);
+ if (!pm_domains)
+ return ERR_PTR(-ENOMEM);
+
+ psc->pm_data.domains = pm_domains;
+ psc->pm_data.num_domains = num_clks;
+
+ regmap = devm_regmap_init_mmio(dev, base, &davinci_psc_regmap_config);
+ if (IS_ERR(regmap))
+ return ERR_CAST(regmap);
+
+ for (; info->name; info++) {
+ struct davinci_lpsc_clk *lpsc;
+
+ lpsc = davinci_lpsc_clk_register(dev, info->name, info->parent,
+ regmap, info->md, info->pd,
+ info->flags);
+ if (IS_ERR(lpsc)) {
+ dev_warn(dev, "Failed to register %s (%ld)\n",
+ info->name, PTR_ERR(lpsc));
+ continue;
+ }
+
+ clks[info->md] = lpsc->hw.clk;
+ pm_domains[info->md] = &lpsc->pm_domain;
+ }
+
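+ /*
+ * Some LPSC modules provide a local reset, so also expose the PSC
+ * as a reset controller.
+ */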
+ psc->rcdev.ops = &davinci_psc_reset_ops;
+ psc->rcdev.owner = THIS_MODULE;
+ psc->rcdev.dev = dev;
+ psc->rcdev.of_node = dev->of_node;
+ psc->rcdev.of_reset_n_cells = 1;
+ psc->rcdev.of_xlate = davinci_psc_reset_of_xlate;
+ psc->rcdev.nr_resets = num_clks;
+
+ ret = devm_reset_controller_register(dev, &psc->rcdev);
+ if (ret < 0)
+ dev_warn(dev, "Failed to register reset controller (%d)\n", ret);
+
+ return psc;
+}
+
+int davinci_psc_register_clocks(struct device *dev,
+ const struct davinci_lpsc_clk_info *info,
+ u8 num_clks,
+ void __iomem *base)
+{
+ struct davinci_psc_data *psc;
+
+ psc = __davinci_psc_register_clocks(dev, info, num_clks, base);
+ if (IS_ERR(psc))
+ return PTR_ERR(psc);
+
+ for (; info->name; info++) {
+ const struct davinci_lpsc_clkdev_info *cdevs = info->cdevs;
+ struct clk *clk = psc->clk_data.clks[info->md];
+
+ if (!cdevs || IS_ERR_OR_NULL(clk))
+ continue;
+
+ for (; cdevs->con_id || cdevs->dev_id; cdevs++)
+ clk_register_clkdev(clk, cdevs->con_id, cdevs->dev_id);
+ }
+
+ return 0;
+}
+
+int of_davinci_psc_clk_init(struct device *dev,
+ const struct davinci_lpsc_clk_info *info,
+ u8 num_clks,
+ void __iomem *base)
+{
+ struct device_node *node = dev->of_node;
+ struct davinci_psc_data *psc;
+
+ psc = __davinci_psc_register_clocks(dev, info, num_clks, base);
+ if (IS_ERR(psc))
+ return PTR_ERR(psc);
+
+ of_genpd_add_provider_onecell(node, &psc->pm_data);
+
+ of_clk_add_provider(node, of_clk_src_onecell_get, &psc->clk_data);
+
+ return 0;
+}
+
+static const struct of_device_id davinci_psc_of_match[] = {
+ { .compatible = "ti,da850-psc0", .data = &of_da850_psc0_init_data },
+ { .compatible = "ti,da850-psc1", .data = &of_da850_psc1_init_data },
+ { }
+};
+
+static const struct platform_device_id davinci_psc_id_table[] = {
+ { .name = "da830-psc0", .driver_data = (kernel_ulong_t)&da830_psc0_init_data },
+ { .name = "da830-psc1", .driver_data = (kernel_ulong_t)&da830_psc1_init_data },
+ { .name = "da850-psc0", .driver_data = (kernel_ulong_t)&da850_psc0_init_data },
+ { .name = "da850-psc1", .driver_data = (kernel_ulong_t)&da850_psc1_init_data },
+ { .name = "dm355-psc", .driver_data = (kernel_ulong_t)&dm355_psc_init_data },
+ { .name = "dm365-psc", .driver_data = (kernel_ulong_t)&dm365_psc_init_data },
+ { .name = "dm644x-psc", .driver_data = (kernel_ulong_t)&dm644x_psc_init_data },
+ { .name = "dm646x-psc", .driver_data = (kernel_ulong_t)&dm646x_psc_init_data },
+ { }
+};
+
+static int davinci_psc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+ const struct davinci_psc_init_data *init_data = NULL;
+ struct resource *res;
+ void __iomem *base;
+ int ret;
+
+ of_id = of_match_device(davinci_psc_of_match, dev);
+ if (of_id)
+ init_data = of_id->data;
+ else if (pdev->id_entry)
+ init_data = (void *)pdev->id_entry->driver_data;
+
+ if (!init_data) {
+ dev_err(dev, "unable to find driver init data\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ret = devm_clk_bulk_get(dev, init_data->num_parent_clks,
+ init_data->parent_clks);
+ if (ret < 0)
+ return ret;
+
+ return init_data->psc_init(dev, base);
+}
+
+static struct platform_driver davinci_psc_driver = {
+ .probe = davinci_psc_probe,
+ .driver = {
+ .name = "davinci-psc-clk",
+ .of_match_table = davinci_psc_of_match,
+ },
+ .id_table = davinci_psc_id_table,
+};
+
+static int __init davinci_psc_driver_init(void)
+{
+ return platform_driver_register(&davinci_psc_driver);
+}
+
+/* has to be postcore_initcall because davinci_gpio depends on PSC clocks */
+postcore_initcall(davinci_psc_driver_init);
diff --git a/drivers/clk/davinci/psc.h b/drivers/clk/davinci/psc.h
new file mode 100644
index 000000000000..c2a7df6413fe
--- /dev/null
+++ b/drivers/clk/davinci/psc.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Clock driver for TI Davinci PSC controllers
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#ifndef __CLK_DAVINCI_PSC_H__
+#define __CLK_DAVINCI_PSC_H__
+
+#include <linux/clk-provider.h>
+#include <linux/types.h>
+
+/* PSC quirk flags */
+#define LPSC_ALWAYS_ENABLED BIT(0) /* never disable this clock */
+#define LPSC_SET_RATE_PARENT BIT(1) /* propagate set_rate to parent clock */
+#define LPSC_FORCE BIT(2) /* requires MDCTL FORCE bit */
+#define LPSC_LOCAL_RESET BIT(3) /* acts as reset provider */
+
+struct davinci_lpsc_clkdev_info {
+ const char *con_id;
+ const char *dev_id;
+};
+
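+/* Helpers for declaring clkdev lookup tables for LPSC module clocks */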
+#define LPSC_CLKDEV(c, d) { \
+ .con_id = (c), \
+ .dev_id = (d) \
+}
+
+#define LPSC_CLKDEV1(n, c, d) \
+static const struct davinci_lpsc_clkdev_info n[] __initconst = { \
+ LPSC_CLKDEV((c), (d)), \
+ { } \
+}
+
+#define LPSC_CLKDEV2(n, c1, d1, c2, d2) \
+static const struct davinci_lpsc_clkdev_info n[] __initconst = { \
+ LPSC_CLKDEV((c1), (d1)), \
+ LPSC_CLKDEV((c2), (d2)), \
+ { } \
+}
+
+#define LPSC_CLKDEV3(n, c1, d1, c2, d2, c3, d3) \
+static const struct davinci_lpsc_clkdev_info n[] __initconst = { \
+ LPSC_CLKDEV((c1), (d1)), \
+ LPSC_CLKDEV((c2), (d2)), \
+ LPSC_CLKDEV((c3), (d3)), \
+ { } \
+}
+
+/**
+ * struct davinci_lpsc_clk_info - LPSC module-specific clock information
+ * @name: the clock name
+ * @parent: the parent clock name
+ * @cdevs: optional array of clkdev lookup table info
+ * @md: the local module domain (LPSC id)
+ * @pd: the power domain id
+ * @flags: bitmask of LPSC_* flags
+ */
+struct davinci_lpsc_clk_info {
+ const char *name;
+ const char *parent;
+ const struct davinci_lpsc_clkdev_info *cdevs;
+ u32 md;
+ u32 pd;
+ unsigned long flags;
+};
+
+#define LPSC(m, d, n, p, c, f) \
+{ \
+ .name = #n, \
+ .parent = #p, \
+ .cdevs = (c), \
+ .md = (m), \
+ .pd = (d), \
+ .flags = (f), \
+}
+
+int davinci_psc_register_clocks(struct device *dev,
+ const struct davinci_lpsc_clk_info *info,
+ u8 num_clks,
+ void __iomem *base);
+
+int of_davinci_psc_clk_init(struct device *dev,
+ const struct davinci_lpsc_clk_info *info,
+ u8 num_clks,
+ void __iomem *base);
+
+/* Device-specific data */
+
+struct davinci_psc_init_data {
+ struct clk_bulk_data *parent_clks;
+ int num_parent_clks;
+ int (*psc_init)(struct device *dev, void __iomem *base);
+};
+
+extern const struct davinci_psc_init_data da830_psc0_init_data;
+extern const struct davinci_psc_init_data da830_psc1_init_data;
+extern const struct davinci_psc_init_data da850_psc0_init_data;
+extern const struct davinci_psc_init_data da850_psc1_init_data;
+extern const struct davinci_psc_init_data of_da850_psc0_init_data;
+extern const struct davinci_psc_init_data of_da850_psc1_init_data;
+extern const struct davinci_psc_init_data dm355_psc_init_data;
+extern const struct davinci_psc_init_data dm365_psc_init_data;
+extern const struct davinci_psc_init_data dm644x_psc_init_data;
+extern const struct davinci_psc_init_data dm646x_psc_init_data;
+
+#endif /* __CLK_DAVINCI_PSC_H__ */
diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile
index 4806fc2cb4ac..2a714c0f9657 100644
--- a/drivers/clk/hisilicon/Makefile
+++ b/drivers/clk/hisilicon/Makefile
@@ -3,7 +3,7 @@
# Hisilicon Clock specific Makefile
#
-obj-y += clk.o clkgate-separated.o clkdivider-hi6220.o
+obj-y += clk.o clkgate-separated.o clkdivider-hi6220.o clk-hisi-phase.o
obj-$(CONFIG_ARCH_HI3xxx) += clk-hi3620.o
obj-$(CONFIG_ARCH_HIP04) += clk-hip04.o
diff --git a/drivers/clk/hisilicon/clk-hisi-phase.c b/drivers/clk/hisilicon/clk-hisi-phase.c
new file mode 100644
index 000000000000..5bce9297b78b
--- /dev/null
+++ b/drivers/clk/hisilicon/clk-hisi-phase.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017 HiSilicon Technologies Co., Ltd.
+ *
+ * Simple HiSilicon phase clock implementation.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "clk.h"
+
+struct clk_hisi_phase {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u32 *phase_degrees;
+ u32 *phase_regvals;
+ u8 phase_num;
+ u32 mask;
+ u8 shift;
+ u8 flags;
+ spinlock_t *lock;
+};
+
+#define to_clk_hisi_phase(_hw) container_of(_hw, struct clk_hisi_phase, hw)
+
+static int hisi_phase_regval_to_degrees(struct clk_hisi_phase *phase,
+ u32 regval)
+{
+ int i;
+
+ for (i = 0; i < phase->phase_num; i++)
+ if (phase->phase_regvals[i] == regval)
+ return phase->phase_degrees[i];
+
+ return -EINVAL;
+}
+
+static int hisi_clk_get_phase(struct clk_hw *hw)
+{
+ struct clk_hisi_phase *phase = to_clk_hisi_phase(hw);
+ u32 regval;
+
+ regval = readl(phase->reg);
+ regval = (regval & phase->mask) >> phase->shift;
+
+ return hisi_phase_regval_to_degrees(phase, regval);
+}
+
+static int hisi_phase_degrees_to_regval(struct clk_hisi_phase *phase,
+ int degrees)
+{
+ int i;
+
+ for (i = 0; i < phase->phase_num; i++)
+ if (phase->phase_degrees[i] == degrees)
+ return phase->phase_regvals[i];
+
+ return -EINVAL;
+}
+
+static int hisi_clk_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct clk_hisi_phase *phase = to_clk_hisi_phase(hw);
+ unsigned long flags = 0;
+ int regval;
+ u32 val;
+
+ regval = hisi_phase_degrees_to_regval(phase, degrees);
+ if (regval < 0)
+ return regval;
+
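+ /* serialize the read-modify-write of the shared phase register */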
+ spin_lock_irqsave(phase->lock, flags);
+
+ val = clk_readl(phase->reg);
+ val &= ~phase->mask;
+ val |= regval << phase->shift;
+ clk_writel(val, phase->reg);
+
+ spin_unlock_irqrestore(phase->lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops clk_phase_ops = {
+ .get_phase = hisi_clk_get_phase,
+ .set_phase = hisi_clk_set_phase,
+};
+
+struct clk *clk_register_hisi_phase(struct device *dev,
+ const struct hisi_phase_clock *clks,
+ void __iomem *base, spinlock_t *lock)
+{
+ struct clk_hisi_phase *phase;
+ struct clk_init_data init;
+
+ phase = devm_kzalloc(dev, sizeof(struct clk_hisi_phase), GFP_KERNEL);
+ if (!phase)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = clks->name;
+ init.ops = &clk_phase_ops;
+ init.flags = clks->flags | CLK_IS_BASIC;
+ init.parent_names = clks->parent_names ? &clks->parent_names : NULL;
+ init.num_parents = clks->parent_names ? 1 : 0;
+
+ phase->reg = base + clks->offset;
+ phase->shift = clks->shift;
+ phase->mask = (BIT(clks->width) - 1) << clks->shift;
+ phase->lock = lock;
+ phase->phase_degrees = clks->phase_degrees;
+ phase->phase_regvals = clks->phase_regvals;
+ phase->phase_num = clks->phase_num;
+ phase->hw.init = &init;
+
+ return devm_clk_register(dev, &phase->hw);
+}
+EXPORT_SYMBOL_GPL(clk_register_hisi_phase);
diff --git a/drivers/clk/hisilicon/clk.c b/drivers/clk/hisilicon/clk.c
index b73c1dfae7f1..953c8dacef8b 100644
--- a/drivers/clk/hisilicon/clk.c
+++ b/drivers/clk/hisilicon/clk.c
@@ -49,6 +49,8 @@ struct hisi_clock_data *hisi_clk_alloc(struct platform_device *pdev,
return NULL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return NULL;
clk_data->base = devm_ioremap(&pdev->dev,
res->start, resource_size(res));
if (!clk_data->base)
@@ -197,6 +199,30 @@ err:
}
EXPORT_SYMBOL_GPL(hisi_clk_register_mux);
+int hisi_clk_register_phase(struct device *dev,
+ const struct hisi_phase_clock *clks,
+ int nums, struct hisi_clock_data *data)
+{
+ void __iomem *base = data->base;
+ struct clk *clk;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = clk_register_hisi_phase(dev, &clks[i], base,
+ &hisi_clk_lock);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n", __func__,
+ clks[i].name);
+ return PTR_ERR(clk);
+ }
+
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_clk_register_phase);
+
int hisi_clk_register_divider(const struct hisi_divider_clock *clks,
int nums, struct hisi_clock_data *data)
{
diff --git a/drivers/clk/hisilicon/clk.h b/drivers/clk/hisilicon/clk.h
index 4e1d1affc6f5..8d7ee5c3231b 100644
--- a/drivers/clk/hisilicon/clk.h
+++ b/drivers/clk/hisilicon/clk.h
@@ -68,6 +68,19 @@ struct hisi_mux_clock {
const char *alias;
};
+struct hisi_phase_clock {
+ unsigned int id;
+ const char *name;
+ const char *parent_names;
+ unsigned long flags;
+ unsigned long offset;
+ u8 shift;
+ u8 width;
+ u32 *phase_degrees;
+ u32 *phase_regvals;
+ u8 phase_num;
+};
+
struct hisi_divider_clock {
unsigned int id;
const char *name;
@@ -120,6 +133,12 @@ int hisi_clk_register_fixed_factor(const struct hisi_fixed_factor_clock *,
int, struct hisi_clock_data *);
int hisi_clk_register_mux(const struct hisi_mux_clock *, int,
struct hisi_clock_data *);
+struct clk *clk_register_hisi_phase(struct device *dev,
+ const struct hisi_phase_clock *clks,
+ void __iomem *base, spinlock_t *lock);
+int hisi_clk_register_phase(struct device *dev,
+ const struct hisi_phase_clock *clks,
+ int nums, struct hisi_clock_data *data);
int hisi_clk_register_divider(const struct hisi_divider_clock *,
int, struct hisi_clock_data *);
int hisi_clk_register_gate(const struct hisi_gate_clock *,
diff --git a/drivers/clk/hisilicon/crg-hi3516cv300.c b/drivers/clk/hisilicon/crg-hi3516cv300.c
index 2007123832bb..53450b651e4c 100644
--- a/drivers/clk/hisilicon/crg-hi3516cv300.c
+++ b/drivers/clk/hisilicon/crg-hi3516cv300.c
@@ -204,7 +204,7 @@ static const struct hisi_crg_funcs hi3516cv300_crg_funcs = {
/* hi3516CV300 sysctrl CRG */
#define HI3516CV300_SYSCTRL_NR_CLKS 16
-static const char *wdt_mux_p[] __initconst = { "3m", "apb" };
+static const char *const wdt_mux_p[] __initconst = { "3m", "apb" };
static u32 wdt_mux_table[] = {0, 1};
static const struct hisi_mux_clock hi3516cv300_sysctrl_mux_clks[] = {
diff --git a/drivers/clk/hisilicon/crg-hi3798cv200.c b/drivers/clk/hisilicon/crg-hi3798cv200.c
index 8478948e858e..743eec131528 100644
--- a/drivers/clk/hisilicon/crg-hi3798cv200.c
+++ b/drivers/clk/hisilicon/crg-hi3798cv200.c
@@ -27,30 +27,31 @@
#include "reset.h"
/* hi3798CV200 core CRG */
-#define HI3798CV200_INNER_CLK_OFFSET 64
-#define HI3798CV200_FIXED_24M 65
-#define HI3798CV200_FIXED_25M 66
-#define HI3798CV200_FIXED_50M 67
-#define HI3798CV200_FIXED_75M 68
-#define HI3798CV200_FIXED_100M 69
-#define HI3798CV200_FIXED_150M 70
-#define HI3798CV200_FIXED_200M 71
-#define HI3798CV200_FIXED_250M 72
-#define HI3798CV200_FIXED_300M 73
-#define HI3798CV200_FIXED_400M 74
-#define HI3798CV200_MMC_MUX 75
-#define HI3798CV200_ETH_PUB_CLK 76
-#define HI3798CV200_ETH_BUS_CLK 77
-#define HI3798CV200_ETH_BUS0_CLK 78
-#define HI3798CV200_ETH_BUS1_CLK 79
-#define HI3798CV200_COMBPHY1_MUX 80
-#define HI3798CV200_FIXED_12M 81
-#define HI3798CV200_FIXED_48M 82
-#define HI3798CV200_FIXED_60M 83
-#define HI3798CV200_FIXED_166P5M 84
-#define HI3798CV200_SDIO0_MUX 85
-
-#define HI3798CV200_CRG_NR_CLKS 128
+#define HI3798CV200_INNER_CLK_OFFSET 64
+#define HI3798CV200_FIXED_24M 65
+#define HI3798CV200_FIXED_25M 66
+#define HI3798CV200_FIXED_50M 67
+#define HI3798CV200_FIXED_75M 68
+#define HI3798CV200_FIXED_100M 69
+#define HI3798CV200_FIXED_150M 70
+#define HI3798CV200_FIXED_200M 71
+#define HI3798CV200_FIXED_250M 72
+#define HI3798CV200_FIXED_300M 73
+#define HI3798CV200_FIXED_400M 74
+#define HI3798CV200_MMC_MUX 75
+#define HI3798CV200_ETH_PUB_CLK 76
+#define HI3798CV200_ETH_BUS_CLK 77
+#define HI3798CV200_ETH_BUS0_CLK 78
+#define HI3798CV200_ETH_BUS1_CLK 79
+#define HI3798CV200_COMBPHY1_MUX 80
+#define HI3798CV200_FIXED_12M 81
+#define HI3798CV200_FIXED_48M 82
+#define HI3798CV200_FIXED_60M 83
+#define HI3798CV200_FIXED_166P5M 84
+#define HI3798CV200_SDIO0_MUX 85
+#define HI3798CV200_COMBPHY0_MUX 86
+
+#define HI3798CV200_CRG_NR_CLKS 128
static const struct hisi_fixed_rate_clock hi3798cv200_fixed_rate_clks[] = {
{ HISTB_OSC_CLK, "clk_osc", NULL, 0, 24000000, },
@@ -74,9 +75,9 @@ static const char *const mmc_mux_p[] = {
"100m", "50m", "25m", "200m", "150m" };
static u32 mmc_mux_table[] = {0, 1, 2, 3, 6};
-static const char *const comphy1_mux_p[] = {
+static const char *const comphy_mux_p[] = {
"100m", "25m"};
-static u32 comphy1_mux_table[] = {2, 3};
+static u32 comphy_mux_table[] = {2, 3};
static const char *const sdio_mux_p[] = {
"100m", "50m", "150m", "166p5m" };
@@ -85,14 +86,29 @@ static u32 sdio_mux_table[] = {0, 1, 2, 3};
static struct hisi_mux_clock hi3798cv200_mux_clks[] = {
{ HI3798CV200_MMC_MUX, "mmc_mux", mmc_mux_p, ARRAY_SIZE(mmc_mux_p),
CLK_SET_RATE_PARENT, 0xa0, 8, 3, 0, mmc_mux_table, },
+ { HI3798CV200_COMBPHY0_MUX, "combphy0_mux",
+ comphy_mux_p, ARRAY_SIZE(comphy_mux_p),
+ CLK_SET_RATE_PARENT, 0x188, 2, 2, 0, comphy_mux_table, },
{ HI3798CV200_COMBPHY1_MUX, "combphy1_mux",
- comphy1_mux_p, ARRAY_SIZE(comphy1_mux_p),
- CLK_SET_RATE_PARENT, 0x188, 10, 2, 0, comphy1_mux_table, },
+ comphy_mux_p, ARRAY_SIZE(comphy_mux_p),
+ CLK_SET_RATE_PARENT, 0x188, 10, 2, 0, comphy_mux_table, },
{ HI3798CV200_SDIO0_MUX, "sdio0_mux", sdio_mux_p,
ARRAY_SIZE(sdio_mux_p), CLK_SET_RATE_PARENT,
0x9c, 8, 2, 0, sdio_mux_table, },
};
+static u32 mmc_phase_regvals[] = {0, 1, 2, 3, 4, 5, 6, 7};
+static u32 mmc_phase_degrees[] = {0, 45, 90, 135, 180, 225, 270, 315};
+
+static struct hisi_phase_clock hi3798cv200_phase_clks[] = {
+ { HISTB_MMC_SAMPLE_CLK, "mmc_sample", "clk_mmc_ciu",
+ CLK_SET_RATE_PARENT, 0xa0, 12, 3, mmc_phase_degrees,
+ mmc_phase_regvals, ARRAY_SIZE(mmc_phase_regvals) },
+ { HISTB_MMC_DRV_CLK, "mmc_drive", "clk_mmc_ciu",
+ CLK_SET_RATE_PARENT, 0xa0, 16, 3, mmc_phase_degrees,
+ mmc_phase_regvals, ARRAY_SIZE(mmc_phase_regvals) },
+};
+
static const struct hisi_gate_clock hi3798cv200_gate_clks[] = {
/* UART */
{ HISTB_UART2_CLK, "clk_uart2", "75m",
@@ -147,6 +163,9 @@ static const struct hisi_gate_clock hi3798cv200_gate_clks[] = {
CLK_SET_RATE_PARENT, 0xcc, 4, 0, },
{ HISTB_ETH1_MACIF_CLK, "clk_macif1", "clk_bus_m1",
CLK_SET_RATE_PARENT, 0xcc, 25, 0, },
+ /* COMBPHY0 */
+ { HISTB_COMBPHY0_CLK, "clk_combphy0", "combphy0_mux",
+ CLK_SET_RATE_PARENT, 0x188, 0, 0, },
/* COMBPHY1 */
{ HISTB_COMBPHY1_CLK, "clk_combphy1", "combphy1_mux",
CLK_SET_RATE_PARENT, 0x188, 8, 0, },
@@ -161,6 +180,8 @@ static const struct hisi_gate_clock hi3798cv200_gate_clks[] = {
CLK_SET_RATE_PARENT, 0xb8, 1, 0 },
{ HISTB_USB2_UTMI_CLK, "clk_u2_utmi", "60m",
CLK_SET_RATE_PARENT, 0xb8, 5, 0 },
+ { HISTB_USB2_OTG_UTMI_CLK, "clk_u2_otg_utmi", "60m",
+ CLK_SET_RATE_PARENT, 0xb8, 3, 0 },
{ HISTB_USB2_PHY1_REF_CLK, "clk_u2_phy1_ref", "24m",
CLK_SET_RATE_PARENT, 0xbc, 0, 0 },
{ HISTB_USB2_PHY2_REF_CLK, "clk_u2_phy2_ref", "24m",
@@ -177,6 +198,14 @@ static struct hisi_clock_data *hi3798cv200_clk_register(
if (!clk_data)
return ERR_PTR(-ENOMEM);
+	/* The phase clocks are devres managed, so no unregister is needed on the error path */
+ ret = hisi_clk_register_phase(&pdev->dev,
+ hi3798cv200_phase_clks,
+ ARRAY_SIZE(hi3798cv200_phase_clks),
+ clk_data);
+ if (ret)
+ return ERR_PTR(ret);
+
ret = hisi_clk_register_fixed_rate(hi3798cv200_fixed_rate_clks,
ARRAY_SIZE(hi3798cv200_fixed_rate_clks),
clk_data);
@@ -202,18 +231,17 @@ static struct hisi_clock_data *hi3798cv200_clk_register(
return clk_data;
-unregister_fixed_rate:
- hisi_clk_unregister_fixed_rate(hi3798cv200_fixed_rate_clks,
- ARRAY_SIZE(hi3798cv200_fixed_rate_clks),
+unregister_gate:
+ hisi_clk_unregister_gate(hi3798cv200_gate_clks,
+ ARRAY_SIZE(hi3798cv200_gate_clks),
clk_data);
-
unregister_mux:
hisi_clk_unregister_mux(hi3798cv200_mux_clks,
ARRAY_SIZE(hi3798cv200_mux_clks),
clk_data);
-unregister_gate:
- hisi_clk_unregister_gate(hi3798cv200_gate_clks,
- ARRAY_SIZE(hi3798cv200_gate_clks),
+unregister_fixed_rate:
+ hisi_clk_unregister_fixed_rate(hi3798cv200_fixed_rate_clks,
+ ARRAY_SIZE(hi3798cv200_fixed_rate_clks),
clk_data);
return ERR_PTR(ret);
}
@@ -245,7 +273,7 @@ static const struct hisi_crg_funcs hi3798cv200_crg_funcs = {
#define HI3798CV200_SYSCTRL_NR_CLKS 16
static const struct hisi_gate_clock hi3798cv200_sysctrl_gate_clks[] = {
- { HISTB_IR_CLK, "clk_ir", "100m",
+ { HISTB_IR_CLK, "clk_ir", "24m",
CLK_SET_RATE_PARENT, 0x48, 4, 0, },
{ HISTB_TIMER01_CLK, "clk_timer01", "24m",
CLK_SET_RATE_PARENT, 0x48, 6, 0, },
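The mmc_sample and mmc_drive phase clocks registered above are meant to be tuned by the MMC host driver through the generic clock framework. A hedged consumer-side sketch, with the clock connection name assumed rather than taken from this patch:

	struct clk *sample_clk;
	int ret;

	sample_clk = devm_clk_get(dev, "ciu-sample");	/* connection id is illustrative */
	if (IS_ERR(sample_clk))
		return PTR_ERR(sample_clk);

	/* select one of the eight supported sample phases, e.g. 90 degrees */
	ret = clk_set_phase(sample_clk, 90);
	if (ret)
		dev_warn(dev, "failed to set sample phase: %d\n", ret);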
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index f91f2b2e11cd..8c3baa7e6496 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_SOC_IMX35) += clk-imx35.o
obj-$(CONFIG_SOC_IMX5) += clk-imx51-imx53.o
obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o
obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o
+obj-$(CONFIG_SOC_IMX6SLL) += clk-imx6sll.o
obj-$(CONFIG_SOC_IMX6SX) += clk-imx6sx.o
obj-$(CONFIG_SOC_IMX6UL) += clk-imx6ul.o
obj-$(CONFIG_SOC_IMX7D) += clk-imx7d.o
diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c
index 6df3389687bc..99036527eb0d 100644
--- a/drivers/clk/imx/clk-busy.c
+++ b/drivers/clk/imx/clk-busy.c
@@ -101,7 +101,7 @@ struct clk *imx_clk_busy_divider(const char *name, const char *parent_name,
init.name = name;
init.ops = &clk_busy_divider_ops;
- init.flags = CLK_SET_RATE_PARENT;
+ init.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL;
init.parent_names = &parent_name;
init.num_parents = 1;
@@ -175,7 +175,7 @@ struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
init.name = name;
init.ops = &clk_busy_mux_ops;
- init.flags = 0;
+ init.flags = CLK_IS_CRITICAL;
init.parent_names = parent_names;
init.num_parents = num_parents;
diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c
new file mode 100644
index 000000000000..3651c77fbabe
--- /dev/null
+++ b/drivers/clk/imx/clk-imx6sll.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP.
+ */
+
+#include <dt-bindings/clock/imx6sll-clock.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "clk.h"
+
+#define CCM_ANALOG_PLL_BYPASS (0x1 << 16)
+#define BM_CCM_CCDR_MMDC_CH0_MASK (0x2 << 16)
+#define xPLL_CLR(offset) (offset + 0x8)
+
+static const char *pll_bypass_src_sels[] = { "osc", "dummy", };
+static const char *pll1_bypass_sels[] = { "pll1", "pll1_bypass_src", };
+static const char *pll2_bypass_sels[] = { "pll2", "pll2_bypass_src", };
+static const char *pll3_bypass_sels[] = { "pll3", "pll3_bypass_src", };
+static const char *pll4_bypass_sels[] = { "pll4", "pll4_bypass_src", };
+static const char *pll5_bypass_sels[] = { "pll5", "pll5_bypass_src", };
+static const char *pll6_bypass_sels[] = { "pll6", "pll6_bypass_src", };
+static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", };
+static const char *step_sels[] = { "osc", "pll2_pfd2_396m", };
+static const char *pll1_sw_sels[] = { "pll1_sys", "step", };
+static const char *axi_alt_sels[] = { "pll2_pfd2_396m", "pll3_pfd1_540m", };
+static const char *axi_sels[] = {"periph", "axi_alt_sel", };
+static const char *periph_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
+static const char *periph2_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll4_audio_div", };
+static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", "osc", };
+static const char *periph2_clk2_sels[] = { "pll3_usb_otg", "osc", };
+static const char *periph_sels[] = { "periph_pre", "periph_clk2", };
+static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", };
+static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *ssi_sels[] = {"pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_audio_div", "dummy",};
+static const char *spdif_sels[] = { "pll4_audio_div", "pll3_pfd2_508m", "pll5_video_div", "pll3_usb_otg", };
+static const char *ldb_di0_div_sels[] = { "ldb_di0_div_3_5", "ldb_di0_div_7", };
+static const char *ldb_di1_div_sels[] = { "ldb_di1_div_3_5", "ldb_di1_div_7", };
+static const char *ldb_di0_sels[] = { "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll2_pfd3_594m", "pll2_pfd1_594m", "pll3_pfd3_454m", };
+static const char *ldb_di1_sels[] = { "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll2_bus", "pll3_pfd3_454m", "pll3_pfd2_508m", };
+static const char *lcdif_pre_sels[] = { "pll2_bus", "pll3_pfd3_454m", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd1_594m", "pll3_pfd1_540m", };
+static const char *ecspi_sels[] = { "pll3_60m", "osc", };
+static const char *uart_sels[] = { "pll3_80m", "osc", };
+static const char *perclk_sels[] = { "ipg", "osc", };
+static const char *lcdif_sels[] = { "lcdif_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
+
+static const char *epdc_pre_sels[] = { "pll2_bus", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd2_508m", };
+static const char *epdc_sels[] = { "epdc_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
+
+static struct clk *clks[IMX6SLL_CLK_END];
+static struct clk_onecell_data clk_data;
+
+static const struct clk_div_table post_div_table[] = {
+ { .val = 2, .div = 1, },
+ { .val = 1, .div = 2, },
+ { .val = 0, .div = 4, },
+ { }
+};
+
+static const struct clk_div_table video_div_table[] = {
+ { .val = 0, .div = 1, },
+ { .val = 1, .div = 2, },
+ { .val = 2, .div = 1, },
+ { .val = 3, .div = 4, },
+ { }
+};
+
+static u32 share_count_audio;
+static u32 share_count_ssi1;
+static u32 share_count_ssi2;
+static u32 share_count_ssi3;
+
+static void __init imx6sll_clocks_init(struct device_node *ccm_node)
+{
+ struct device_node *np;
+ void __iomem *base;
+
+ clks[IMX6SLL_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
+
+ clks[IMX6SLL_CLK_CKIL] = of_clk_get_by_name(ccm_node, "ckil");
+ clks[IMX6SLL_CLK_OSC] = of_clk_get_by_name(ccm_node, "osc");
+
+	/* The ipp_di clocks are external inputs */
+ clks[IMX6SLL_CLK_IPP_DI0] = of_clk_get_by_name(ccm_node, "ipp_di0");
+ clks[IMX6SLL_CLK_IPP_DI1] = of_clk_get_by_name(ccm_node, "ipp_di1");
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6sll-anatop");
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+
+ /* Do not bypass PLLs initially */
+ writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0x0));
+ writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0x10));
+ writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0x20));
+ writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0x30));
+ writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0x70));
+ writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0xa0));
+ writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0xe0));
+
+ clks[IMX6SLL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clks[IMX6SLL_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clks[IMX6SLL_PLL3_BYPASS_SRC] = imx_clk_mux("pll3_bypass_src", base + 0x10, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clks[IMX6SLL_PLL4_BYPASS_SRC] = imx_clk_mux("pll4_bypass_src", base + 0x70, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clks[IMX6SLL_PLL5_BYPASS_SRC] = imx_clk_mux("pll5_bypass_src", base + 0xa0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clks[IMX6SLL_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", base + 0xe0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clks[IMX6SLL_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+
+ clks[IMX6SLL_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "pll1_bypass_src", base + 0x00, 0x7f);
+ clks[IMX6SLL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", base + 0x30, 0x1);
+ clks[IMX6SLL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", base + 0x10, 0x3);
+ clks[IMX6SLL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", base + 0x70, 0x7f);
+ clks[IMX6SLL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "pll5_bypass_src", base + 0xa0, 0x7f);
+ clks[IMX6SLL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "pll6_bypass_src", base + 0xe0, 0x3);
+ clks[IMX6SLL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", base + 0x20, 0x3);
+
+ clks[IMX6SLL_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6SLL_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6SLL_PLL3_BYPASS] = imx_clk_mux_flags("pll3_bypass", base + 0x10, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6SLL_PLL4_BYPASS] = imx_clk_mux_flags("pll4_bypass", base + 0x70, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6SLL_PLL5_BYPASS] = imx_clk_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6SLL_PLL6_BYPASS] = imx_clk_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT);
+ clks[IMX6SLL_PLL7_BYPASS] = imx_clk_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT);
+
+ clks[IMX6SLL_CLK_PLL1_SYS] = imx_clk_fixed_factor("pll1_sys", "pll1_bypass", 1, 1);
+ clks[IMX6SLL_CLK_PLL2_BUS] = imx_clk_gate("pll2_bus", "pll2_bypass", base + 0x30, 13);
+ clks[IMX6SLL_CLK_PLL3_USB_OTG] = imx_clk_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13);
+ clks[IMX6SLL_CLK_PLL4_AUDIO] = imx_clk_gate("pll4_audio", "pll4_bypass", base + 0x70, 13);
+ clks[IMX6SLL_CLK_PLL5_VIDEO] = imx_clk_gate("pll5_video", "pll5_bypass", base + 0xa0, 13);
+ clks[IMX6SLL_CLK_PLL6_ENET] = imx_clk_gate("pll6_enet", "pll6_bypass", base + 0xe0, 13);
+ clks[IMX6SLL_CLK_PLL7_USB_HOST] = imx_clk_gate("pll7_usb_host", "pll7_bypass", base + 0x20, 13);
+
+	/*
+	 * Bit 20 is a reserved, read-only bit; these gates exist only to:
+	 * - do nothing on usbphy clk_enable/disable
+	 * - keep a refcount across usbphy clk_enable/disable, in which case
+	 *   the clk framework may need to enable/disable the usbphy's parent
+	 */
+ clks[IMX6SLL_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20);
+ clks[IMX6SLL_CLK_USBPHY2] = imx_clk_gate("usbphy2", "pll7_usb_host", base + 0x20, 20);
+
+	/*
+	 * The usbphy*_gate clocks need to stay on after the system boots,
+	 * and software never needs to control them again.
+	 */
+ if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
+ clks[IMX6SLL_CLK_USBPHY1_GATE] = imx_clk_gate_flags("usbphy1_gate", "dummy", base + 0x10, 6, CLK_IS_CRITICAL);
+ clks[IMX6SLL_CLK_USBPHY2_GATE] = imx_clk_gate_flags("usbphy2_gate", "dummy", base + 0x20, 6, CLK_IS_CRITICAL);
+ }
+
+ /* name parent_name reg idx */
+ clks[IMX6SLL_CLK_PLL2_PFD0] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0);
+ clks[IMX6SLL_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1);
+ clks[IMX6SLL_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2);
+ clks[IMX6SLL_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3_594m", "pll2_bus", base + 0x100, 3);
+ clks[IMX6SLL_CLK_PLL3_PFD0] = imx_clk_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0);
+ clks[IMX6SLL_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1);
+ clks[IMX6SLL_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2);
+ clks[IMX6SLL_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3);
+
+ clks[IMX6SLL_CLK_PLL4_POST_DIV] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock);
+ clks[IMX6SLL_CLK_PLL4_AUDIO_DIV] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x170, 15, 1, 0, &imx_ccm_lock);
+ clks[IMX6SLL_CLK_PLL5_POST_DIV] = clk_register_divider_table(NULL, "pll5_post_div", "pll5_video",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock);
+ clks[IMX6SLL_CLK_PLL5_VIDEO_DIV] = clk_register_divider_table(NULL, "pll5_video_div", "pll5_post_div",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock);
+
+ /* name parent_name mult div */
+ clks[IMX6SLL_CLK_PLL2_198M] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2);
+ clks[IMX6SLL_CLK_PLL3_120M] = imx_clk_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4);
+ clks[IMX6SLL_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6);
+ clks[IMX6SLL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8);
+
+ np = ccm_node;
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+
+ clks[IMX6SLL_CLK_STEP] = imx_clk_mux("step", base + 0x0c, 8, 1, step_sels, ARRAY_SIZE(step_sels));
+ clks[IMX6SLL_CLK_PLL1_SW] = imx_clk_mux_flags("pll1_sw", base + 0x0c, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels), 0);
+ clks[IMX6SLL_CLK_AXI_ALT_SEL] = imx_clk_mux("axi_alt_sel", base + 0x14, 7, 1, axi_alt_sels, ARRAY_SIZE(axi_alt_sels));
+ clks[IMX6SLL_CLK_AXI_SEL] = imx_clk_mux_flags("axi_sel", base + 0x14, 6, 1, axi_sels, ARRAY_SIZE(axi_sels), 0);
+ clks[IMX6SLL_CLK_PERIPH_PRE] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels));
+ clks[IMX6SLL_CLK_PERIPH2_PRE] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph2_pre_sels, ARRAY_SIZE(periph2_pre_sels));
+ clks[IMX6SLL_CLK_PERIPH_CLK2_SEL] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels));
+ clks[IMX6SLL_CLK_PERIPH2_CLK2_SEL] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels));
+ clks[IMX6SLL_CLK_USDHC1_SEL] = imx_clk_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clks[IMX6SLL_CLK_USDHC2_SEL] = imx_clk_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clks[IMX6SLL_CLK_USDHC3_SEL] = imx_clk_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clks[IMX6SLL_CLK_SSI1_SEL] = imx_clk_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
+ clks[IMX6SLL_CLK_SSI2_SEL] = imx_clk_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
+ clks[IMX6SLL_CLK_SSI3_SEL] = imx_clk_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
+ clks[IMX6SLL_CLK_PERCLK_SEL] = imx_clk_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels));
+ clks[IMX6SLL_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
+ clks[IMX6SLL_CLK_SPDIF_SEL] = imx_clk_mux("spdif_sel", base + 0x30, 20, 2, spdif_sels, ARRAY_SIZE(spdif_sels));
+ clks[IMX6SLL_CLK_EXTERN_AUDIO_SEL] = imx_clk_mux("extern_audio_sel", base + 0x30, 7, 2, spdif_sels, ARRAY_SIZE(spdif_sels));
+ clks[IMX6SLL_CLK_EPDC_PRE_SEL] = imx_clk_mux("epdc_pre_sel", base + 0x34, 15, 3, epdc_pre_sels, ARRAY_SIZE(epdc_pre_sels));
+ clks[IMX6SLL_CLK_EPDC_SEL] = imx_clk_mux("epdc_sel", base + 0x34, 9, 3, epdc_sels, ARRAY_SIZE(epdc_sels));
+ clks[IMX6SLL_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels));
+ clks[IMX6SLL_CLK_LCDIF_PRE_SEL] = imx_clk_mux("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels));
+ clks[IMX6SLL_CLK_LCDIF_SEL] = imx_clk_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels));
+
+ clks[IMX6SLL_CLK_PERIPH] = imx_clk_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels));
+ clks[IMX6SLL_CLK_PERIPH2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels));
+
+ clks[IMX6SLL_CLK_PERIPH_CLK2] = imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3);
+ clks[IMX6SLL_CLK_PERIPH2_CLK2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3);
+ clks[IMX6SLL_CLK_IPG] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2);
+ clks[IMX6SLL_CLK_LCDIF_PODF] = imx_clk_divider("lcdif_podf", "lcdif_pred", base + 0x18, 23, 3);
+ clks[IMX6SLL_CLK_PERCLK] = imx_clk_divider("perclk", "perclk_sel", base + 0x1c, 0, 6);
+ clks[IMX6SLL_CLK_USDHC3_PODF] = imx_clk_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3);
+ clks[IMX6SLL_CLK_USDHC2_PODF] = imx_clk_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3);
+ clks[IMX6SLL_CLK_USDHC1_PODF] = imx_clk_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3);
+ clks[IMX6SLL_CLK_UART_PODF] = imx_clk_divider("uart_podf", "uart_sel", base + 0x24, 0, 6);
+ clks[IMX6SLL_CLK_SSI3_PRED] = imx_clk_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3);
+ clks[IMX6SLL_CLK_SSI3_PODF] = imx_clk_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6);
+ clks[IMX6SLL_CLK_SSI1_PRED] = imx_clk_divider("ssi1_pred", "ssi1_sel", base + 0x28, 6, 3);
+ clks[IMX6SLL_CLK_SSI1_PODF] = imx_clk_divider("ssi1_podf", "ssi1_pred", base + 0x28, 0, 6);
+ clks[IMX6SLL_CLK_SSI2_PRED] = imx_clk_divider("ssi2_pred", "ssi2_sel", base + 0x2c, 6, 3);
+ clks[IMX6SLL_CLK_SSI2_PODF] = imx_clk_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6);
+ clks[IMX6SLL_CLK_SPDIF_PRED] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3);
+ clks[IMX6SLL_CLK_SPDIF_PODF] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3);
+ clks[IMX6SLL_CLK_EXTERN_AUDIO_PRED] = imx_clk_divider("extern_audio_pred", "extern_audio_sel", base + 0x30, 12, 3);
+ clks[IMX6SLL_CLK_EXTERN_AUDIO_PODF] = imx_clk_divider("extern_audio_podf", "extern_audio_pred", base + 0x30, 9, 3);
+ clks[IMX6SLL_CLK_EPDC_PODF] = imx_clk_divider("epdc_podf", "epdc_pre_sel", base + 0x34, 12, 3);
+ clks[IMX6SLL_CLK_ECSPI_PODF] = imx_clk_divider("ecspi_podf", "ecspi_sel", base + 0x38, 19, 6);
+ clks[IMX6SLL_CLK_LCDIF_PRED] = imx_clk_divider("lcdif_pred", "lcdif_pre_sel", base + 0x38, 12, 3);
+
+ clks[IMX6SLL_CLK_ARM] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16);
+ clks[IMX6SLL_CLK_MMDC_PODF] = imx_clk_busy_divider("mmdc_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2);
+ clks[IMX6SLL_CLK_AXI_PODF] = imx_clk_busy_divider("axi", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0);
+ clks[IMX6SLL_CLK_AHB] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1);
+
+ clks[IMX6SLL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
+ clks[IMX6SLL_CLK_LDB_DI0_DIV_7] = imx_clk_fixed_factor("ldb_di0_div_7", "ldb_di0_sel", 1, 7);
+ clks[IMX6SLL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
+ clks[IMX6SLL_CLK_LDB_DI1_DIV_7] = imx_clk_fixed_factor("ldb_di1_div_7", "ldb_di1_sel", 1, 7);
+
+ clks[IMX6SLL_CLK_LDB_DI0_SEL] = imx_clk_mux("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di0_sels, ARRAY_SIZE(ldb_di0_sels));
+ clks[IMX6SLL_CLK_LDB_DI1_SEL] = imx_clk_mux("ldb_di1_sel", base + 0x1c, 7, 3, ldb_di1_sels, ARRAY_SIZE(ldb_di1_sels));
+ clks[IMX6SLL_CLK_LDB_DI0_DIV_SEL] = imx_clk_mux("ldb_di0_div_sel", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels));
+ clks[IMX6SLL_CLK_LDB_DI1_DIV_SEL] = imx_clk_mux("ldb_di1_div_sel", base + 0x20, 10, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels));
+
+ /* CCGR0 */
+ clks[IMX6SLL_CLK_AIPSTZ1] = imx_clk_gate2_flags("aips_tz1", "ahb", base + 0x68, 0, CLK_IS_CRITICAL);
+ clks[IMX6SLL_CLK_AIPSTZ2] = imx_clk_gate2_flags("aips_tz2", "ahb", base + 0x68, 2, CLK_IS_CRITICAL);
+ clks[IMX6SLL_CLK_DCP] = imx_clk_gate2("dcp", "ahb", base + 0x68, 10);
+ clks[IMX6SLL_CLK_UART2_IPG] = imx_clk_gate2("uart2_ipg", "ipg", base + 0x68, 28);
+ clks[IMX6SLL_CLK_UART2_SERIAL] = imx_clk_gate2("uart2_serial", "uart_podf", base + 0x68, 28);
+
+ /* CCGR1 */
+ clks[IMX6SLL_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0);
+ clks[IMX6SLL_CLK_ECSPI2] = imx_clk_gate2("ecspi2", "ecspi_podf", base + 0x6c, 2);
+ clks[IMX6SLL_CLK_ECSPI3] = imx_clk_gate2("ecspi3", "ecspi_podf", base + 0x6c, 4);
+ clks[IMX6SLL_CLK_ECSPI4] = imx_clk_gate2("ecspi4", "ecspi_podf", base + 0x6c, 6);
+ clks[IMX6SLL_CLK_UART3_IPG] = imx_clk_gate2("uart3_ipg", "ipg", base + 0x6c, 10);
+ clks[IMX6SLL_CLK_UART3_SERIAL] = imx_clk_gate2("uart3_serial", "uart_podf", base + 0x6c, 10);
+ clks[IMX6SLL_CLK_EPIT1] = imx_clk_gate2("epit1", "perclk", base + 0x6c, 12);
+ clks[IMX6SLL_CLK_EPIT2] = imx_clk_gate2("epit2", "perclk", base + 0x6c, 14);
+ clks[IMX6SLL_CLK_GPT_BUS] = imx_clk_gate2("gpt1_bus", "perclk", base + 0x6c, 20);
+ clks[IMX6SLL_CLK_GPT_SERIAL] = imx_clk_gate2("gpt1_serial", "perclk", base + 0x6c, 22);
+ clks[IMX6SLL_CLK_UART4_IPG] = imx_clk_gate2("uart4_ipg", "ipg", base + 0x6c, 24);
+ clks[IMX6SLL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serail", "uart_podf", base + 0x6c, 24);
+
+ /* CCGR2 */
+ clks[IMX6SLL_CLK_CSI] = imx_clk_gate2("csi", "axi", base + 0x70, 2);
+ clks[IMX6SLL_CLK_I2C1] = imx_clk_gate2("i2c1", "perclk", base + 0x70, 6);
+ clks[IMX6SLL_CLK_I2C2] = imx_clk_gate2("i2c2", "perclk", base + 0x70, 8);
+ clks[IMX6SLL_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10);
+ clks[IMX6SLL_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12);
+ clks[IMX6SLL_CLK_LCDIF_APB] = imx_clk_gate2("lcdif_apb", "axi", base + 0x70, 28);
+ clks[IMX6SLL_CLK_PXP] = imx_clk_gate2("pxp", "axi", base + 0x70, 30);
+
+ /* CCGR3 */
+ clks[IMX6SLL_CLK_UART5_IPG] = imx_clk_gate2("uart5_ipg", "ipg", base + 0x74, 2);
+ clks[IMX6SLL_CLK_UART5_SERIAL] = imx_clk_gate2("uart5_serial", "uart_podf", base + 0x74, 2);
+ clks[IMX6SLL_CLK_EPDC_AXI] = imx_clk_gate2("epdc_aclk", "axi", base + 0x74, 4);
+ clks[IMX6SLL_CLK_EPDC_PIX] = imx_clk_gate2("epdc_pix", "epdc_podf", base + 0x74, 4);
+ clks[IMX6SLL_CLK_LCDIF_PIX] = imx_clk_gate2("lcdif_pix", "lcdif_podf", base + 0x74, 10);
+ clks[IMX6SLL_CLK_WDOG1] = imx_clk_gate2("wdog1", "ipg", base + 0x74, 16);
+ clks[IMX6SLL_CLK_MMDC_P0_FAST] = imx_clk_gate_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL);
+ clks[IMX6SLL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL);
+	clks[IMX6SLL_CLK_OCRAM]		= imx_clk_gate_flags("ocram", "ahb", base + 0x74, 28, CLK_IS_CRITICAL);
+
+ /* CCGR4 */
+ clks[IMX6SLL_CLK_PWM1] = imx_clk_gate2("pwm1", "perclk", base + 0x78, 16);
+ clks[IMX6SLL_CLK_PWM2] = imx_clk_gate2("pwm2", "perclk", base + 0x78, 18);
+ clks[IMX6SLL_CLK_PWM3] = imx_clk_gate2("pwm3", "perclk", base + 0x78, 20);
+ clks[IMX6SLL_CLK_PWM4] = imx_clk_gate2("pwm4", "perclk", base + 0x78, 22);
+
+ /* CCGR5 */
+ clks[IMX6SLL_CLK_ROM] = imx_clk_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL);
+ clks[IMX6SLL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
+ clks[IMX6SLL_CLK_WDOG2] = imx_clk_gate2("wdog2", "ipg", base + 0x7c, 10);
+ clks[IMX6SLL_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
+ clks[IMX6SLL_CLK_EXTERN_AUDIO] = imx_clk_gate2_shared("extern_audio", "extern_audio_podf", base + 0x7c, 14, &share_count_audio);
+ clks[IMX6SLL_CLK_SPDIF] = imx_clk_gate2_shared("spdif", "spdif_podf", base + 0x7c, 14, &share_count_audio);
+ clks[IMX6SLL_CLK_SPDIF_GCLK] = imx_clk_gate2_shared("spdif_gclk", "ipg", base + 0x7c, 14, &share_count_audio);
+ clks[IMX6SLL_CLK_SSI1] = imx_clk_gate2_shared("ssi1", "ssi1_podf", base + 0x7c, 18, &share_count_ssi1);
+ clks[IMX6SLL_CLK_SSI1_IPG] = imx_clk_gate2_shared("ssi1_ipg", "ipg", base + 0x7c, 18, &share_count_ssi1);
+ clks[IMX6SLL_CLK_SSI2] = imx_clk_gate2_shared("ssi2", "ssi2_podf", base + 0x7c, 20, &share_count_ssi2);
+ clks[IMX6SLL_CLK_SSI2_IPG] = imx_clk_gate2_shared("ssi2_ipg", "ipg", base + 0x7c, 20, &share_count_ssi2);
+ clks[IMX6SLL_CLK_SSI3] = imx_clk_gate2_shared("ssi3", "ssi3_podf", base + 0x7c, 22, &share_count_ssi3);
+ clks[IMX6SLL_CLK_SSI3_IPG] = imx_clk_gate2_shared("ssi3_ipg", "ipg", base + 0x7c, 22, &share_count_ssi3);
+ clks[IMX6SLL_CLK_UART1_IPG] = imx_clk_gate2("uart1_ipg", "ipg", base + 0x7c, 24);
+ clks[IMX6SLL_CLK_UART1_SERIAL] = imx_clk_gate2("uart1_serial", "uart_podf", base + 0x7c, 24);
+
+ /* CCGR6 */
+ clks[IMX6SLL_CLK_USBOH3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0);
+ clks[IMX6SLL_CLK_USDHC1] = imx_clk_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2);
+ clks[IMX6SLL_CLK_USDHC2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4);
+ clks[IMX6SLL_CLK_USDHC3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6);
+
+	/* Mask the MMDC CH0 handshake */
+ writel_relaxed(BM_CCM_CCDR_MMDC_CH0_MASK, base + 0x4);
+
+ imx_check_clocks(clks, ARRAY_SIZE(clks));
+
+ clk_data.clks = clks;
+ clk_data.clk_num = ARRAY_SIZE(clks);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ /* Lower the AHB clock rate before changing the clock source. */
+ clk_set_rate(clks[IMX6SLL_CLK_AHB], 99000000);
+
+ /* Change periph_pre clock to pll2_bus to adjust AXI rate to 264MHz */
+ clk_set_parent(clks[IMX6SLL_CLK_PERIPH_CLK2_SEL], clks[IMX6SLL_CLK_PLL3_USB_OTG]);
+ clk_set_parent(clks[IMX6SLL_CLK_PERIPH], clks[IMX6SLL_CLK_PERIPH_CLK2]);
+ clk_set_parent(clks[IMX6SLL_CLK_PERIPH_PRE], clks[IMX6SLL_CLK_PLL2_BUS]);
+ clk_set_parent(clks[IMX6SLL_CLK_PERIPH], clks[IMX6SLL_CLK_PERIPH_PRE]);
+
+ clk_set_rate(clks[IMX6SLL_CLK_AHB], 132000000);
+}
+CLK_OF_DECLARE_DRIVER(imx6sll, "fsl,imx6sll-ccm", imx6sll_clocks_init);
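The new clk-imx6sll.c provider exports every clock through of_clk_src_onecell_get, so peripheral drivers consume them with the standard clk API. A minimal, illustrative consumer, assuming the usual "ipg" and "per" connection names in the device tree:

	struct clk *ipg, *per;

	ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(ipg))
		return PTR_ERR(ipg);

	per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(per))
		return PTR_ERR(per);

	clk_prepare_enable(ipg);
	clk_prepare_enable(per);
	/* ... use the peripheral; disable and unprepare on remove ... */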
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index e6d389e333d7..bc3f9ebf2d9e 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -63,17 +63,17 @@ static const char *lcdif2_sels[] = { "lcdif2_podf", "ipp_di0", "ipp_di1", "ldb_d
static const char *display_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll3_usb_otg", "pll3_pfd1_540m", };
static const char *csi_sels[] = { "osc", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
static const char *cko1_sels[] = {
- "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video_div",
- "dummy", "ocram", "dummy", "pxp_axi", "epdc_axi", "lcdif_pix",
- "epdc_pix", "ahb", "ipg", "perclk", "ckil", "pll4_audio_div",
+ "dummy", "dummy", "dummy", "dummy",
+ "vadc", "ocram", "qspi2", "m4", "enet_ahb", "lcdif2_pix",
+ "lcdif1_pix", "ahb", "ipg", "perclk", "ckil", "pll4_audio_div",
};
static const char *cko2_sels[] = {
"dummy", "mmdc_p0_fast", "usdhc4", "usdhc1", "dummy", "wrck",
"ecspi_root", "dummy", "usdhc3", "pcie", "arm", "csi_core",
- "lcdif_axi", "dummy", "osc", "dummy", "gpu2d_ovg_core",
- "usdhc2", "ssi1", "ssi2", "ssi3", "gpu2d_core", "dummy",
- "dummy", "dummy", "dummy", "esai_extal", "eim_slow", "uart_serial",
- "spdif", "asrc", "dummy",
+ "display_axi", "dummy", "osc", "dummy", "dummy",
+ "usdhc2", "ssi1", "ssi2", "ssi3", "gpu_axi_podf", "dummy",
+ "can_podf", "lvds1_out", "qspi1", "esai_extal", "eim_slow",
+ "uart_serial", "spdif", "audio", "dummy",
};
static const char *cko_sels[] = { "cko1", "cko2", };
static const char *lvds_sels[] = {
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index 85c118164469..114ecbb94ec5 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -308,7 +308,10 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_SAI2_PODF] = imx_clk_divider("sai2_podf", "sai2_pred", base + 0x2c, 0, 6);
clks[IMX6UL_CLK_SPDIF_PRED] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3);
clks[IMX6UL_CLK_SPDIF_PODF] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3);
- clks[IMX6UL_CLK_SIM_PODF] = imx_clk_divider("sim_podf", "sim_pre_sel", base + 0x34, 12, 3);
+ if (clk_on_imx6ul())
+ clks[IMX6UL_CLK_SIM_PODF] = imx_clk_divider("sim_podf", "sim_pre_sel", base + 0x34, 12, 3);
+ else if (clk_on_imx6ull())
+ clks[IMX6ULL_CLK_EPDC_PODF] = imx_clk_divider("epdc_podf", "epdc_pre_sel", base + 0x34, 12, 3);
clks[IMX6UL_CLK_ECSPI_PODF] = imx_clk_divider("ecspi_podf", "ecspi_sel", base + 0x38, 19, 6);
clks[IMX6UL_CLK_LCDIF_PRED] = imx_clk_divider("lcdif_pred", "lcdif_pre_sel", base + 0x38, 12, 3);
clks[IMX6UL_CLK_CSI_PODF] = imx_clk_divider("csi_podf", "csi_sel", base + 0x3c, 11, 3);
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 617beb234259..975a20d3cc94 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -51,20 +51,20 @@ static const char *arm_a7_sel[] = { "osc", "pll_arm_main_clk",
static const char *arm_m4_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_enet_250m_clk", "pll_sys_pfd2_270m_clk",
- "pll_dram_533m_clk", "pll_audio_post_div", "pll_video_main_clk",
+ "pll_dram_533m_clk", "pll_audio_post_div", "pll_video_post_div",
"pll_usb_main_clk", };
static const char *axi_sel[] = { "osc", "pll_sys_pfd1_332m_clk",
"pll_dram_533m_clk", "pll_enet_250m_clk", "pll_sys_pfd5_clk",
- "pll_audio_post_div", "pll_video_main_clk", "pll_sys_pfd7_clk", };
+ "pll_audio_post_div", "pll_video_post_div", "pll_sys_pfd7_clk", };
static const char *disp_axi_sel[] = { "osc", "pll_sys_pfd1_332m_clk",
"pll_dram_533m_clk", "pll_enet_250m_clk", "pll_sys_pfd6_clk",
- "pll_sys_pfd7_clk", "pll_audio_post_div", "pll_video_main_clk", };
+ "pll_sys_pfd7_clk", "pll_audio_post_div", "pll_video_post_div", };
static const char *enet_axi_sel[] = { "osc", "pll_sys_pfd2_270m_clk",
"pll_dram_533m_clk", "pll_enet_250m_clk",
- "pll_sys_main_240m_clk", "pll_audio_post_div", "pll_video_main_clk",
+ "pll_sys_main_240m_clk", "pll_audio_post_div", "pll_video_post_div",
"pll_sys_pfd4_clk", };
static const char *nand_usdhc_bus_sel[] = { "osc", "pll_sys_pfd2_270m_clk",
@@ -74,8 +74,8 @@ static const char *nand_usdhc_bus_sel[] = { "osc", "pll_sys_pfd2_270m_clk",
static const char *ahb_channel_sel[] = { "osc", "pll_sys_pfd2_270m_clk",
"pll_dram_533m_clk", "pll_sys_pfd0_392m_clk",
- "pll_enet_125m_clk", "pll_usb_main_clk", "pll_audio_post_div",
- "pll_video_main_clk", };
+ "pll_enet_250m_clk", "pll_usb_main_clk", "pll_audio_post_div",
+ "pll_video_post_div", };
static const char *dram_phym_sel[] = { "pll_dram_main_clk",
"dram_phym_alt_clk", };
@@ -86,7 +86,7 @@ static const char *dram_sel[] = { "pll_dram_main_clk",
static const char *dram_phym_alt_sel[] = { "osc", "pll_dram_533m_clk",
"pll_sys_main_clk", "pll_enet_500m_clk",
"pll_usb_main_clk", "pll_sys_pfd7_clk", "pll_audio_post_div",
- "pll_video_main_clk", };
+ "pll_video_post_div", };
static const char *dram_alt_sel[] = { "osc", "pll_dram_533m_clk",
"pll_sys_main_clk", "pll_enet_500m_clk",
@@ -108,62 +108,62 @@ static const char *pcie_phy_sel[] = { "osc", "pll_enet_100m_clk",
static const char *epdc_pixel_sel[] = { "osc", "pll_sys_pfd1_332m_clk",
"pll_dram_533m_clk", "pll_sys_main_clk", "pll_sys_pfd5_clk",
- "pll_sys_pfd6_clk", "pll_sys_pfd7_clk", "pll_video_main_clk", };
+ "pll_sys_pfd6_clk", "pll_sys_pfd7_clk", "pll_video_post_div", };
static const char *lcdif_pixel_sel[] = { "osc", "pll_sys_pfd5_clk",
"pll_dram_533m_clk", "ext_clk_3", "pll_sys_pfd4_clk",
- "pll_sys_pfd2_270m_clk", "pll_video_main_clk",
+ "pll_sys_pfd2_270m_clk", "pll_video_post_div",
"pll_usb_main_clk", };
static const char *mipi_dsi_sel[] = { "osc", "pll_sys_pfd5_clk",
"pll_sys_pfd3_clk", "pll_sys_main_clk", "pll_sys_pfd0_196m_clk",
- "pll_dram_533m_clk", "pll_video_main_clk", "pll_audio_post_div", };
+ "pll_dram_533m_clk", "pll_video_post_div", "pll_audio_post_div", };
static const char *mipi_csi_sel[] = { "osc", "pll_sys_pfd4_clk",
"pll_sys_pfd3_clk", "pll_sys_main_clk", "pll_sys_pfd0_196m_clk",
- "pll_dram_533m_clk", "pll_video_main_clk", "pll_audio_post_div", };
+ "pll_dram_533m_clk", "pll_video_post_div", "pll_audio_post_div", };
static const char *mipi_dphy_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_dram_533m_clk", "pll_sys_pfd5_clk", "ref_1m_clk", "ext_clk_2",
- "pll_video_main_clk", "ext_clk_3", };
+ "pll_video_post_div", "ext_clk_3", };
static const char *sai1_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
- "pll_audio_post_div", "pll_dram_533m_clk", "pll_video_main_clk",
+ "pll_audio_post_div", "pll_dram_533m_clk", "pll_video_post_div",
"pll_sys_pfd4_clk", "pll_enet_125m_clk", "ext_clk_2", };
static const char *sai2_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
- "pll_audio_post_div", "pll_dram_533m_clk", "pll_video_main_clk",
+ "pll_audio_post_div", "pll_dram_533m_clk", "pll_video_post_div",
"pll_sys_pfd4_clk", "pll_enet_125m_clk", "ext_clk_2", };
static const char *sai3_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
- "pll_audio_post_div", "pll_dram_533m_clk", "pll_video_main_clk",
+ "pll_audio_post_div", "pll_dram_533m_clk", "pll_video_post_div",
"pll_sys_pfd4_clk", "pll_enet_125m_clk", "ext_clk_3", };
static const char *spdif_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
- "pll_audio_post_div", "pll_dram_533m_clk", "pll_video_main_clk",
+ "pll_audio_post_div", "pll_dram_533m_clk", "pll_video_post_div",
"pll_sys_pfd4_clk", "pll_enet_125m_clk", "ext_3_clk", };
static const char *enet1_ref_sel[] = { "osc", "pll_enet_125m_clk",
"pll_enet_50m_clk", "pll_enet_25m_clk",
- "pll_sys_main_120m_clk", "pll_audio_post_div", "pll_video_main_clk",
+ "pll_sys_main_120m_clk", "pll_audio_post_div", "pll_video_post_div",
"ext_clk_4", };
static const char *enet1_time_sel[] = { "osc", "pll_enet_100m_clk",
"pll_audio_post_div", "ext_clk_1", "ext_clk_2", "ext_clk_3",
- "ext_clk_4", "pll_video_main_clk", };
+ "ext_clk_4", "pll_video_post_div", };
static const char *enet2_ref_sel[] = { "osc", "pll_enet_125m_clk",
"pll_enet_50m_clk", "pll_enet_25m_clk",
- "pll_sys_main_120m_clk", "pll_audio_post_div", "pll_video_main_clk",
+ "pll_sys_main_120m_clk", "pll_audio_post_div", "pll_video_post_div",
"ext_clk_4", };
static const char *enet2_time_sel[] = { "osc", "pll_enet_100m_clk",
"pll_audio_post_div", "ext_clk_1", "ext_clk_2", "ext_clk_3",
- "ext_clk_4", "pll_video_main_clk", };
+ "ext_clk_4", "pll_video_post_div", };
static const char *enet_phy_ref_sel[] = { "osc", "pll_enet_25m_clk",
"pll_enet_50m_clk", "pll_enet_125m_clk",
- "pll_dram_533m_clk", "pll_audio_post_div", "pll_video_main_clk",
+ "pll_dram_533m_clk", "pll_audio_post_div", "pll_video_post_div",
"pll_sys_pfd3_clk", };
static const char *eim_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
@@ -174,7 +174,7 @@ static const char *eim_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
static const char *nand_sel[] = { "osc", "pll_sys_main_clk",
"pll_dram_533m_clk", "pll_sys_pfd0_392m_clk", "pll_sys_pfd3_clk",
"pll_enet_500m_clk", "pll_enet_250m_clk",
- "pll_video_main_clk", };
+ "pll_video_post_div", };
static const char *qspi_sel[] = { "osc", "pll_sys_pfd4_clk",
"pll_dram_533m_clk", "pll_enet_500m_clk", "pll_sys_pfd3_clk",
@@ -204,22 +204,22 @@ static const char *can2_sel[] = { "osc", "pll_sys_main_120m_clk",
static const char *i2c1_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_enet_50m_clk", "pll_dram_533m_clk",
- "pll_audio_post_div", "pll_video_main_clk", "pll_usb_main_clk",
+ "pll_audio_post_div", "pll_video_post_div", "pll_usb_main_clk",
"pll_sys_pfd2_135m_clk", };
static const char *i2c2_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_enet_50m_clk", "pll_dram_533m_clk",
- "pll_audio_post_div", "pll_video_main_clk", "pll_usb_main_clk",
+ "pll_audio_post_div", "pll_video_post_div", "pll_usb_main_clk",
"pll_sys_pfd2_135m_clk", };
static const char *i2c3_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_enet_50m_clk", "pll_dram_533m_clk",
- "pll_audio_post_div", "pll_video_main_clk", "pll_usb_main_clk",
+ "pll_audio_post_div", "pll_video_post_div", "pll_usb_main_clk",
"pll_sys_pfd2_135m_clk", };
static const char *i2c4_sel[] = { "osc", "pll_sys_main_120m_clk",
"pll_enet_50m_clk", "pll_dram_533m_clk",
- "pll_audio_post_div", "pll_video_main_clk", "pll_usb_main_clk",
+ "pll_audio_post_div", "pll_video_post_div", "pll_usb_main_clk",
"pll_sys_pfd2_135m_clk", };
static const char *uart1_sel[] = { "osc", "pll_sys_main_240m_clk",
@@ -279,27 +279,27 @@ static const char *ecspi4_sel[] = { "osc", "pll_sys_main_240m_clk",
static const char *pwm1_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
- "ext_clk_1", "ref_1m_clk", "pll_video_main_clk", };
+ "ext_clk_1", "ref_1m_clk", "pll_video_post_div", };
static const char *pwm2_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
- "ext_clk_1", "ref_1m_clk", "pll_video_main_clk", };
+ "ext_clk_1", "ref_1m_clk", "pll_video_post_div", };
static const char *pwm3_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
- "ext_clk_2", "ref_1m_clk", "pll_video_main_clk", };
+ "ext_clk_2", "ref_1m_clk", "pll_video_post_div", };
static const char *pwm4_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
- "ext_clk_2", "ref_1m_clk", "pll_video_main_clk", };
+ "ext_clk_2", "ref_1m_clk", "pll_video_post_div", };
static const char *flextimer1_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
- "ext_clk_3", "ref_1m_clk", "pll_video_main_clk", };
+ "ext_clk_3", "ref_1m_clk", "pll_video_post_div", };
static const char *flextimer2_sel[] = { "osc", "pll_enet_100m_clk",
"pll_sys_main_120m_clk", "pll_enet_40m_clk", "pll_audio_post_div",
- "ext_clk_3", "ref_1m_clk", "pll_video_main_clk", };
+ "ext_clk_3", "ref_1m_clk", "pll_video_post_div", };
static const char *sim1_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_sys_main_120m_clk", "pll_dram_533m_clk",
@@ -308,23 +308,23 @@ static const char *sim1_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
static const char *sim2_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_sys_main_120m_clk", "pll_dram_533m_clk",
- "pll_usb_main_clk", "pll_video_main_clk", "pll_enet_125m_clk",
+ "pll_usb_main_clk", "pll_video_post_div", "pll_enet_125m_clk",
"pll_sys_pfd7_clk", };
static const char *gpt1_sel[] = { "osc", "pll_enet_100m_clk",
- "pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_main_clk",
+ "pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_post_div",
"ref_1m_clk", "pll_audio_post_div", "ext_clk_1", };
static const char *gpt2_sel[] = { "osc", "pll_enet_100m_clk",
- "pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_main_clk",
+ "pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_post_div",
"ref_1m_clk", "pll_audio_post_div", "ext_clk_2", };
static const char *gpt3_sel[] = { "osc", "pll_enet_100m_clk",
- "pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_main_clk",
+ "pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_post_div",
"ref_1m_clk", "pll_audio_post_div", "ext_clk_3", };
static const char *gpt4_sel[] = { "osc", "pll_enet_100m_clk",
- "pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_main_clk",
+ "pll_sys_pfd0_392m_clk", "pll_enet_40m_clk", "pll_video_post_div",
"ref_1m_clk", "pll_audio_post_div", "ext_clk_4", };
static const char *trace_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
@@ -339,12 +339,12 @@ static const char *wdog_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
static const char *csi_mclk_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_sys_main_120m_clk", "pll_dram_533m_clk",
- "pll_enet_125m_clk", "pll_audio_post_div", "pll_video_main_clk",
+ "pll_enet_125m_clk", "pll_audio_post_div", "pll_video_post_div",
"pll_usb_main_clk", };
static const char *audio_mclk_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
"pll_sys_main_120m_clk", "pll_dram_533m_clk",
- "pll_enet_125m_clk", "pll_audio_post_div", "pll_video_main_clk",
+ "pll_enet_125m_clk", "pll_audio_post_div", "pll_video_post_div",
"pll_usb_main_clk", };
static const char *wrclk_sel[] = { "osc", "pll_enet_40m_clk",
@@ -358,13 +358,13 @@ static const char *clko1_sel[] = { "osc", "pll_sys_main_clk",
static const char *clko2_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_sys_pfd0_392m_clk", "pll_sys_pfd1_166m_clk", "pll_sys_pfd4_clk",
- "pll_audio_post_div", "pll_video_main_clk", "ckil", };
+ "pll_audio_post_div", "pll_video_post_div", "ckil", };
static const char *lvds1_sel[] = { "pll_arm_main_clk",
"pll_sys_main_clk", "pll_sys_pfd0_392m_clk", "pll_sys_pfd1_332m_clk",
"pll_sys_pfd2_270m_clk", "pll_sys_pfd3_clk", "pll_sys_pfd4_clk",
"pll_sys_pfd5_clk", "pll_sys_pfd6_clk", "pll_sys_pfd7_clk",
- "pll_audio_post_div", "pll_video_main_clk", "pll_enet_500m_clk",
+ "pll_audio_post_div", "pll_video_post_div", "pll_enet_500m_clk",
"pll_enet_250m_clk", "pll_enet_125m_clk", "pll_enet_100m_clk",
"pll_enet_50m_clk", "pll_enet_40m_clk", "pll_enet_25m_clk",
"pll_dram_main_clk", };
@@ -433,23 +433,22 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_PLL_AUDIO_MAIN_BYPASS] = imx_clk_mux_flags("pll_audio_main_bypass", base + 0xf0, 16, 1, pll_audio_bypass_sel, ARRAY_SIZE(pll_audio_bypass_sel), CLK_SET_RATE_PARENT);
clks[IMX7D_PLL_VIDEO_MAIN_BYPASS] = imx_clk_mux_flags("pll_video_main_bypass", base + 0x130, 16, 1, pll_video_bypass_sel, ARRAY_SIZE(pll_video_bypass_sel), CLK_SET_RATE_PARENT);
- clk_set_parent(clks[IMX7D_PLL_ARM_MAIN_BYPASS], clks[IMX7D_PLL_ARM_MAIN]);
- clk_set_parent(clks[IMX7D_PLL_DRAM_MAIN_BYPASS], clks[IMX7D_PLL_DRAM_MAIN]);
- clk_set_parent(clks[IMX7D_PLL_SYS_MAIN_BYPASS], clks[IMX7D_PLL_SYS_MAIN]);
- clk_set_parent(clks[IMX7D_PLL_ENET_MAIN_BYPASS], clks[IMX7D_PLL_ENET_MAIN]);
- clk_set_parent(clks[IMX7D_PLL_AUDIO_MAIN_BYPASS], clks[IMX7D_PLL_AUDIO_MAIN]);
- clk_set_parent(clks[IMX7D_PLL_VIDEO_MAIN_BYPASS], clks[IMX7D_PLL_VIDEO_MAIN]);
-
clks[IMX7D_PLL_ARM_MAIN_CLK] = imx_clk_gate("pll_arm_main_clk", "pll_arm_main_bypass", base + 0x60, 13);
- clks[IMX7D_PLL_DRAM_MAIN_CLK] = imx_clk_gate("pll_dram_main_clk", "pll_dram_main_bypass", base + 0x70, 13);
+ clks[IMX7D_PLL_DRAM_MAIN_CLK] = imx_clk_gate("pll_dram_main_clk", "pll_dram_test_div", base + 0x70, 13);
clks[IMX7D_PLL_SYS_MAIN_CLK] = imx_clk_gate("pll_sys_main_clk", "pll_sys_main_bypass", base + 0xb0, 13);
clks[IMX7D_PLL_AUDIO_MAIN_CLK] = imx_clk_gate("pll_audio_main_clk", "pll_audio_main_bypass", base + 0xf0, 13);
clks[IMX7D_PLL_VIDEO_MAIN_CLK] = imx_clk_gate("pll_video_main_clk", "pll_video_main_bypass", base + 0x130, 13);
+ clks[IMX7D_PLL_DRAM_TEST_DIV] = clk_register_divider_table(NULL, "pll_dram_test_div", "pll_dram_main_bypass",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x70, 21, 2, 0, test_div_table, &imx_ccm_lock);
clks[IMX7D_PLL_AUDIO_TEST_DIV] = clk_register_divider_table(NULL, "pll_audio_test_div", "pll_audio_main_clk",
CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0xf0, 19, 2, 0, test_div_table, &imx_ccm_lock);
clks[IMX7D_PLL_AUDIO_POST_DIV] = clk_register_divider_table(NULL, "pll_audio_post_div", "pll_audio_test_div",
CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0xf0, 22, 2, 0, post_div_table, &imx_ccm_lock);
+ clks[IMX7D_PLL_VIDEO_TEST_DIV] = clk_register_divider_table(NULL, "pll_video_test_div", "pll_video_main_clk",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x130, 19, 2, 0, test_div_table, &imx_ccm_lock);
+ clks[IMX7D_PLL_VIDEO_POST_DIV] = clk_register_divider_table(NULL, "pll_video_post_div", "pll_video_test_div",
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x130, 22, 2, 0, post_div_table, &imx_ccm_lock);
clks[IMX7D_PLL_SYS_PFD0_392M_CLK] = imx_clk_pfd("pll_sys_pfd0_392m_clk", "pll_sys_main_clk", base + 0xc0, 0);
clks[IMX7D_PLL_SYS_PFD1_332M_CLK] = imx_clk_pfd("pll_sys_pfd1_332m_clk", "pll_sys_main_clk", base + 0xc0, 1);
@@ -797,7 +796,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_OCOTP_CLK] = imx_clk_gate4("ocotp_clk", "ipg_root_clk", base + 0x4230, 0);
clks[IMX7D_SNVS_CLK] = imx_clk_gate4("snvs_clk", "ipg_root_clk", base + 0x4250, 0);
clks[IMX7D_CAAM_CLK] = imx_clk_gate4("caam_clk", "ipg_root_clk", base + 0x4240, 0);
- clks[IMX7D_USB_HSIC_ROOT_CLK] = imx_clk_gate4("usb_hsic_root_clk", "usb_hsic_post_div", base + 0x4420, 0);
+ clks[IMX7D_USB_HSIC_ROOT_CLK] = imx_clk_gate4("usb_hsic_root_clk", "usb_hsic_post_div", base + 0x4690, 0);
clks[IMX7D_SDMA_CORE_CLK] = imx_clk_gate4("sdma_root_clk", "ahb_root_clk", base + 0x4480, 0);
clks[IMX7D_PCIE_CTRL_ROOT_CLK] = imx_clk_gate4("pcie_ctrl_root_clk", "pcie_ctrl_post_div", base + 0x4600, 0);
clks[IMX7D_PCIE_PHY_ROOT_CLK] = imx_clk_gate4("pcie_phy_root_clk", "pcie_phy_post_div", base + 0x4600, 0);
@@ -863,6 +862,9 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_CSI_MCLK_ROOT_CLK] = imx_clk_gate4("csi_mclk_root_clk", "csi_mclk_post_div", base + 0x4490, 0);
clks[IMX7D_AUDIO_MCLK_ROOT_CLK] = imx_clk_gate4("audio_mclk_root_clk", "audio_mclk_post_div", base + 0x4790, 0);
clks[IMX7D_WRCLK_ROOT_CLK] = imx_clk_gate4("wrclk_root_clk", "wrclk_post_div", base + 0x47a0, 0);
+ clks[IMX7D_USB_CTRL_CLK] = imx_clk_gate4("usb_ctrl_clk", "ahb_root_clk", base + 0x4680, 0);
+ clks[IMX7D_USB_PHY1_CLK] = imx_clk_gate4("usb_phy1_clk", "pll_usb1_main_clk", base + 0x46a0, 0);
+ clks[IMX7D_USB_PHY2_CLK] = imx_clk_gate4("usb_phy2_clk", "pll_usb_main_clk", base + 0x46b0, 0);
clks[IMX7D_ADC_ROOT_CLK] = imx_clk_gate4("adc_root_clk", "ipg_root_clk", base + 0x4200, 0);
clks[IMX7D_GPT_3M_CLK] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8);
@@ -882,12 +884,23 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
clk_prepare_enable(clks[clks_init_on[i]]);
+ clk_set_parent(clks[IMX7D_PLL_ARM_MAIN_BYPASS], clks[IMX7D_PLL_ARM_MAIN]);
+ clk_set_parent(clks[IMX7D_PLL_DRAM_MAIN_BYPASS], clks[IMX7D_PLL_DRAM_MAIN]);
+ clk_set_parent(clks[IMX7D_PLL_SYS_MAIN_BYPASS], clks[IMX7D_PLL_SYS_MAIN]);
+ clk_set_parent(clks[IMX7D_PLL_ENET_MAIN_BYPASS], clks[IMX7D_PLL_ENET_MAIN]);
+ clk_set_parent(clks[IMX7D_PLL_AUDIO_MAIN_BYPASS], clks[IMX7D_PLL_AUDIO_MAIN]);
+ clk_set_parent(clks[IMX7D_PLL_VIDEO_MAIN_BYPASS], clks[IMX7D_PLL_VIDEO_MAIN]);
+
/* use old gpt clk setting, gpt1 root clk must be twice as gpt counter freq */
clk_set_parent(clks[IMX7D_GPT1_ROOT_SRC], clks[IMX7D_OSC_24M_CLK]);
/* set uart module clock's parent clock source that must be great then 80MHz */
clk_set_parent(clks[IMX7D_UART1_ROOT_SRC], clks[IMX7D_OSC_24M_CLK]);
+	/* The USB_PLL at the CCM is sourced from USBOTG2; register the fixed 480MHz USBPHY clocks */
+ clks[IMX7D_USB1_MAIN_480M_CLK] = imx_clk_fixed_factor("pll_usb1_main_clk", "osc", 20, 1);
+ clks[IMX7D_USB_MAIN_480M_CLK] = imx_clk_fixed_factor("pll_usb_main_clk", "osc", 20, 1);
+
imx_register_uart_clocks(uart_clks);
}
diff --git a/drivers/clk/imx/clk-pllv2.c b/drivers/clk/imx/clk-pllv2.c
index 85b5cbe9744c..eeba3cb14e2d 100644
--- a/drivers/clk/imx/clk-pllv2.c
+++ b/drivers/clk/imx/clk-pllv2.c
@@ -182,8 +182,12 @@ static long clk_pllv2_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
u32 dp_op, dp_mfd, dp_mfn;
+ int ret;
+
+ ret = __clk_pllv2_set_rate(rate, *prate, &dp_op, &dp_mfd, &dp_mfn);
+ if (ret)
+ return ret;
- __clk_pllv2_set_rate(rate, *prate, &dp_op, &dp_mfd, &dp_mfn);
return __clk_pllv2_recalc_rate(*prate, MXC_PLL_DP_CTL_DPDCK0_2_EN,
dp_op, dp_mfd, dp_mfn);
}
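The clk-pllv2 change above makes the round_rate callback fail cleanly when the divider computation fails, instead of reporting a rate derived from uninitialized values. The same pattern in general form; example_compute_divider is a hypothetical helper:

	static long example_round_rate(struct clk_hw *hw, unsigned long rate,
				       unsigned long *prate)
	{
		u32 div;
		int ret;

		ret = example_compute_divider(rate, *prate, &div);
		if (ret)
			return ret;	/* a negative error code is a valid long return */

		return *prate / div;
	}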
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index d69c4bbf3597..8076ec040f37 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -123,6 +123,13 @@ static inline struct clk *imx_clk_gate(const char *name, const char *parent,
shift, 0, &imx_ccm_lock);
}
+static inline struct clk *imx_clk_gate_flags(const char *name, const char *parent,
+ void __iomem *reg, u8 shift, unsigned long flags)
+{
+ return clk_register_gate(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg,
+ shift, 0, &imx_ccm_lock);
+}
+
static inline struct clk *imx_clk_gate_dis(const char *name, const char *parent,
void __iomem *reg, u8 shift)
{
@@ -137,6 +144,13 @@ static inline struct clk *imx_clk_gate2(const char *name, const char *parent,
shift, 0x3, 0, &imx_ccm_lock, NULL);
}
+static inline struct clk *imx_clk_gate2_flags(const char *name, const char *parent,
+ void __iomem *reg, u8 shift, unsigned long flags)
+{
+ return clk_register_gate2(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg,
+ shift, 0x3, 0, &imx_ccm_lock, NULL);
+}
+
static inline struct clk *imx_clk_gate2_shared(const char *name,
const char *parent, void __iomem *reg, u8 shift,
unsigned int *share_count)
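The two new *_flags wrappers above let i.MX clock drivers pass extra framework flags, most commonly CLK_IS_CRITICAL, without open-coding clk_register_gate(). Usage mirrors the call sites added in clk-imx6sll.c earlier in this patch:

	/* keep the ROM and OCRAM bus gates enabled even with no consumer */
	clks[IMX6SLL_CLK_ROM]   = imx_clk_gate2_flags("rom", "ahb",
						      base + 0x7c, 0, CLK_IS_CRITICAL);
	clks[IMX6SLL_CLK_OCRAM] = imx_clk_gate_flags("ocram", "ahb",
						     base + 0x74, 28, CLK_IS_CRITICAL);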
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
index 9cdf9d5050ac..4cb70bed89a9 100644
--- a/drivers/clk/keystone/sci-clk.c
+++ b/drivers/clk/keystone/sci-clk.c
@@ -29,21 +29,10 @@
#define SCI_CLK_INPUT_TERMINATION BIT(2)
/**
- * struct sci_clk_data - TI SCI clock data
- * @dev: device index
- * @num_clks: number of clocks for this device
- */
-struct sci_clk_data {
- u16 dev;
- u16 num_clks;
-};
-
-/**
* struct sci_clk_provider - TI SCI clock provider representation
* @sci: Handle to the System Control Interface protocol handler
* @ops: Pointer to the SCI ops to be used by the clocks
* @dev: Device pointer for the clock provider
- * @clk_data: Clock data
* @clocks: Clocks array for this device
* @num_clocks: Total number of clocks for this provider
*/
@@ -51,8 +40,7 @@ struct sci_clk_provider {
const struct ti_sci_handle *sci;
const struct ti_sci_clk_ops *ops;
struct device *dev;
- const struct sci_clk_data *clk_data;
- struct clk_hw **clocks;
+ struct sci_clk **clocks;
int num_clocks;
};
@@ -61,6 +49,7 @@ struct sci_clk_provider {
* @hw: Hardware clock cookie for common clock framework
* @dev_id: Device index
* @clk_id: Clock index
+ * @num_parents: Number of parents for this clock
* @provider: Master clock provider
* @flags: Flags for the clock
*/
@@ -68,6 +57,7 @@ struct sci_clk {
struct clk_hw hw;
u16 dev_id;
u8 clk_id;
+ u8 num_parents;
struct sci_clk_provider *provider;
u8 flags;
};
@@ -273,38 +263,22 @@ static const struct clk_ops sci_clk_ops = {
/**
* _sci_clk_get - Gets a handle for an SCI clock
* @provider: Handle to SCI clock provider
- * @dev_id: device ID for the clock to register
- * @clk_id: clock ID for the clock to register
+ * @sci_clk: Handle to the SCI clock to populate
*
* Gets a handle to an existing TI SCI hw clock, or builds a new clock
* entry and registers it with the common clock framework. Called from
* the common clock framework, when a corresponding of_clk_get call is
* executed, or recursively from itself when parsing parent clocks.
- * Returns a pointer to the hw clock struct, or ERR_PTR value in failure.
+ * Returns 0 on success, negative error code on failure.
*/
-static struct clk_hw *_sci_clk_build(struct sci_clk_provider *provider,
- u16 dev_id, u8 clk_id)
+static int _sci_clk_build(struct sci_clk_provider *provider,
+ struct sci_clk *sci_clk)
{
struct clk_init_data init = { NULL };
- struct sci_clk *sci_clk = NULL;
char *name = NULL;
char **parent_names = NULL;
int i;
- int ret;
-
- sci_clk = devm_kzalloc(provider->dev, sizeof(*sci_clk), GFP_KERNEL);
- if (!sci_clk)
- return ERR_PTR(-ENOMEM);
-
- sci_clk->dev_id = dev_id;
- sci_clk->clk_id = clk_id;
- sci_clk->provider = provider;
-
- ret = provider->ops->get_num_parents(provider->sci, dev_id,
- clk_id,
- &init.num_parents);
- if (ret)
- goto err;
+ int ret = 0;
name = kasprintf(GFP_KERNEL, "%s:%d:%d", dev_name(provider->dev),
sci_clk->dev_id, sci_clk->clk_id);
@@ -317,11 +291,11 @@ static struct clk_hw *_sci_clk_build(struct sci_clk_provider *provider,
* to have mux functionality. Otherwise it is going to act as a root
* clock.
*/
- if (init.num_parents < 2)
- init.num_parents = 0;
+ if (sci_clk->num_parents < 2)
+ sci_clk->num_parents = 0;
- if (init.num_parents) {
- parent_names = kcalloc(init.num_parents, sizeof(char *),
+ if (sci_clk->num_parents) {
+ parent_names = kcalloc(sci_clk->num_parents, sizeof(char *),
GFP_KERNEL);
if (!parent_names) {
@@ -329,7 +303,7 @@ static struct clk_hw *_sci_clk_build(struct sci_clk_provider *provider,
goto err;
}
- for (i = 0; i < init.num_parents; i++) {
+ for (i = 0; i < sci_clk->num_parents; i++) {
char *parent_name;
parent_name = kasprintf(GFP_KERNEL, "%s:%d:%d",
@@ -346,6 +320,7 @@ static struct clk_hw *_sci_clk_build(struct sci_clk_provider *provider,
}
init.ops = &sci_clk_ops;
+ init.num_parents = sci_clk->num_parents;
sci_clk->hw.init = &init;
ret = devm_clk_hw_register(provider->dev, &sci_clk->hw);
@@ -354,7 +329,7 @@ static struct clk_hw *_sci_clk_build(struct sci_clk_provider *provider,
err:
if (parent_names) {
- for (i = 0; i < init.num_parents; i++)
+ for (i = 0; i < sci_clk->num_parents; i++)
kfree(parent_names[i]);
kfree(parent_names);
@@ -362,10 +337,7 @@ err:
kfree(name);
- if (ret)
- return ERR_PTR(ret);
-
- return &sci_clk->hw;
+ return ret;
}
static int _cmp_sci_clk(const void *a, const void *b)
@@ -414,253 +386,20 @@ static struct clk_hw *sci_clk_get(struct of_phandle_args *clkspec, void *data)
static int ti_sci_init_clocks(struct sci_clk_provider *p)
{
- const struct sci_clk_data *data = p->clk_data;
- struct clk_hw *hw;
int i;
- int num_clks = 0;
-
- while (data->num_clks) {
- num_clks += data->num_clks;
- data++;
- }
-
- p->num_clocks = num_clks;
-
- p->clocks = devm_kcalloc(p->dev, num_clks, sizeof(struct sci_clk),
- GFP_KERNEL);
- if (!p->clocks)
- return -ENOMEM;
-
- num_clks = 0;
-
- data = p->clk_data;
-
- while (data->num_clks) {
- for (i = 0; i < data->num_clks; i++) {
- hw = _sci_clk_build(p, data->dev, i);
- if (!IS_ERR(hw)) {
- p->clocks[num_clks++] = hw;
- continue;
- }
-
- /* Skip any holes in the clock lists */
- if (PTR_ERR(hw) == -ENODEV)
- continue;
+ int ret;
- return PTR_ERR(hw);
- }
- data++;
+ for (i = 0; i < p->num_clocks; i++) {
+ ret = _sci_clk_build(p, p->clocks[i]);
+ if (ret)
+ return ret;
}
return 0;
}
-static const struct sci_clk_data k2g_clk_data[] = {
- /* pmmc */
- { .dev = 0x0, .num_clks = 4 },
-
- /* mlb0 */
- { .dev = 0x1, .num_clks = 5 },
-
- /* dss0 */
- { .dev = 0x2, .num_clks = 2 },
-
- /* mcbsp0 */
- { .dev = 0x3, .num_clks = 8 },
-
- /* mcasp0 */
- { .dev = 0x4, .num_clks = 8 },
-
- /* mcasp1 */
- { .dev = 0x5, .num_clks = 8 },
-
- /* mcasp2 */
- { .dev = 0x6, .num_clks = 8 },
-
- /* dcan0 */
- { .dev = 0x8, .num_clks = 2 },
-
- /* dcan1 */
- { .dev = 0x9, .num_clks = 2 },
-
- /* emif0 */
- { .dev = 0xa, .num_clks = 6 },
-
- /* mmchs0 */
- { .dev = 0xb, .num_clks = 3 },
-
- /* mmchs1 */
- { .dev = 0xc, .num_clks = 3 },
-
- /* gpmc0 */
- { .dev = 0xd, .num_clks = 1 },
-
- /* elm0 */
- { .dev = 0xe, .num_clks = 1 },
-
- /* spi0 */
- { .dev = 0x10, .num_clks = 1 },
-
- /* spi1 */
- { .dev = 0x11, .num_clks = 1 },
-
- /* spi2 */
- { .dev = 0x12, .num_clks = 1 },
-
- /* spi3 */
- { .dev = 0x13, .num_clks = 1 },
-
- /* icss0 */
- { .dev = 0x14, .num_clks = 6 },
-
- /* icss1 */
- { .dev = 0x15, .num_clks = 6 },
-
- /* usb0 */
- { .dev = 0x16, .num_clks = 7 },
-
- /* usb1 */
- { .dev = 0x17, .num_clks = 7 },
-
- /* nss0 */
- { .dev = 0x18, .num_clks = 14 },
-
- /* pcie0 */
- { .dev = 0x19, .num_clks = 1 },
-
- /* gpio0 */
- { .dev = 0x1b, .num_clks = 1 },
-
- /* gpio1 */
- { .dev = 0x1c, .num_clks = 1 },
-
- /* timer64_0 */
- { .dev = 0x1d, .num_clks = 9 },
-
- /* timer64_1 */
- { .dev = 0x1e, .num_clks = 9 },
-
- /* timer64_2 */
- { .dev = 0x1f, .num_clks = 9 },
-
- /* timer64_3 */
- { .dev = 0x20, .num_clks = 9 },
-
- /* timer64_4 */
- { .dev = 0x21, .num_clks = 9 },
-
- /* timer64_5 */
- { .dev = 0x22, .num_clks = 9 },
-
- /* timer64_6 */
- { .dev = 0x23, .num_clks = 9 },
-
- /* msgmgr0 */
- { .dev = 0x25, .num_clks = 1 },
-
- /* bootcfg0 */
- { .dev = 0x26, .num_clks = 1 },
-
- /* arm_bootrom0 */
- { .dev = 0x27, .num_clks = 1 },
-
- /* dsp_bootrom0 */
- { .dev = 0x29, .num_clks = 1 },
-
- /* debugss0 */
- { .dev = 0x2b, .num_clks = 8 },
-
- /* uart0 */
- { .dev = 0x2c, .num_clks = 1 },
-
- /* uart1 */
- { .dev = 0x2d, .num_clks = 1 },
-
- /* uart2 */
- { .dev = 0x2e, .num_clks = 1 },
-
- /* ehrpwm0 */
- { .dev = 0x2f, .num_clks = 1 },
-
- /* ehrpwm1 */
- { .dev = 0x30, .num_clks = 1 },
-
- /* ehrpwm2 */
- { .dev = 0x31, .num_clks = 1 },
-
- /* ehrpwm3 */
- { .dev = 0x32, .num_clks = 1 },
-
- /* ehrpwm4 */
- { .dev = 0x33, .num_clks = 1 },
-
- /* ehrpwm5 */
- { .dev = 0x34, .num_clks = 1 },
-
- /* eqep0 */
- { .dev = 0x35, .num_clks = 1 },
-
- /* eqep1 */
- { .dev = 0x36, .num_clks = 1 },
-
- /* eqep2 */
- { .dev = 0x37, .num_clks = 1 },
-
- /* ecap0 */
- { .dev = 0x38, .num_clks = 1 },
-
- /* ecap1 */
- { .dev = 0x39, .num_clks = 1 },
-
- /* i2c0 */
- { .dev = 0x3a, .num_clks = 1 },
-
- /* i2c1 */
- { .dev = 0x3b, .num_clks = 1 },
-
- /* i2c2 */
- { .dev = 0x3c, .num_clks = 1 },
-
- /* edma0 */
- { .dev = 0x3f, .num_clks = 2 },
-
- /* semaphore0 */
- { .dev = 0x40, .num_clks = 1 },
-
- /* intc0 */
- { .dev = 0x41, .num_clks = 1 },
-
- /* gic0 */
- { .dev = 0x42, .num_clks = 1 },
-
- /* qspi0 */
- { .dev = 0x43, .num_clks = 5 },
-
- /* arm_64b_counter0 */
- { .dev = 0x44, .num_clks = 2 },
-
- /* tetris0 */
- { .dev = 0x45, .num_clks = 2 },
-
- /* cgem0 */
- { .dev = 0x46, .num_clks = 2 },
-
- /* msmc0 */
- { .dev = 0x47, .num_clks = 1 },
-
- /* cbass0 */
- { .dev = 0x49, .num_clks = 1 },
-
- /* board0 */
- { .dev = 0x4c, .num_clks = 36 },
-
- /* edma1 */
- { .dev = 0x4f, .num_clks = 2 },
- { .num_clks = 0 },
-};
-
static const struct of_device_id ti_sci_clk_of_match[] = {
- { .compatible = "ti,k2g-sci-clk", .data = &k2g_clk_data },
+ { .compatible = "ti,k2g-sci-clk" },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, ti_sci_clk_of_match);
@@ -681,12 +420,16 @@ static int ti_sci_clk_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct sci_clk_provider *provider;
const struct ti_sci_handle *handle;
- const struct sci_clk_data *data;
int ret;
-
- data = of_device_get_match_data(dev);
- if (!data)
- return -EINVAL;
+ int num_clks = 0;
+ struct sci_clk **clks = NULL;
+ struct sci_clk **tmp_clks;
+ struct sci_clk *sci_clk;
+ int max_clks = 0;
+ int clk_id = 0;
+ int dev_id = 0;
+ u8 num_parents;
+ int gap_size = 0;
handle = devm_ti_sci_get_handle(dev);
if (IS_ERR(handle))
@@ -696,12 +439,69 @@ static int ti_sci_clk_probe(struct platform_device *pdev)
if (!provider)
return -ENOMEM;
- provider->clk_data = data;
-
provider->sci = handle;
provider->ops = &handle->ops.clk_ops;
provider->dev = dev;
+ while (1) {
+ ret = provider->ops->get_num_parents(provider->sci, dev_id,
+ clk_id, &num_parents);
+ if (ret) {
+ gap_size++;
+ if (!clk_id) {
+ if (gap_size >= 5)
+ break;
+ dev_id++;
+ } else {
+ if (gap_size >= 2) {
+ dev_id++;
+ clk_id = 0;
+ gap_size = 0;
+ } else {
+ clk_id++;
+ }
+ }
+ continue;
+ }
+
+ gap_size = 0;
+
+ if (num_clks == max_clks) {
+ tmp_clks = devm_kmalloc_array(dev, max_clks + 64,
+ sizeof(sci_clk),
+ GFP_KERNEL);
+ memcpy(tmp_clks, clks, max_clks * sizeof(sci_clk));
+ if (max_clks)
+ devm_kfree(dev, clks);
+ max_clks += 64;
+ clks = tmp_clks;
+ }
+
+ sci_clk = devm_kzalloc(dev, sizeof(*sci_clk), GFP_KERNEL);
+ if (!sci_clk)
+ return -ENOMEM;
+ sci_clk->dev_id = dev_id;
+ sci_clk->clk_id = clk_id;
+ sci_clk->provider = provider;
+ sci_clk->num_parents = num_parents;
+
+ clks[num_clks] = sci_clk;
+
+ clk_id++;
+ num_clks++;
+ }
+
+ provider->clocks = devm_kmalloc_array(dev, num_clks, sizeof(sci_clk),
+ GFP_KERNEL);
+ if (!provider->clocks)
+ return -ENOMEM;
+
+ memcpy(provider->clocks, clks, num_clks * sizeof(sci_clk));
+
+ provider->num_clocks = num_clks;
+
+ devm_kfree(dev, clks);
+
ret = ti_sci_init_clocks(provider);
if (ret) {
pr_err("ti-sci-init-clocks failed.\n");
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 1f9ea0f21df1..92afe5989e97 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -54,6 +54,12 @@ config COMMON_CLK_MT2701_BDPSYS
---help---
This driver supports MediaTek MT2701 bdpsys clocks.
+config COMMON_CLK_MT2701_AUDSYS
+ bool "Clock driver for Mediatek MT2701 audsys"
+ depends on COMMON_CLK_MT2701
+ ---help---
+ This driver supports Mediatek MT2701 audsys clocks.
+
config COMMON_CLK_MT2712
bool "Clock driver for MediaTek MT2712"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 5160fdc4bbb8..b80eff2abb31 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_COMMON_CLK_MT6797_MMSYS) += clk-mt6797-mm.o
obj-$(CONFIG_COMMON_CLK_MT6797_VDECSYS) += clk-mt6797-vdec.o
obj-$(CONFIG_COMMON_CLK_MT6797_VENCSYS) += clk-mt6797-venc.o
obj-$(CONFIG_COMMON_CLK_MT2701) += clk-mt2701.o
+obj-$(CONFIG_COMMON_CLK_MT2701_AUDSYS) += clk-mt2701-aud.o
obj-$(CONFIG_COMMON_CLK_MT2701_BDPSYS) += clk-mt2701-bdp.o
obj-$(CONFIG_COMMON_CLK_MT2701_ETHSYS) += clk-mt2701-eth.o
obj-$(CONFIG_COMMON_CLK_MT2701_HIFSYS) += clk-mt2701-hif.o
diff --git a/drivers/clk/mediatek/clk-mt2701-aud.c b/drivers/clk/mediatek/clk-mt2701-aud.c
new file mode 100644
index 000000000000..e66896a44fad
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2701-aud.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2701-clk.h>
+
+#define GATE_AUDIO0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &audio0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+#define GATE_AUDIO1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &audio1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+#define GATE_AUDIO2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &audio2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+#define GATE_AUDIO3(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &audio3_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+static const struct mtk_gate_regs audio0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs audio1_cg_regs = {
+ .set_ofs = 0x10,
+ .clr_ofs = 0x10,
+ .sta_ofs = 0x10,
+};
+
+static const struct mtk_gate_regs audio2_cg_regs = {
+ .set_ofs = 0x14,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x14,
+};
+
+static const struct mtk_gate_regs audio3_cg_regs = {
+ .set_ofs = 0x634,
+ .clr_ofs = 0x634,
+ .sta_ofs = 0x634,
+};
+
+static const struct mtk_gate audio_clks[] = {
+ /* AUDIO0 */
+ GATE_AUDIO0(CLK_AUD_AFE, "audio_afe", "aud_intbus_sel", 2),
+ GATE_AUDIO0(CLK_AUD_HDMI, "audio_hdmi", "audpll_sel", 20),
+ GATE_AUDIO0(CLK_AUD_SPDF, "audio_spdf", "audpll_sel", 21),
+ GATE_AUDIO0(CLK_AUD_SPDF2, "audio_spdf2", "audpll_sel", 22),
+ GATE_AUDIO0(CLK_AUD_APLL, "audio_apll", "audpll_sel", 23),
+ /* AUDIO1 */
+ GATE_AUDIO1(CLK_AUD_I2SIN1, "audio_i2sin1", "aud_mux1_sel", 0),
+ GATE_AUDIO1(CLK_AUD_I2SIN2, "audio_i2sin2", "aud_mux1_sel", 1),
+ GATE_AUDIO1(CLK_AUD_I2SIN3, "audio_i2sin3", "aud_mux1_sel", 2),
+ GATE_AUDIO1(CLK_AUD_I2SIN4, "audio_i2sin4", "aud_mux1_sel", 3),
+ GATE_AUDIO1(CLK_AUD_I2SIN5, "audio_i2sin5", "aud_mux1_sel", 4),
+ GATE_AUDIO1(CLK_AUD_I2SIN6, "audio_i2sin6", "aud_mux1_sel", 5),
+ GATE_AUDIO1(CLK_AUD_I2SO1, "audio_i2so1", "aud_mux1_sel", 6),
+ GATE_AUDIO1(CLK_AUD_I2SO2, "audio_i2so2", "aud_mux1_sel", 7),
+ GATE_AUDIO1(CLK_AUD_I2SO3, "audio_i2so3", "aud_mux1_sel", 8),
+ GATE_AUDIO1(CLK_AUD_I2SO4, "audio_i2so4", "aud_mux1_sel", 9),
+ GATE_AUDIO1(CLK_AUD_I2SO5, "audio_i2so5", "aud_mux1_sel", 10),
+ GATE_AUDIO1(CLK_AUD_I2SO6, "audio_i2so6", "aud_mux1_sel", 11),
+ GATE_AUDIO1(CLK_AUD_ASRCI1, "audio_asrci1", "asm_h_sel", 12),
+ GATE_AUDIO1(CLK_AUD_ASRCI2, "audio_asrci2", "asm_h_sel", 13),
+ GATE_AUDIO1(CLK_AUD_ASRCO1, "audio_asrco1", "asm_h_sel", 14),
+ GATE_AUDIO1(CLK_AUD_ASRCO2, "audio_asrco2", "asm_h_sel", 15),
+ GATE_AUDIO1(CLK_AUD_INTDIR, "audio_intdir", "intdir_sel", 20),
+ GATE_AUDIO1(CLK_AUD_A1SYS, "audio_a1sys", "aud_mux1_sel", 21),
+ GATE_AUDIO1(CLK_AUD_A2SYS, "audio_a2sys", "aud_mux2_sel", 22),
+ GATE_AUDIO1(CLK_AUD_AFE_CONN, "audio_afe_conn", "aud_mux1_sel", 23),
+ GATE_AUDIO1(CLK_AUD_AFE_MRGIF, "audio_afe_mrgif", "aud_mux1_sel", 25),
+ /* AUDIO2 */
+ GATE_AUDIO2(CLK_AUD_MMIF_UL1, "audio_ul1", "aud_mux1_sel", 0),
+ GATE_AUDIO2(CLK_AUD_MMIF_UL2, "audio_ul2", "aud_mux1_sel", 1),
+ GATE_AUDIO2(CLK_AUD_MMIF_UL3, "audio_ul3", "aud_mux1_sel", 2),
+ GATE_AUDIO2(CLK_AUD_MMIF_UL4, "audio_ul4", "aud_mux1_sel", 3),
+ GATE_AUDIO2(CLK_AUD_MMIF_UL5, "audio_ul5", "aud_mux1_sel", 4),
+ GATE_AUDIO2(CLK_AUD_MMIF_UL6, "audio_ul6", "aud_mux1_sel", 5),
+ GATE_AUDIO2(CLK_AUD_MMIF_DL1, "audio_dl1", "aud_mux1_sel", 6),
+ GATE_AUDIO2(CLK_AUD_MMIF_DL2, "audio_dl2", "aud_mux1_sel", 7),
+ GATE_AUDIO2(CLK_AUD_MMIF_DL3, "audio_dl3", "aud_mux1_sel", 8),
+ GATE_AUDIO2(CLK_AUD_MMIF_DL4, "audio_dl4", "aud_mux1_sel", 9),
+ GATE_AUDIO2(CLK_AUD_MMIF_DL5, "audio_dl5", "aud_mux1_sel", 10),
+ GATE_AUDIO2(CLK_AUD_MMIF_DL6, "audio_dl6", "aud_mux1_sel", 11),
+ GATE_AUDIO2(CLK_AUD_MMIF_DLMCH, "audio_dlmch", "aud_mux1_sel", 12),
+ GATE_AUDIO2(CLK_AUD_MMIF_ARB1, "audio_arb1", "aud_mux1_sel", 13),
+ GATE_AUDIO2(CLK_AUD_MMIF_AWB1, "audio_awb", "aud_mux1_sel", 14),
+ GATE_AUDIO2(CLK_AUD_MMIF_AWB2, "audio_awb2", "aud_mux1_sel", 15),
+ GATE_AUDIO2(CLK_AUD_MMIF_DAI, "audio_dai", "aud_mux1_sel", 16),
+ /* AUDIO3 */
+ GATE_AUDIO3(CLK_AUD_ASRCI3, "audio_asrci3", "asm_h_sel", 2),
+ GATE_AUDIO3(CLK_AUD_ASRCI4, "audio_asrci4", "asm_h_sel", 3),
+ GATE_AUDIO3(CLK_AUD_ASRCI5, "audio_asrci5", "asm_h_sel", 4),
+ GATE_AUDIO3(CLK_AUD_ASRCI6, "audio_asrci6", "asm_h_sel", 5),
+ GATE_AUDIO3(CLK_AUD_ASRCO3, "audio_asrco3", "asm_h_sel", 6),
+ GATE_AUDIO3(CLK_AUD_ASRCO4, "audio_asrco4", "asm_h_sel", 7),
+ GATE_AUDIO3(CLK_AUD_ASRCO5, "audio_asrco5", "asm_h_sel", 8),
+ GATE_AUDIO3(CLK_AUD_ASRCO6, "audio_asrco6", "asm_h_sel", 9),
+ GATE_AUDIO3(CLK_AUD_MEM_ASRC1, "audio_mem_asrc1", "asm_h_sel", 10),
+ GATE_AUDIO3(CLK_AUD_MEM_ASRC2, "audio_mem_asrc2", "asm_h_sel", 11),
+ GATE_AUDIO3(CLK_AUD_MEM_ASRC3, "audio_mem_asrc3", "asm_h_sel", 12),
+ GATE_AUDIO3(CLK_AUD_MEM_ASRC4, "audio_mem_asrc4", "asm_h_sel", 13),
+ GATE_AUDIO3(CLK_AUD_MEM_ASRC5, "audio_mem_asrc5", "asm_h_sel", 14),
+};
+
+static const struct of_device_id of_match_clk_mt2701_aud[] = {
+ { .compatible = "mediatek,mt2701-audsys", },
+ {}
+};
+
+static int clk_mt2701_aud_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_AUD_NR);
+
+ mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r) {
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ goto err_clk_provider;
+ }
+
+ r = devm_of_platform_populate(&pdev->dev);
+ if (r)
+ goto err_plat_populate;
+
+ return 0;
+
+err_plat_populate:
+ of_clk_del_provider(node);
+err_clk_provider:
+ return r;
+}
+
+static struct platform_driver clk_mt2701_aud_drv = {
+ .probe = clk_mt2701_aud_probe,
+ .driver = {
+ .name = "clk-mt2701-aud",
+ .of_match_table = of_match_clk_mt2701_aud,
+ },
+};
+
+builtin_platform_driver(clk_mt2701_aud_drv);
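
Note on the new file above: the audsys driver registers its gates through a clk_onecell provider, so consumers address them with the CLK_AUD_* indices from dt-bindings/clock/mt2701-clk.h. As a hedged illustration of how a consumer driver would typically claim and enable one of these gates through the generic clk API; the "afe" clock-names entry and the example function are illustrative assumptions, not taken from the patch.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Illustrative only: enable a gate provided by the audsys driver. */
static int example_enable_afe(struct device *dev)
{
        /* "afe" is assumed to match a clock-names entry in the consumer's DT node */
        struct clk *afe = devm_clk_get(dev, "afe");

        if (IS_ERR(afe))
                return PTR_ERR(afe);

        return clk_prepare_enable(afe);
}
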
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
index 8e7f16fd87c9..deca7527f92f 100644
--- a/drivers/clk/mediatek/clk-mt2701.c
+++ b/drivers/clk/mediatek/clk-mt2701.c
@@ -148,6 +148,7 @@ static const struct mtk_fixed_factor top_fixed_divs[] = {
FACTOR(CLK_TOP_CLK26M_D8, "clk26m_d8", "clk26m", 1, 8),
FACTOR(CLK_TOP_32K_INTERNAL, "32k_internal", "clk26m", 1, 793),
FACTOR(CLK_TOP_32K_EXTERNAL, "32k_external", "rtc32k", 1, 1),
+ FACTOR(CLK_TOP_AXISEL_D4, "axisel_d4", "axi_sel", 1, 4),
};
static const char * const axi_parents[] = {
@@ -857,13 +858,13 @@ static const struct mtk_gate peri_clks[] = {
GATE_PERI0(CLK_PERI_USB1, "usb1_ck", "usb20_sel", 11),
GATE_PERI0(CLK_PERI_USB0, "usb0_ck", "usb20_sel", 10),
GATE_PERI0(CLK_PERI_PWM, "pwm_ck", "axi_sel", 9),
- GATE_PERI0(CLK_PERI_PWM7, "pwm7_ck", "axi_sel", 8),
- GATE_PERI0(CLK_PERI_PWM6, "pwm6_ck", "axi_sel", 7),
- GATE_PERI0(CLK_PERI_PWM5, "pwm5_ck", "axi_sel", 6),
- GATE_PERI0(CLK_PERI_PWM4, "pwm4_ck", "axi_sel", 5),
- GATE_PERI0(CLK_PERI_PWM3, "pwm3_ck", "axi_sel", 4),
- GATE_PERI0(CLK_PERI_PWM2, "pwm2_ck", "axi_sel", 3),
- GATE_PERI0(CLK_PERI_PWM1, "pwm1_ck", "axi_sel", 2),
+ GATE_PERI0(CLK_PERI_PWM7, "pwm7_ck", "axisel_d4", 8),
+ GATE_PERI0(CLK_PERI_PWM6, "pwm6_ck", "axisel_d4", 7),
+ GATE_PERI0(CLK_PERI_PWM5, "pwm5_ck", "axisel_d4", 6),
+ GATE_PERI0(CLK_PERI_PWM4, "pwm4_ck", "axisel_d4", 5),
+ GATE_PERI0(CLK_PERI_PWM3, "pwm3_ck", "axisel_d4", 4),
+ GATE_PERI0(CLK_PERI_PWM2, "pwm2_ck", "axisel_d4", 3),
+ GATE_PERI0(CLK_PERI_PWM1, "pwm1_ck", "axisel_d4", 2),
GATE_PERI0(CLK_PERI_THERM, "therm_ck", "axi_sel", 1),
GATE_PERI0(CLK_PERI_NFI, "nfi_ck", "nfi2x_sel", 0),
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
index 498d13799388..991d4093726e 100644
--- a/drivers/clk/mediatek/clk-mt2712.c
+++ b/drivers/clk/mediatek/clk-mt2712.c
@@ -221,6 +221,8 @@ static const struct mtk_fixed_factor top_divs[] = {
4),
FACTOR(CLK_TOP_D2A_ULCLK_6P5M, "d2a_ulclk_6p5m", "clk26m", 1,
4),
+ FACTOR(CLK_TOP_APLL1_D3, "apll1_d3", "apll1_ck", 1,
+ 3),
};
static const char * const axi_parents[] = {
@@ -625,7 +627,7 @@ static const char * const ether_125m_parents[] = {
static const char * const ether_50m_parents[] = {
"clk26m",
"etherpll_50m",
- "univpll_d26",
+ "apll1_d3",
"univpll3_d4"
};
@@ -686,7 +688,7 @@ static const char * const i2c_parents[] = {
static const char * const msdc0p_aes_parents[] = {
"clk26m",
- "msdcpll_ck",
+ "syspll_d2",
"univpll_d3",
"vcodecpll_ck"
};
@@ -719,6 +721,17 @@ static const char * const aud_apll2_parents[] = {
"clkaud_ext_i_2"
};
+static const char * const apll1_ref_parents[] = {
+ "clkaud_ext_i_2",
+ "clkaud_ext_i_1",
+ "clki2si0_mck_i",
+ "clki2si1_mck_i",
+ "clki2si2_mck_i",
+ "clktdmin_mclk_i",
+ "clki2si2_mck_i",
+ "clktdmin_mclk_i"
+};
+
static const char * const audull_vtx_parents[] = {
"d2a_ulclk_6p5m",
"clkaud_ext_i_0"
@@ -886,6 +899,10 @@ static struct mtk_composite top_muxes[] = {
aud_apll2_parents, 0x134, 1, 1),
MUX(CLK_TOP_DA_AUDULL_VTX_6P5M_SEL, "audull_vtx_sel",
audull_vtx_parents, 0x134, 31, 1),
+ MUX(CLK_TOP_APLL1_REF_SEL, "apll1_ref_sel",
+ apll1_ref_parents, 0x134, 4, 3),
+ MUX(CLK_TOP_APLL2_REF_SEL, "apll2_ref_sel",
+ apll1_ref_parents, 0x134, 7, 3),
};
static const char * const mcu_mp0_parents[] = {
@@ -932,36 +949,56 @@ static const struct mtk_clk_divider top_adj_divs[] = {
DIV_ADJ(CLK_TOP_APLL_DIV7, "apll_div7", "i2si3_sel", 0x128, 24, 8),
};
-static const struct mtk_gate_regs top_cg_regs = {
+static const struct mtk_gate_regs top0_cg_regs = {
.set_ofs = 0x120,
.clr_ofs = 0x120,
.sta_ofs = 0x120,
};
-#define GATE_TOP(_id, _name, _parent, _shift) { \
+static const struct mtk_gate_regs top1_cg_regs = {
+ .set_ofs = 0x424,
+ .clr_ofs = 0x424,
+ .sta_ofs = 0x424,
+};
+
+#define GATE_TOP0(_id, _name, _parent, _shift) { \
.id = _id, \
.name = _name, \
.parent_name = _parent, \
- .regs = &top_cg_regs, \
+ .regs = &top0_cg_regs, \
.shift = _shift, \
.ops = &mtk_clk_gate_ops_no_setclr, \
}
+#define GATE_TOP1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
static const struct mtk_gate top_clks[] = {
- GATE_TOP(CLK_TOP_APLL_DIV_PDN0, "apll_div_pdn0", "i2so1_sel", 0),
- GATE_TOP(CLK_TOP_APLL_DIV_PDN1, "apll_div_pdn1", "i2so2_sel", 1),
- GATE_TOP(CLK_TOP_APLL_DIV_PDN2, "apll_div_pdn2", "i2so3_sel", 2),
- GATE_TOP(CLK_TOP_APLL_DIV_PDN3, "apll_div_pdn3", "tdmo0_sel", 3),
- GATE_TOP(CLK_TOP_APLL_DIV_PDN4, "apll_div_pdn4", "tdmo1_sel", 4),
- GATE_TOP(CLK_TOP_APLL_DIV_PDN5, "apll_div_pdn5", "i2si1_sel", 5),
- GATE_TOP(CLK_TOP_APLL_DIV_PDN6, "apll_div_pdn6", "i2si2_sel", 6),
- GATE_TOP(CLK_TOP_APLL_DIV_PDN7, "apll_div_pdn7", "i2si3_sel", 7),
+ /* TOP0 */
+ GATE_TOP0(CLK_TOP_APLL_DIV_PDN0, "apll_div_pdn0", "i2so1_sel", 0),
+ GATE_TOP0(CLK_TOP_APLL_DIV_PDN1, "apll_div_pdn1", "i2so2_sel", 1),
+ GATE_TOP0(CLK_TOP_APLL_DIV_PDN2, "apll_div_pdn2", "i2so3_sel", 2),
+ GATE_TOP0(CLK_TOP_APLL_DIV_PDN3, "apll_div_pdn3", "tdmo0_sel", 3),
+ GATE_TOP0(CLK_TOP_APLL_DIV_PDN4, "apll_div_pdn4", "tdmo1_sel", 4),
+ GATE_TOP0(CLK_TOP_APLL_DIV_PDN5, "apll_div_pdn5", "i2si1_sel", 5),
+ GATE_TOP0(CLK_TOP_APLL_DIV_PDN6, "apll_div_pdn6", "i2si2_sel", 6),
+ GATE_TOP0(CLK_TOP_APLL_DIV_PDN7, "apll_div_pdn7", "i2si3_sel", 7),
+ /* TOP1 */
+ GATE_TOP1(CLK_TOP_NFI2X_EN, "nfi2x_en", "nfi2x_sel", 0),
+ GATE_TOP1(CLK_TOP_NFIECC_EN, "nfiecc_en", "nfiecc_sel", 1),
+ GATE_TOP1(CLK_TOP_NFI1X_CK_EN, "nfi1x_ck_en", "nfi2x_sel", 2),
};
static const struct mtk_gate_regs infra_cg_regs = {
.set_ofs = 0x40,
.clr_ofs = 0x44,
- .sta_ofs = 0x40,
+ .sta_ofs = 0x48,
};
#define GATE_INFRA(_id, _name, _parent, _shift) { \
@@ -1120,6 +1157,10 @@ static const struct mtk_gate peri_clks[] = {
"msdc50_0_h_sel", 4),
GATE_PERI2(CLK_PERI_MSDC50_3_HCLK_EN, "per_msdc50_3_h",
"msdc50_3_h_sel", 5),
+ GATE_PERI2(CLK_PERI_MSDC30_0_QTR_EN, "per_msdc30_0_q",
+ "axi_sel", 6),
+ GATE_PERI2(CLK_PERI_MSDC30_3_QTR_EN, "per_msdc30_3_q",
+ "mem_sel", 7),
};
#define MT2712_PLL_FMAX (3000UL * MHZ)
diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c
index fad7d9fc53ba..4f3d47b41b3e 100644
--- a/drivers/clk/mediatek/clk-mt7622-aud.c
+++ b/drivers/clk/mediatek/clk-mt7622-aud.c
@@ -106,6 +106,7 @@ static const struct mtk_gate audio_clks[] = {
GATE_AUDIO1(CLK_AUDIO_INTDIR, "audio_intdir", "intdir_sel", 20),
GATE_AUDIO1(CLK_AUDIO_A1SYS, "audio_a1sys", "a1sys_hp_sel", 21),
GATE_AUDIO1(CLK_AUDIO_A2SYS, "audio_a2sys", "a2sys_hp_sel", 22),
+ GATE_AUDIO1(CLK_AUDIO_AFE_CONN, "audio_afe_conn", "a1sys_hp_sel", 23),
/* AUDIO2 */
GATE_AUDIO2(CLK_AUDIO_UL1, "audio_ul1", "a1sys_hp_sel", 0),
GATE_AUDIO2(CLK_AUDIO_UL2, "audio_ul2", "a1sys_hp_sel", 1),
@@ -149,11 +150,23 @@ static int clk_mt7622_audiosys_init(struct platform_device *pdev)
clk_data);
r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
- if (r)
+ if (r) {
dev_err(&pdev->dev,
"could not register clock provider: %s: %d\n",
pdev->name, r);
+ goto err_clk_provider;
+ }
+
+ r = devm_of_platform_populate(&pdev->dev);
+ if (r)
+ goto err_plat_populate;
+
+ return 0;
+
+err_plat_populate:
+ of_clk_del_provider(node);
+err_clk_provider:
return r;
}
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index 7694302c70a4..d5cbec522aec 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -3,10 +3,15 @@ config COMMON_CLK_AMLOGIC
depends on OF
depends on ARCH_MESON || COMPILE_TEST
+config COMMON_CLK_REGMAP_MESON
+ bool
+ select REGMAP
+
config COMMON_CLK_MESON8B
bool
depends on COMMON_CLK_AMLOGIC
select RESET_CONTROLLER
+ select COMMON_CLK_REGMAP_MESON
help
Support for the clock controller on AmLogic S802 (Meson8),
S805 (Meson8b) and S812 (Meson8m2) devices. Say Y if you
@@ -16,6 +21,8 @@ config COMMON_CLK_GXBB
bool
depends on COMMON_CLK_AMLOGIC
select RESET_CONTROLLER
+ select COMMON_CLK_REGMAP_MESON
+ select MFD_SYSCON
help
Support for the clock controller on AmLogic S905 devices, aka gxbb.
Say Y if you want peripherals and CPU frequency scaling to work.
@@ -24,6 +31,8 @@ config COMMON_CLK_AXG
bool
depends on COMMON_CLK_AMLOGIC
select RESET_CONTROLLER
+ select COMMON_CLK_REGMAP_MESON
+ select MFD_SYSCON
help
Support for the clock controller on AmLogic A113D devices, aka axg.
Say Y if you want peripherals and CPU frequency scaling to work.
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index 3c03ce583798..ffee82e60b7a 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -2,7 +2,8 @@
# Makefile for Meson specific clk
#
-obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-cpu.o clk-mpll.o clk-audio-divider.o
+obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-mpll.o clk-audio-divider.o
obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o
-obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o gxbb-aoclk-regmap.o gxbb-aoclk-32k.o
+obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o gxbb-aoclk-32k.o
obj-$(CONFIG_COMMON_CLK_AXG) += axg.o
+obj-$(CONFIG_COMMON_CLK_REGMAP_MESON) += clk-regmap.o
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 1294f3ad7cd5..5f5d468c1efe 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -11,125 +11,51 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
-#include <linux/init.h>
+#include <linux/regmap.h>
#include "clkc.h"
#include "axg.h"
static DEFINE_SPINLOCK(meson_clk_lock);
-static const struct pll_rate_table sys_pll_rate_table[] = {
- PLL_RATE(24000000, 56, 1, 2),
- PLL_RATE(48000000, 64, 1, 2),
- PLL_RATE(72000000, 72, 1, 2),
- PLL_RATE(96000000, 64, 1, 2),
- PLL_RATE(120000000, 80, 1, 2),
- PLL_RATE(144000000, 96, 1, 2),
- PLL_RATE(168000000, 56, 1, 1),
- PLL_RATE(192000000, 64, 1, 1),
- PLL_RATE(216000000, 72, 1, 1),
- PLL_RATE(240000000, 80, 1, 1),
- PLL_RATE(264000000, 88, 1, 1),
- PLL_RATE(288000000, 96, 1, 1),
- PLL_RATE(312000000, 52, 1, 2),
- PLL_RATE(336000000, 56, 1, 2),
- PLL_RATE(360000000, 60, 1, 2),
- PLL_RATE(384000000, 64, 1, 2),
- PLL_RATE(408000000, 68, 1, 2),
- PLL_RATE(432000000, 72, 1, 2),
- PLL_RATE(456000000, 76, 1, 2),
- PLL_RATE(480000000, 80, 1, 2),
- PLL_RATE(504000000, 84, 1, 2),
- PLL_RATE(528000000, 88, 1, 2),
- PLL_RATE(552000000, 92, 1, 2),
- PLL_RATE(576000000, 96, 1, 2),
- PLL_RATE(600000000, 50, 1, 1),
- PLL_RATE(624000000, 52, 1, 1),
- PLL_RATE(648000000, 54, 1, 1),
- PLL_RATE(672000000, 56, 1, 1),
- PLL_RATE(696000000, 58, 1, 1),
- PLL_RATE(720000000, 60, 1, 1),
- PLL_RATE(744000000, 62, 1, 1),
- PLL_RATE(768000000, 64, 1, 1),
- PLL_RATE(792000000, 66, 1, 1),
- PLL_RATE(816000000, 68, 1, 1),
- PLL_RATE(840000000, 70, 1, 1),
- PLL_RATE(864000000, 72, 1, 1),
- PLL_RATE(888000000, 74, 1, 1),
- PLL_RATE(912000000, 76, 1, 1),
- PLL_RATE(936000000, 78, 1, 1),
- PLL_RATE(960000000, 80, 1, 1),
- PLL_RATE(984000000, 82, 1, 1),
- PLL_RATE(1008000000, 84, 1, 1),
- PLL_RATE(1032000000, 86, 1, 1),
- PLL_RATE(1056000000, 88, 1, 1),
- PLL_RATE(1080000000, 90, 1, 1),
- PLL_RATE(1104000000, 92, 1, 1),
- PLL_RATE(1128000000, 94, 1, 1),
- PLL_RATE(1152000000, 96, 1, 1),
- PLL_RATE(1176000000, 98, 1, 1),
- PLL_RATE(1200000000, 50, 1, 0),
- PLL_RATE(1224000000, 51, 1, 0),
- PLL_RATE(1248000000, 52, 1, 0),
- PLL_RATE(1272000000, 53, 1, 0),
- PLL_RATE(1296000000, 54, 1, 0),
- PLL_RATE(1320000000, 55, 1, 0),
- PLL_RATE(1344000000, 56, 1, 0),
- PLL_RATE(1368000000, 57, 1, 0),
- PLL_RATE(1392000000, 58, 1, 0),
- PLL_RATE(1416000000, 59, 1, 0),
- PLL_RATE(1440000000, 60, 1, 0),
- PLL_RATE(1464000000, 61, 1, 0),
- PLL_RATE(1488000000, 62, 1, 0),
- PLL_RATE(1512000000, 63, 1, 0),
- PLL_RATE(1536000000, 64, 1, 0),
- PLL_RATE(1560000000, 65, 1, 0),
- PLL_RATE(1584000000, 66, 1, 0),
- PLL_RATE(1608000000, 67, 1, 0),
- PLL_RATE(1632000000, 68, 1, 0),
- PLL_RATE(1656000000, 68, 1, 0),
- PLL_RATE(1680000000, 68, 1, 0),
- PLL_RATE(1704000000, 68, 1, 0),
- PLL_RATE(1728000000, 69, 1, 0),
- PLL_RATE(1752000000, 69, 1, 0),
- PLL_RATE(1776000000, 69, 1, 0),
- PLL_RATE(1800000000, 69, 1, 0),
- PLL_RATE(1824000000, 70, 1, 0),
- PLL_RATE(1848000000, 70, 1, 0),
- PLL_RATE(1872000000, 70, 1, 0),
- PLL_RATE(1896000000, 70, 1, 0),
- PLL_RATE(1920000000, 71, 1, 0),
- PLL_RATE(1944000000, 71, 1, 0),
- PLL_RATE(1968000000, 71, 1, 0),
- PLL_RATE(1992000000, 71, 1, 0),
- PLL_RATE(2016000000, 72, 1, 0),
- PLL_RATE(2040000000, 72, 1, 0),
- PLL_RATE(2064000000, 72, 1, 0),
- PLL_RATE(2088000000, 72, 1, 0),
- PLL_RATE(2112000000, 73, 1, 0),
- { /* sentinel */ },
-};
-
-static struct meson_clk_pll axg_fixed_pll = {
- .m = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 0,
- .width = 9,
- },
- .n = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 9,
- .width = 5,
- },
- .od = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 16,
- .width = 2,
+static struct clk_regmap axg_fixed_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .frac = {
+ .reg_off = HHI_MPLL_CNTL2,
+ .shift = 0,
+ .width = 12,
+ },
+ .l = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "fixed_pll",
.ops = &meson_clk_pll_ro_ops,
@@ -138,25 +64,34 @@ static struct meson_clk_pll axg_fixed_pll = {
},
};
-static struct meson_clk_pll axg_sys_pll = {
- .m = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 0,
- .width = 9,
+static struct clk_regmap axg_sys_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .l = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
},
- .n = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 9,
- .width = 5,
- },
- .od = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 10,
- .width = 2,
- },
- .rate_table = sys_pll_rate_table,
- .rate_count = ARRAY_SIZE(sys_pll_rate_table),
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "sys_pll",
.ops = &meson_clk_pll_ro_ops,
@@ -257,40 +192,51 @@ static const struct pll_rate_table axg_gp0_pll_rate_table[] = {
{ /* sentinel */ },
};
-static struct pll_params_table axg_gp0_params_table[] = {
- PLL_PARAM(HHI_GP0_PLL_CNTL, 0x40010250),
- PLL_PARAM(HHI_GP0_PLL_CNTL1, 0xc084a000),
- PLL_PARAM(HHI_GP0_PLL_CNTL2, 0xb75020be),
- PLL_PARAM(HHI_GP0_PLL_CNTL3, 0x0a59a288),
- PLL_PARAM(HHI_GP0_PLL_CNTL4, 0xc000004d),
- PLL_PARAM(HHI_GP0_PLL_CNTL5, 0x00078000),
-};
-
-static struct meson_clk_pll axg_gp0_pll = {
- .m = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 0,
- .width = 9,
- },
- .n = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 9,
- .width = 5,
- },
- .od = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 16,
- .width = 2,
- },
- .params = {
- .params_table = axg_gp0_params_table,
- .params_count = ARRAY_SIZE(axg_gp0_params_table),
- .no_init_reset = true,
- .reset_lock_loop = true,
- },
- .rate_table = axg_gp0_pll_rate_table,
- .rate_count = ARRAY_SIZE(axg_gp0_pll_rate_table),
- .lock = &meson_clk_lock,
+static const struct reg_sequence axg_gp0_init_regs[] = {
+ { .reg = HHI_GP0_PLL_CNTL1, .def = 0xc084b000 },
+ { .reg = HHI_GP0_PLL_CNTL2, .def = 0xb75020be },
+ { .reg = HHI_GP0_PLL_CNTL3, .def = 0x0a59a288 },
+ { .reg = HHI_GP0_PLL_CNTL4, .def = 0xc000004d },
+ { .reg = HHI_GP0_PLL_CNTL5, .def = 0x00078000 },
+ { .reg = HHI_GP0_PLL_CNTL, .def = 0x40010250 },
+};
+
+static struct clk_regmap axg_gp0_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .frac = {
+ .reg_off = HHI_GP0_PLL_CNTL1,
+ .shift = 0,
+ .width = 10,
+ },
+ .l = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = axg_gp0_pll_rate_table,
+ .init_regs = axg_gp0_init_regs,
+ .init_count = ARRAY_SIZE(axg_gp0_init_regs),
+ },
.hw.init = &(struct clk_init_data){
.name = "gp0_pll",
.ops = &meson_clk_pll_ops,
@@ -299,234 +245,427 @@ static struct meson_clk_pll axg_gp0_pll = {
},
};
+static const struct reg_sequence axg_hifi_init_regs[] = {
+ { .reg = HHI_HIFI_PLL_CNTL1, .def = 0xc084b000 },
+ { .reg = HHI_HIFI_PLL_CNTL2, .def = 0xb75020be },
+ { .reg = HHI_HIFI_PLL_CNTL3, .def = 0x0a6a3a88 },
+ { .reg = HHI_HIFI_PLL_CNTL4, .def = 0xc000004d },
+ { .reg = HHI_HIFI_PLL_CNTL5, .def = 0x00058000 },
+ { .reg = HHI_HIFI_PLL_CNTL, .def = 0x40010250 },
+};
+
+static struct clk_regmap axg_hifi_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_HIFI_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_HIFI_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_HIFI_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .frac = {
+ .reg_off = HHI_HIFI_PLL_CNTL5,
+ .shift = 0,
+ .width = 13,
+ },
+ .l = {
+ .reg_off = HHI_HIFI_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_HIFI_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = axg_gp0_pll_rate_table,
+ .init_regs = axg_hifi_init_regs,
+ .init_count = ARRAY_SIZE(axg_hifi_init_regs),
+ .flags = CLK_MESON_PLL_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hifi_pll",
+ .ops = &meson_clk_pll_ops,
+ .parent_names = (const char *[]){ "xtal" },
+ .num_parents = 1,
+ },
+};
-static struct clk_fixed_factor axg_fclk_div2 = {
+static struct clk_fixed_factor axg_fclk_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div2",
+ .name = "fclk_div2_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor axg_fclk_div3 = {
+static struct clk_regmap axg_fclk_div2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 27,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div2_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_fclk_div3_div = {
.mult = 1,
.div = 3,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div3",
+ .name = "fclk_div3_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor axg_fclk_div4 = {
+static struct clk_regmap axg_fclk_div3 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 28,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div3",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div3_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_fclk_div4_div = {
.mult = 1,
.div = 4,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div4",
+ .name = "fclk_div4_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor axg_fclk_div5 = {
+static struct clk_regmap axg_fclk_div4 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 29,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div4",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div4_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_fclk_div5_div = {
.mult = 1,
.div = 5,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div5",
+ .name = "fclk_div5_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor axg_fclk_div7 = {
+static struct clk_regmap axg_fclk_div5 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 30,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div5",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div5_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_fclk_div7_div = {
.mult = 1,
.div = 7,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div7",
+ .name = "fclk_div7_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct meson_clk_mpll axg_mpll0 = {
- .sdm = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 0,
- .width = 14,
+static struct clk_regmap axg_fclk_div7 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 31,
},
- .sdm_en = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 15,
- .width = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div7",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div7_div" },
+ .num_parents = 1,
},
- .n2 = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 16,
- .width = 9,
+};
+
+static struct clk_regmap axg_mpll_prediv = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MPLL_CNTL5,
+ .shift = 12,
+ .width = 1,
},
- .en = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 14,
- .width = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll_prediv",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
},
- .ssen = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 25,
- .width = 1,
+};
+
+static struct clk_regmap axg_mpll0_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 15,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 16,
+ .width = 9,
+ },
+ .ssen = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 25,
+ .width = 1,
+ },
+ .misc = {
+ .reg_off = HHI_PLL_TOP_MISC,
+ .shift = 0,
+ .width = 1,
+ },
+ .lock = &meson_clk_lock,
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
- .name = "mpll0",
+ .name = "mpll0_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_names = (const char *[]){ "mpll_prediv" },
.num_parents = 1,
},
};
-static struct meson_clk_mpll axg_mpll1 = {
- .sdm = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 0,
- .width = 14,
+static struct clk_regmap axg_mpll0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL7,
+ .bit_idx = 14,
},
- .sdm_en = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 15,
- .width = 1,
- },
- .n2 = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 16,
- .width = 9,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
- .en = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 14,
- .width = 1,
+};
+
+static struct clk_regmap axg_mpll1_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL8,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL8,
+ .shift = 15,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL8,
+ .shift = 16,
+ .width = 9,
+ },
+ .misc = {
+ .reg_off = HHI_PLL_TOP_MISC,
+ .shift = 1,
+ .width = 1,
+ },
+ .lock = &meson_clk_lock,
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
- .name = "mpll1",
+ .name = "mpll1_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_names = (const char *[]){ "mpll_prediv" },
.num_parents = 1,
},
};
-static struct meson_clk_mpll axg_mpll2 = {
- .sdm = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 0,
- .width = 14,
+static struct clk_regmap axg_mpll1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL8,
+ .bit_idx = 14,
},
- .sdm_en = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 15,
- .width = 1,
- },
- .n2 = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 16,
- .width = 9,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
- .en = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 14,
- .width = 1,
+};
+
+static struct clk_regmap axg_mpll2_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL9,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL9,
+ .shift = 15,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL9,
+ .shift = 16,
+ .width = 9,
+ },
+ .misc = {
+ .reg_off = HHI_PLL_TOP_MISC,
+ .shift = 2,
+ .width = 1,
+ },
+ .lock = &meson_clk_lock,
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
- .name = "mpll2",
+ .name = "mpll2_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_names = (const char *[]){ "mpll_prediv" },
.num_parents = 1,
},
};
-static struct meson_clk_mpll axg_mpll3 = {
- .sdm = {
- .reg_off = HHI_MPLL3_CNTL0,
- .shift = 12,
- .width = 14,
+static struct clk_regmap axg_mpll2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL9,
+ .bit_idx = 14,
},
- .sdm_en = {
- .reg_off = HHI_MPLL3_CNTL0,
- .shift = 11,
- .width = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll2_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_mpll3_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL3_CNTL0,
+ .shift = 12,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL3_CNTL0,
+ .shift = 11,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL3_CNTL0,
+ .shift = 2,
+ .width = 9,
+ },
+ .misc = {
+ .reg_off = HHI_PLL_TOP_MISC,
+ .shift = 3,
+ .width = 1,
+ },
+ .lock = &meson_clk_lock,
},
- .n2 = {
- .reg_off = HHI_MPLL3_CNTL0,
- .shift = 2,
- .width = 9,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll3_div",
+ .ops = &meson_clk_mpll_ops,
+ .parent_names = (const char *[]){ "mpll_prediv" },
+ .num_parents = 1,
},
- .en = {
- .reg_off = HHI_MPLL3_CNTL0,
- .shift = 0,
- .width = 1,
+};
+
+static struct clk_regmap axg_mpll3 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL3_CNTL0,
+ .bit_idx = 0,
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpll3",
- .ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll3_div" },
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
};
-/*
- * FIXME The legacy composite clocks (e.g. clk81) are both PLL post-dividers
- * and should be modeled with their respective PLLs via the forthcoming
- * coordinated clock rates feature
- */
static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
static const char * const clk81_parent_names[] = {
"xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
"fclk_div3", "fclk_div5"
};
-static struct clk_mux axg_mpeg_clk_sel = {
- .reg = (void *)HHI_MPEG_CLK_CNTL,
- .mask = 0x7,
- .shift = 12,
- .flags = CLK_MUX_READ_ONLY,
- .table = mux_table_clk81,
- .lock = &meson_clk_lock,
+static struct clk_regmap axg_mpeg_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 12,
+ .table = mux_table_clk81,
+ },
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_sel",
- .ops = &clk_mux_ro_ops,
+ .ops = &clk_regmap_mux_ro_ops,
.parent_names = clk81_parent_names,
.num_parents = ARRAY_SIZE(clk81_parent_names),
},
};
-static struct clk_divider axg_mpeg_clk_div = {
- .reg = (void *)HHI_MPEG_CLK_CNTL,
- .shift = 0,
- .width = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap axg_mpeg_clk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "mpeg_clk_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate axg_clk81 = {
- .reg = (void *)HHI_MPEG_CLK_CNTL,
- .bit_idx = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap axg_clk81 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .bit_idx = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "clk81",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "mpeg_clk_div" },
.num_parents = 1,
.flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
@@ -545,42 +684,45 @@ static const char * const axg_sd_emmc_clk0_parent_names[] = {
};
/* SDcard clock */
-static struct clk_mux axg_sd_emmc_b_clk0_sel = {
- .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
- .mask = 0x7,
- .shift = 25,
- .lock = &meson_clk_lock,
+static struct clk_regmap axg_sd_emmc_b_clk0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 25,
+ },
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = axg_sd_emmc_clk0_parent_names,
.num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parent_names),
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_divider axg_sd_emmc_b_clk0_div = {
- .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
- .shift = 16,
- .width = 7,
- .lock = &meson_clk_lock,
- .flags = CLK_DIVIDER_ROUND_CLOSEST,
+static struct clk_regmap axg_sd_emmc_b_clk0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "sd_emmc_b_clk0_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate axg_sd_emmc_b_clk0 = {
- .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
- .bit_idx = 23,
- .lock = &meson_clk_lock,
+static struct clk_regmap axg_sd_emmc_b_clk0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .bit_idx = 23,
+ },
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_b_clk0",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "sd_emmc_b_clk0_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -588,42 +730,45 @@ static struct clk_gate axg_sd_emmc_b_clk0 = {
};
/* EMMC/NAND clock */
-static struct clk_mux axg_sd_emmc_c_clk0_sel = {
- .reg = (void *)HHI_NAND_CLK_CNTL,
- .mask = 0x7,
- .shift = 9,
- .lock = &meson_clk_lock,
+static struct clk_regmap axg_sd_emmc_c_clk0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ },
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = axg_sd_emmc_clk0_parent_names,
.num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parent_names),
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_divider axg_sd_emmc_c_clk0_div = {
- .reg = (void *)HHI_NAND_CLK_CNTL,
- .shift = 0,
- .width = 7,
- .lock = &meson_clk_lock,
- .flags = CLK_DIVIDER_ROUND_CLOSEST,
+static struct clk_regmap axg_sd_emmc_c_clk0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "sd_emmc_c_clk0_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate axg_sd_emmc_c_clk0 = {
- .reg = (void *)HHI_NAND_CLK_CNTL,
- .bit_idx = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap axg_sd_emmc_c_clk0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .bit_idx = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_c_clk0",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "sd_emmc_c_clk0_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -750,27 +895,24 @@ static struct clk_hw_onecell_data axg_hw_onecell_data = {
[CLKID_SD_EMMC_C_CLK0_SEL] = &axg_sd_emmc_c_clk0_sel.hw,
[CLKID_SD_EMMC_C_CLK0_DIV] = &axg_sd_emmc_c_clk0_div.hw,
[CLKID_SD_EMMC_C_CLK0] = &axg_sd_emmc_c_clk0.hw,
+ [CLKID_MPLL0_DIV] = &axg_mpll0_div.hw,
+ [CLKID_MPLL1_DIV] = &axg_mpll1_div.hw,
+ [CLKID_MPLL2_DIV] = &axg_mpll2_div.hw,
+ [CLKID_MPLL3_DIV] = &axg_mpll3_div.hw,
+ [CLKID_HIFI_PLL] = &axg_hifi_pll.hw,
+ [CLKID_MPLL_PREDIV] = &axg_mpll_prediv.hw,
+ [CLKID_FCLK_DIV2_DIV] = &axg_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV3_DIV] = &axg_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV4_DIV] = &axg_fclk_div4_div.hw,
+ [CLKID_FCLK_DIV5_DIV] = &axg_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV7_DIV] = &axg_fclk_div7_div.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
};
-/* Convenience tables to populate base addresses in .probe */
-
-static struct meson_clk_pll *const axg_clk_plls[] = {
- &axg_fixed_pll,
- &axg_sys_pll,
- &axg_gp0_pll,
-};
-
-static struct meson_clk_mpll *const axg_clk_mplls[] = {
- &axg_mpll0,
- &axg_mpll1,
- &axg_mpll2,
- &axg_mpll3,
-};
-
-static struct clk_gate *const axg_clk_gates[] = {
+/* Convenience table to populate regmap in .probe */
+static struct clk_regmap *const axg_clk_regmaps[] = {
&axg_clk81,
&axg_ddr,
&axg_audio_locker,
@@ -818,113 +960,100 @@ static struct clk_gate *const axg_clk_gates[] = {
&axg_ao_i2c,
&axg_sd_emmc_b_clk0,
&axg_sd_emmc_c_clk0,
-};
-
-static struct clk_mux *const axg_clk_muxes[] = {
- &axg_mpeg_clk_sel,
- &axg_sd_emmc_b_clk0_sel,
- &axg_sd_emmc_c_clk0_sel,
-};
-
-static struct clk_divider *const axg_clk_dividers[] = {
&axg_mpeg_clk_div,
&axg_sd_emmc_b_clk0_div,
&axg_sd_emmc_c_clk0_div,
-};
-
-struct clkc_data {
- struct clk_gate *const *clk_gates;
- unsigned int clk_gates_count;
- struct meson_clk_mpll *const *clk_mplls;
- unsigned int clk_mplls_count;
- struct meson_clk_pll *const *clk_plls;
- unsigned int clk_plls_count;
- struct clk_mux *const *clk_muxes;
- unsigned int clk_muxes_count;
- struct clk_divider *const *clk_dividers;
- unsigned int clk_dividers_count;
- struct clk_hw_onecell_data *hw_onecell_data;
-};
-
-static const struct clkc_data axg_clkc_data = {
- .clk_gates = axg_clk_gates,
- .clk_gates_count = ARRAY_SIZE(axg_clk_gates),
- .clk_mplls = axg_clk_mplls,
- .clk_mplls_count = ARRAY_SIZE(axg_clk_mplls),
- .clk_plls = axg_clk_plls,
- .clk_plls_count = ARRAY_SIZE(axg_clk_plls),
- .clk_muxes = axg_clk_muxes,
- .clk_muxes_count = ARRAY_SIZE(axg_clk_muxes),
- .clk_dividers = axg_clk_dividers,
- .clk_dividers_count = ARRAY_SIZE(axg_clk_dividers),
- .hw_onecell_data = &axg_hw_onecell_data,
+ &axg_mpeg_clk_sel,
+ &axg_sd_emmc_b_clk0_sel,
+ &axg_sd_emmc_c_clk0_sel,
+ &axg_mpll0,
+ &axg_mpll1,
+ &axg_mpll2,
+ &axg_mpll3,
+ &axg_mpll0_div,
+ &axg_mpll1_div,
+ &axg_mpll2_div,
+ &axg_mpll3_div,
+ &axg_fixed_pll,
+ &axg_sys_pll,
+ &axg_gp0_pll,
+ &axg_hifi_pll,
+ &axg_mpll_prediv,
+ &axg_fclk_div2,
+ &axg_fclk_div3,
+ &axg_fclk_div4,
+ &axg_fclk_div5,
+ &axg_fclk_div7,
};
static const struct of_device_id clkc_match_table[] = {
- { .compatible = "amlogic,axg-clkc", .data = &axg_clkc_data },
+ { .compatible = "amlogic,axg-clkc" },
{}
};
+static const struct regmap_config clkc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
static int axg_clkc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct clkc_data *clkc_data;
struct resource *res;
- void __iomem *clk_base;
- int ret, clkid, i;
-
- clkc_data = of_device_get_match_data(&pdev->dev);
- if (!clkc_data)
- return -EINVAL;
-
- /* Generic clocks and PLLs */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
- clk_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!clk_base) {
- dev_err(&pdev->dev, "Unable to map clk base\n");
- return -ENXIO;
- }
+ void __iomem *clk_base = NULL;
+ struct regmap *map;
+ int ret, i;
- /* Populate base address for PLLs */
- for (i = 0; i < clkc_data->clk_plls_count; i++)
- clkc_data->clk_plls[i]->base = clk_base;
+ /* Get the hhi system controller node if available */
+ map = syscon_node_to_regmap(of_get_parent(dev->of_node));
+ if (IS_ERR(map)) {
+ dev_err(dev,
+ "failed to get HHI regmap - Trying obsolete regs\n");
- /* Populate base address for MPLLs */
- for (i = 0; i < clkc_data->clk_mplls_count; i++)
- clkc_data->clk_mplls[i]->base = clk_base;
+ /*
+ * FIXME: HHI registers should be accessed through
+ * the appropriate system controller. This is required because
+ * there is more than just clocks in this register space
+ *
+ * This fallback method is only provided temporarily until
+ * all the platform DTs are properly using the syscon node
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
- /* Populate base address for gates */
- for (i = 0; i < clkc_data->clk_gates_count; i++)
- clkc_data->clk_gates[i]->reg = clk_base +
- (u64)clkc_data->clk_gates[i]->reg;
- /* Populate base address for muxes */
- for (i = 0; i < clkc_data->clk_muxes_count; i++)
- clkc_data->clk_muxes[i]->reg = clk_base +
- (u64)clkc_data->clk_muxes[i]->reg;
+ clk_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!clk_base) {
+ dev_err(dev, "Unable to map clk base\n");
+ return -ENXIO;
+ }
+
+ map = devm_regmap_init_mmio(dev, clk_base,
+ &clkc_regmap_config);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+ }
- /* Populate base address for dividers */
- for (i = 0; i < clkc_data->clk_dividers_count; i++)
- clkc_data->clk_dividers[i]->reg = clk_base +
- (u64)clkc_data->clk_dividers[i]->reg;
+ /* Populate regmap for the regmap backed clocks */
+ for (i = 0; i < ARRAY_SIZE(axg_clk_regmaps); i++)
+ axg_clk_regmaps[i]->map = map;
- for (clkid = 0; clkid < clkc_data->hw_onecell_data->num; clkid++) {
+ for (i = 0; i < axg_hw_onecell_data.num; i++) {
/* array might be sparse */
- if (!clkc_data->hw_onecell_data->hws[clkid])
+ if (!axg_hw_onecell_data.hws[i])
continue;
- ret = devm_clk_hw_register(dev,
- clkc_data->hw_onecell_data->hws[clkid]);
+ ret = devm_clk_hw_register(dev, axg_hw_onecell_data.hws[i]);
if (ret) {
- dev_err(&pdev->dev, "Clock registration failed\n");
+ dev_err(dev, "Clock registration failed\n");
return ret;
}
}
- return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
- clkc_data->hw_onecell_data);
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &axg_hw_onecell_data);
}
static struct platform_driver axg_driver = {
diff --git a/drivers/clk/meson/axg.h b/drivers/clk/meson/axg.h
index ce0bafdb6b28..b421df6a7ea0 100644
--- a/drivers/clk/meson/axg.h
+++ b/drivers/clk/meson/axg.h
@@ -117,8 +117,18 @@
#define CLKID_SD_EMMC_B_CLK0_DIV 62
#define CLKID_SD_EMMC_C_CLK0_SEL 63
#define CLKID_SD_EMMC_C_CLK0_DIV 64
+#define CLKID_MPLL0_DIV 65
+#define CLKID_MPLL1_DIV 66
+#define CLKID_MPLL2_DIV 67
+#define CLKID_MPLL3_DIV 68
+#define CLKID_MPLL_PREDIV 70
+#define CLKID_FCLK_DIV2_DIV 71
+#define CLKID_FCLK_DIV3_DIV 72
+#define CLKID_FCLK_DIV4_DIV 73
+#define CLKID_FCLK_DIV5_DIV 74
+#define CLKID_FCLK_DIV7_DIV 75
-#define NR_CLKS 65
+#define NR_CLKS 76
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/axg-clkc.h>
diff --git a/drivers/clk/meson/clk-audio-divider.c b/drivers/clk/meson/clk-audio-divider.c
index 6c07db06642d..f7ab5b1db342 100644
--- a/drivers/clk/meson/clk-audio-divider.c
+++ b/drivers/clk/meson/clk-audio-divider.c
@@ -28,8 +28,11 @@
#include <linux/clk-provider.h>
#include "clkc.h"
-#define to_meson_clk_audio_divider(_hw) container_of(_hw, \
- struct meson_clk_audio_divider, hw)
+static inline struct meson_clk_audio_div_data *
+meson_clk_audio_div_data(struct clk_regmap *clk)
+{
+ return (struct meson_clk_audio_div_data *)clk->data;
+}
static int _div_round(unsigned long parent_rate, unsigned long rate,
unsigned long flags)
@@ -45,15 +48,9 @@ static int _get_val(unsigned long parent_rate, unsigned long rate)
return DIV_ROUND_UP_ULL((u64)parent_rate, rate) - 1;
}
-static int _valid_divider(struct clk_hw *hw, int divider)
+static int _valid_divider(unsigned int width, int divider)
{
- struct meson_clk_audio_divider *adiv =
- to_meson_clk_audio_divider(hw);
- int max_divider;
- u8 width;
-
- width = adiv->div.width;
- max_divider = 1 << width;
+ int max_divider = 1 << width;
return clamp(divider, 1, max_divider);
}
@@ -61,14 +58,11 @@ static int _valid_divider(struct clk_hw *hw, int divider)
static unsigned long audio_divider_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct meson_clk_audio_divider *adiv =
- to_meson_clk_audio_divider(hw);
- struct parm *p;
- unsigned long reg, divider;
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
+ unsigned long divider;
- p = &adiv->div;
- reg = readl(adiv->base + p->reg_off);
- divider = PARM_GET(p->width, p->shift, reg) + 1;
+ divider = meson_parm_read(clk->map, &adiv->div);
return DIV_ROUND_UP_ULL((u64)parent_rate, divider);
}
@@ -77,14 +71,14 @@ static long audio_divider_round_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long *parent_rate)
{
- struct meson_clk_audio_divider *adiv =
- to_meson_clk_audio_divider(hw);
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
unsigned long max_prate;
int divider;
if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
divider = _div_round(*parent_rate, rate, adiv->flags);
- divider = _valid_divider(hw, divider);
+ divider = _valid_divider(adiv->div.width, divider);
return DIV_ROUND_UP_ULL((u64)*parent_rate, divider);
}
@@ -93,7 +87,7 @@ static long audio_divider_round_rate(struct clk_hw *hw,
/* Get the corresponding rounded down divider */
divider = max_prate / rate;
- divider = _valid_divider(hw, divider);
+ divider = _valid_divider(adiv->div.width, divider);
/* Get actual rate of the parent */
*parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
@@ -106,28 +100,11 @@ static int audio_divider_set_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long parent_rate)
{
- struct meson_clk_audio_divider *adiv =
- to_meson_clk_audio_divider(hw);
- struct parm *p;
- unsigned long reg, flags = 0;
- int val;
-
- val = _get_val(parent_rate, rate);
-
- if (adiv->lock)
- spin_lock_irqsave(adiv->lock, flags);
- else
- __acquire(adiv->lock);
-
- p = &adiv->div;
- reg = readl(adiv->base + p->reg_off);
- reg = PARM_SET(p->width, p->shift, reg, val);
- writel(reg, adiv->base + p->reg_off);
-
- if (adiv->lock)
- spin_unlock_irqrestore(adiv->lock, flags);
- else
- __release(adiv->lock);
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
+ int val = _get_val(parent_rate, rate);
+
+ meson_parm_write(clk->map, &adiv->div, val);
return 0;
}
diff --git a/drivers/clk/meson/clk-cpu.c b/drivers/clk/meson/clk-cpu.c
deleted file mode 100644
index f8b2b7efd016..000000000000
--- a/drivers/clk/meson/clk-cpu.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (c) 2015 Endless Mobile, Inc.
- * Author: Carlo Caione <carlo@endlessm.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-/*
- * CPU clock path:
- *
- * +-[/N]-----|3|
- * MUX2 +--[/3]-+----------|2| MUX1
- * [sys_pll]---|1| |--[/2]------------|1|-|1|
- * | |---+------------------|0| | |----- [a5_clk]
- * +--|0| | |
- * [xtal]---+-------------------------------|0|
- *
- *
- *
- */
-
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-
-#define MESON_CPU_CLK_CNTL1 0x00
-#define MESON_CPU_CLK_CNTL 0x40
-
-#define MESON_CPU_CLK_MUX1 BIT(7)
-#define MESON_CPU_CLK_MUX2 BIT(0)
-
-#define MESON_N_WIDTH 9
-#define MESON_N_SHIFT 20
-#define MESON_SEL_WIDTH 2
-#define MESON_SEL_SHIFT 2
-
-#include "clkc.h"
-
-#define to_meson_clk_cpu_hw(_hw) container_of(_hw, struct meson_clk_cpu, hw)
-#define to_meson_clk_cpu_nb(_nb) container_of(_nb, struct meson_clk_cpu, clk_nb)
-
-static long meson_clk_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct meson_clk_cpu *clk_cpu = to_meson_clk_cpu_hw(hw);
-
- return divider_round_rate(hw, rate, prate, clk_cpu->div_table,
- MESON_N_WIDTH, CLK_DIVIDER_ROUND_CLOSEST);
-}
-
-static int meson_clk_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct meson_clk_cpu *clk_cpu = to_meson_clk_cpu_hw(hw);
- unsigned int div, sel, N = 0;
- u32 reg;
-
- div = DIV_ROUND_UP(parent_rate, rate);
-
- if (div <= 3) {
- sel = div - 1;
- } else {
- sel = 3;
- N = div / 2;
- }
-
- reg = readl(clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL1);
- reg = PARM_SET(MESON_N_WIDTH, MESON_N_SHIFT, reg, N);
- writel(reg, clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL1);
-
- reg = readl(clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL);
- reg = PARM_SET(MESON_SEL_WIDTH, MESON_SEL_SHIFT, reg, sel);
- writel(reg, clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL);
-
- return 0;
-}
-
-static unsigned long meson_clk_cpu_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct meson_clk_cpu *clk_cpu = to_meson_clk_cpu_hw(hw);
- unsigned int N, sel;
- unsigned int div = 1;
- u32 reg;
-
- reg = readl(clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL1);
- N = PARM_GET(MESON_N_WIDTH, MESON_N_SHIFT, reg);
-
- reg = readl(clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL);
- sel = PARM_GET(MESON_SEL_WIDTH, MESON_SEL_SHIFT, reg);
-
- if (sel < 3)
- div = sel + 1;
- else
- div = 2 * N;
-
- return parent_rate / div;
-}
-
-/* FIXME MUX1 & MUX2 should be struct clk_hw objects */
-static int meson_clk_cpu_pre_rate_change(struct meson_clk_cpu *clk_cpu,
- struct clk_notifier_data *ndata)
-{
- u32 cpu_clk_cntl;
-
- /* switch MUX1 to xtal */
- cpu_clk_cntl = readl(clk_cpu->base + clk_cpu->reg_off
- + MESON_CPU_CLK_CNTL);
- cpu_clk_cntl &= ~MESON_CPU_CLK_MUX1;
- writel(cpu_clk_cntl, clk_cpu->base + clk_cpu->reg_off
- + MESON_CPU_CLK_CNTL);
- udelay(100);
-
- /* switch MUX2 to sys-pll */
- cpu_clk_cntl |= MESON_CPU_CLK_MUX2;
- writel(cpu_clk_cntl, clk_cpu->base + clk_cpu->reg_off
- + MESON_CPU_CLK_CNTL);
-
- return 0;
-}
-
-/* FIXME MUX1 & MUX2 should be struct clk_hw objects */
-static int meson_clk_cpu_post_rate_change(struct meson_clk_cpu *clk_cpu,
- struct clk_notifier_data *ndata)
-{
- u32 cpu_clk_cntl;
-
- /* switch MUX1 to divisors' output */
- cpu_clk_cntl = readl(clk_cpu->base + clk_cpu->reg_off
- + MESON_CPU_CLK_CNTL);
- cpu_clk_cntl |= MESON_CPU_CLK_MUX1;
- writel(cpu_clk_cntl, clk_cpu->base + clk_cpu->reg_off
- + MESON_CPU_CLK_CNTL);
- udelay(100);
-
- return 0;
-}
-
-/*
- * This clock notifier is called when the frequency of the of the parent
- * PLL clock is to be changed. We use the xtal input as temporary parent
- * while the PLL frequency is stabilized.
- */
-int meson_clk_cpu_notifier_cb(struct notifier_block *nb,
- unsigned long event, void *data)
-{
- struct clk_notifier_data *ndata = data;
- struct meson_clk_cpu *clk_cpu = to_meson_clk_cpu_nb(nb);
- int ret = 0;
-
- if (event == PRE_RATE_CHANGE)
- ret = meson_clk_cpu_pre_rate_change(clk_cpu, ndata);
- else if (event == POST_RATE_CHANGE)
- ret = meson_clk_cpu_post_rate_change(clk_cpu, ndata);
-
- return notifier_from_errno(ret);
-}
-
-const struct clk_ops meson_clk_cpu_ops = {
- .recalc_rate = meson_clk_cpu_recalc_rate,
- .round_rate = meson_clk_cpu_round_rate,
- .set_rate = meson_clk_cpu_set_rate,
-};
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
index 5144360e2c80..0df1227b65b3 100644
--- a/drivers/clk/meson/clk-mpll.c
+++ b/drivers/clk/meson/clk-mpll.c
@@ -68,11 +68,15 @@
#define N2_MIN 4
#define N2_MAX 511
-#define to_meson_clk_mpll(_hw) container_of(_hw, struct meson_clk_mpll, hw)
+static inline struct meson_clk_mpll_data *
+meson_clk_mpll_data(struct clk_regmap *clk)
+{
+ return (struct meson_clk_mpll_data *)clk->data;
+}
static long rate_from_params(unsigned long parent_rate,
- unsigned long sdm,
- unsigned long n2)
+ unsigned int sdm,
+ unsigned int n2)
{
unsigned long divisor = (SDM_DEN * n2) + sdm;
@@ -84,8 +88,8 @@ static long rate_from_params(unsigned long parent_rate,
static void params_from_rate(unsigned long requested_rate,
unsigned long parent_rate,
- unsigned long *sdm,
- unsigned long *n2)
+ unsigned int *sdm,
+ unsigned int *n2)
{
uint64_t div = parent_rate;
unsigned long rem = do_div(div, requested_rate);
@@ -105,31 +109,23 @@ static void params_from_rate(unsigned long requested_rate,
static unsigned long mpll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct meson_clk_mpll *mpll = to_meson_clk_mpll(hw);
- struct parm *p;
- unsigned long reg, sdm, n2;
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_mpll_data *mpll = meson_clk_mpll_data(clk);
+ unsigned int sdm, n2;
long rate;
- p = &mpll->sdm;
- reg = readl(mpll->base + p->reg_off);
- sdm = PARM_GET(p->width, p->shift, reg);
-
- p = &mpll->n2;
- reg = readl(mpll->base + p->reg_off);
- n2 = PARM_GET(p->width, p->shift, reg);
+ sdm = meson_parm_read(clk->map, &mpll->sdm);
+ n2 = meson_parm_read(clk->map, &mpll->n2);
rate = rate_from_params(parent_rate, sdm, n2);
- if (rate < 0)
- return 0;
-
- return rate;
+ return rate < 0 ? 0 : rate;
}
static long mpll_round_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long *parent_rate)
{
- unsigned long sdm, n2;
+ unsigned int sdm, n2;
params_from_rate(rate, *parent_rate, &sdm, &n2);
return rate_from_params(*parent_rate, sdm, n2);
@@ -139,9 +135,9 @@ static int mpll_set_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long parent_rate)
{
- struct meson_clk_mpll *mpll = to_meson_clk_mpll(hw);
- struct parm *p;
- unsigned long reg, sdm, n2;
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_mpll_data *mpll = meson_clk_mpll_data(clk);
+ unsigned int sdm, n2;
unsigned long flags = 0;
params_from_rate(rate, parent_rate, &sdm, &n2);
@@ -151,97 +147,36 @@ static int mpll_set_rate(struct clk_hw *hw,
else
__acquire(mpll->lock);
- p = &mpll->sdm;
- reg = readl(mpll->base + p->reg_off);
- reg = PARM_SET(p->width, p->shift, reg, sdm);
- writel(reg, mpll->base + p->reg_off);
-
- p = &mpll->sdm_en;
- reg = readl(mpll->base + p->reg_off);
- reg = PARM_SET(p->width, p->shift, reg, 1);
- writel(reg, mpll->base + p->reg_off);
-
- p = &mpll->ssen;
- if (p->width != 0) {
- reg = readl(mpll->base + p->reg_off);
- reg = PARM_SET(p->width, p->shift, reg, 1);
- writel(reg, mpll->base + p->reg_off);
- }
-
- p = &mpll->n2;
- reg = readl(mpll->base + p->reg_off);
- reg = PARM_SET(p->width, p->shift, reg, n2);
- writel(reg, mpll->base + p->reg_off);
-
- if (mpll->lock)
- spin_unlock_irqrestore(mpll->lock, flags);
- else
- __release(mpll->lock);
-
- return 0;
-}
+ /* Enable and set the fractional part */
+ meson_parm_write(clk->map, &mpll->sdm, sdm);
+ meson_parm_write(clk->map, &mpll->sdm_en, 1);
-static void mpll_enable_core(struct clk_hw *hw, int enable)
-{
- struct meson_clk_mpll *mpll = to_meson_clk_mpll(hw);
- struct parm *p;
- unsigned long reg;
- unsigned long flags = 0;
+ /* Set additional fractional part enable if required */
+ if (MESON_PARM_APPLICABLE(&mpll->ssen))
+ meson_parm_write(clk->map, &mpll->ssen, 1);
- if (mpll->lock)
- spin_lock_irqsave(mpll->lock, flags);
- else
- __acquire(mpll->lock);
+ /* Set the integer divider part */
+ meson_parm_write(clk->map, &mpll->n2, n2);
- p = &mpll->en;
- reg = readl(mpll->base + p->reg_off);
- reg = PARM_SET(p->width, p->shift, reg, enable ? 1 : 0);
- writel(reg, mpll->base + p->reg_off);
+ /* Set the magic misc bit if required */
+ if (MESON_PARM_APPLICABLE(&mpll->misc))
+ meson_parm_write(clk->map, &mpll->misc, 1);
if (mpll->lock)
spin_unlock_irqrestore(mpll->lock, flags);
else
__release(mpll->lock);
-}
-
-
-static int mpll_enable(struct clk_hw *hw)
-{
- mpll_enable_core(hw, 1);
return 0;
}
-static void mpll_disable(struct clk_hw *hw)
-{
- mpll_enable_core(hw, 0);
-}
-
-static int mpll_is_enabled(struct clk_hw *hw)
-{
- struct meson_clk_mpll *mpll = to_meson_clk_mpll(hw);
- struct parm *p;
- unsigned long reg;
- int en;
-
- p = &mpll->en;
- reg = readl(mpll->base + p->reg_off);
- en = PARM_GET(p->width, p->shift, reg);
-
- return en;
-}
-
const struct clk_ops meson_clk_mpll_ro_ops = {
.recalc_rate = mpll_recalc_rate,
.round_rate = mpll_round_rate,
- .is_enabled = mpll_is_enabled,
};
const struct clk_ops meson_clk_mpll_ops = {
.recalc_rate = mpll_recalc_rate,
.round_rate = mpll_round_rate,
.set_rate = mpll_set_rate,
- .enable = mpll_enable,
- .disable = mpll_disable,
- .is_enabled = mpll_is_enabled,
};
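
For reference, the MPLL rate produced by rate_from_params() above follows
rate = parent * SDM_DEN / (SDM_DEN * n2 + sdm). A minimal standalone sketch of
that arithmetic (illustration only, not part of the patch; SDM_DEN is assumed
to be 1 << 14 since the sdm field is 14 bits wide in this driver, and ceiling
division is used as in the driver):

#include <stdio.h>

#define SDM_DEN (1 << 14)	/* assumed: matches the 14-bit sdm field */

static unsigned long mpll_rate(unsigned long parent, unsigned int sdm,
			       unsigned int n2)
{
	unsigned long long divisor = (unsigned long long)SDM_DEN * n2 + sdm;

	/* ceiling division, mirroring the rounding done in the driver */
	return (unsigned long)(((unsigned long long)parent * SDM_DEN
				+ divisor - 1) / divisor);
}

int main(void)
{
	/* e.g. a 2 GHz parent with n2 = 4, sdm = 0 gives 500 MHz */
	printf("%lu\n", mpll_rate(2000000000UL, 0, 4));
	return 0;
}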
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index 01341553f50b..65a7bd903551 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -2,6 +2,9 @@
* Copyright (c) 2015 Endless Mobile, Inc.
* Author: Carlo Caione <carlo@endlessm.com>
*
+ * Copyright (c) 2018 Baylibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
@@ -27,13 +30,14 @@
* | |
* FREF VCO
*
- * out = (in * M / N) >> OD
+ * out = in * (m + frac / frac_max) / (n << sum(ods))
*/
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/slab.h>
@@ -41,209 +45,213 @@
#include "clkc.h"
-#define MESON_PLL_RESET BIT(29)
-#define MESON_PLL_LOCK BIT(31)
-
-#define to_meson_clk_pll(_hw) container_of(_hw, struct meson_clk_pll, hw)
-
-static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static inline struct meson_clk_pll_data *
+meson_clk_pll_data(struct clk_regmap *clk)
{
- struct meson_clk_pll *pll = to_meson_clk_pll(hw);
- struct parm *p;
- unsigned long parent_rate_mhz = parent_rate / 1000000;
- unsigned long rate_mhz;
- u16 n, m, frac = 0, od, od2 = 0;
- u32 reg;
-
- p = &pll->n;
- reg = readl(pll->base + p->reg_off);
- n = PARM_GET(p->width, p->shift, reg);
-
- p = &pll->m;
- reg = readl(pll->base + p->reg_off);
- m = PARM_GET(p->width, p->shift, reg);
-
- p = &pll->od;
- reg = readl(pll->base + p->reg_off);
- od = PARM_GET(p->width, p->shift, reg);
-
- p = &pll->od2;
- if (p->width) {
- reg = readl(pll->base + p->reg_off);
- od2 = PARM_GET(p->width, p->shift, reg);
- }
-
- p = &pll->frac;
- if (p->width) {
- reg = readl(pll->base + p->reg_off);
- frac = PARM_GET(p->width, p->shift, reg);
- rate_mhz = (parent_rate_mhz * m + \
- (parent_rate_mhz * frac >> 12)) * 2 / n;
- rate_mhz = rate_mhz >> od >> od2;
- } else
- rate_mhz = (parent_rate_mhz * m / n) >> od >> od2;
-
- return rate_mhz * 1000000;
+ return (struct meson_clk_pll_data *)clk->data;
}
-static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static unsigned long __pll_params_to_rate(unsigned long parent_rate,
+ const struct pll_rate_table *pllt,
+ u16 frac,
+ struct meson_clk_pll_data *pll)
{
- struct meson_clk_pll *pll = to_meson_clk_pll(hw);
- const struct pll_rate_table *rate_table = pll->rate_table;
- int i;
+ u64 rate = (u64)parent_rate * pllt->m;
+ unsigned int od = pllt->od + pllt->od2 + pllt->od3;
- for (i = 0; i < pll->rate_count; i++) {
- if (rate <= rate_table[i].rate)
- return rate_table[i].rate;
+ if (frac && MESON_PARM_APPLICABLE(&pll->frac)) {
+ u64 frac_rate = (u64)parent_rate * frac;
+
+ rate += DIV_ROUND_UP_ULL(frac_rate,
+ (1 << pll->frac.width));
}
- /* else return the smallest value */
- return rate_table[0].rate;
+ return DIV_ROUND_UP_ULL(rate, pllt->n << od);
}
-static const struct pll_rate_table *meson_clk_get_pll_settings(struct meson_clk_pll *pll,
- unsigned long rate)
+static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
{
- const struct pll_rate_table *rate_table = pll->rate_table;
- int i;
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
+ struct pll_rate_table pllt;
+ u16 frac;
- for (i = 0; i < pll->rate_count; i++) {
- if (rate == rate_table[i].rate)
- return &rate_table[i];
- }
- return NULL;
+ pllt.n = meson_parm_read(clk->map, &pll->n);
+ pllt.m = meson_parm_read(clk->map, &pll->m);
+ pllt.od = meson_parm_read(clk->map, &pll->od);
+
+ pllt.od2 = MESON_PARM_APPLICABLE(&pll->od2) ?
+ meson_parm_read(clk->map, &pll->od2) :
+ 0;
+
+ pllt.od3 = MESON_PARM_APPLICABLE(&pll->od3) ?
+ meson_parm_read(clk->map, &pll->od3) :
+ 0;
+
+ frac = MESON_PARM_APPLICABLE(&pll->frac) ?
+ meson_parm_read(clk->map, &pll->frac) :
+ 0;
+
+ return __pll_params_to_rate(parent_rate, &pllt, frac, pll);
}
-/* Specific wait loop for GXL/GXM GP0 PLL */
-static int meson_clk_pll_wait_lock_reset(struct meson_clk_pll *pll,
- struct parm *p_n)
+static u16 __pll_params_with_frac(unsigned long rate,
+ unsigned long parent_rate,
+ const struct pll_rate_table *pllt,
+ struct meson_clk_pll_data *pll)
{
- int delay = 100;
- u32 reg;
+ u16 frac_max = (1 << pll->frac.width);
+ u64 val = (u64)rate * pllt->n;
- while (delay > 0) {
- reg = readl(pll->base + p_n->reg_off);
- writel(reg | MESON_PLL_RESET, pll->base + p_n->reg_off);
- udelay(10);
- writel(reg & ~MESON_PLL_RESET, pll->base + p_n->reg_off);
+ val <<= pllt->od + pllt->od2 + pllt->od3;
- /* This delay comes from AMLogic tree clk-gp0-gxl driver */
- mdelay(1);
+ if (pll->flags & CLK_MESON_PLL_ROUND_CLOSEST)
+ val = DIV_ROUND_CLOSEST_ULL(val * frac_max, parent_rate);
+ else
+ val = div_u64(val * frac_max, parent_rate);
- reg = readl(pll->base + p_n->reg_off);
- if (reg & MESON_PLL_LOCK)
- return 0;
- delay--;
+ val -= pllt->m * frac_max;
+
+ return min((u16)val, (u16)(frac_max - 1));
+}
+
+static const struct pll_rate_table *
+meson_clk_get_pll_settings(unsigned long rate,
+ struct meson_clk_pll_data *pll)
+{
+ const struct pll_rate_table *table = pll->table;
+ unsigned int i = 0;
+
+ if (!table)
+ return NULL;
+
+ /* Find the first table element exceeding rate */
+ while (table[i].rate && table[i].rate <= rate)
+ i++;
+
+ if (i != 0) {
+ if (MESON_PARM_APPLICABLE(&pll->frac) ||
+ !(pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) ||
+ (abs(rate - table[i - 1].rate) <
+ abs(rate - table[i].rate)))
+ i--;
}
- return -ETIMEDOUT;
+
+ return (struct pll_rate_table *)&table[i];
}
-static int meson_clk_pll_wait_lock(struct meson_clk_pll *pll,
- struct parm *p_n)
+static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
{
- int delay = 24000000;
- u32 reg;
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
+ const struct pll_rate_table *pllt =
+ meson_clk_get_pll_settings(rate, pll);
+ u16 frac;
+
+ if (!pllt)
+ return meson_clk_pll_recalc_rate(hw, *parent_rate);
+
+ if (!MESON_PARM_APPLICABLE(&pll->frac)
+ || rate == pllt->rate)
+ return pllt->rate;
+
+ /*
+ * The rate provided by the setting is not an exact match, so let's
+ * try to improve the result using the fractional parameter
+ */
+ frac = __pll_params_with_frac(rate, *parent_rate, pllt, pll);
+
+ return __pll_params_to_rate(*parent_rate, pllt, frac, pll);
+}
- while (delay > 0) {
- reg = readl(pll->base + p_n->reg_off);
+static int meson_clk_pll_wait_lock(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
+ int delay = 24000000;
- if (reg & MESON_PLL_LOCK)
+ do {
+ /* Is the clock locked now? */
+ if (meson_parm_read(clk->map, &pll->l))
return 0;
+
delay--;
- }
+ } while (delay > 0);
+
return -ETIMEDOUT;
}
-static void meson_clk_pll_init_params(struct meson_clk_pll *pll)
+static void meson_clk_pll_init(struct clk_hw *hw)
{
- int i;
-
- for (i = 0 ; i < pll->params.params_count ; ++i)
- writel(pll->params.params_table[i].value,
- pll->base + pll->params.params_table[i].reg_off);
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
+
+ if (pll->init_count) {
+ meson_parm_write(clk->map, &pll->rst, 1);
+ regmap_multi_reg_write(clk->map, pll->init_regs,
+ pll->init_count);
+ meson_parm_write(clk->map, &pll->rst, 0);
+ }
}
static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- struct meson_clk_pll *pll = to_meson_clk_pll(hw);
- struct parm *p;
- const struct pll_rate_table *rate_set;
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
+ const struct pll_rate_table *pllt;
unsigned long old_rate;
- int ret = 0;
- u32 reg;
+ u16 frac = 0;
if (parent_rate == 0 || rate == 0)
return -EINVAL;
old_rate = rate;
- rate_set = meson_clk_get_pll_settings(pll, rate);
- if (!rate_set)
+ pllt = meson_clk_get_pll_settings(rate, pll);
+ if (!pllt)
return -EINVAL;
- /* Initialize the PLL in a clean state if specified */
- if (pll->params.params_count)
- meson_clk_pll_init_params(pll);
-
- /* PLL reset */
- p = &pll->n;
- reg = readl(pll->base + p->reg_off);
- /* If no_init_reset is provided, avoid resetting at this point */
- if (!pll->params.no_init_reset)
- writel(reg | MESON_PLL_RESET, pll->base + p->reg_off);
-
- reg = PARM_SET(p->width, p->shift, reg, rate_set->n);
- writel(reg, pll->base + p->reg_off);
-
- p = &pll->m;
- reg = readl(pll->base + p->reg_off);
- reg = PARM_SET(p->width, p->shift, reg, rate_set->m);
- writel(reg, pll->base + p->reg_off);
-
- p = &pll->od;
- reg = readl(pll->base + p->reg_off);
- reg = PARM_SET(p->width, p->shift, reg, rate_set->od);
- writel(reg, pll->base + p->reg_off);
-
- p = &pll->od2;
- if (p->width) {
- reg = readl(pll->base + p->reg_off);
- reg = PARM_SET(p->width, p->shift, reg, rate_set->od2);
- writel(reg, pll->base + p->reg_off);
- }
+ /* Put the pll in reset to write the params */
+ meson_parm_write(clk->map, &pll->rst, 1);
- p = &pll->frac;
- if (p->width) {
- reg = readl(pll->base + p->reg_off);
- reg = PARM_SET(p->width, p->shift, reg, rate_set->frac);
- writel(reg, pll->base + p->reg_off);
- }
+ meson_parm_write(clk->map, &pll->n, pllt->n);
+ meson_parm_write(clk->map, &pll->m, pllt->m);
+ meson_parm_write(clk->map, &pll->od, pllt->od);
+
+ if (MESON_PARM_APPLICABLE(&pll->od2))
+ meson_parm_write(clk->map, &pll->od2, pllt->od2);
+
+ if (MESON_PARM_APPLICABLE(&pll->od3))
+ meson_parm_write(clk->map, &pll->od3, pllt->od3);
- p = &pll->n;
- /* If clear_reset_for_lock is provided, remove the reset bit here */
- if (pll->params.clear_reset_for_lock) {
- reg = readl(pll->base + p->reg_off);
- writel(reg & ~MESON_PLL_RESET, pll->base + p->reg_off);
+ if (MESON_PARM_APPLICABLE(&pll->frac)) {
+ frac = __pll_params_with_frac(rate, parent_rate, pllt, pll);
+ meson_parm_write(clk->map, &pll->frac, frac);
}
- /* If reset_lock_loop, use a special loop including resetting */
- if (pll->params.reset_lock_loop)
- ret = meson_clk_pll_wait_lock_reset(pll, p);
- else
- ret = meson_clk_pll_wait_lock(pll, p);
- if (ret) {
+ /* make sure the reset is cleared at this point */
+ meson_parm_write(clk->map, &pll->rst, 0);
+
+ if (meson_clk_pll_wait_lock(hw)) {
pr_warn("%s: pll did not lock, trying to restore old rate %lu\n",
__func__, old_rate);
+ /*
+ * FIXME: Do we really need/want this HACK?
+ * It looks unsafe. What happens if the clock gets into a
+ * broken state and we can't lock back on the old_rate? It looks
+ * like an infinite recursion is possible
+ */
meson_clk_pll_set_rate(hw, old_rate, parent_rate);
}
- return ret;
+ return 0;
}
const struct clk_ops meson_clk_pll_ops = {
+ .init = meson_clk_pll_init,
.recalc_rate = meson_clk_pll_recalc_rate,
.round_rate = meson_clk_pll_round_rate,
.set_rate = meson_clk_pll_set_rate,
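
For reference, the PLL formula noted at the top of clk-pll.c,
out = in * (m + frac / frac_max) / (n << (od + od2 + od3)), can be checked with
a minimal standalone sketch (illustration only; frac_max assumed to be
1 << frac.width, ceiling division as in __pll_params_to_rate()):

#include <stdint.h>
#include <stdio.h>

/* Ceiling division, standing in for DIV_ROUND_UP_ULL() */
static uint64_t div_up(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

static uint64_t pll_rate(uint64_t in, unsigned int m, unsigned int n,
			 unsigned int od_sum, unsigned int frac,
			 unsigned int frac_width)
{
	uint64_t rate = in * m;

	if (frac)
		rate += div_up(in * frac, 1ULL << frac_width);

	return div_up(rate, (uint64_t)n << od_sum);
}

int main(void)
{
	/* m = 56, n = 1, od = 2 on a 24 MHz xtal -> 336 MHz, as in the old sys_pll table */
	printf("%llu\n", (unsigned long long)pll_rate(24000000, 56, 1, 2, 0, 12));
	return 0;
}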
diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c
new file mode 100644
index 000000000000..3645fdb62343
--- /dev/null
+++ b/drivers/clk/meson/clk-regmap.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include "clk-regmap.h"
+
+static int clk_regmap_gate_endisable(struct clk_hw *hw, int enable)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct clk_regmap_gate_data *gate = clk_get_regmap_gate_data(clk);
+ int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
+
+ set ^= enable;
+
+ return regmap_update_bits(clk->map, gate->offset, BIT(gate->bit_idx),
+ set ? BIT(gate->bit_idx) : 0);
+}
+
+static int clk_regmap_gate_enable(struct clk_hw *hw)
+{
+ return clk_regmap_gate_endisable(hw, 1);
+}
+
+static void clk_regmap_gate_disable(struct clk_hw *hw)
+{
+ clk_regmap_gate_endisable(hw, 0);
+}
+
+static int clk_regmap_gate_is_enabled(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct clk_regmap_gate_data *gate = clk_get_regmap_gate_data(clk);
+ unsigned int val;
+
+ regmap_read(clk->map, gate->offset, &val);
+ if (gate->flags & CLK_GATE_SET_TO_DISABLE)
+ val ^= BIT(gate->bit_idx);
+
+ val &= BIT(gate->bit_idx);
+
+ return val ? 1 : 0;
+}
+
+const struct clk_ops clk_regmap_gate_ops = {
+ .enable = clk_regmap_gate_enable,
+ .disable = clk_regmap_gate_disable,
+ .is_enabled = clk_regmap_gate_is_enabled,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_gate_ops);
+
+static unsigned long clk_regmap_div_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct clk_regmap_div_data *div = clk_get_regmap_div_data(clk);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(clk->map, div->offset, &val);
+ if (ret)
+ /* Gives a hint that something is wrong */
+ return 0;
+
+ val >>= div->shift;
+ val &= clk_div_mask(div->width);
+ return divider_recalc_rate(hw, prate, val, div->table, div->flags,
+ div->width);
+}
+
+static long clk_regmap_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct clk_regmap_div_data *div = clk_get_regmap_div_data(clk);
+ unsigned int val;
+ int ret;
+
+ /* if read only, just return current value */
+ if (div->flags & CLK_DIVIDER_READ_ONLY) {
+ ret = regmap_read(clk->map, div->offset, &val);
+ if (ret)
+ /* Gives a hint that something is wrong */
+ return 0;
+
+ val >>= div->shift;
+ val &= clk_div_mask(div->width);
+
+ return divider_ro_round_rate(hw, rate, prate, div->table,
+ div->width, div->flags, val);
+ }
+
+ return divider_round_rate(hw, rate, prate, div->table, div->width,
+ div->flags);
+}
+
+static int clk_regmap_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct clk_regmap_div_data *div = clk_get_regmap_div_data(clk);
+ unsigned int val;
+ int ret;
+
+ ret = divider_get_val(rate, parent_rate, div->table, div->width,
+ div->flags);
+ if (ret < 0)
+ return ret;
+
+ val = (unsigned int)ret << div->shift;
+ return regmap_update_bits(clk->map, div->offset,
+ clk_div_mask(div->width) << div->shift, val);
+};
+
+/* Would prefer clk_regmap_div_ro_ops but clashes with qcom */
+
+const struct clk_ops clk_regmap_divider_ops = {
+ .recalc_rate = clk_regmap_div_recalc_rate,
+ .round_rate = clk_regmap_div_round_rate,
+ .set_rate = clk_regmap_div_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_divider_ops);
+
+const struct clk_ops clk_regmap_divider_ro_ops = {
+ .recalc_rate = clk_regmap_div_recalc_rate,
+ .round_rate = clk_regmap_div_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_divider_ro_ops);
+
+static u8 clk_regmap_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct clk_regmap_mux_data *mux = clk_get_regmap_mux_data(clk);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(clk->map, mux->offset, &val);
+ if (ret)
+ return ret;
+
+ val >>= mux->shift;
+ val &= mux->mask;
+ return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
+}
+
+static int clk_regmap_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct clk_regmap_mux_data *mux = clk_get_regmap_mux_data(clk);
+ unsigned int val = clk_mux_index_to_val(mux->table, mux->flags, index);
+
+ return regmap_update_bits(clk->map, mux->offset,
+ mux->mask << mux->shift,
+ val << mux->shift);
+}
+
+const struct clk_ops clk_regmap_mux_ops = {
+ .get_parent = clk_regmap_mux_get_parent,
+ .set_parent = clk_regmap_mux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_mux_ops);
+
+const struct clk_ops clk_regmap_mux_ro_ops = {
+ .get_parent = clk_regmap_mux_get_parent,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_mux_ro_ops);
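
A quick standalone sketch of the XOR in clk_regmap_gate_endisable() above,
assuming the same CLK_GATE_SET_TO_DISABLE meaning as the generic clk_gate
(illustration only, not part of the patch):

#include <stdio.h>

int main(void)
{
	/* gate->flags & CLK_GATE_SET_TO_DISABLE picks the initial 'set' value */
	for (int set_to_disable = 0; set_to_disable <= 1; set_to_disable++) {
		for (int enable = 0; enable <= 1; enable++) {
			int set = set_to_disable ? 1 : 0;

			set ^= enable;	/* the XOR from clk_regmap_gate_endisable() */
			printf("SET_TO_DISABLE=%d enable=%d -> write bit=%d\n",
			       set_to_disable, enable, set);
		}
	}
	return 0;
}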
diff --git a/drivers/clk/meson/clk-regmap.h b/drivers/clk/meson/clk-regmap.h
new file mode 100644
index 000000000000..627c888026d7
--- /dev/null
+++ b/drivers/clk/meson/clk-regmap.h
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#ifndef __CLK_REGMAP_H
+#define __CLK_REGMAP_H
+
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+/**
+ * struct clk_regmap - regmap backed clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @map: pointer to the regmap structure controlling the clock
+ * @data: data specific to the clock type
+ *
+ * Clock which is controlled by regmap backed registers. The actual type of
+ * the clock is controlled by the clock_ops and data.
+ */
+struct clk_regmap {
+ struct clk_hw hw;
+ struct regmap *map;
+ void *data;
+};
+
+#define to_clk_regmap(_hw) container_of(_hw, struct clk_regmap, hw)
+
+/**
+ * struct clk_regmap_gate_data - regmap backed gate specific data
+ *
+ * @offset: offset of the register controlling gate
+ * @bit_idx: single bit controlling gate
+ * @flags: hardware-specific flags
+ *
+ * Flags:
+ * Same as clk_gate except CLK_GATE_HIWORD_MASK which is ignored
+ */
+struct clk_regmap_gate_data {
+ unsigned int offset;
+ u8 bit_idx;
+ u8 flags;
+};
+
+static inline struct clk_regmap_gate_data *
+clk_get_regmap_gate_data(struct clk_regmap *clk)
+{
+ return (struct clk_regmap_gate_data *)clk->data;
+}
+
+extern const struct clk_ops clk_regmap_gate_ops;
+
+/**
+ * struct clk_regmap_div_data - regmap backed adjustable divider specific data
+ *
+ * @offset: offset of the register controlling the divider
+ * @shift: shift to the divider bit field
+ * @width: width of the divider bit field
+ * @table: array of value/divider pairs, last entry should have div = 0
+ * @flags: hardware-specific flags
+ *
+ * Flags:
+ * Same as clk_divider except CLK_DIVIDER_HIWORD_MASK which is ignored
+ */
+struct clk_regmap_div_data {
+ unsigned int offset;
+ u8 shift;
+ u8 width;
+ u8 flags;
+ const struct clk_div_table *table;
+};
+
+static inline struct clk_regmap_div_data *
+clk_get_regmap_div_data(struct clk_regmap *clk)
+{
+ return (struct clk_regmap_div_data *)clk->data;
+}
+
+extern const struct clk_ops clk_regmap_divider_ops;
+extern const struct clk_ops clk_regmap_divider_ro_ops;
+
+/**
+ * struct clk_regmap_mux_data - regmap backed multiplexer clock specific data
+ *
+ * @offset: offset of the register controlling the multiplexer
+ * @table: array of parent indexed register values
+ * @shift: shift to the multiplexer bit field
+ * @mask: mask of the multiplexer bit field
+ * @flags: hardware-specific flags
+ *
+ * Flags:
+ * Same as clk_mux except CLK_MUX_HIWORD_MASK which is ignored
+ */
+struct clk_regmap_mux_data {
+ unsigned int offset;
+ u32 *table;
+ u32 mask;
+ u8 shift;
+ u8 flags;
+};
+
+static inline struct clk_regmap_mux_data *
+clk_get_regmap_mux_data(struct clk_regmap *clk)
+{
+ return (struct clk_regmap_mux_data *)clk->data;
+}
+
+extern const struct clk_ops clk_regmap_mux_ops;
+extern const struct clk_ops clk_regmap_mux_ro_ops;
+
+#endif /* __CLK_REGMAP_H */
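
A minimal sketch of how a driver declares a regmap-backed gate with these
structures (a fragment relying on clk-regmap.h and the common clock framework,
not code from this patch; the register offset and bit index are hypothetical).
The .map pointer is assigned from the syscon regmap at probe time before
devm_clk_hw_register(), as gxbb-aoclk.c does further down:

#include <linux/clk-provider.h>
#include "clk-regmap.h"

/* Hypothetical register offset and bit; gates in this patch follow the same shape */
static struct clk_regmap example_gate = {
	.data = &(struct clk_regmap_gate_data){
		.offset = 0x140,
		.bit_idx = 3,
	},
	.hw.init = &(struct clk_init_data){
		.name = "example_gate",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "clk81" },
		.num_parents = 1,
	},
};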
diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h
index c2ff0520ce53..8fe73c4edca8 100644
--- a/drivers/clk/meson/clkc.h
+++ b/drivers/clk/meson/clkc.h
@@ -18,6 +18,9 @@
#ifndef __CLKC_H
#define __CLKC_H
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+
#define PMASK(width) GENMASK(width - 1, 0)
#define SETPMASK(width, shift) GENMASK(shift + width - 1, shift)
#define CLRPMASK(width, shift) (~SETPMASK(width, shift))
@@ -35,13 +38,29 @@ struct parm {
u8 width;
};
+static inline unsigned int meson_parm_read(struct regmap *map, struct parm *p)
+{
+ unsigned int val;
+
+ regmap_read(map, p->reg_off, &val);
+ return PARM_GET(p->width, p->shift, val);
+}
+
+static inline void meson_parm_write(struct regmap *map, struct parm *p,
+ unsigned int val)
+{
+ regmap_update_bits(map, p->reg_off, SETPMASK(p->width, p->shift),
+ val << p->shift);
+}
+
struct pll_rate_table {
unsigned long rate;
u16 m;
u16 n;
u16 od;
u16 od2;
- u16 frac;
+ u16 od3;
};
#define PLL_RATE(_r, _m, _n, _od) \
@@ -50,97 +69,53 @@ struct pll_rate_table {
.m = (_m), \
.n = (_n), \
.od = (_od), \
- } \
-
-#define PLL_FRAC_RATE(_r, _m, _n, _od, _od2, _frac) \
- { \
- .rate = (_r), \
- .m = (_m), \
- .n = (_n), \
- .od = (_od), \
- .od2 = (_od2), \
- .frac = (_frac), \
- } \
-
-struct pll_params_table {
- unsigned int reg_off;
- unsigned int value;
-};
-
-#define PLL_PARAM(_reg, _val) \
- { \
- .reg_off = (_reg), \
- .value = (_val), \
}
-struct pll_setup_params {
- struct pll_params_table *params_table;
- unsigned int params_count;
- /* Workaround for GP0, do not reset before configuring */
- bool no_init_reset;
- /* Workaround for GP0, unreset right before checking for lock */
- bool clear_reset_for_lock;
- /* Workaround for GXL GP0, reset in the lock checking loop */
- bool reset_lock_loop;
-};
+#define CLK_MESON_PLL_ROUND_CLOSEST BIT(0)
-struct meson_clk_pll {
- struct clk_hw hw;
- void __iomem *base;
+struct meson_clk_pll_data {
struct parm m;
struct parm n;
struct parm frac;
struct parm od;
struct parm od2;
- const struct pll_setup_params params;
- const struct pll_rate_table *rate_table;
- unsigned int rate_count;
- spinlock_t *lock;
+ struct parm od3;
+ struct parm l;
+ struct parm rst;
+ const struct reg_sequence *init_regs;
+ unsigned int init_count;
+ const struct pll_rate_table *table;
+ u8 flags;
};
#define to_meson_clk_pll(_hw) container_of(_hw, struct meson_clk_pll, hw)
-struct meson_clk_cpu {
- struct clk_hw hw;
- void __iomem *base;
- u16 reg_off;
- struct notifier_block clk_nb;
- const struct clk_div_table *div_table;
-};
-
-int meson_clk_cpu_notifier_cb(struct notifier_block *nb, unsigned long event,
- void *data);
-
-struct meson_clk_mpll {
- struct clk_hw hw;
- void __iomem *base;
+struct meson_clk_mpll_data {
struct parm sdm;
struct parm sdm_en;
struct parm n2;
- struct parm en;
struct parm ssen;
+ struct parm misc;
spinlock_t *lock;
};
-struct meson_clk_audio_divider {
- struct clk_hw hw;
- void __iomem *base;
+struct meson_clk_audio_div_data {
struct parm div;
u8 flags;
- spinlock_t *lock;
};
#define MESON_GATE(_name, _reg, _bit) \
-struct clk_gate _name = { \
- .reg = (void __iomem *) _reg, \
- .bit_idx = (_bit), \
- .lock = &meson_clk_lock, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name, \
- .ops = &clk_gate_ops, \
+struct clk_regmap _name = { \
+ .data = &(struct clk_regmap_gate_data){ \
+ .offset = (_reg), \
+ .bit_idx = (_bit), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = #_name, \
+ .ops = &clk_regmap_gate_ops, \
.parent_names = (const char *[]){ "clk81" }, \
.num_parents = 1, \
- .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
+ .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
}, \
};
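
A standalone sketch of the bit-field arithmetic behind meson_parm_read() and
meson_parm_write() above, using the 9-bit n2 field at shift 16 of
HHI_MPLL_CNTL7 as an example (the register value below is hypothetical and the
PMASK/SETPMASK equivalents are written without GENMASK so the demo compiles on
its own):

#include <stdio.h>

/* Standalone equivalents of PMASK()/SETPMASK() from clkc.h */
#define PMASK(width)		((1u << (width)) - 1)
#define SETPMASK(width, shift)	(PMASK(width) << (shift))

int main(void)
{
	unsigned int reg = 0x00250000;	/* hypothetical HHI_MPLL_CNTL7 value */
	unsigned int shift = 16, width = 9;

	/* meson_parm_read(): shift the field down and mask it, as PARM_GET() does */
	unsigned int n2 = (reg >> shift) & PMASK(width);

	/* meson_parm_write(..., 5): regmap_update_bits() with SETPMASK as the mask */
	reg = (reg & ~SETPMASK(width, shift)) | (5u << shift);

	printf("n2 = %u, reg after write = 0x%08x\n", n2, reg);
	return 0;
}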
diff --git a/drivers/clk/meson/gxbb-aoclk-regmap.c b/drivers/clk/meson/gxbb-aoclk-regmap.c
deleted file mode 100644
index 2515fbfa0467..000000000000
--- a/drivers/clk/meson/gxbb-aoclk-regmap.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2017 BayLibre, SAS.
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- *
- * SPDX-License-Identifier: GPL-2.0+
- */
-
-#include <linux/clk-provider.h>
-#include <linux/bitfield.h>
-#include <linux/regmap.h>
-#include "gxbb-aoclk.h"
-
-static int aoclk_gate_regmap_enable(struct clk_hw *hw)
-{
- struct aoclk_gate_regmap *gate = to_aoclk_gate_regmap(hw);
-
- return regmap_update_bits(gate->regmap, AO_RTI_GEN_CNTL_REG0,
- BIT(gate->bit_idx), BIT(gate->bit_idx));
-}
-
-static void aoclk_gate_regmap_disable(struct clk_hw *hw)
-{
- struct aoclk_gate_regmap *gate = to_aoclk_gate_regmap(hw);
-
- regmap_update_bits(gate->regmap, AO_RTI_GEN_CNTL_REG0,
- BIT(gate->bit_idx), 0);
-}
-
-static int aoclk_gate_regmap_is_enabled(struct clk_hw *hw)
-{
- struct aoclk_gate_regmap *gate = to_aoclk_gate_regmap(hw);
- unsigned int val;
- int ret;
-
- ret = regmap_read(gate->regmap, AO_RTI_GEN_CNTL_REG0, &val);
- if (ret)
- return ret;
-
- return (val & BIT(gate->bit_idx)) != 0;
-}
-
-const struct clk_ops meson_aoclk_gate_regmap_ops = {
- .enable = aoclk_gate_regmap_enable,
- .disable = aoclk_gate_regmap_disable,
- .is_enabled = aoclk_gate_regmap_is_enabled,
-};
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
index 6c161e0a8e59..9ec23ae9a219 100644
--- a/drivers/clk/meson/gxbb-aoclk.c
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -62,10 +62,9 @@
#include <linux/delay.h>
#include <dt-bindings/clock/gxbb-aoclkc.h>
#include <dt-bindings/reset/gxbb-aoclkc.h>
+#include "clk-regmap.h"
#include "gxbb-aoclk.h"
-static DEFINE_SPINLOCK(gxbb_aoclk_lock);
-
struct gxbb_aoclk_reset_controller {
struct reset_controller_dev reset;
unsigned int *data;
@@ -87,12 +86,14 @@ static const struct reset_control_ops gxbb_aoclk_reset_ops = {
};
#define GXBB_AO_GATE(_name, _bit) \
-static struct aoclk_gate_regmap _name##_ao = { \
- .bit_idx = (_bit), \
- .lock = &gxbb_aoclk_lock, \
+static struct clk_regmap _name##_ao = { \
+ .data = &(struct clk_regmap_gate_data) { \
+ .offset = AO_RTI_GEN_CNTL_REG0, \
+ .bit_idx = (_bit), \
+ }, \
.hw.init = &(struct clk_init_data) { \
.name = #_name "_ao", \
- .ops = &meson_aoclk_gate_regmap_ops, \
+ .ops = &clk_regmap_gate_ops, \
.parent_names = (const char *[]){ "clk81" }, \
.num_parents = 1, \
.flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
@@ -107,7 +108,6 @@ GXBB_AO_GATE(uart2, 5);
GXBB_AO_GATE(ir_blaster, 6);
static struct aoclk_cec_32k cec_32k_ao = {
- .lock = &gxbb_aoclk_lock,
.hw.init = &(struct clk_init_data) {
.name = "cec_32k_ao",
.ops = &meson_aoclk_cec_32k_ops,
@@ -126,7 +126,7 @@ static unsigned int gxbb_aoclk_reset[] = {
[RESET_AO_IR_BLASTER] = 23,
};
-static struct aoclk_gate_regmap *gxbb_aoclk_gate[] = {
+static struct clk_regmap *gxbb_aoclk_gate[] = {
[CLKID_AO_REMOTE] = &remote_ao,
[CLKID_AO_I2C_MASTER] = &i2c_master_ao,
[CLKID_AO_I2C_SLAVE] = &i2c_slave_ao,
@@ -177,10 +177,10 @@ static int gxbb_aoclkc_probe(struct platform_device *pdev)
* Populate regmap and register all clks
*/
for (clkid = 0; clkid < ARRAY_SIZE(gxbb_aoclk_gate); clkid++) {
- gxbb_aoclk_gate[clkid]->regmap = regmap;
+ gxbb_aoclk_gate[clkid]->map = regmap;
ret = devm_clk_hw_register(dev,
- gxbb_aoclk_onecell_data.hws[clkid]);
+ gxbb_aoclk_onecell_data.hws[clkid]);
if (ret)
return ret;
}
diff --git a/drivers/clk/meson/gxbb-aoclk.h b/drivers/clk/meson/gxbb-aoclk.h
index e8604c8f7eee..0be78383f257 100644
--- a/drivers/clk/meson/gxbb-aoclk.h
+++ b/drivers/clk/meson/gxbb-aoclk.h
@@ -17,22 +17,11 @@
#define AO_RTC_ALT_CLK_CNTL0 0x94
#define AO_RTC_ALT_CLK_CNTL1 0x98
-struct aoclk_gate_regmap {
- struct clk_hw hw;
- unsigned bit_idx;
- struct regmap *regmap;
- spinlock_t *lock;
-};
-
-#define to_aoclk_gate_regmap(_hw) \
- container_of(_hw, struct aoclk_gate_regmap, hw)
-
extern const struct clk_ops meson_aoclk_gate_regmap_ops;
struct aoclk_cec_32k {
struct clk_hw hw;
struct regmap *regmap;
- spinlock_t *lock;
};
#define to_aoclk_cec_32k(_hw) container_of(_hw, struct aoclk_cec_32k, hw)
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index af24455af5b4..b1e4d9557610 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -19,108 +19,19 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
-#include <linux/init.h>
+#include <linux/regmap.h>
#include "clkc.h"
#include "gxbb.h"
+#include "clk-regmap.h"
static DEFINE_SPINLOCK(meson_clk_lock);
-static const struct pll_rate_table sys_pll_rate_table[] = {
- PLL_RATE(24000000, 56, 1, 2),
- PLL_RATE(48000000, 64, 1, 2),
- PLL_RATE(72000000, 72, 1, 2),
- PLL_RATE(96000000, 64, 1, 2),
- PLL_RATE(120000000, 80, 1, 2),
- PLL_RATE(144000000, 96, 1, 2),
- PLL_RATE(168000000, 56, 1, 1),
- PLL_RATE(192000000, 64, 1, 1),
- PLL_RATE(216000000, 72, 1, 1),
- PLL_RATE(240000000, 80, 1, 1),
- PLL_RATE(264000000, 88, 1, 1),
- PLL_RATE(288000000, 96, 1, 1),
- PLL_RATE(312000000, 52, 1, 2),
- PLL_RATE(336000000, 56, 1, 2),
- PLL_RATE(360000000, 60, 1, 2),
- PLL_RATE(384000000, 64, 1, 2),
- PLL_RATE(408000000, 68, 1, 2),
- PLL_RATE(432000000, 72, 1, 2),
- PLL_RATE(456000000, 76, 1, 2),
- PLL_RATE(480000000, 80, 1, 2),
- PLL_RATE(504000000, 84, 1, 2),
- PLL_RATE(528000000, 88, 1, 2),
- PLL_RATE(552000000, 92, 1, 2),
- PLL_RATE(576000000, 96, 1, 2),
- PLL_RATE(600000000, 50, 1, 1),
- PLL_RATE(624000000, 52, 1, 1),
- PLL_RATE(648000000, 54, 1, 1),
- PLL_RATE(672000000, 56, 1, 1),
- PLL_RATE(696000000, 58, 1, 1),
- PLL_RATE(720000000, 60, 1, 1),
- PLL_RATE(744000000, 62, 1, 1),
- PLL_RATE(768000000, 64, 1, 1),
- PLL_RATE(792000000, 66, 1, 1),
- PLL_RATE(816000000, 68, 1, 1),
- PLL_RATE(840000000, 70, 1, 1),
- PLL_RATE(864000000, 72, 1, 1),
- PLL_RATE(888000000, 74, 1, 1),
- PLL_RATE(912000000, 76, 1, 1),
- PLL_RATE(936000000, 78, 1, 1),
- PLL_RATE(960000000, 80, 1, 1),
- PLL_RATE(984000000, 82, 1, 1),
- PLL_RATE(1008000000, 84, 1, 1),
- PLL_RATE(1032000000, 86, 1, 1),
- PLL_RATE(1056000000, 88, 1, 1),
- PLL_RATE(1080000000, 90, 1, 1),
- PLL_RATE(1104000000, 92, 1, 1),
- PLL_RATE(1128000000, 94, 1, 1),
- PLL_RATE(1152000000, 96, 1, 1),
- PLL_RATE(1176000000, 98, 1, 1),
- PLL_RATE(1200000000, 50, 1, 0),
- PLL_RATE(1224000000, 51, 1, 0),
- PLL_RATE(1248000000, 52, 1, 0),
- PLL_RATE(1272000000, 53, 1, 0),
- PLL_RATE(1296000000, 54, 1, 0),
- PLL_RATE(1320000000, 55, 1, 0),
- PLL_RATE(1344000000, 56, 1, 0),
- PLL_RATE(1368000000, 57, 1, 0),
- PLL_RATE(1392000000, 58, 1, 0),
- PLL_RATE(1416000000, 59, 1, 0),
- PLL_RATE(1440000000, 60, 1, 0),
- PLL_RATE(1464000000, 61, 1, 0),
- PLL_RATE(1488000000, 62, 1, 0),
- PLL_RATE(1512000000, 63, 1, 0),
- PLL_RATE(1536000000, 64, 1, 0),
- PLL_RATE(1560000000, 65, 1, 0),
- PLL_RATE(1584000000, 66, 1, 0),
- PLL_RATE(1608000000, 67, 1, 0),
- PLL_RATE(1632000000, 68, 1, 0),
- PLL_RATE(1656000000, 68, 1, 0),
- PLL_RATE(1680000000, 68, 1, 0),
- PLL_RATE(1704000000, 68, 1, 0),
- PLL_RATE(1728000000, 69, 1, 0),
- PLL_RATE(1752000000, 69, 1, 0),
- PLL_RATE(1776000000, 69, 1, 0),
- PLL_RATE(1800000000, 69, 1, 0),
- PLL_RATE(1824000000, 70, 1, 0),
- PLL_RATE(1848000000, 70, 1, 0),
- PLL_RATE(1872000000, 70, 1, 0),
- PLL_RATE(1896000000, 70, 1, 0),
- PLL_RATE(1920000000, 71, 1, 0),
- PLL_RATE(1944000000, 71, 1, 0),
- PLL_RATE(1968000000, 71, 1, 0),
- PLL_RATE(1992000000, 71, 1, 0),
- PLL_RATE(2016000000, 72, 1, 0),
- PLL_RATE(2040000000, 72, 1, 0),
- PLL_RATE(2064000000, 72, 1, 0),
- PLL_RATE(2088000000, 72, 1, 0),
- PLL_RATE(2112000000, 73, 1, 0),
- { /* sentinel */ },
-};
-
static const struct pll_rate_table gxbb_gp0_pll_rate_table[] = {
PLL_RATE(96000000, 32, 1, 3),
PLL_RATE(99000000, 33, 1, 3),
@@ -278,23 +189,39 @@ static const struct pll_rate_table gxl_gp0_pll_rate_table[] = {
{ /* sentinel */ },
};
-static struct meson_clk_pll gxbb_fixed_pll = {
- .m = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 0,
- .width = 9,
+static struct clk_regmap gxbb_fixed_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .frac = {
+ .reg_off = HHI_MPLL_CNTL2,
+ .shift = 0,
+ .width = 12,
+ },
+ .l = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
},
- .n = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 9,
- .width = 5,
- },
- .od = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 16,
- .width = 2,
- },
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "fixed_pll",
.ops = &meson_clk_pll_ro_ops,
@@ -304,33 +231,118 @@ static struct meson_clk_pll gxbb_fixed_pll = {
},
};
-static struct meson_clk_pll gxbb_hdmi_pll = {
- .m = {
- .reg_off = HHI_HDMI_PLL_CNTL,
- .shift = 0,
- .width = 9,
- },
- .n = {
- .reg_off = HHI_HDMI_PLL_CNTL,
- .shift = 9,
- .width = 5,
+static struct clk_fixed_factor gxbb_hdmi_pll_pre_mult = {
+ .mult = 2,
+ .div = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_pre_mult",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "xtal" },
+ .num_parents = 1,
},
- .frac = {
- .reg_off = HHI_HDMI_PLL_CNTL2,
- .shift = 0,
- .width = 12,
+};
+
+static struct clk_regmap gxbb_hdmi_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_HDMI_PLL_CNTL2,
+ .shift = 0,
+ .width = 12,
+ },
+ .od = {
+ .reg_off = HHI_HDMI_PLL_CNTL2,
+ .shift = 16,
+ .width = 2,
+ },
+ .od2 = {
+ .reg_off = HHI_HDMI_PLL_CNTL2,
+ .shift = 22,
+ .width = 2,
+ },
+ .od3 = {
+ .reg_off = HHI_HDMI_PLL_CNTL2,
+ .shift = 18,
+ .width = 2,
+ },
+ .l = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 28,
+ .width = 1,
+ },
},
- .od = {
- .reg_off = HHI_HDMI_PLL_CNTL2,
- .shift = 16,
- .width = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_pll_pre_mult" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
},
- .od2 = {
- .reg_off = HHI_HDMI_PLL_CNTL2,
- .shift = 22,
- .width = 2,
+};
+
+static struct clk_regmap gxl_hdmi_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .frac = {
+ /*
+ * On gxl, there is a register shift due to
+ * HHI_HDMI_PLL_CNTL1 which does not exist on gxbb,
+ * so we compute the register offset based on the PLL
+ * base to get it right
+ */
+ .reg_off = HHI_HDMI_PLL_CNTL + 4,
+ .shift = 0,
+ .width = 12,
+ },
+ .od = {
+ .reg_off = HHI_HDMI_PLL_CNTL + 8,
+ .shift = 21,
+ .width = 2,
+ },
+ .od2 = {
+ .reg_off = HHI_HDMI_PLL_CNTL + 8,
+ .shift = 23,
+ .width = 2,
+ },
+ .od3 = {
+ .reg_off = HHI_HDMI_PLL_CNTL + 8,
+ .shift = 19,
+ .width = 2,
+ },
+ .l = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll",
.ops = &meson_clk_pll_ro_ops,
@@ -340,25 +352,34 @@ static struct meson_clk_pll gxbb_hdmi_pll = {
},
};
-static struct meson_clk_pll gxbb_sys_pll = {
- .m = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 0,
- .width = 9,
- },
- .n = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 9,
- .width = 5,
+static struct clk_regmap gxbb_sys_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 10,
+ .width = 2,
+ },
+ .l = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
},
- .od = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 10,
- .width = 2,
- },
- .rate_table = sys_pll_rate_table,
- .rate_count = ARRAY_SIZE(sys_pll_rate_table),
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "sys_pll",
.ops = &meson_clk_pll_ro_ops,
@@ -368,38 +389,44 @@ static struct meson_clk_pll gxbb_sys_pll = {
},
};
-struct pll_params_table gxbb_gp0_params_table[] = {
- PLL_PARAM(HHI_GP0_PLL_CNTL, 0x6a000228),
- PLL_PARAM(HHI_GP0_PLL_CNTL2, 0x69c80000),
- PLL_PARAM(HHI_GP0_PLL_CNTL3, 0x0a5590c4),
- PLL_PARAM(HHI_GP0_PLL_CNTL4, 0x0000500d),
-};
-
-static struct meson_clk_pll gxbb_gp0_pll = {
- .m = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 0,
- .width = 9,
- },
- .n = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 9,
- .width = 5,
- },
- .od = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 16,
- .width = 2,
- },
- .params = {
- .params_table = gxbb_gp0_params_table,
- .params_count = ARRAY_SIZE(gxbb_gp0_params_table),
- .no_init_reset = true,
- .clear_reset_for_lock = true,
+static const struct reg_sequence gxbb_gp0_init_regs[] = {
+ { .reg = HHI_GP0_PLL_CNTL2, .def = 0x69c80000 },
+ { .reg = HHI_GP0_PLL_CNTL3, .def = 0x0a5590c4 },
+ { .reg = HHI_GP0_PLL_CNTL4, .def = 0x0000500d },
+ { .reg = HHI_GP0_PLL_CNTL, .def = 0x4a000228 },
+};
+
+static struct clk_regmap gxbb_gp0_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .l = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = gxbb_gp0_pll_rate_table,
+ .init_regs = gxbb_gp0_init_regs,
+ .init_count = ARRAY_SIZE(gxbb_gp0_init_regs),
},
- .rate_table = gxbb_gp0_pll_rate_table,
- .rate_count = ARRAY_SIZE(gxbb_gp0_pll_rate_table),
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "gp0_pll",
.ops = &meson_clk_pll_ops,
@@ -409,40 +436,51 @@ static struct meson_clk_pll gxbb_gp0_pll = {
},
};
-struct pll_params_table gxl_gp0_params_table[] = {
- PLL_PARAM(HHI_GP0_PLL_CNTL, 0x40010250),
- PLL_PARAM(HHI_GP0_PLL_CNTL1, 0xc084a000),
- PLL_PARAM(HHI_GP0_PLL_CNTL2, 0xb75020be),
- PLL_PARAM(HHI_GP0_PLL_CNTL3, 0x0a59a288),
- PLL_PARAM(HHI_GP0_PLL_CNTL4, 0xc000004d),
- PLL_PARAM(HHI_GP0_PLL_CNTL5, 0x00078000),
-};
-
-static struct meson_clk_pll gxl_gp0_pll = {
- .m = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 0,
- .width = 9,
- },
- .n = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 9,
- .width = 5,
- },
- .od = {
- .reg_off = HHI_GP0_PLL_CNTL,
- .shift = 16,
- .width = 2,
+static const struct reg_sequence gxl_gp0_init_regs[] = {
+ { .reg = HHI_GP0_PLL_CNTL1, .def = 0xc084b000 },
+ { .reg = HHI_GP0_PLL_CNTL2, .def = 0xb75020be },
+ { .reg = HHI_GP0_PLL_CNTL3, .def = 0x0a59a288 },
+ { .reg = HHI_GP0_PLL_CNTL4, .def = 0xc000004d },
+ { .reg = HHI_GP0_PLL_CNTL5, .def = 0x00078000 },
+ { .reg = HHI_GP0_PLL_CNTL, .def = 0x40010250 },
+};
+
+static struct clk_regmap gxl_gp0_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .frac = {
+ .reg_off = HHI_GP0_PLL_CNTL1,
+ .shift = 0,
+ .width = 10,
+ },
+ .l = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = gxl_gp0_pll_rate_table,
+ .init_regs = gxl_gp0_init_regs,
+ .init_count = ARRAY_SIZE(gxl_gp0_init_regs),
},
- .params = {
- .params_table = gxl_gp0_params_table,
- .params_count = ARRAY_SIZE(gxl_gp0_params_table),
- .no_init_reset = true,
- .reset_lock_loop = true,
- },
- .rate_table = gxl_gp0_pll_rate_table,
- .rate_count = ARRAY_SIZE(gxl_gp0_pll_rate_table),
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "gp0_pll",
.ops = &meson_clk_pll_ops,
@@ -452,161 +490,267 @@ static struct meson_clk_pll gxl_gp0_pll = {
},
};
-static struct clk_fixed_factor gxbb_fclk_div2 = {
+static struct clk_fixed_factor gxbb_fclk_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div2",
+ .name = "fclk_div2_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor gxbb_fclk_div3 = {
+static struct clk_regmap gxbb_fclk_div2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 27,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div2_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor gxbb_fclk_div3_div = {
.mult = 1,
.div = 3,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div3",
+ .name = "fclk_div3_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor gxbb_fclk_div4 = {
+static struct clk_regmap gxbb_fclk_div3 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 28,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div3",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div3_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor gxbb_fclk_div4_div = {
.mult = 1,
.div = 4,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div4",
+ .name = "fclk_div4_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor gxbb_fclk_div5 = {
+static struct clk_regmap gxbb_fclk_div4 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 29,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div4",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div4_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor gxbb_fclk_div5_div = {
.mult = 1,
.div = 5,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div5",
+ .name = "fclk_div5_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor gxbb_fclk_div7 = {
+static struct clk_regmap gxbb_fclk_div5 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 30,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div5",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div5_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor gxbb_fclk_div7_div = {
.mult = 1,
.div = 7,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div7",
+ .name = "fclk_div7_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct meson_clk_mpll gxbb_mpll0 = {
- .sdm = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 0,
- .width = 14,
+static struct clk_regmap gxbb_fclk_div7 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 31,
},
- .sdm_en = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 15,
- .width = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div7",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div7_div" },
+ .num_parents = 1,
},
- .n2 = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 16,
- .width = 9,
+};
+
+static struct clk_regmap gxbb_mpll_prediv = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MPLL_CNTL5,
+ .shift = 12,
+ .width = 1,
},
- .en = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 14,
- .width = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll_prediv",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
},
- .ssen = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 25,
- .width = 1,
+};
+
+static struct clk_regmap gxbb_mpll0_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 15,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 16,
+ .width = 9,
+ },
+ .ssen = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 25,
+ .width = 1,
+ },
+ .lock = &meson_clk_lock,
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
- .name = "mpll0",
+ .name = "mpll0_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_names = (const char *[]){ "mpll_prediv" },
.num_parents = 1,
},
};
-static struct meson_clk_mpll gxbb_mpll1 = {
- .sdm = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 0,
- .width = 14,
- },
- .sdm_en = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 15,
- .width = 1,
+static struct clk_regmap gxbb_mpll0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL7,
+ .bit_idx = 14,
},
- .n2 = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 16,
- .width = 9,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
- .en = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 14,
- .width = 1,
+};
+
+static struct clk_regmap gxbb_mpll1_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL8,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL8,
+ .shift = 15,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL8,
+ .shift = 16,
+ .width = 9,
+ },
+ .lock = &meson_clk_lock,
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
- .name = "mpll1",
+ .name = "mpll1_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_names = (const char *[]){ "mpll_prediv" },
.num_parents = 1,
},
};
-static struct meson_clk_mpll gxbb_mpll2 = {
- .sdm = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 0,
- .width = 14,
- },
- .sdm_en = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 15,
- .width = 1,
+static struct clk_regmap gxbb_mpll1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL8,
+ .bit_idx = 14,
},
- .n2 = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 16,
- .width = 9,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
- .en = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 14,
- .width = 1,
+};
+
+static struct clk_regmap gxbb_mpll2_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL9,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL9,
+ .shift = 15,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL9,
+ .shift = 16,
+ .width = 9,
+ },
+ .lock = &meson_clk_lock,
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
- .name = "mpll2",
+ .name = "mpll2_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_names = (const char *[]){ "mpll_prediv" },
.num_parents = 1,
},
};
-/*
- * FIXME The legacy composite clocks (e.g. clk81) are both PLL post-dividers
- * and should be modeled with their respective PLLs via the forthcoming
- * coordinated clock rates feature
- */
+static struct clk_regmap gxbb_mpll2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL9,
+ .bit_idx = 14,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll2_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
static const char * const clk81_parent_names[] = {
@@ -614,16 +758,16 @@ static const char * const clk81_parent_names[] = {
"fclk_div3", "fclk_div5"
};
-static struct clk_mux gxbb_mpeg_clk_sel = {
- .reg = (void *)HHI_MPEG_CLK_CNTL,
- .mask = 0x7,
- .shift = 12,
- .flags = CLK_MUX_READ_ONLY,
- .table = mux_table_clk81,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_mpeg_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 12,
+ .table = mux_table_clk81,
+ },
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_sel",
- .ops = &clk_mux_ro_ops,
+ .ops = &clk_regmap_mux_ro_ops,
/*
* bits 14:12 selects from 8 possible parents:
* xtal, 1'b0 (wtf), fclk_div7, mpll_clkout1, mpll_clkout2,
@@ -631,72 +775,75 @@ static struct clk_mux gxbb_mpeg_clk_sel = {
*/
.parent_names = clk81_parent_names,
.num_parents = ARRAY_SIZE(clk81_parent_names),
- .flags = (CLK_SET_RATE_NO_REPARENT | CLK_IGNORE_UNUSED),
},
};
-static struct clk_divider gxbb_mpeg_clk_div = {
- .reg = (void *)HHI_MPEG_CLK_CNTL,
- .shift = 0,
- .width = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_mpeg_clk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ro_ops,
.parent_names = (const char *[]){ "mpeg_clk_sel" },
.num_parents = 1,
- .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED),
},
};
-/* the mother of dragons^W gates */
-static struct clk_gate gxbb_clk81 = {
- .reg = (void *)HHI_MPEG_CLK_CNTL,
- .bit_idx = 7,
- .lock = &meson_clk_lock,
+/* the mother of dragons gates */
+static struct clk_regmap gxbb_clk81 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .bit_idx = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "clk81",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "mpeg_clk_div" },
.num_parents = 1,
- .flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
+ .flags = CLK_IS_CRITICAL,
},
};
-static struct clk_mux gxbb_sar_adc_clk_sel = {
- .reg = (void *)HHI_SAR_CLK_CNTL,
- .mask = 0x3,
- .shift = 9,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_sar_adc_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SAR_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ },
.hw.init = &(struct clk_init_data){
.name = "sar_adc_clk_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
/* NOTE: The datasheet doesn't list the parents for bit 10 */
.parent_names = (const char *[]){ "xtal", "clk81", },
.num_parents = 2,
},
};
-static struct clk_divider gxbb_sar_adc_clk_div = {
- .reg = (void *)HHI_SAR_CLK_CNTL,
- .shift = 0,
- .width = 8,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_sar_adc_clk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SAR_CLK_CNTL,
+ .shift = 0,
+ .width = 8,
+ },
.hw.init = &(struct clk_init_data){
.name = "sar_adc_clk_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "sar_adc_clk_sel" },
.num_parents = 1,
},
};
-static struct clk_gate gxbb_sar_adc_clk = {
- .reg = (void *)HHI_SAR_CLK_CNTL,
- .bit_idx = 8,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_sar_adc_clk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SAR_CLK_CNTL,
+ .bit_idx = 8,
+ },
.hw.init = &(struct clk_init_data){
.name = "sar_adc_clk",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "sar_adc_clk_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -708,21 +855,20 @@ static struct clk_gate gxbb_sar_adc_clk = {
* muxed by a glitch-free switch.
*/
-static u32 mux_table_mali_0_1[] = {0, 1, 2, 3, 4, 5, 6, 7};
static const char * const gxbb_mali_0_1_parent_names[] = {
"xtal", "gp0_pll", "mpll2", "mpll1", "fclk_div7",
"fclk_div4", "fclk_div3", "fclk_div5"
};
-static struct clk_mux gxbb_mali_0_sel = {
- .reg = (void *)HHI_MALI_CLK_CNTL,
- .mask = 0x7,
- .shift = 9,
- .table = mux_table_mali_0_1,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_mali_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ },
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
/*
* bits 10:9 selects from 8 possible parents:
* xtal, gp0_pll, mpll2, mpll1, fclk_div7,
@@ -734,42 +880,44 @@ static struct clk_mux gxbb_mali_0_sel = {
},
};
-static struct clk_divider gxbb_mali_0_div = {
- .reg = (void *)HHI_MALI_CLK_CNTL,
- .shift = 0,
- .width = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_mali_0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "mali_0_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "mali_0_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
-static struct clk_gate gxbb_mali_0 = {
- .reg = (void *)HHI_MALI_CLK_CNTL,
- .bit_idx = 8,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_mali_0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .bit_idx = 8,
+ },
.hw.init = &(struct clk_init_data){
.name = "mali_0",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "mali_0_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_mux gxbb_mali_1_sel = {
- .reg = (void *)HHI_MALI_CLK_CNTL,
- .mask = 0x7,
- .shift = 25,
- .table = mux_table_mali_0_1,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_mali_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 25,
+ },
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
/*
* bits 10:9 selects from 8 possible parents:
* xtal, gp0_pll, mpll2, mpll1, fclk_div7,
@@ -781,77 +929,79 @@ static struct clk_mux gxbb_mali_1_sel = {
},
};
-static struct clk_divider gxbb_mali_1_div = {
- .reg = (void *)HHI_MALI_CLK_CNTL,
- .shift = 16,
- .width = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_mali_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "mali_1_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "mali_1_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
-static struct clk_gate gxbb_mali_1 = {
- .reg = (void *)HHI_MALI_CLK_CNTL,
- .bit_idx = 24,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_mali_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .bit_idx = 24,
+ },
.hw.init = &(struct clk_init_data){
.name = "mali_1",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "mali_1_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static u32 mux_table_mali[] = {0, 1};
static const char * const gxbb_mali_parent_names[] = {
"mali_0", "mali_1"
};
-static struct clk_mux gxbb_mali = {
- .reg = (void *)HHI_MALI_CLK_CNTL,
- .mask = 1,
- .shift = 31,
- .table = mux_table_mali,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_mali = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MALI_CLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ },
.hw.init = &(struct clk_init_data){
.name = "mali",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = gxbb_mali_parent_names,
.num_parents = 2,
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
-static struct clk_mux gxbb_cts_amclk_sel = {
- .reg = (void *) HHI_AUD_CLK_CNTL,
- .mask = 0x3,
- .shift = 9,
- /* Default parent unknown (register reset value: 0) */
- .table = (u32[]){ 1, 2, 3 },
- .lock = &meson_clk_lock,
- .hw.init = &(struct clk_init_data){
+static struct clk_regmap gxbb_cts_amclk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_AUD_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ .table = (u32[]){ 1, 2, 3 },
+ },
+ .hw.init = &(struct clk_init_data){
.name = "cts_amclk_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = (const char *[]){ "mpll0", "mpll1", "mpll2" },
.num_parents = 3,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct meson_clk_audio_divider gxbb_cts_amclk_div = {
- .div = {
- .reg_off = HHI_AUD_CLK_CNTL,
- .shift = 0,
- .width = 8,
+static struct clk_regmap gxbb_cts_amclk_div = {
+ .data = &(struct meson_clk_audio_div_data){
+ .div = {
+ .reg_off = HHI_AUD_CLK_CNTL,
+ .shift = 0,
+ .width = 8,
+ },
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
},
- .flags = CLK_DIVIDER_ROUND_CLOSEST,
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_div",
.ops = &meson_clk_audio_divider_ops,
@@ -861,71 +1011,75 @@ static struct meson_clk_audio_divider gxbb_cts_amclk_div = {
},
};
-static struct clk_gate gxbb_cts_amclk = {
- .reg = (void *) HHI_AUD_CLK_CNTL,
- .bit_idx = 8,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_cts_amclk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_AUD_CLK_CNTL,
+ .bit_idx = 8,
+ },
.hw.init = &(struct clk_init_data){
.name = "cts_amclk",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "cts_amclk_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_mux gxbb_cts_mclk_i958_sel = {
- .reg = (void *)HHI_AUD_CLK_CNTL2,
- .mask = 0x3,
- .shift = 25,
- /* Default parent unknown (register reset value: 0) */
- .table = (u32[]){ 1, 2, 3 },
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_cts_mclk_i958_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_AUD_CLK_CNTL2,
+ .mask = 0x3,
+ .shift = 25,
+ .table = (u32[]){ 1, 2, 3 },
+ },
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = (const char *[]){ "mpll0", "mpll1", "mpll2" },
.num_parents = 3,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_divider gxbb_cts_mclk_i958_div = {
- .reg = (void *)HHI_AUD_CLK_CNTL2,
- .shift = 16,
- .width = 8,
- .lock = &meson_clk_lock,
- .flags = CLK_DIVIDER_ROUND_CLOSEST,
+static struct clk_regmap gxbb_cts_mclk_i958_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_AUD_CLK_CNTL2,
+ .shift = 16,
+ .width = 8,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "cts_mclk_i958_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate gxbb_cts_mclk_i958 = {
- .reg = (void *)HHI_AUD_CLK_CNTL2,
- .bit_idx = 24,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_cts_mclk_i958 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_AUD_CLK_CNTL2,
+ .bit_idx = 24,
+ },
.hw.init = &(struct clk_init_data){
.name = "cts_mclk_i958",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "cts_mclk_i958_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_mux gxbb_cts_i958 = {
- .reg = (void *)HHI_AUD_CLK_CNTL2,
- .mask = 0x1,
- .shift = 27,
- .lock = &meson_clk_lock,
- .hw.init = &(struct clk_init_data){
+static struct clk_regmap gxbb_cts_i958 = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_AUD_CLK_CNTL2,
+ .mask = 0x1,
+ .shift = 27,
+ },
+ .hw.init = &(struct clk_init_data){
.name = "cts_i958",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = (const char *[]){ "cts_amclk", "cts_mclk_i958" },
.num_parents = 2,
/*
@@ -936,27 +1090,29 @@ static struct clk_mux gxbb_cts_i958 = {
},
};
-static struct clk_divider gxbb_32k_clk_div = {
- .reg = (void *)HHI_32K_CLK_CNTL,
- .shift = 0,
- .width = 14,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_32k_clk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_32K_CLK_CNTL,
+ .shift = 0,
+ .width = 14,
+ },
.hw.init = &(struct clk_init_data){
.name = "32k_clk_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "32k_clk_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_DIVIDER_ROUND_CLOSEST,
},
};
-static struct clk_gate gxbb_32k_clk = {
- .reg = (void *)HHI_32K_CLK_CNTL,
- .bit_idx = 15,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_32k_clk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_32K_CLK_CNTL,
+ .bit_idx = 15,
+ },
.hw.init = &(struct clk_init_data){
.name = "32k_clk",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "32k_clk_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -967,14 +1123,15 @@ static const char * const gxbb_32k_clk_parent_names[] = {
"xtal", "cts_slow_oscin", "fclk_div3", "fclk_div5"
};
-static struct clk_mux gxbb_32k_clk_sel = {
- .reg = (void *)HHI_32K_CLK_CNTL,
- .mask = 0x3,
- .shift = 16,
- .lock = &meson_clk_lock,
- .hw.init = &(struct clk_init_data){
+static struct clk_regmap gxbb_32k_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_32K_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
.name = "32k_clk_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = gxbb_32k_clk_parent_names,
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
@@ -993,42 +1150,45 @@ static const char * const gxbb_sd_emmc_clk0_parent_names[] = {
};
/* SDIO clock */
-static struct clk_mux gxbb_sd_emmc_a_clk0_sel = {
- .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
- .mask = 0x7,
- .shift = 9,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_sd_emmc_a_clk0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ },
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = gxbb_sd_emmc_clk0_parent_names,
.num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_names),
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_divider gxbb_sd_emmc_a_clk0_div = {
- .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
- .shift = 0,
- .width = 7,
- .lock = &meson_clk_lock,
- .flags = CLK_DIVIDER_ROUND_CLOSEST,
+static struct clk_regmap gxbb_sd_emmc_a_clk0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "sd_emmc_a_clk0_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate gxbb_sd_emmc_a_clk0 = {
- .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
- .bit_idx = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_sd_emmc_a_clk0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .bit_idx = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_a_clk0",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "sd_emmc_a_clk0_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1036,42 +1196,45 @@ static struct clk_gate gxbb_sd_emmc_a_clk0 = {
};
/* SDcard clock */
-static struct clk_mux gxbb_sd_emmc_b_clk0_sel = {
- .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
- .mask = 0x7,
- .shift = 25,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_sd_emmc_b_clk0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 25,
+ },
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = gxbb_sd_emmc_clk0_parent_names,
.num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_names),
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_divider gxbb_sd_emmc_b_clk0_div = {
- .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
- .shift = 16,
- .width = 7,
- .lock = &meson_clk_lock,
- .flags = CLK_DIVIDER_ROUND_CLOSEST,
+static struct clk_regmap gxbb_sd_emmc_b_clk0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "sd_emmc_b_clk0_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate gxbb_sd_emmc_b_clk0 = {
- .reg = (void *)HHI_SD_EMMC_CLK_CNTL,
- .bit_idx = 23,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_sd_emmc_b_clk0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SD_EMMC_CLK_CNTL,
+ .bit_idx = 23,
+ },
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_b_clk0",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "sd_emmc_b_clk0_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1079,42 +1242,45 @@ static struct clk_gate gxbb_sd_emmc_b_clk0 = {
};
/* EMMC/NAND clock */
-static struct clk_mux gxbb_sd_emmc_c_clk0_sel = {
- .reg = (void *)HHI_NAND_CLK_CNTL,
- .mask = 0x7,
- .shift = 9,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_sd_emmc_c_clk0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ },
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = gxbb_sd_emmc_clk0_parent_names,
.num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_names),
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_divider gxbb_sd_emmc_c_clk0_div = {
- .reg = (void *)HHI_NAND_CLK_CNTL,
- .shift = 0,
- .width = 7,
- .lock = &meson_clk_lock,
- .flags = CLK_DIVIDER_ROUND_CLOSEST,
+static struct clk_regmap gxbb_sd_emmc_c_clk0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "sd_emmc_c_clk0_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate gxbb_sd_emmc_c_clk0 = {
- .reg = (void *)HHI_NAND_CLK_CNTL,
- .bit_idx = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_sd_emmc_c_clk0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_NAND_CLK_CNTL,
+ .bit_idx = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_c_clk0",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "sd_emmc_c_clk0_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1123,20 +1289,19 @@ static struct clk_gate gxbb_sd_emmc_c_clk0 = {
/* VPU Clock */
-static u32 mux_table_vpu[] = {0, 1, 2, 3};
static const char * const gxbb_vpu_parent_names[] = {
"fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
};
-static struct clk_mux gxbb_vpu_0_sel = {
- .reg = (void *)HHI_VPU_CLK_CNTL,
- .mask = 0x3,
- .shift = 9,
- .lock = &meson_clk_lock,
- .table = mux_table_vpu,
+static struct clk_regmap gxbb_vpu_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ },
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
/*
* bits 9:10 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
@@ -1147,42 +1312,44 @@ static struct clk_mux gxbb_vpu_0_sel = {
},
};
-static struct clk_divider gxbb_vpu_0_div = {
- .reg = (void *)HHI_VPU_CLK_CNTL,
- .shift = 0,
- .width = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vpu_0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "vpu_0_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "vpu_0_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate gxbb_vpu_0 = {
- .reg = (void *)HHI_VPU_CLK_CNTL,
- .bit_idx = 8,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vpu_0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .bit_idx = 8,
+ },
.hw.init = &(struct clk_init_data) {
.name = "vpu_0",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "vpu_0_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
};
-static struct clk_mux gxbb_vpu_1_sel = {
- .reg = (void *)HHI_VPU_CLK_CNTL,
- .mask = 0x3,
- .shift = 25,
- .lock = &meson_clk_lock,
- .table = mux_table_vpu,
+static struct clk_regmap gxbb_vpu_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ },
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
/*
* bits 25:26 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
@@ -1193,41 +1360,44 @@ static struct clk_mux gxbb_vpu_1_sel = {
},
};
-static struct clk_divider gxbb_vpu_1_div = {
- .reg = (void *)HHI_VPU_CLK_CNTL,
- .shift = 16,
- .width = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vpu_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "vpu_1_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "vpu_1_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate gxbb_vpu_1 = {
- .reg = (void *)HHI_VPU_CLK_CNTL,
- .bit_idx = 24,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vpu_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .bit_idx = 24,
+ },
.hw.init = &(struct clk_init_data) {
.name = "vpu_1",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "vpu_1_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
};
-static struct clk_mux gxbb_vpu = {
- .reg = (void *)HHI_VPU_CLK_CNTL,
- .mask = 1,
- .shift = 31,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vpu = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ },
.hw.init = &(struct clk_init_data){
.name = "vpu",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
/*
* bit 31 selects from 2 possible parents:
* vpu_0 or vpu_1
@@ -1240,20 +1410,19 @@ static struct clk_mux gxbb_vpu = {
/* VAPB Clock */
-static u32 mux_table_vapb[] = {0, 1, 2, 3};
static const char * const gxbb_vapb_parent_names[] = {
"fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
};
-static struct clk_mux gxbb_vapb_0_sel = {
- .reg = (void *)HHI_VAPBCLK_CNTL,
- .mask = 0x3,
- .shift = 9,
- .lock = &meson_clk_lock,
- .table = mux_table_vapb,
+static struct clk_regmap gxbb_vapb_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ },
.hw.init = &(struct clk_init_data){
.name = "vapb_0_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
/*
* bits 9:10 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
@@ -1264,42 +1433,44 @@ static struct clk_mux gxbb_vapb_0_sel = {
},
};
-static struct clk_divider gxbb_vapb_0_div = {
- .reg = (void *)HHI_VAPBCLK_CNTL,
- .shift = 0,
- .width = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vapb_0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "vapb_0_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "vapb_0_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate gxbb_vapb_0 = {
- .reg = (void *)HHI_VAPBCLK_CNTL,
- .bit_idx = 8,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vapb_0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .bit_idx = 8,
+ },
.hw.init = &(struct clk_init_data) {
.name = "vapb_0",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "vapb_0_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
};
-static struct clk_mux gxbb_vapb_1_sel = {
- .reg = (void *)HHI_VAPBCLK_CNTL,
- .mask = 0x3,
- .shift = 25,
- .lock = &meson_clk_lock,
- .table = mux_table_vapb,
+static struct clk_regmap gxbb_vapb_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ },
.hw.init = &(struct clk_init_data){
.name = "vapb_1_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
/*
* bits 25:26 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
@@ -1310,41 +1481,44 @@ static struct clk_mux gxbb_vapb_1_sel = {
},
};
-static struct clk_divider gxbb_vapb_1_div = {
- .reg = (void *)HHI_VAPBCLK_CNTL,
- .shift = 16,
- .width = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vapb_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "vapb_1_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "vapb_1_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_gate gxbb_vapb_1 = {
- .reg = (void *)HHI_VAPBCLK_CNTL,
- .bit_idx = 24,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vapb_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .bit_idx = 24,
+ },
.hw.init = &(struct clk_init_data) {
.name = "vapb_1",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "vapb_1_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
},
};
-static struct clk_mux gxbb_vapb_sel = {
- .reg = (void *)HHI_VAPBCLK_CNTL,
- .mask = 1,
- .shift = 31,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vapb_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ },
.hw.init = &(struct clk_init_data){
.name = "vapb_sel",
- .ops = &clk_mux_ops,
+ .ops = &clk_regmap_mux_ops,
/*
* bit 31 selects from 2 possible parents:
* vapb_0 or vapb_1
@@ -1355,13 +1529,14 @@ static struct clk_mux gxbb_vapb_sel = {
},
};
-static struct clk_gate gxbb_vapb = {
- .reg = (void *)HHI_VAPBCLK_CNTL,
- .bit_idx = 30,
- .lock = &meson_clk_lock,
+static struct clk_regmap gxbb_vapb = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .bit_idx = 30,
+ },
.hw.init = &(struct clk_init_data) {
.name = "vapb",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "vapb_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
@@ -1601,6 +1776,16 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
[CLKID_VAPB_1] = &gxbb_vapb_1.hw,
[CLKID_VAPB_SEL] = &gxbb_vapb_sel.hw,
[CLKID_VAPB] = &gxbb_vapb.hw,
+ [CLKID_HDMI_PLL_PRE_MULT] = &gxbb_hdmi_pll_pre_mult.hw,
+ [CLKID_MPLL0_DIV] = &gxbb_mpll0_div.hw,
+ [CLKID_MPLL1_DIV] = &gxbb_mpll1_div.hw,
+ [CLKID_MPLL2_DIV] = &gxbb_mpll2_div.hw,
+ [CLKID_MPLL_PREDIV] = &gxbb_mpll_prediv.hw,
+ [CLKID_FCLK_DIV2_DIV] = &gxbb_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV3_DIV] = &gxbb_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV4_DIV] = &gxbb_fclk_div4_div.hw,
+ [CLKID_FCLK_DIV5_DIV] = &gxbb_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV7_DIV] = &gxbb_fclk_div7_div.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -1609,7 +1794,7 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
static struct clk_hw_onecell_data gxl_hw_onecell_data = {
.hws = {
[CLKID_SYS_PLL] = &gxbb_sys_pll.hw,
- [CLKID_HDMI_PLL] = &gxbb_hdmi_pll.hw,
+ [CLKID_HDMI_PLL] = &gxl_hdmi_pll.hw,
[CLKID_FIXED_PLL] = &gxbb_fixed_pll.hw,
[CLKID_FCLK_DIV2] = &gxbb_fclk_div2.hw,
[CLKID_FCLK_DIV3] = &gxbb_fclk_div3.hw,
@@ -1748,34 +1933,31 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
[CLKID_VAPB_1] = &gxbb_vapb_1.hw,
[CLKID_VAPB_SEL] = &gxbb_vapb_sel.hw,
[CLKID_VAPB] = &gxbb_vapb.hw,
+ [CLKID_MPLL0_DIV] = &gxbb_mpll0_div.hw,
+ [CLKID_MPLL1_DIV] = &gxbb_mpll1_div.hw,
+ [CLKID_MPLL2_DIV] = &gxbb_mpll2_div.hw,
+ [CLKID_MPLL_PREDIV] = &gxbb_mpll_prediv.hw,
+ [CLKID_FCLK_DIV2_DIV] = &gxbb_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV3_DIV] = &gxbb_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV4_DIV] = &gxbb_fclk_div4_div.hw,
+ [CLKID_FCLK_DIV5_DIV] = &gxbb_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV7_DIV] = &gxbb_fclk_div7_div.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
};
-/* Convenience tables to populate base addresses in .probe */
-
-static struct meson_clk_pll *const gxbb_clk_plls[] = {
- &gxbb_fixed_pll,
- &gxbb_hdmi_pll,
- &gxbb_sys_pll,
+static struct clk_regmap *const gxbb_clk_regmaps[] = {
&gxbb_gp0_pll,
-};
-
-static struct meson_clk_pll *const gxl_clk_plls[] = {
- &gxbb_fixed_pll,
&gxbb_hdmi_pll,
- &gxbb_sys_pll,
- &gxl_gp0_pll,
};
-static struct meson_clk_mpll *const gxbb_clk_mplls[] = {
- &gxbb_mpll0,
- &gxbb_mpll1,
- &gxbb_mpll2,
+static struct clk_regmap *const gxl_clk_regmaps[] = {
+ &gxl_gp0_pll,
+ &gxl_hdmi_pll,
};
-static struct clk_gate *const gxbb_clk_gates[] = {
+static struct clk_regmap *const gx_clk_regmaps[] = {
&gxbb_clk81,
&gxbb_ddr,
&gxbb_dos,
@@ -1872,9 +2054,19 @@ static struct clk_gate *const gxbb_clk_gates[] = {
&gxbb_vapb_0,
&gxbb_vapb_1,
&gxbb_vapb,
-};
-
-static struct clk_mux *const gxbb_clk_muxes[] = {
+ &gxbb_mpeg_clk_div,
+ &gxbb_sar_adc_clk_div,
+ &gxbb_mali_0_div,
+ &gxbb_mali_1_div,
+ &gxbb_cts_mclk_i958_div,
+ &gxbb_32k_clk_div,
+ &gxbb_sd_emmc_a_clk0_div,
+ &gxbb_sd_emmc_b_clk0_div,
+ &gxbb_sd_emmc_c_clk0_div,
+ &gxbb_vpu_0_div,
+ &gxbb_vpu_1_div,
+ &gxbb_vapb_0_div,
+ &gxbb_vapb_1_div,
&gxbb_mpeg_clk_sel,
&gxbb_sar_adc_clk_sel,
&gxbb_mali_0_sel,
@@ -1893,73 +2085,38 @@ static struct clk_mux *const gxbb_clk_muxes[] = {
&gxbb_vapb_0_sel,
&gxbb_vapb_1_sel,
&gxbb_vapb_sel,
-};
-
-static struct clk_divider *const gxbb_clk_dividers[] = {
- &gxbb_mpeg_clk_div,
- &gxbb_sar_adc_clk_div,
- &gxbb_mali_0_div,
- &gxbb_mali_1_div,
- &gxbb_cts_mclk_i958_div,
- &gxbb_32k_clk_div,
- &gxbb_sd_emmc_a_clk0_div,
- &gxbb_sd_emmc_b_clk0_div,
- &gxbb_sd_emmc_c_clk0_div,
- &gxbb_vpu_0_div,
- &gxbb_vpu_1_div,
- &gxbb_vapb_0_div,
- &gxbb_vapb_1_div,
-};
-
-static struct meson_clk_audio_divider *const gxbb_audio_dividers[] = {
+ &gxbb_mpll0,
+ &gxbb_mpll1,
+ &gxbb_mpll2,
+ &gxbb_mpll0_div,
+ &gxbb_mpll1_div,
+ &gxbb_mpll2_div,
&gxbb_cts_amclk_div,
+ &gxbb_fixed_pll,
+ &gxbb_sys_pll,
+ &gxbb_mpll_prediv,
+ &gxbb_fclk_div2,
+ &gxbb_fclk_div3,
+ &gxbb_fclk_div4,
+ &gxbb_fclk_div5,
+ &gxbb_fclk_div7,
};
struct clkc_data {
- struct clk_gate *const *clk_gates;
- unsigned int clk_gates_count;
- struct meson_clk_mpll *const *clk_mplls;
- unsigned int clk_mplls_count;
- struct meson_clk_pll *const *clk_plls;
- unsigned int clk_plls_count;
- struct clk_mux *const *clk_muxes;
- unsigned int clk_muxes_count;
- struct clk_divider *const *clk_dividers;
- unsigned int clk_dividers_count;
- struct meson_clk_audio_divider *const *clk_audio_dividers;
- unsigned int clk_audio_dividers_count;
+ struct clk_regmap *const *regmap_clks;
+ unsigned int regmap_clks_count;
struct clk_hw_onecell_data *hw_onecell_data;
};
static const struct clkc_data gxbb_clkc_data = {
- .clk_gates = gxbb_clk_gates,
- .clk_gates_count = ARRAY_SIZE(gxbb_clk_gates),
- .clk_mplls = gxbb_clk_mplls,
- .clk_mplls_count = ARRAY_SIZE(gxbb_clk_mplls),
- .clk_plls = gxbb_clk_plls,
- .clk_plls_count = ARRAY_SIZE(gxbb_clk_plls),
- .clk_muxes = gxbb_clk_muxes,
- .clk_muxes_count = ARRAY_SIZE(gxbb_clk_muxes),
- .clk_dividers = gxbb_clk_dividers,
- .clk_dividers_count = ARRAY_SIZE(gxbb_clk_dividers),
- .clk_audio_dividers = gxbb_audio_dividers,
- .clk_audio_dividers_count = ARRAY_SIZE(gxbb_audio_dividers),
+ .regmap_clks = gxbb_clk_regmaps,
+ .regmap_clks_count = ARRAY_SIZE(gxbb_clk_regmaps),
.hw_onecell_data = &gxbb_hw_onecell_data,
};
static const struct clkc_data gxl_clkc_data = {
- .clk_gates = gxbb_clk_gates,
- .clk_gates_count = ARRAY_SIZE(gxbb_clk_gates),
- .clk_mplls = gxbb_clk_mplls,
- .clk_mplls_count = ARRAY_SIZE(gxbb_clk_mplls),
- .clk_plls = gxl_clk_plls,
- .clk_plls_count = ARRAY_SIZE(gxl_clk_plls),
- .clk_muxes = gxbb_clk_muxes,
- .clk_muxes_count = ARRAY_SIZE(gxbb_clk_muxes),
- .clk_dividers = gxbb_clk_dividers,
- .clk_dividers_count = ARRAY_SIZE(gxbb_clk_dividers),
- .clk_audio_dividers = gxbb_audio_dividers,
- .clk_audio_dividers_count = ARRAY_SIZE(gxbb_audio_dividers),
+ .regmap_clks = gxl_clk_regmaps,
+ .regmap_clks_count = ARRAY_SIZE(gxl_clk_regmaps),
.hw_onecell_data = &gxl_hw_onecell_data,
};
@@ -1969,71 +2126,79 @@ static const struct of_device_id clkc_match_table[] = {
{},
};
+static const struct regmap_config clkc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
static int gxbb_clkc_probe(struct platform_device *pdev)
{
const struct clkc_data *clkc_data;
+ struct resource *res;
void __iomem *clk_base;
- int ret, clkid, i;
+ struct regmap *map;
+ int ret, i;
struct device *dev = &pdev->dev;
- clkc_data = of_device_get_match_data(&pdev->dev);
+ clkc_data = of_device_get_match_data(dev);
if (!clkc_data)
return -EINVAL;
- /* Generic clocks and PLLs */
- clk_base = of_iomap(dev->of_node, 0);
- if (!clk_base) {
- pr_err("%s: Unable to map clk base\n", __func__);
- return -ENXIO;
- }
-
- /* Populate base address for PLLs */
- for (i = 0; i < clkc_data->clk_plls_count; i++)
- clkc_data->clk_plls[i]->base = clk_base;
-
- /* Populate base address for MPLLs */
- for (i = 0; i < clkc_data->clk_mplls_count; i++)
- clkc_data->clk_mplls[i]->base = clk_base;
+ /* Get the hhi system controller node if available */
+ map = syscon_node_to_regmap(of_get_parent(dev->of_node));
+ if (IS_ERR(map)) {
+ dev_err(dev,
+ "failed to get HHI regmap - Trying obsolete regs\n");
- /* Populate base address for gates */
- for (i = 0; i < clkc_data->clk_gates_count; i++)
- clkc_data->clk_gates[i]->reg = clk_base +
- (u64)clkc_data->clk_gates[i]->reg;
-
- /* Populate base address for muxes */
- for (i = 0; i < clkc_data->clk_muxes_count; i++)
- clkc_data->clk_muxes[i]->reg = clk_base +
- (u64)clkc_data->clk_muxes[i]->reg;
+ /*
+ * FIXME: HHI registers should be accessed through
+ * the appropriate system controller. This is required because
+ * there is more than just clocks in this register space
+ *
+ * This fallback method is only provided temporarily until
+ * all the platform DTs are properly using the syscon node
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ clk_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!clk_base) {
+ dev_err(dev, "Unable to map clk base\n");
+ return -ENXIO;
+ }
+
+ map = devm_regmap_init_mmio(dev, clk_base,
+ &clkc_regmap_config);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+ }
- /* Populate base address for dividers */
- for (i = 0; i < clkc_data->clk_dividers_count; i++)
- clkc_data->clk_dividers[i]->reg = clk_base +
- (u64)clkc_data->clk_dividers[i]->reg;
+ /* Populate regmap for the common regmap backed clocks */
+ for (i = 0; i < ARRAY_SIZE(gx_clk_regmaps); i++)
+ gx_clk_regmaps[i]->map = map;
- /* Populate base address for the audio dividers */
- for (i = 0; i < clkc_data->clk_audio_dividers_count; i++)
- clkc_data->clk_audio_dividers[i]->base = clk_base;
+ /* Populate regmap for soc specific clocks */
+ for (i = 0; i < clkc_data->regmap_clks_count; i++)
+ clkc_data->regmap_clks[i]->map = map;
- /*
- * register all clks
- */
- for (clkid = 0; clkid < clkc_data->hw_onecell_data->num; clkid++) {
+ /* Register all clks */
+ for (i = 0; i < clkc_data->hw_onecell_data->num; i++) {
/* array might be sparse */
- if (!clkc_data->hw_onecell_data->hws[clkid])
+ if (!clkc_data->hw_onecell_data->hws[i])
continue;
ret = devm_clk_hw_register(dev,
- clkc_data->hw_onecell_data->hws[clkid]);
- if (ret)
- goto iounmap;
+ clkc_data->hw_onecell_data->hws[i]);
+ if (ret) {
+ dev_err(dev, "Clock registration failed\n");
+ return ret;
+ }
}
- return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
- clkc_data->hw_onecell_data);
-
-iounmap:
- iounmap(clk_base);
- return ret;
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ clkc_data->hw_onecell_data);
}
static struct platform_driver gxbb_driver = {
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
index aee6fbba2004..9febf3f03739 100644
--- a/drivers/clk/meson/gxbb.h
+++ b/drivers/clk/meson/gxbb.h
@@ -194,8 +194,18 @@
#define CLKID_VPU_1_DIV 130
#define CLKID_VAPB_0_DIV 134
#define CLKID_VAPB_1_DIV 137
-
-#define NR_CLKS 141
+#define CLKID_HDMI_PLL_PRE_MULT 141
+#define CLKID_MPLL0_DIV 142
+#define CLKID_MPLL1_DIV 143
+#define CLKID_MPLL2_DIV 144
+#define CLKID_MPLL_PREDIV 145
+#define CLKID_FCLK_DIV2_DIV 146
+#define CLKID_FCLK_DIV3_DIV 147
+#define CLKID_FCLK_DIV4_DIV 148
+#define CLKID_FCLK_DIV5_DIV 149
+#define CLKID_FCLK_DIV7_DIV 150
+
+#define NR_CLKS 151
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/gxbb-clkc.h>
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 3ffea80c1308..cc2992493e0b 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -23,14 +23,16 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
-#include <linux/init.h>
+#include <linux/regmap.h>
#include "clkc.h"
#include "meson8b.h"
+#include "clk-regmap.h"
static DEFINE_SPINLOCK(meson_clk_lock);
@@ -97,20 +99,6 @@ static const struct pll_rate_table sys_pll_rate_table[] = {
{ /* sentinel */ },
};
-static const struct clk_div_table cpu_div_table[] = {
- { .val = 1, .div = 1 },
- { .val = 2, .div = 2 },
- { .val = 3, .div = 3 },
- { .val = 2, .div = 4 },
- { .val = 3, .div = 6 },
- { .val = 4, .div = 8 },
- { .val = 5, .div = 10 },
- { .val = 6, .div = 12 },
- { .val = 7, .div = 14 },
- { .val = 8, .div = 16 },
- { /* sentinel */ },
-};
-
static struct clk_fixed_rate meson8b_xtal = {
.fixed_rate = 24000000,
.hw.init = &(struct clk_init_data){
@@ -120,23 +108,39 @@ static struct clk_fixed_rate meson8b_xtal = {
},
};
-static struct meson_clk_pll meson8b_fixed_pll = {
- .m = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 0,
- .width = 9,
- },
- .n = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 9,
- .width = 5,
- },
- .od = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 16,
- .width = 2,
+static struct clk_regmap meson8b_fixed_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .frac = {
+ .reg_off = HHI_MPLL_CNTL2,
+ .shift = 0,
+ .width = 12,
+ },
+ .l = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "fixed_pll",
.ops = &meson_clk_pll_ro_ops,
@@ -146,23 +150,34 @@ static struct meson_clk_pll meson8b_fixed_pll = {
},
};
-static struct meson_clk_pll meson8b_vid_pll = {
- .m = {
- .reg_off = HHI_VID_PLL_CNTL,
- .shift = 0,
- .width = 9,
+static struct clk_regmap meson8b_vid_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_VID_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_VID_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_VID_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .l = {
+ .reg_off = HHI_VID_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_VID_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
},
- .n = {
- .reg_off = HHI_VID_PLL_CNTL,
- .shift = 9,
- .width = 5,
- },
- .od = {
- .reg_off = HHI_VID_PLL_CNTL,
- .shift = 16,
- .width = 2,
- },
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "vid_pll",
.ops = &meson_clk_pll_ro_ops,
@@ -172,213 +187,317 @@ static struct meson_clk_pll meson8b_vid_pll = {
},
};
-static struct meson_clk_pll meson8b_sys_pll = {
- .m = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 0,
- .width = 9,
- },
- .n = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 9,
- .width = 5,
+static struct clk_regmap meson8b_sys_pll = {
+ .data = &(struct meson_clk_pll_data){
+ .m = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .l = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = sys_pll_rate_table,
},
- .od = {
- .reg_off = HHI_SYS_PLL_CNTL,
- .shift = 16,
- .width = 2,
- },
- .rate_table = sys_pll_rate_table,
- .rate_count = ARRAY_SIZE(sys_pll_rate_table),
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "sys_pll",
- .ops = &meson_clk_pll_ops,
+ .ops = &meson_clk_pll_ro_ops,
.parent_names = (const char *[]){ "xtal" },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
},
};
-static struct clk_fixed_factor meson8b_fclk_div2 = {
+static struct clk_fixed_factor meson8b_fclk_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div2",
+ .name = "fclk_div2_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor meson8b_fclk_div3 = {
+static struct clk_regmap meson8b_fclk_div2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 27,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div2_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor meson8b_fclk_div3_div = {
.mult = 1,
.div = 3,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div3",
+ .name = "fclk_div_div3",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor meson8b_fclk_div4 = {
+static struct clk_regmap meson8b_fclk_div3 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 28,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div3",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div3_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor meson8b_fclk_div4_div = {
.mult = 1,
.div = 4,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div4",
+ .name = "fclk_div4_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor meson8b_fclk_div5 = {
+static struct clk_regmap meson8b_fclk_div4 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 29,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div4",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div4_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor meson8b_fclk_div5_div = {
.mult = 1,
.div = 5,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div5",
+ .name = "fclk_div5_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct clk_fixed_factor meson8b_fclk_div7 = {
+static struct clk_regmap meson8b_fclk_div5 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 30,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div5",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div5_div" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor meson8b_fclk_div7_div = {
.mult = 1,
.div = 7,
.hw.init = &(struct clk_init_data){
- .name = "fclk_div7",
+ .name = "fclk_div7_div",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
-static struct meson_clk_mpll meson8b_mpll0 = {
- .sdm = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 0,
- .width = 14,
+static struct clk_regmap meson8b_fclk_div7 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL6,
+ .bit_idx = 31,
},
- .sdm_en = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 15,
- .width = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div7",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div7_div" },
+ .num_parents = 1,
},
- .n2 = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 16,
- .width = 9,
+};
+
+static struct clk_regmap meson8b_mpll_prediv = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MPLL_CNTL5,
+ .shift = 12,
+ .width = 1,
},
- .en = {
- .reg_off = HHI_MPLL_CNTL7,
- .shift = 14,
- .width = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll_prediv",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
},
- .ssen = {
- .reg_off = HHI_MPLL_CNTL,
- .shift = 25,
- .width = 1,
+};
+
+static struct clk_regmap meson8b_mpll0_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 15,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 16,
+ .width = 9,
+ },
+ .ssen = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 25,
+ .width = 1,
+ },
+ .lock = &meson_clk_lock,
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
- .name = "mpll0",
+ .name = "mpll0_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_names = (const char *[]){ "mpll_prediv" },
.num_parents = 1,
},
};
-static struct meson_clk_mpll meson8b_mpll1 = {
- .sdm = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 0,
- .width = 14,
+static struct clk_regmap meson8b_mpll0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL7,
+ .bit_idx = 14,
},
- .sdm_en = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 15,
- .width = 1,
- },
- .n2 = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 16,
- .width = 9,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
- .en = {
- .reg_off = HHI_MPLL_CNTL8,
- .shift = 14,
- .width = 1,
+};
+
+static struct clk_regmap meson8b_mpll1_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL8,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL8,
+ .shift = 15,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL8,
+ .shift = 16,
+ .width = 9,
+ },
+ .lock = &meson_clk_lock,
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
- .name = "mpll1",
+ .name = "mpll1_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_names = (const char *[]){ "mpll_prediv" },
.num_parents = 1,
},
};
-static struct meson_clk_mpll meson8b_mpll2 = {
- .sdm = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 0,
- .width = 14,
- },
- .sdm_en = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 15,
- .width = 1,
+static struct clk_regmap meson8b_mpll1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL8,
+ .bit_idx = 14,
},
- .n2 = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 16,
- .width = 9,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
- .en = {
- .reg_off = HHI_MPLL_CNTL9,
- .shift = 14,
- .width = 1,
+};
+
+static struct clk_regmap meson8b_mpll2_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL9,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
+ .reg_off = HHI_MPLL_CNTL9,
+ .shift = 15,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL9,
+ .shift = 16,
+ .width = 9,
+ },
+ .lock = &meson_clk_lock,
},
- .lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
- .name = "mpll2",
+ .name = "mpll2_div",
.ops = &meson_clk_mpll_ops,
- .parent_names = (const char *[]){ "fixed_pll" },
+ .parent_names = (const char *[]){ "mpll_prediv" },
.num_parents = 1,
},
};
-/*
- * FIXME cpu clocks and the legacy composite clocks (e.g. clk81) are both PLL
- * post-dividers and should be modeled with their respective PLLs via the
- * forthcoming coordinated clock rates feature
- */
-static struct meson_clk_cpu meson8b_cpu_clk = {
- .reg_off = HHI_SYS_CPU_CLK_CNTL1,
- .div_table = cpu_div_table,
- .clk_nb.notifier_call = meson_clk_cpu_notifier_cb,
+static struct clk_regmap meson8b_mpll2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPLL_CNTL9,
+ .bit_idx = 14,
+ },
.hw.init = &(struct clk_init_data){
- .name = "cpu_clk",
- .ops = &meson_clk_cpu_ops,
- .parent_names = (const char *[]){ "sys_pll" },
+ .name = "mpll2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "mpll2_div" },
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
},
};
static u32 mux_table_clk81[] = { 6, 5, 7 };
-
-struct clk_mux meson8b_mpeg_clk_sel = {
- .reg = (void *)HHI_MPEG_CLK_CNTL,
- .mask = 0x7,
- .shift = 12,
- .flags = CLK_MUX_READ_ONLY,
- .table = mux_table_clk81,
- .lock = &meson_clk_lock,
+static struct clk_regmap meson8b_mpeg_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 12,
+ .table = mux_table_clk81,
+ },
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_sel",
- .ops = &clk_mux_ro_ops,
+ .ops = &clk_regmap_mux_ro_ops,
/*
* FIXME bits 14:12 selects from 8 possible parents:
* xtal, 1'b0 (wtf), fclk_div7, mpll_clkout1, mpll_clkout2,
@@ -387,34 +506,136 @@ struct clk_mux meson8b_mpeg_clk_sel = {
.parent_names = (const char *[]){ "fclk_div3", "fclk_div4",
"fclk_div5" },
.num_parents = 3,
- .flags = (CLK_SET_RATE_NO_REPARENT | CLK_IGNORE_UNUSED),
},
};
-struct clk_divider meson8b_mpeg_clk_div = {
- .reg = (void *)HHI_MPEG_CLK_CNTL,
- .shift = 0,
- .width = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap meson8b_mpeg_clk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_div",
- .ops = &clk_divider_ops,
+ .ops = &clk_regmap_divider_ro_ops,
.parent_names = (const char *[]){ "mpeg_clk_sel" },
.num_parents = 1,
- .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED),
},
};
-struct clk_gate meson8b_clk81 = {
- .reg = (void *)HHI_MPEG_CLK_CNTL,
- .bit_idx = 7,
- .lock = &meson_clk_lock,
+static struct clk_regmap meson8b_clk81 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MPEG_CLK_CNTL,
+ .bit_idx = 7,
+ },
.hw.init = &(struct clk_init_data){
.name = "clk81",
- .ops = &clk_gate_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "mpeg_clk_div" },
.num_parents = 1,
- .flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
+ .flags = CLK_IS_CRITICAL,
+ },
+};
+
+static struct clk_regmap meson8b_cpu_in_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x1,
+ .shift = 0,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_in_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ "xtal", "sys_pll" },
+ .num_parents = 2,
+ .flags = (CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT),
+ },
+};
+
+static struct clk_fixed_factor meson8b_cpu_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "cpu_in_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor meson8b_cpu_div3 = {
+ .mult = 1,
+ .div = 3,
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_div3",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "cpu_in_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct clk_div_table cpu_scale_table[] = {
+ { .val = 2, .div = 4 },
+ { .val = 3, .div = 6 },
+ { .val = 4, .div = 8 },
+ { .val = 5, .div = 10 },
+ { .val = 6, .div = 12 },
+ { .val = 7, .div = 14 },
+ { .val = 8, .div = 16 },
+ { /* sentinel */ },
+};
+
+static struct clk_regmap meson8b_cpu_scale_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .shift = 20,
+ .width = 9,
+ .table = cpu_scale_table,
+ .flags = CLK_DIVIDER_ALLOW_ZERO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_scale_div",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "cpu_in_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_cpu_scale_out_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x3,
+ .shift = 2,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_scale_out_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]) { "cpu_in_sel",
+ "cpu_div2",
+ "cpu_div3",
+ "cpu_scale_div" },
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_cpu_clk = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x1,
+ .shift = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ "xtal", "cpu_out_sel" },
+ .num_parents = 2,
+ .flags = (CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT),
},
};
@@ -599,24 +820,26 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
[CLKID_MPLL0] = &meson8b_mpll0.hw,
[CLKID_MPLL1] = &meson8b_mpll1.hw,
[CLKID_MPLL2] = &meson8b_mpll2.hw,
+ [CLKID_MPLL0_DIV] = &meson8b_mpll0_div.hw,
+ [CLKID_MPLL1_DIV] = &meson8b_mpll1_div.hw,
+ [CLKID_MPLL2_DIV] = &meson8b_mpll2_div.hw,
+ [CLKID_CPU_IN_SEL] = &meson8b_cpu_in_sel.hw,
+ [CLKID_CPU_DIV2] = &meson8b_cpu_div2.hw,
+ [CLKID_CPU_DIV3] = &meson8b_cpu_div3.hw,
+ [CLKID_CPU_SCALE_DIV] = &meson8b_cpu_scale_div.hw,
+ [CLKID_CPU_SCALE_OUT_SEL] = &meson8b_cpu_scale_out_sel.hw,
+ [CLKID_MPLL_PREDIV] = &meson8b_mpll_prediv.hw,
+ [CLKID_FCLK_DIV2_DIV] = &meson8b_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV3_DIV] = &meson8b_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV4_DIV] = &meson8b_fclk_div4_div.hw,
+ [CLKID_FCLK_DIV5_DIV] = &meson8b_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
[CLK_NR_CLKS] = NULL,
},
.num = CLK_NR_CLKS,
};
-static struct meson_clk_pll *const meson8b_clk_plls[] = {
- &meson8b_fixed_pll,
- &meson8b_vid_pll,
- &meson8b_sys_pll,
-};
-
-static struct meson_clk_mpll *const meson8b_clk_mplls[] = {
- &meson8b_mpll0,
- &meson8b_mpll1,
- &meson8b_mpll2,
-};
-
-static struct clk_gate *const meson8b_clk_gates[] = {
+static struct clk_regmap *const meson8b_clk_regmaps[] = {
&meson8b_clk81,
&meson8b_ddr,
&meson8b_dos,
@@ -695,14 +918,27 @@ static struct clk_gate *const meson8b_clk_gates[] = {
&meson8b_ao_ahb_sram,
&meson8b_ao_ahb_bus,
&meson8b_ao_iface,
-};
-
-static struct clk_mux *const meson8b_clk_muxes[] = {
- &meson8b_mpeg_clk_sel,
-};
-
-static struct clk_divider *const meson8b_clk_dividers[] = {
&meson8b_mpeg_clk_div,
+ &meson8b_mpeg_clk_sel,
+ &meson8b_mpll0,
+ &meson8b_mpll1,
+ &meson8b_mpll2,
+ &meson8b_mpll0_div,
+ &meson8b_mpll1_div,
+ &meson8b_mpll2_div,
+ &meson8b_fixed_pll,
+ &meson8b_vid_pll,
+ &meson8b_sys_pll,
+ &meson8b_cpu_in_sel,
+ &meson8b_cpu_scale_div,
+ &meson8b_cpu_scale_out_sel,
+ &meson8b_cpu_clk,
+ &meson8b_mpll_prediv,
+ &meson8b_fclk_div2,
+ &meson8b_fclk_div3,
+ &meson8b_fclk_div4,
+ &meson8b_fclk_div5,
+ &meson8b_fclk_div7,
};
static const struct meson8b_clk_reset_line {
@@ -804,82 +1040,45 @@ static const struct reset_control_ops meson8b_clk_reset_ops = {
.deassert = meson8b_clk_reset_deassert,
};
+static const struct regmap_config clkc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
static int meson8b_clkc_probe(struct platform_device *pdev)
{
- int ret, clkid, i;
- struct clk_hw *parent_hw;
- struct clk *parent_clk;
+ int ret, i;
struct device *dev = &pdev->dev;
+ struct regmap *map;
if (!clk_base)
return -ENXIO;
- /* Populate base address for PLLs */
- for (i = 0; i < ARRAY_SIZE(meson8b_clk_plls); i++)
- meson8b_clk_plls[i]->base = clk_base;
-
- /* Populate base address for MPLLs */
- for (i = 0; i < ARRAY_SIZE(meson8b_clk_mplls); i++)
- meson8b_clk_mplls[i]->base = clk_base;
-
- /* Populate the base address for CPU clk */
- meson8b_cpu_clk.base = clk_base;
-
- /* Populate base address for gates */
- for (i = 0; i < ARRAY_SIZE(meson8b_clk_gates); i++)
- meson8b_clk_gates[i]->reg = clk_base +
- (u32)meson8b_clk_gates[i]->reg;
-
- /* Populate base address for muxes */
- for (i = 0; i < ARRAY_SIZE(meson8b_clk_muxes); i++)
- meson8b_clk_muxes[i]->reg = clk_base +
- (u32)meson8b_clk_muxes[i]->reg;
+ map = devm_regmap_init_mmio(dev, clk_base, &clkc_regmap_config);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
- /* Populate base address for dividers */
- for (i = 0; i < ARRAY_SIZE(meson8b_clk_dividers); i++)
- meson8b_clk_dividers[i]->reg = clk_base +
- (u32)meson8b_clk_dividers[i]->reg;
+ /* Populate regmap for the regmap backed clocks */
+ for (i = 0; i < ARRAY_SIZE(meson8b_clk_regmaps); i++)
+ meson8b_clk_regmaps[i]->map = map;
/*
* register all clks
* CLKID_UNUSED = 0, so skip it and start with CLKID_XTAL = 1
*/
- for (clkid = CLKID_XTAL; clkid < CLK_NR_CLKS; clkid++) {
+ for (i = CLKID_XTAL; i < CLK_NR_CLKS; i++) {
/* array might be sparse */
- if (!meson8b_hw_onecell_data.hws[clkid])
+ if (!meson8b_hw_onecell_data.hws[i])
continue;
- /* FIXME convert to devm_clk_register */
- ret = devm_clk_hw_register(dev, meson8b_hw_onecell_data.hws[clkid]);
+ ret = devm_clk_hw_register(dev, meson8b_hw_onecell_data.hws[i]);
if (ret)
return ret;
}
- /*
- * Register CPU clk notifier
- *
- * FIXME this is wrong for a lot of reasons. First, the muxes should be
- * struct clk_hw objects. Second, we shouldn't program the muxes in
- * notifier handlers. The tricky programming sequence will be handled
- * by the forthcoming coordinated clock rates mechanism once that
- * feature is released.
- *
- * Furthermore, looking up the parent this way is terrible. At some
- * point we will stop allocating a default struct clk when registering
- * a new clk_hw, and this hack will no longer work. Releasing the ccr
- * feature before that time solves the problem :-)
- */
- parent_hw = clk_hw_get_parent(&meson8b_cpu_clk.hw);
- parent_clk = parent_hw->clk;
- ret = clk_notifier_register(parent_clk, &meson8b_cpu_clk.clk_nb);
- if (ret) {
- pr_err("%s: failed to register clock notifier for cpu_clk\n",
- __func__);
- return ret;
- }
-
- return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
- &meson8b_hw_onecell_data);
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &meson8b_hw_onecell_data);
}
static const struct of_device_id meson8b_clkc_match_table[] = {
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
index 2eaf8a52e7dd..6e414bd36981 100644
--- a/drivers/clk/meson/meson8b.h
+++ b/drivers/clk/meson/meson8b.h
@@ -69,7 +69,22 @@
* will remain defined here.
*/
-#define CLK_NR_CLKS 96
+#define CLKID_MPLL0_DIV 96
+#define CLKID_MPLL1_DIV 97
+#define CLKID_MPLL2_DIV 98
+#define CLKID_CPU_IN_SEL 99
+#define CLKID_CPU_DIV2 100
+#define CLKID_CPU_DIV3 101
+#define CLKID_CPU_SCALE_DIV 102
+#define CLKID_CPU_SCALE_OUT_SEL 103
+#define CLKID_MPLL_PREDIV 104
+#define CLKID_FCLK_DIV2_DIV 105
+#define CLKID_FCLK_DIV3_DIV 106
+#define CLKID_FCLK_DIV4_DIV 107
+#define CLKID_FCLK_DIV5_DIV 108
+#define CLKID_FCLK_DIV7_DIV 109
+
+#define CLK_NR_CLKS 110
/*
* include the CLKID and RESETID that have
diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c
index 394aa6f03f01..9ff4ea63932d 100644
--- a/drivers/clk/mvebu/armada-38x.c
+++ b/drivers/clk/mvebu/armada-38x.c
@@ -46,11 +46,11 @@ static u32 __init armada_38x_get_tclk_freq(void __iomem *sar)
}
static const u32 armada_38x_cpu_frequencies[] __initconst = {
- 0, 0, 0, 0,
- 1066 * 1000 * 1000, 0, 0, 0,
+ 666 * 1000 * 1000, 0, 800 * 1000 * 1000, 0,
+ 1066 * 1000 * 1000, 0, 1200 * 1000 * 1000, 0,
1332 * 1000 * 1000, 0, 0, 0,
1600 * 1000 * 1000, 0, 0, 0,
- 1866 * 1000 * 1000,
+ 1866 * 1000 * 1000, 0, 0, 2000 * 1000 * 1000,
};
static u32 __init armada_38x_get_cpu_freq(void __iomem *sar)
@@ -76,11 +76,11 @@ static const struct coreclk_ratio armada_38x_coreclk_ratios[] __initconst = {
};
static const int armada_38x_cpu_l2_ratios[32][2] __initconst = {
- {0, 1}, {0, 1}, {0, 1}, {0, 1},
- {1, 2}, {0, 1}, {0, 1}, {0, 1},
- {1, 2}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {1, 2}, {0, 1},
+ {1, 2}, {0, 1}, {1, 2}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {1, 2},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
@@ -91,7 +91,7 @@ static const int armada_38x_cpu_ddr_ratios[32][2] __initconst = {
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
- {1, 2}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {7, 15},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
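
Working through the new armada-38x tables above as a quick cross-check: a SAR CPU
frequency field of 6 now decodes to 1200 MHz with an L2 ratio of {1, 2}, and the new
top entry 19 decodes to 2000 MHz:

    SAR = 6  : CPU = 1200 MHz, L2 = 1200 * 1/2 = 600 MHz
    SAR = 19 : CPU = 2000 MHz, L2 = 2000 * 1/2 = 1000 MHz,
               DDR = 2000 * 7/15 ~ 933 MHz (assuming the new {7, 15} row pairs with
               this entry; the DDR table declaration is not visible in this hunk)
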
diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
index ca9a0a536174..75bf7b8f282f 100644
--- a/drivers/clk/mvebu/cp110-system-controller.c
+++ b/drivers/clk/mvebu/cp110-system-controller.c
@@ -13,18 +13,17 @@
/*
* CP110 has 6 core clocks:
*
- * - APLL (1 Ghz)
- * - PPv2 core (1/3 APLL)
- * - EIP (1/2 APLL)
- * - Core (1/2 EIP)
- * - SDIO (2/5 APLL)
+ * - PLL0 (1 GHz)
+ * - PPv2 core (1/3 PLL0)
+ * - x2 Core (1/2 PLL0)
+ * - Core (1/2 x2 Core)
+ * - SDIO (2/5 PLL0)
*
* - NAND clock, which is either:
* - Equal to SDIO clock
- * - 2/5 APLL
+ * - 2/5 PLL0
*
- * CP110 has 32 gatable clocks, for the various peripherals in the
- * IP. They have fairly complicated parent/child relationships.
+ * CP110 has 32 gatable clocks, for the various peripherals in the IP.
*/
#define pr_fmt(fmt) "cp110-system-controller: " fmt
@@ -53,9 +52,9 @@ enum {
#define CP110_CLK_NUM \
(CP110_MAX_CORE_CLOCKS + CP110_MAX_GATABLE_CLOCKS)
-#define CP110_CORE_APLL 0
+#define CP110_CORE_PLL0 0
#define CP110_CORE_PPV2 1
-#define CP110_CORE_EIP 2
+#define CP110_CORE_X2CORE 2
#define CP110_CORE_CORE 3
#define CP110_CORE_NAND 4
#define CP110_CORE_SDIO 5
@@ -237,7 +236,7 @@ static int cp110_syscon_common_probe(struct platform_device *pdev,
struct regmap *regmap;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- const char *ppv2_name, *apll_name, *core_name, *eip_name, *nand_name,
+ const char *ppv2_name, *pll0_name, *core_name, *x2core_name, *nand_name,
*sdio_name;
struct clk_hw_onecell_data *cp110_clk_data;
struct clk_hw *hw, **cp110_clks;
@@ -263,20 +262,20 @@ static int cp110_syscon_common_probe(struct platform_device *pdev,
cp110_clks = cp110_clk_data->hws;
cp110_clk_data->num = CP110_CLK_NUM;
- /* Register the APLL which is the root of the hw tree */
- apll_name = cp110_unique_name(dev, syscon_node, "apll");
- hw = clk_hw_register_fixed_rate(NULL, apll_name, NULL, 0,
+ /* Register the PLL0 which is the root of the hw tree */
+ pll0_name = cp110_unique_name(dev, syscon_node, "pll0");
+ hw = clk_hw_register_fixed_rate(NULL, pll0_name, NULL, 0,
1000 * 1000 * 1000);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
- goto fail_apll;
+ goto fail_pll0;
}
- cp110_clks[CP110_CORE_APLL] = hw;
+ cp110_clks[CP110_CORE_PLL0] = hw;
- /* PPv2 is APLL/3 */
+ /* PPv2 is PLL0/3 */
ppv2_name = cp110_unique_name(dev, syscon_node, "ppv2-core");
- hw = clk_hw_register_fixed_factor(NULL, ppv2_name, apll_name, 0, 1, 3);
+ hw = clk_hw_register_fixed_factor(NULL, ppv2_name, pll0_name, 0, 1, 3);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail_ppv2;
@@ -284,30 +283,32 @@ static int cp110_syscon_common_probe(struct platform_device *pdev,
cp110_clks[CP110_CORE_PPV2] = hw;
- /* EIP clock is APLL/2 */
- eip_name = cp110_unique_name(dev, syscon_node, "eip");
- hw = clk_hw_register_fixed_factor(NULL, eip_name, apll_name, 0, 1, 2);
+ /* X2CORE clock is PLL0/2 */
+ x2core_name = cp110_unique_name(dev, syscon_node, "x2core");
+ hw = clk_hw_register_fixed_factor(NULL, x2core_name, pll0_name,
+ 0, 1, 2);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail_eip;
}
- cp110_clks[CP110_CORE_EIP] = hw;
+ cp110_clks[CP110_CORE_X2CORE] = hw;
- /* Core clock is EIP/2 */
+ /* Core clock is X2CORE/2 */
core_name = cp110_unique_name(dev, syscon_node, "core");
- hw = clk_hw_register_fixed_factor(NULL, core_name, eip_name, 0, 1, 2);
+ hw = clk_hw_register_fixed_factor(NULL, core_name, x2core_name,
+ 0, 1, 2);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail_core;
}
cp110_clks[CP110_CORE_CORE] = hw;
- /* NAND can be either APLL/2.5 or core clock */
+ /* NAND can be either PLL0/2.5 or core clock */
nand_name = cp110_unique_name(dev, syscon_node, "nand-core");
if (nand_clk_ctrl & NF_CLOCK_SEL_400_MASK)
hw = clk_hw_register_fixed_factor(NULL, nand_name,
- apll_name, 0, 2, 5);
+ pll0_name, 0, 2, 5);
else
hw = clk_hw_register_fixed_factor(NULL, nand_name,
core_name, 0, 1, 1);
@@ -318,10 +319,10 @@ static int cp110_syscon_common_probe(struct platform_device *pdev,
cp110_clks[CP110_CORE_NAND] = hw;
- /* SDIO clock is APLL/2.5 */
+ /* SDIO clock is PLL0/2.5 */
sdio_name = cp110_unique_name(dev, syscon_node, "sdio-core");
hw = clk_hw_register_fixed_factor(NULL, sdio_name,
- apll_name, 0, 2, 5);
+ pll0_name, 0, 2, 5);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail_sdio;
@@ -341,40 +342,23 @@ static int cp110_syscon_common_probe(struct platform_device *pdev,
continue;
switch (i) {
- case CP110_GATE_AUDIO:
- case CP110_GATE_COMM_UNIT:
- case CP110_GATE_EIP150:
- case CP110_GATE_EIP197:
- case CP110_GATE_SLOW_IO:
- parent = gate_name[CP110_GATE_MAIN];
- break;
- case CP110_GATE_MG:
- parent = gate_name[CP110_GATE_MG_CORE];
- break;
case CP110_GATE_NAND:
parent = nand_name;
break;
+ case CP110_GATE_MG:
+ case CP110_GATE_GOP_DP:
case CP110_GATE_PPV2:
parent = ppv2_name;
break;
case CP110_GATE_SDIO:
parent = sdio_name;
break;
- case CP110_GATE_GOP_DP:
- parent = gate_name[CP110_GATE_SDMMC_GOP];
- break;
- case CP110_GATE_XOR1:
- case CP110_GATE_XOR0:
- case CP110_GATE_PCIE_X1_0:
- case CP110_GATE_PCIE_X1_1:
+ case CP110_GATE_MAIN:
+ case CP110_GATE_PCIE_XOR:
case CP110_GATE_PCIE_X4:
- parent = gate_name[CP110_GATE_PCIE_XOR];
- break;
- case CP110_GATE_SATA:
- case CP110_GATE_USB3H0:
- case CP110_GATE_USB3H1:
- case CP110_GATE_USB3DEV:
- parent = gate_name[CP110_GATE_SATA_USB];
+ case CP110_GATE_EIP150:
+ case CP110_GATE_EIP197:
+ parent = x2core_name;
break;
default:
parent = core_name;
@@ -413,12 +397,12 @@ fail_sdio:
fail_nand:
clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_CORE]);
fail_core:
- clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_EIP]);
+ clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_X2CORE]);
fail_eip:
clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_PPV2]);
fail_ppv2:
- clk_hw_unregister_fixed_rate(cp110_clks[CP110_CORE_APLL]);
-fail_apll:
+ clk_hw_unregister_fixed_rate(cp110_clks[CP110_CORE_PLL0]);
+fail_pll0:
return ret;
}
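
With PLL0 fixed at 1 GHz, the renamed CP110 core clocks above work out to the
following rates (simple arithmetic from the fixed-factor registrations, shown here
only as a cross-check):

    PPv2 core = 1000 MHz / 3   ~ 333 MHz
    x2 Core   = 1000 MHz / 2   = 500 MHz
    Core      =  500 MHz / 2   = 250 MHz
    SDIO      = 1000 MHz * 2/5 = 400 MHz
    NAND      = 400 MHz or the Core rate, depending on NF_CLOCK_SEL_400_MASK
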
diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
index f5d815f577e0..5eeecee17b69 100644
--- a/drivers/clk/nxp/clk-lpc32xx.c
+++ b/drivers/clk/nxp/clk-lpc32xx.c
@@ -67,6 +67,7 @@
#define LPC32XX_USB_CLK_STS 0xF8
static struct regmap_config lpc32xx_scb_regmap_config = {
+ .name = "scb",
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c
index 4e9b8c2c8980..1ee75a5e93f4 100644
--- a/drivers/clk/qcom/clk-regmap-divider.c
+++ b/drivers/clk/qcom/clk-regmap-divider.c
@@ -28,22 +28,14 @@ static long div_round_ro_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_regmap_div *divider = to_clk_regmap_div(hw);
struct clk_regmap *clkr = &divider->clkr;
- u32 div;
- struct clk_hw *hw_parent = clk_hw_get_parent(hw);
-
- regmap_read(clkr->regmap, divider->reg, &div);
- div >>= divider->shift;
- div &= BIT(divider->width) - 1;
- div += 1;
-
- if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
- if (!hw_parent)
- return -EINVAL;
+ u32 val;
- *prate = clk_hw_round_rate(hw_parent, rate * div);
- }
+ regmap_read(clkr->regmap, divider->reg, &val);
+ val >>= divider->shift;
+ val &= BIT(divider->width) - 1;
- return DIV_ROUND_UP_ULL((u64)*prate, div);
+ return divider_ro_round_rate(hw, rate, prate, NULL, divider->width,
+ CLK_DIVIDER_ROUND_CLOSEST, val);
}
static long div_round_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
index c60f61b10c7f..b94981447664 100644
--- a/drivers/clk/qcom/clk-rpm.c
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -29,6 +29,7 @@
#define QCOM_RPM_MISC_CLK_TYPE 0x306b6c63
#define QCOM_RPM_SCALING_ENABLE_ID 0x2
+#define QCOM_RPM_XO_MODE_ON 0x2
#define DEFINE_CLK_RPM(_platform, _name, _active, r_id) \
static struct clk_rpm _platform##_##_active; \
@@ -56,6 +57,18 @@
}, \
}
+#define DEFINE_CLK_RPM_XO_BUFFER(_platform, _name, _active, offset) \
+ static struct clk_rpm _platform##_##_name = { \
+ .rpm_clk_id = QCOM_RPM_CXO_BUFFERS, \
+ .xo_offset = (offset), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_xo_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "cxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
#define DEFINE_CLK_RPM_FIXED(_platform, _name, _active, r_id, r) \
static struct clk_rpm _platform##_##_name = { \
.rpm_clk_id = (r_id), \
@@ -126,8 +139,11 @@
#define to_clk_rpm(_hw) container_of(_hw, struct clk_rpm, hw)
+struct rpm_cc;
+
struct clk_rpm {
const int rpm_clk_id;
+ const int xo_offset;
const bool active_only;
unsigned long rate;
bool enabled;
@@ -135,12 +151,15 @@ struct clk_rpm {
struct clk_rpm *peer;
struct clk_hw hw;
struct qcom_rpm *rpm;
+ struct rpm_cc *rpm_cc;
};
struct rpm_cc {
struct qcom_rpm *rpm;
struct clk_rpm **clks;
size_t num_clks;
+ u32 xo_buffer_value;
+ struct mutex xo_lock;
};
struct rpm_clk_desc {
@@ -159,7 +178,8 @@ static int clk_rpm_handoff(struct clk_rpm *r)
* The vendor tree simply reads the status for this
* RPM clock.
*/
- if (r->rpm_clk_id == QCOM_RPM_PLL_4)
+ if (r->rpm_clk_id == QCOM_RPM_PLL_4 ||
+ r->rpm_clk_id == QCOM_RPM_CXO_BUFFERS)
return 0;
ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
@@ -288,6 +308,46 @@ out:
mutex_unlock(&rpm_clk_lock);
}
+static int clk_rpm_xo_prepare(struct clk_hw *hw)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+ struct rpm_cc *rcc = r->rpm_cc;
+ int ret, clk_id = r->rpm_clk_id;
+ u32 value;
+
+ mutex_lock(&rcc->xo_lock);
+
+ value = rcc->xo_buffer_value | (QCOM_RPM_XO_MODE_ON << r->xo_offset);
+ ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, clk_id, &value, 1);
+ if (!ret) {
+ r->enabled = true;
+ rcc->xo_buffer_value = value;
+ }
+
+ mutex_unlock(&rcc->xo_lock);
+
+ return ret;
+}
+
+static void clk_rpm_xo_unprepare(struct clk_hw *hw)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+ struct rpm_cc *rcc = r->rpm_cc;
+ int ret, clk_id = r->rpm_clk_id;
+ u32 value;
+
+ mutex_lock(&rcc->xo_lock);
+
+ value = rcc->xo_buffer_value & ~(QCOM_RPM_XO_MODE_ON << r->xo_offset);
+ ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, clk_id, &value, 1);
+ if (!ret) {
+ r->enabled = false;
+ rcc->xo_buffer_value = value;
+ }
+
+ mutex_unlock(&rcc->xo_lock);
+}
+
static int clk_rpm_fixed_prepare(struct clk_hw *hw)
{
struct clk_rpm *r = to_clk_rpm(hw);
@@ -378,6 +438,11 @@ static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw,
return r->rate;
}
+static const struct clk_ops clk_rpm_xo_ops = {
+ .prepare = clk_rpm_xo_prepare,
+ .unprepare = clk_rpm_xo_unprepare,
+};
+
static const struct clk_ops clk_rpm_fixed_ops = {
.prepare = clk_rpm_fixed_prepare,
.unprepare = clk_rpm_fixed_unprepare,
@@ -449,6 +514,11 @@ DEFINE_CLK_RPM(apq8064, mmfpb_clk, mmfpb_a_clk, QCOM_RPM_MMFPB_CLK);
DEFINE_CLK_RPM(apq8064, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
DEFINE_CLK_RPM(apq8064, qdss_clk, qdss_a_clk, QCOM_RPM_QDSS_CLK);
+DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_d0_clk, xo_d0_a_clk, 0);
+DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_d1_clk, xo_d1_a_clk, 8);
+DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_a0_clk, xo_a0_a_clk, 16);
+DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_a1_clk, xo_a1_a_clk, 24);
+DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_a2_clk, xo_a2_a_clk, 28);
static struct clk_rpm *apq8064_clks[] = {
[RPM_APPS_FABRIC_CLK] = &apq8064_afab_clk,
@@ -469,6 +539,11 @@ static struct clk_rpm *apq8064_clks[] = {
[RPM_SFPB_A_CLK] = &apq8064_sfpb_a_clk,
[RPM_QDSS_CLK] = &apq8064_qdss_clk,
[RPM_QDSS_A_CLK] = &apq8064_qdss_a_clk,
+ [RPM_XO_D0] = &apq8064_xo_d0_clk,
+ [RPM_XO_D1] = &apq8064_xo_d1_clk,
+ [RPM_XO_A0] = &apq8064_xo_a0_clk,
+ [RPM_XO_A1] = &apq8064_xo_a1_clk,
+ [RPM_XO_A2] = &apq8064_xo_a2_clk,
};
static const struct rpm_clk_desc rpm_clk_apq8064 = {
@@ -526,12 +601,14 @@ static int rpm_clk_probe(struct platform_device *pdev)
rcc->clks = rpm_clks;
rcc->num_clks = num_clks;
+ mutex_init(&rcc->xo_lock);
for (i = 0; i < num_clks; i++) {
if (!rpm_clks[i])
continue;
rpm_clks[i]->rpm = rpm;
+ rpm_clks[i]->rpm_cc = rcc;
ret = clk_rpm_handoff(rpm_clks[i]);
if (ret)
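
The XO buffer support added above packs all five buffers into the single
QCOM_RPM_CXO_BUFFERS word, so prepare/unprepare are read-modify-write updates of the
cached xo_buffer_value under xo_lock. A concrete example, using the offsets defined
above:

    enable  xo_a0_clk:  value |= QCOM_RPM_XO_MODE_ON << 16;    /* ORs in 0x00020000 */
    disable xo_a0_clk:  value &= ~(QCOM_RPM_XO_MODE_ON << 16);

Each buffer only touches its own bits (offsets 0, 8, 16, 24 and 28), so the buffers
can be toggled independently while sharing one RPM resource.
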
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index c26d9007bfc4..850c02a52248 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -686,7 +686,7 @@ static int rpm_smd_clk_probe(struct platform_device *pdev)
goto err;
}
- ret = of_clk_add_hw_provider(pdev->dev.of_node, qcom_smdrpm_clk_hw_get,
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, qcom_smdrpm_clk_hw_get,
rcc);
if (ret)
goto err;
@@ -697,19 +697,12 @@ err:
return ret;
}
-static int rpm_smd_clk_remove(struct platform_device *pdev)
-{
- of_clk_del_provider(pdev->dev.of_node);
- return 0;
-}
-
static struct platform_driver rpm_smd_clk_driver = {
.driver = {
.name = "qcom-clk-smd-rpm",
.of_match_table = rpm_smd_clk_match_table,
},
.probe = rpm_smd_clk_probe,
- .remove = rpm_smd_clk_remove,
};
static int __init rpm_smd_clk_init(void)
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 5d7451209206..3d6452932797 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -2895,7 +2895,7 @@ static struct clk_branch gcc_aggre0_snoc_axi_clk = {
.name = "gcc_aggre0_snoc_axi_clk",
.parent_names = (const char *[]){ "system_noc_clk_src" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -2910,7 +2910,7 @@ static struct clk_branch gcc_aggre0_cnoc_ahb_clk = {
.name = "gcc_aggre0_cnoc_ahb_clk",
.parent_names = (const char *[]){ "config_noc_clk_src" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -2925,7 +2925,7 @@ static struct clk_branch gcc_smmu_aggre0_axi_clk = {
.name = "gcc_smmu_aggre0_axi_clk",
.parent_names = (const char *[]){ "system_noc_clk_src" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -2940,7 +2940,7 @@ static struct clk_branch gcc_smmu_aggre0_ahb_clk = {
.name = "gcc_smmu_aggre0_ahb_clk",
.parent_names = (const char *[]){ "config_noc_clk_src" },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 43b5a89c4b28..ef76c861ec84 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -15,7 +15,9 @@ config CLK_RENESAS
select CLK_R8A7794 if ARCH_R8A7794
select CLK_R8A7795 if ARCH_R8A7795
select CLK_R8A7796 if ARCH_R8A7796
+ select CLK_R8A77965 if ARCH_R8A77965
select CLK_R8A77970 if ARCH_R8A77970
+ select CLK_R8A77980 if ARCH_R8A77980
select CLK_R8A77995 if ARCH_R8A77995
select CLK_SH73A0 if ARCH_SH73A0
@@ -24,12 +26,13 @@ if CLK_RENESAS
config CLK_RENESAS_LEGACY
bool "Legacy DT clock support"
depends on CLK_R8A7790 || CLK_R8A7791 || CLK_R8A7792 || CLK_R8A7794
- default y
help
Enable backward compatibility with old device trees describing a
hierarchical representation of the various CPG and MSTP clocks.
Say Y if you want your kernel to work with old DTBs.
+ It is safe to say N if you use the DTS that is supplied with the
+ current kernel source tree.
# SoC
config CLK_EMEV2
@@ -96,10 +99,18 @@ config CLK_R8A7796
bool "R-Car M3-W clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
+config CLK_R8A77965
+ bool "R-Car M3-N clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN3_CPG
+
config CLK_R8A77970
bool "R-Car V3M clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
+config CLK_R8A77980
+ bool "R-Car V3H clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN3_CPG
+
config CLK_R8A77995
bool "R-Car D3 clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 34c4e0b37afa..6c0f19636e3e 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -14,7 +14,9 @@ obj-$(CONFIG_CLK_R8A7792) += r8a7792-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7794) += r8a7794-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7795) += r8a7795-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7796) += r8a7796-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A77965) += r8a77965-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77970) += r8a77970-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A77980) += r8a77980-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77995) += r8a77995-cpg-mssr.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
diff --git a/drivers/clk/renesas/clk-div6.c b/drivers/clk/renesas/clk-div6.c
index 151336d2ba59..9febbf42c3df 100644
--- a/drivers/clk/renesas/clk-div6.c
+++ b/drivers/clk/renesas/clk-div6.c
@@ -53,9 +53,9 @@ static int cpg_div6_clock_enable(struct clk_hw *hw)
struct div6_clock *clock = to_div6_clock(hw);
u32 val;
- val = (clk_readl(clock->reg) & ~(CPG_DIV6_DIV_MASK | CPG_DIV6_CKSTP))
+ val = (readl(clock->reg) & ~(CPG_DIV6_DIV_MASK | CPG_DIV6_CKSTP))
| CPG_DIV6_DIV(clock->div - 1);
- clk_writel(val, clock->reg);
+ writel(val, clock->reg);
return 0;
}
@@ -65,7 +65,7 @@ static void cpg_div6_clock_disable(struct clk_hw *hw)
struct div6_clock *clock = to_div6_clock(hw);
u32 val;
- val = clk_readl(clock->reg);
+ val = readl(clock->reg);
val |= CPG_DIV6_CKSTP;
/*
* DIV6 clocks require the divisor field to be non-zero when stopping
@@ -75,14 +75,14 @@ static void cpg_div6_clock_disable(struct clk_hw *hw)
*/
if (!(val & CPG_DIV6_DIV_MASK))
val |= CPG_DIV6_DIV_MASK;
- clk_writel(val, clock->reg);
+ writel(val, clock->reg);
}
static int cpg_div6_clock_is_enabled(struct clk_hw *hw)
{
struct div6_clock *clock = to_div6_clock(hw);
- return !(clk_readl(clock->reg) & CPG_DIV6_CKSTP);
+ return !(readl(clock->reg) & CPG_DIV6_CKSTP);
}
static unsigned long cpg_div6_clock_recalc_rate(struct clk_hw *hw,
@@ -122,10 +122,10 @@ static int cpg_div6_clock_set_rate(struct clk_hw *hw, unsigned long rate,
clock->div = div;
- val = clk_readl(clock->reg) & ~CPG_DIV6_DIV_MASK;
+ val = readl(clock->reg) & ~CPG_DIV6_DIV_MASK;
/* Only program the new divisor if the clock isn't stopped. */
if (!(val & CPG_DIV6_CKSTP))
- clk_writel(val | CPG_DIV6_DIV(clock->div - 1), clock->reg);
+ writel(val | CPG_DIV6_DIV(clock->div - 1), clock->reg);
return 0;
}
@@ -139,7 +139,7 @@ static u8 cpg_div6_clock_get_parent(struct clk_hw *hw)
if (clock->src_width == 0)
return 0;
- hw_index = (clk_readl(clock->reg) >> clock->src_shift) &
+ hw_index = (readl(clock->reg) >> clock->src_shift) &
(BIT(clock->src_width) - 1);
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
if (clock->parents[i] == hw_index)
@@ -163,8 +163,8 @@ static int cpg_div6_clock_set_parent(struct clk_hw *hw, u8 index)
mask = ~((BIT(clock->src_width) - 1) << clock->src_shift);
hw_index = clock->parents[index];
- clk_writel((clk_readl(clock->reg) & mask) |
- (hw_index << clock->src_shift), clock->reg);
+ writel((readl(clock->reg) & mask) | (hw_index << clock->src_shift),
+ clock->reg);
return 0;
}
@@ -241,7 +241,7 @@ struct clk * __init cpg_div6_register(const char *name,
* Read the divisor. Disabling the clock overwrites the divisor, so we
* need to cache its value for the enable operation.
*/
- clock->div = (clk_readl(clock->reg) & CPG_DIV6_DIV_MASK) + 1;
+ clock->div = (readl(clock->reg) & CPG_DIV6_DIV_MASK) + 1;
switch (num_parents) {
case 1:
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 858c24d4da8f..e82adcb16a52 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -64,13 +64,13 @@ struct mstp_clock {
static inline u32 cpg_mstp_read(struct mstp_clock_group *group,
u32 __iomem *reg)
{
- return group->width_8bit ? readb(reg) : clk_readl(reg);
+ return group->width_8bit ? readb(reg) : readl(reg);
}
static inline void cpg_mstp_write(struct mstp_clock_group *group, u32 val,
u32 __iomem *reg)
{
- group->width_8bit ? writeb(val, reg) : clk_writel(val, reg);
+ group->width_8bit ? writeb(val, reg) : writel(val, reg);
}
static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
diff --git a/drivers/clk/renesas/clk-r8a73a4.c b/drivers/clk/renesas/clk-r8a73a4.c
index 28d204bb659e..7b903ce4c901 100644
--- a/drivers/clk/renesas/clk-r8a73a4.c
+++ b/drivers/clk/renesas/clk-r8a73a4.c
@@ -71,7 +71,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
if (!strcmp(name, "main")) {
- u32 ckscr = clk_readl(cpg->reg + CPG_CKSCR);
+ u32 ckscr = readl(cpg->reg + CPG_CKSCR);
switch ((ckscr >> 28) & 3) {
case 0: /* extal1 */
@@ -95,14 +95,14 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
* clock implementation and we currently have no need to change
* the multiplier value.
*/
- u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+ u32 value = readl(cpg->reg + CPG_PLL0CR);
parent_name = "main";
mult = ((value >> 24) & 0x7f) + 1;
if (value & BIT(20))
div = 2;
} else if (!strcmp(name, "pll1")) {
- u32 value = clk_readl(cpg->reg + CPG_PLL1CR);
+ u32 value = readl(cpg->reg + CPG_PLL1CR);
parent_name = "main";
/* XXX: enable bit? */
@@ -125,7 +125,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
default:
return ERR_PTR(-EINVAL);
}
- value = clk_readl(cpg->reg + cr);
+ value = readl(cpg->reg + cr);
switch ((value >> 5) & 7) {
case 0:
parent_name = "main";
@@ -161,8 +161,7 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
shift = 0;
}
div *= 32;
- mult = 0x20 - ((clk_readl(cpg->reg + CPG_FRQCRC) >> shift)
- & 0x1f);
+ mult = 0x20 - ((readl(cpg->reg + CPG_FRQCRC) >> shift) & 0x1f);
} else {
struct div4_clk *c;
diff --git a/drivers/clk/renesas/clk-r8a7740.c b/drivers/clk/renesas/clk-r8a7740.c
index 2f7ce6696b6c..d074f8e982d0 100644
--- a/drivers/clk/renesas/clk-r8a7740.c
+++ b/drivers/clk/renesas/clk-r8a7740.c
@@ -98,20 +98,20 @@ r8a7740_cpg_register_clock(struct device_node *np, struct r8a7740_cpg *cpg,
* clock implementation and we currently have no need to change
* the multiplier value.
*/
- u32 value = clk_readl(cpg->reg + CPG_FRQCRC);
+ u32 value = readl(cpg->reg + CPG_FRQCRC);
parent_name = "system";
mult = ((value >> 24) & 0x7f) + 1;
} else if (!strcmp(name, "pllc1")) {
- u32 value = clk_readl(cpg->reg + CPG_FRQCRA);
+ u32 value = readl(cpg->reg + CPG_FRQCRA);
parent_name = "system";
mult = ((value >> 24) & 0x7f) + 1;
div = 2;
} else if (!strcmp(name, "pllc2")) {
- u32 value = clk_readl(cpg->reg + CPG_PLLC2CR);
+ u32 value = readl(cpg->reg + CPG_PLLC2CR);
parent_name = "system";
mult = ((value >> 24) & 0x3f) + 1;
} else if (!strcmp(name, "usb24s")) {
- u32 value = clk_readl(cpg->reg + CPG_USBCKCR);
+ u32 value = readl(cpg->reg + CPG_USBCKCR);
if (value & BIT(7))
/* extal2 */
parent_name = of_clk_get_parent_name(np, 1);
diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c
index d14cbe1ca29a..ee32a022e6da 100644
--- a/drivers/clk/renesas/clk-rcar-gen2.c
+++ b/drivers/clk/renesas/clk-rcar-gen2.c
@@ -62,8 +62,7 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
unsigned int mult;
unsigned int val;
- val = (clk_readl(zclk->reg) & CPG_FRQCRC_ZFC_MASK)
- >> CPG_FRQCRC_ZFC_SHIFT;
+ val = (readl(zclk->reg) & CPG_FRQCRC_ZFC_MASK) >> CPG_FRQCRC_ZFC_SHIFT;
mult = 32 - val;
return div_u64((u64)parent_rate * mult, 32);
@@ -95,21 +94,21 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
mult = div_u64((u64)rate * 32, parent_rate);
mult = clamp(mult, 1U, 32U);
- if (clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
+ if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
return -EBUSY;
- val = clk_readl(zclk->reg);
+ val = readl(zclk->reg);
val &= ~CPG_FRQCRC_ZFC_MASK;
val |= (32 - mult) << CPG_FRQCRC_ZFC_SHIFT;
- clk_writel(val, zclk->reg);
+ writel(val, zclk->reg);
/*
* Set KICK bit in FRQCRB to update hardware setting and wait for
* clock change completion.
*/
- kick = clk_readl(zclk->kick_reg);
+ kick = readl(zclk->kick_reg);
kick |= CPG_FRQCRB_KICK;
- clk_writel(kick, zclk->kick_reg);
+ writel(kick, zclk->kick_reg);
/*
* Note: There is no HW information about the worst case latency.
@@ -121,7 +120,7 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
* "super" safe value.
*/
for (i = 1000; i; i--) {
- if (!(clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
+ if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
return 0;
cpu_relax();
@@ -332,7 +331,7 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
mult = config->pll0_mult;
div = 3;
} else {
- u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+ u32 value = readl(cpg->reg + CPG_PLL0CR);
mult = ((value >> 24) & ((1 << 7) - 1)) + 1;
}
parent_name = "main";
diff --git a/drivers/clk/renesas/clk-rz.c b/drivers/clk/renesas/clk-rz.c
index 127c58135c8f..67dd712aa723 100644
--- a/drivers/clk/renesas/clk-rz.c
+++ b/drivers/clk/renesas/clk-rz.c
@@ -75,9 +75,9 @@ rz_cpg_register_clock(struct device_node *np, struct rz_cpg *cpg, const char *na
* let them run at fixed current speed and implement the details later.
*/
if (strcmp(name, "i") == 0)
- val = (clk_readl(cpg->reg + CPG_FRQCR) >> 8) & 3;
+ val = (readl(cpg->reg + CPG_FRQCR) >> 8) & 3;
else if (strcmp(name, "g") == 0)
- val = clk_readl(cpg->reg + CPG_FRQCR2) & 3;
+ val = readl(cpg->reg + CPG_FRQCR2) & 3;
else
return ERR_PTR(-EINVAL);
diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c
index eea38f6ea77e..bab33610eb6c 100644
--- a/drivers/clk/renesas/clk-sh73a0.c
+++ b/drivers/clk/renesas/clk-sh73a0.c
@@ -46,7 +46,7 @@ struct div4_clk {
unsigned int shift;
};
-static struct div4_clk div4_clks[] = {
+static const struct div4_clk div4_clks[] = {
{ "zg", "pll0", CPG_FRQCRA, 16 },
{ "m3", "pll1", CPG_FRQCRA, 12 },
{ "b", "pll1", CPG_FRQCRA, 8 },
@@ -79,13 +79,13 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
{
const struct clk_div_table *table = NULL;
unsigned int shift, reg, width;
- const char *parent_name;
+ const char *parent_name = NULL;
unsigned int mult = 1;
unsigned int div = 1;
if (!strcmp(name, "main")) {
/* extal1, extal1_div2, extal2, extal2_div2 */
- u32 parent_idx = (clk_readl(cpg->reg + CPG_CKSCR) >> 28) & 3;
+ u32 parent_idx = (readl(cpg->reg + CPG_CKSCR) >> 28) & 3;
parent_name = of_clk_get_parent_name(np, parent_idx >> 1);
div = (parent_idx & 1) + 1;
@@ -110,11 +110,11 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
default:
return ERR_PTR(-EINVAL);
}
- if (clk_readl(cpg->reg + CPG_PLLECR) & BIT(enable_bit)) {
- mult = ((clk_readl(enable_reg) >> 24) & 0x3f) + 1;
+ if (readl(cpg->reg + CPG_PLLECR) & BIT(enable_bit)) {
+ mult = ((readl(enable_reg) >> 24) & 0x3f) + 1;
/* handle CFG bit for PLL1 and PLL2 */
if (enable_bit == 1 || enable_bit == 2)
- if (clk_readl(enable_reg) & BIT(20))
+ if (readl(enable_reg) & BIT(20))
mult *= 2;
}
} else if (!strcmp(name, "dsi0phy") || !strcmp(name, "dsi1phy")) {
@@ -135,7 +135,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
shift = 24;
width = 5;
} else {
- struct div4_clk *c;
+ const struct div4_clk *c;
for (c = div4_clks; c->name; c++) {
if (!strcmp(name, c->name)) {
@@ -193,9 +193,9 @@ static void __init sh73a0_cpg_clocks_init(struct device_node *np)
return;
/* Set SDHI clocks to a known state */
- clk_writel(0x108, cpg->reg + CPG_SD0CKCR);
- clk_writel(0x108, cpg->reg + CPG_SD1CKCR);
- clk_writel(0x108, cpg->reg + CPG_SD2CKCR);
+ writel(0x108, cpg->reg + CPG_SD0CKCR);
+ writel(0x108, cpg->reg + CPG_SD1CKCR);
+ writel(0x108, cpg->reg + CPG_SD2CKCR);
for (i = 0; i < num_clks; ++i) {
const char *name;
diff --git a/drivers/clk/renesas/r8a7743-cpg-mssr.c b/drivers/clk/renesas/r8a7743-cpg-mssr.c
index 6dc0b3082aa6..d3c8b1e2969f 100644
--- a/drivers/clk/renesas/r8a7743-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7743-cpg-mssr.c
@@ -117,6 +117,7 @@ static const struct mssr_mod_clk r8a7743_mod_clks[] __initconst = {
DEF_MOD("cmt1", 329, R8A7743_CLK_R),
DEF_MOD("usbhs-dmac0", 330, R8A7743_CLK_HP),
DEF_MOD("usbhs-dmac1", 331, R8A7743_CLK_HP),
+ DEF_MOD("rwdt", 402, R8A7743_CLK_R),
DEF_MOD("irqc", 407, R8A7743_CLK_CP),
DEF_MOD("intc-sys", 408, R8A7743_CLK_ZS),
DEF_MOD("audio-dmac1", 501, R8A7743_CLK_HP),
@@ -195,6 +196,7 @@ static const struct mssr_mod_clk r8a7743_mod_clks[] __initconst = {
};
static const unsigned int r8a7743_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(402), /* RWDT */
MOD_CLK_ID(408), /* INTC-SYS (GIC) */
};
diff --git a/drivers/clk/renesas/r8a7745-cpg-mssr.c b/drivers/clk/renesas/r8a7745-cpg-mssr.c
index 2859504cc866..87f5a3619e4f 100644
--- a/drivers/clk/renesas/r8a7745-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7745-cpg-mssr.c
@@ -114,6 +114,7 @@ static const struct mssr_mod_clk r8a7745_mod_clks[] __initconst = {
DEF_MOD("cmt1", 329, R8A7745_CLK_R),
DEF_MOD("usbhs-dmac0", 330, R8A7745_CLK_HP),
DEF_MOD("usbhs-dmac1", 331, R8A7745_CLK_HP),
+ DEF_MOD("rwdt", 402, R8A7745_CLK_R),
DEF_MOD("irqc", 407, R8A7745_CLK_CP),
DEF_MOD("intc-sys", 408, R8A7745_CLK_ZS),
DEF_MOD("audio-dmac0", 502, R8A7745_CLK_HP),
@@ -180,6 +181,7 @@ static const struct mssr_mod_clk r8a7745_mod_clks[] __initconst = {
};
static const unsigned int r8a7745_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(402), /* RWDT */
MOD_CLK_ID(408), /* INTC-SYS (GIC) */
};
diff --git a/drivers/clk/renesas/r8a7790-cpg-mssr.c b/drivers/clk/renesas/r8a7790-cpg-mssr.c
index 46bb55bb223d..f936cb74b681 100644
--- a/drivers/clk/renesas/r8a7790-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7790-cpg-mssr.c
@@ -140,6 +140,7 @@ static const struct mssr_mod_clk r8a7790_mod_clks[] __initconst = {
DEF_MOD("cmt1", 329, R8A7790_CLK_R),
DEF_MOD("usbhs-dmac0", 330, R8A7790_CLK_HP),
DEF_MOD("usbhs-dmac1", 331, R8A7790_CLK_HP),
+ DEF_MOD("rwdt", 402, R8A7790_CLK_R),
DEF_MOD("irqc", 407, R8A7790_CLK_CP),
DEF_MOD("intc-sys", 408, R8A7790_CLK_ZS),
DEF_MOD("audio-dmac1", 501, R8A7790_CLK_HP),
@@ -211,6 +212,7 @@ static const struct mssr_mod_clk r8a7790_mod_clks[] __initconst = {
};
static const unsigned int r8a7790_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(402), /* RWDT */
MOD_CLK_ID(408), /* INTC-SYS (GIC) */
};
diff --git a/drivers/clk/renesas/r8a7791-cpg-mssr.c b/drivers/clk/renesas/r8a7791-cpg-mssr.c
index c0b51f9bb278..820b220b09cc 100644
--- a/drivers/clk/renesas/r8a7791-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7791-cpg-mssr.c
@@ -128,6 +128,7 @@ static const struct mssr_mod_clk r8a7791_mod_clks[] __initconst = {
DEF_MOD("cmt1", 329, R8A7791_CLK_R),
DEF_MOD("usbhs-dmac0", 330, R8A7791_CLK_HP),
DEF_MOD("usbhs-dmac1", 331, R8A7791_CLK_HP),
+ DEF_MOD("rwdt", 402, R8A7791_CLK_R),
DEF_MOD("irqc", 407, R8A7791_CLK_CP),
DEF_MOD("intc-sys", 408, R8A7791_CLK_ZS),
DEF_MOD("audio-dmac1", 501, R8A7791_CLK_HP),
@@ -209,6 +210,7 @@ static const struct mssr_mod_clk r8a7791_mod_clks[] __initconst = {
};
static const unsigned int r8a7791_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(402), /* RWDT */
MOD_CLK_ID(408), /* INTC-SYS (GIC) */
};
diff --git a/drivers/clk/renesas/r8a7792-cpg-mssr.c b/drivers/clk/renesas/r8a7792-cpg-mssr.c
index 7f85bbf20bf7..609a54080496 100644
--- a/drivers/clk/renesas/r8a7792-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7792-cpg-mssr.c
@@ -98,6 +98,7 @@ static const struct mssr_mod_clk r8a7792_mod_clks[] __initconst = {
DEF_MOD("tpu0", 304, R8A7792_CLK_CP),
DEF_MOD("sdhi0", 314, R8A7792_CLK_SD),
DEF_MOD("cmt1", 329, R8A7792_CLK_R),
+ DEF_MOD("rwdt", 402, R8A7792_CLK_R),
DEF_MOD("irqc", 407, R8A7792_CLK_CP),
DEF_MOD("intc-sys", 408, R8A7792_CLK_ZS),
DEF_MOD("audio-dmac0", 502, R8A7792_CLK_HP),
@@ -154,6 +155,7 @@ static const struct mssr_mod_clk r8a7792_mod_clks[] __initconst = {
};
static const unsigned int r8a7792_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(402), /* RWDT */
MOD_CLK_ID(408), /* INTC-SYS (GIC) */
};
diff --git a/drivers/clk/renesas/r8a7794-cpg-mssr.c b/drivers/clk/renesas/r8a7794-cpg-mssr.c
index ec091a42da54..2a40bbeaeeaf 100644
--- a/drivers/clk/renesas/r8a7794-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7794-cpg-mssr.c
@@ -121,6 +121,7 @@ static const struct mssr_mod_clk r8a7794_mod_clks[] __initconst = {
DEF_MOD("cmt1", 329, R8A7794_CLK_R),
DEF_MOD("usbhs-dmac0", 330, R8A7794_CLK_HP),
DEF_MOD("usbhs-dmac1", 331, R8A7794_CLK_HP),
+ DEF_MOD("rwdt", 402, R8A7794_CLK_R),
DEF_MOD("irqc", 407, R8A7794_CLK_CP),
DEF_MOD("intc-sys", 408, R8A7794_CLK_ZS),
DEF_MOD("audio-dmac0", 502, R8A7794_CLK_HP),
@@ -190,6 +191,7 @@ static const struct mssr_mod_clk r8a7794_mod_clks[] __initconst = {
};
static const unsigned int r8a7794_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(402), /* RWDT */
MOD_CLK_ID(408), /* INTC-SYS (GIC) */
};
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index b1d9f48eae9e..775b0ceaa337 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -74,6 +74,8 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
/* Core Clock Outputs */
+ DEF_BASE("z", R8A7795_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0),
+ DEF_BASE("z2", R8A7795_CLK_Z2, CLK_TYPE_GEN3_Z2, CLK_PLL2),
DEF_FIXED("ztr", R8A7795_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED("ztrd2", R8A7795_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
DEF_FIXED("zt", R8A7795_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index 41e29734126b..dfb267a92f2a 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -74,6 +74,8 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
/* Core Clock Outputs */
+ DEF_BASE("z", R8A7796_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0),
+ DEF_BASE("z2", R8A7796_CLK_Z2, CLK_TYPE_GEN3_Z2, CLK_PLL2),
DEF_FIXED("ztr", R8A7796_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED("ztrd2", R8A7796_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
DEF_FIXED("zt", R8A7796_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
new file mode 100644
index 000000000000..b1acfb60351c
--- /dev/null
+++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r8a77965 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2018 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ *
+ * Based on r8a7795-cpg-mssr.c
+ *
+ * Copyright (C) 2015 Glider bvba
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a77965-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen3-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A77965_CLK_OSC,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_EXTALR,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL3,
+ CLK_PLL4,
+ CLK_PLL1_DIV2,
+ CLK_PLL1_DIV4,
+ CLK_S0,
+ CLK_S1,
+ CLK_S2,
+ CLK_S3,
+ CLK_SDSRC,
+ CLK_SSPSRC,
+ CLK_RINT,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a77965_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("extalr", CLK_EXTALR),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN3_PLL0, CLK_MAIN),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN),
+ DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN3_PLL4, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+ DEF_FIXED(".pll1_div4", CLK_PLL1_DIV4, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 3, 1),
+ DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+
+ /* Core Clock Outputs */
+ DEF_BASE("z", R8A77965_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0),
+ DEF_FIXED("ztr", R8A77965_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED("ztrd2", R8A77965_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
+ DEF_FIXED("zt", R8A77965_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED("zx", R8A77965_CLK_ZX, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED("s0d1", R8A77965_CLK_S0D1, CLK_S0, 1, 1),
+ DEF_FIXED("s0d2", R8A77965_CLK_S0D2, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3", R8A77965_CLK_S0D3, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4", R8A77965_CLK_S0D4, CLK_S0, 4, 1),
+ DEF_FIXED("s0d6", R8A77965_CLK_S0D6, CLK_S0, 6, 1),
+ DEF_FIXED("s0d8", R8A77965_CLK_S0D8, CLK_S0, 8, 1),
+ DEF_FIXED("s0d12", R8A77965_CLK_S0D12, CLK_S0, 12, 1),
+ DEF_FIXED("s1d1", R8A77965_CLK_S1D1, CLK_S1, 1, 1),
+ DEF_FIXED("s1d2", R8A77965_CLK_S1D2, CLK_S1, 2, 1),
+ DEF_FIXED("s1d4", R8A77965_CLK_S1D4, CLK_S1, 4, 1),
+ DEF_FIXED("s2d1", R8A77965_CLK_S2D1, CLK_S2, 1, 1),
+ DEF_FIXED("s2d2", R8A77965_CLK_S2D2, CLK_S2, 2, 1),
+ DEF_FIXED("s2d4", R8A77965_CLK_S2D4, CLK_S2, 4, 1),
+ DEF_FIXED("s3d1", R8A77965_CLK_S3D1, CLK_S3, 1, 1),
+ DEF_FIXED("s3d2", R8A77965_CLK_S3D2, CLK_S3, 2, 1),
+ DEF_FIXED("s3d4", R8A77965_CLK_S3D4, CLK_S3, 4, 1),
+
+ DEF_GEN3_SD("sd0", R8A77965_CLK_SD0, CLK_SDSRC, 0x074),
+ DEF_GEN3_SD("sd1", R8A77965_CLK_SD1, CLK_SDSRC, 0x078),
+ DEF_GEN3_SD("sd2", R8A77965_CLK_SD2, CLK_SDSRC, 0x268),
+ DEF_GEN3_SD("sd3", R8A77965_CLK_SD3, CLK_SDSRC, 0x26c),
+
+ DEF_FIXED("cl", R8A77965_CLK_CL, CLK_PLL1_DIV2, 48, 1),
+ DEF_FIXED("cp", R8A77965_CLK_CP, CLK_EXTAL, 2, 1),
+
+ DEF_DIV6P1("canfd", R8A77965_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
+ DEF_DIV6P1("csi0", R8A77965_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
+ DEF_DIV6P1("mso", R8A77965_CLK_MSO, CLK_PLL1_DIV4, 0x014),
+ DEF_DIV6P1("hdmi", R8A77965_CLK_HDMI, CLK_PLL1_DIV4, 0x250),
+
+ DEF_DIV6_RO("osc", R8A77965_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8),
+ DEF_DIV6_RO("r_int", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32),
+
+ DEF_BASE("r", R8A77965_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT),
+};
+
+static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
+ DEF_MOD("scif5", 202, R8A77965_CLK_S3D4),
+ DEF_MOD("scif4", 203, R8A77965_CLK_S3D4),
+ DEF_MOD("scif3", 204, R8A77965_CLK_S3D4),
+ DEF_MOD("scif1", 206, R8A77965_CLK_S3D4),
+ DEF_MOD("scif0", 207, R8A77965_CLK_S3D4),
+ DEF_MOD("sys-dmac2", 217, R8A77965_CLK_S0D3),
+ DEF_MOD("sys-dmac1", 218, R8A77965_CLK_S0D3),
+ DEF_MOD("sys-dmac0", 219, R8A77965_CLK_S0D3),
+
+ DEF_MOD("cmt3", 300, R8A77965_CLK_R),
+ DEF_MOD("cmt2", 301, R8A77965_CLK_R),
+ DEF_MOD("cmt1", 302, R8A77965_CLK_R),
+ DEF_MOD("cmt0", 303, R8A77965_CLK_R),
+ DEF_MOD("scif2", 310, R8A77965_CLK_S3D4),
+ DEF_MOD("sdif3", 311, R8A77965_CLK_SD3),
+ DEF_MOD("sdif2", 312, R8A77965_CLK_SD2),
+ DEF_MOD("sdif1", 313, R8A77965_CLK_SD1),
+ DEF_MOD("sdif0", 314, R8A77965_CLK_SD0),
+ DEF_MOD("pcie1", 318, R8A77965_CLK_S3D1),
+ DEF_MOD("pcie0", 319, R8A77965_CLK_S3D1),
+ DEF_MOD("usb3-if0", 328, R8A77965_CLK_S3D1),
+ DEF_MOD("usb-dmac0", 330, R8A77965_CLK_S3D1),
+ DEF_MOD("usb-dmac1", 331, R8A77965_CLK_S3D1),
+
+ DEF_MOD("rwdt", 402, R8A77965_CLK_R),
+ DEF_MOD("intc-ex", 407, R8A77965_CLK_CP),
+ DEF_MOD("intc-ap", 408, R8A77965_CLK_S0D3),
+
+ DEF_MOD("audmac1", 501, R8A77965_CLK_S0D3),
+ DEF_MOD("audmac0", 502, R8A77965_CLK_S0D3),
+ DEF_MOD("drif7", 508, R8A77965_CLK_S3D2),
+ DEF_MOD("drif6", 509, R8A77965_CLK_S3D2),
+ DEF_MOD("drif5", 510, R8A77965_CLK_S3D2),
+ DEF_MOD("drif4", 511, R8A77965_CLK_S3D2),
+ DEF_MOD("drif3", 512, R8A77965_CLK_S3D2),
+ DEF_MOD("drif2", 513, R8A77965_CLK_S3D2),
+ DEF_MOD("drif1", 514, R8A77965_CLK_S3D2),
+ DEF_MOD("drif0", 515, R8A77965_CLK_S3D2),
+ DEF_MOD("hscif4", 516, R8A77965_CLK_S3D1),
+ DEF_MOD("hscif3", 517, R8A77965_CLK_S3D1),
+ DEF_MOD("hscif2", 518, R8A77965_CLK_S3D1),
+ DEF_MOD("hscif1", 519, R8A77965_CLK_S3D1),
+ DEF_MOD("hscif0", 520, R8A77965_CLK_S3D1),
+ DEF_MOD("thermal", 522, R8A77965_CLK_CP),
+ DEF_MOD("pwm", 523, R8A77965_CLK_S0D12),
+
+ DEF_MOD("fcpvd1", 602, R8A77965_CLK_S0D2),
+ DEF_MOD("fcpvd0", 603, R8A77965_CLK_S0D2),
+ DEF_MOD("fcpvb0", 607, R8A77965_CLK_S0D1),
+ DEF_MOD("fcpvi0", 611, R8A77965_CLK_S0D1),
+ DEF_MOD("fcpf0", 615, R8A77965_CLK_S0D1),
+ DEF_MOD("fcpcs", 619, R8A77965_CLK_S0D2),
+ DEF_MOD("vspd1", 622, R8A77965_CLK_S0D2),
+ DEF_MOD("vspd0", 623, R8A77965_CLK_S0D2),
+ DEF_MOD("vspb", 626, R8A77965_CLK_S0D1),
+ DEF_MOD("vspi0", 631, R8A77965_CLK_S0D1),
+
+ DEF_MOD("ehci1", 702, R8A77965_CLK_S3D4),
+ DEF_MOD("ehci0", 703, R8A77965_CLK_S3D4),
+ DEF_MOD("hsusb", 704, R8A77965_CLK_S3D4),
+ DEF_MOD("csi20", 714, R8A77965_CLK_CSI0),
+ DEF_MOD("csi40", 716, R8A77965_CLK_CSI0),
+ DEF_MOD("du3", 721, R8A77965_CLK_S2D1),
+ DEF_MOD("du1", 723, R8A77965_CLK_S2D1),
+ DEF_MOD("du0", 724, R8A77965_CLK_S2D1),
+ DEF_MOD("lvds", 727, R8A77965_CLK_S2D1),
+ DEF_MOD("hdmi0", 729, R8A77965_CLK_HDMI),
+
+ DEF_MOD("vin7", 804, R8A77965_CLK_S0D2),
+ DEF_MOD("vin6", 805, R8A77965_CLK_S0D2),
+ DEF_MOD("vin5", 806, R8A77965_CLK_S0D2),
+ DEF_MOD("vin4", 807, R8A77965_CLK_S0D2),
+ DEF_MOD("vin3", 808, R8A77965_CLK_S0D2),
+ DEF_MOD("vin2", 809, R8A77965_CLK_S0D2),
+ DEF_MOD("vin1", 810, R8A77965_CLK_S0D2),
+ DEF_MOD("vin0", 811, R8A77965_CLK_S0D2),
+ DEF_MOD("etheravb", 812, R8A77965_CLK_S0D6),
+ DEF_MOD("imr1", 822, R8A77965_CLK_S0D2),
+ DEF_MOD("imr0", 823, R8A77965_CLK_S0D2),
+
+ DEF_MOD("gpio7", 905, R8A77965_CLK_S3D4),
+ DEF_MOD("gpio6", 906, R8A77965_CLK_S3D4),
+ DEF_MOD("gpio5", 907, R8A77965_CLK_S3D4),
+ DEF_MOD("gpio4", 908, R8A77965_CLK_S3D4),
+ DEF_MOD("gpio3", 909, R8A77965_CLK_S3D4),
+ DEF_MOD("gpio2", 910, R8A77965_CLK_S3D4),
+ DEF_MOD("gpio1", 911, R8A77965_CLK_S3D4),
+ DEF_MOD("gpio0", 912, R8A77965_CLK_S3D4),
+ DEF_MOD("can-fd", 914, R8A77965_CLK_S3D2),
+ DEF_MOD("can-if1", 915, R8A77965_CLK_S3D4),
+ DEF_MOD("can-if0", 916, R8A77965_CLK_S3D4),
+ DEF_MOD("i2c6", 918, R8A77965_CLK_S0D6),
+ DEF_MOD("i2c5", 919, R8A77965_CLK_S0D6),
+ DEF_MOD("i2c-dvfs", 926, R8A77965_CLK_CP),
+ DEF_MOD("i2c4", 927, R8A77965_CLK_S0D6),
+ DEF_MOD("i2c3", 928, R8A77965_CLK_S0D6),
+ DEF_MOD("i2c2", 929, R8A77965_CLK_S3D2),
+ DEF_MOD("i2c1", 930, R8A77965_CLK_S3D2),
+ DEF_MOD("i2c0", 931, R8A77965_CLK_S3D2),
+
+ DEF_MOD("ssi-all", 1005, R8A77965_CLK_S3D4),
+ DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)),
+ DEF_MOD("scu-all", 1017, R8A77965_CLK_S3D4),
+ DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)),
+};
+
+static const unsigned int r8a77965_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-AP (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD EXTAL PLL0 PLL1 PLL3 PLL4
+ * 14 13 19 17 (MHz)
+ *-----------------------------------------------------------
+ * 0 0 0 0 16.66 x 1 x180 x192 x192 x144
+ * 0 0 0 1 16.66 x 1 x180 x192 x128 x144
+ * 0 0 1 0 Prohibited setting
+ * 0 0 1 1 16.66 x 1 x180 x192 x192 x144
+ * 0 1 0 0 20 x 1 x150 x160 x160 x120
+ * 0 1 0 1 20 x 1 x150 x160 x106 x120
+ * 0 1 1 0 Prohibited setting
+ * 0 1 1 1 20 x 1 x150 x160 x160 x120
+ * 1 0 0 0 25 x 1 x120 x128 x128 x96
+ * 1 0 0 1 25 x 1 x120 x128 x84 x96
+ * 1 0 1 0 Prohibited setting
+ * 1 0 1 1 25 x 1 x120 x128 x128 x96
+ * 1 1 0 0 33.33 / 2 x180 x192 x192 x144
+ * 1 1 0 1 33.33 / 2 x180 x192 x128 x144
+ * 1 1 1 0 Prohibited setting
+ * 1 1 1 1 33.33 / 2 x180 x192 x192 x144
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 11) | \
+ (((md) & BIT(13)) >> 11) | \
+ (((md) & BIT(19)) >> 18) | \
+ (((md) & BIT(17)) >> 17))
+
+static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
+ /* EXTAL div PLL1 mult/div PLL3 mult/div */
+ { 1, 192, 1, 192, 1, },
+ { 1, 192, 1, 128, 1, },
+ { 0, /* Prohibited setting */ },
+ { 1, 192, 1, 192, 1, },
+ { 1, 160, 1, 160, 1, },
+ { 1, 160, 1, 106, 1, },
+ { 0, /* Prohibited setting */ },
+ { 1, 160, 1, 160, 1, },
+ { 1, 128, 1, 128, 1, },
+ { 1, 128, 1, 84, 1, },
+ { 0, /* Prohibited setting */ },
+ { 1, 128, 1, 128, 1, },
+ { 2, 192, 1, 192, 1, },
+ { 2, 192, 1, 128, 1, },
+ { 0, /* Prohibited setting */ },
+ { 2, 192, 1, 192, 1, },
+};
+
+static int __init r8a77965_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+ if (!cpg_pll_config->extal_div) {
+ dev_err(dev, "Prohibited setting (cpg_mode=0x%x)\n", cpg_mode);
+ return -EINVAL;
+ }
+
+ return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a77965_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a77965_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a77965_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a77965_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a77965_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a77965_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a77965_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a77965_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen3_cpg_clk_register,
+};
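
CPG_PLL_CONFIG_INDEX() above simply repacks the four mode pins into a 4-bit table
index, i.e. index = (MD14 << 3) | (MD13 << 2) | (MD19 << 1) | MD17. A worked example
against the comment table:

    MD14/13/19/17 = 0/1/0/0  ->  index 4  ->  { 1, 160, 1, 160, 1 }
    (20 MHz EXTAL, not divided; PLL1 = 20 MHz x 160 = 3.2 GHz)

The three "Prohibited setting" rows keep extal_div at 0, which is exactly what
r8a77965_cpg_mssr_init() checks before returning -EINVAL.
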
diff --git a/drivers/clk/renesas/r8a77980-cpg-mssr.c b/drivers/clk/renesas/r8a77980-cpg-mssr.c
new file mode 100644
index 000000000000..7aaae73a321a
--- /dev/null
+++ b/drivers/clk/renesas/r8a77980-cpg-mssr.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r8a77980 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018 Cogent Embedded, Inc.
+ *
+ * Based on r8a7795-cpg-mssr.c
+ *
+ * Copyright (C) 2015 Glider bvba
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+#include <linux/sys_soc.h>
+
+#include <dt-bindings/clock/r8a77980-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen3-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A77980_CLK_OSC,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_EXTALR,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL1,
+ CLK_PLL2,
+ CLK_PLL3,
+ CLK_PLL1_DIV2,
+ CLK_PLL1_DIV4,
+ CLK_S0,
+ CLK_S1,
+ CLK_S2,
+ CLK_S3,
+ CLK_SDSRC,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a77980_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("extalr", CLK_EXTALR),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN),
+ DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN3_PLL2, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+ DEF_FIXED(".pll1_div4", CLK_PLL1_DIV4, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 3, 1),
+ DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+
+ /* Core Clock Outputs */
+ DEF_FIXED("ztr", R8A77980_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED("ztrd2", R8A77980_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
+ DEF_FIXED("zt", R8A77980_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED("zx", R8A77980_CLK_ZX, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED("s0d1", R8A77980_CLK_S0D1, CLK_S0, 1, 1),
+ DEF_FIXED("s0d2", R8A77980_CLK_S0D2, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3", R8A77980_CLK_S0D3, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4", R8A77980_CLK_S0D4, CLK_S0, 4, 1),
+ DEF_FIXED("s0d6", R8A77980_CLK_S0D6, CLK_S0, 6, 1),
+ DEF_FIXED("s0d12", R8A77980_CLK_S0D12, CLK_S0, 12, 1),
+ DEF_FIXED("s0d24", R8A77980_CLK_S0D24, CLK_S0, 24, 1),
+ DEF_FIXED("s1d1", R8A77980_CLK_S1D1, CLK_S1, 1, 1),
+ DEF_FIXED("s1d2", R8A77980_CLK_S1D2, CLK_S1, 2, 1),
+ DEF_FIXED("s1d4", R8A77980_CLK_S1D4, CLK_S1, 4, 1),
+ DEF_FIXED("s2d1", R8A77980_CLK_S2D1, CLK_S2, 1, 1),
+ DEF_FIXED("s2d2", R8A77980_CLK_S2D2, CLK_S2, 2, 1),
+ DEF_FIXED("s2d4", R8A77980_CLK_S2D4, CLK_S2, 4, 1),
+ DEF_FIXED("s3d1", R8A77980_CLK_S3D1, CLK_S3, 1, 1),
+ DEF_FIXED("s3d2", R8A77980_CLK_S3D2, CLK_S3, 2, 1),
+ DEF_FIXED("s3d4", R8A77980_CLK_S3D4, CLK_S3, 4, 1),
+
+ DEF_GEN3_SD("sd0", R8A77980_CLK_SD0, CLK_SDSRC, 0x0074),
+
+ DEF_FIXED("cl", R8A77980_CLK_CL, CLK_PLL1_DIV2, 48, 1),
+ DEF_FIXED("cp", R8A77980_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cpex", R8A77980_CLK_CPEX, CLK_EXTAL, 2, 1),
+
+ DEF_DIV6P1("canfd", R8A77980_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
+ DEF_DIV6P1("csi0", R8A77980_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
+ DEF_DIV6P1("mso", R8A77980_CLK_MSO, CLK_PLL1_DIV4, 0x014),
+};
+
+static const struct mssr_mod_clk r8a77980_mod_clks[] __initconst = {
+ DEF_MOD("tmu4", 121, R8A77980_CLK_S0D6),
+ DEF_MOD("tmu3", 122, R8A77980_CLK_S0D6),
+ DEF_MOD("tmu2", 123, R8A77980_CLK_S0D6),
+ DEF_MOD("tmu1", 124, R8A77980_CLK_S0D6),
+ DEF_MOD("tmu0", 125, R8A77980_CLK_CP),
+ DEF_MOD("scif4", 203, R8A77980_CLK_S3D4),
+ DEF_MOD("scif3", 204, R8A77980_CLK_S3D4),
+ DEF_MOD("scif1", 206, R8A77980_CLK_S3D4),
+ DEF_MOD("scif0", 207, R8A77980_CLK_S3D4),
+ DEF_MOD("msiof3", 208, R8A77980_CLK_MSO),
+ DEF_MOD("msiof2", 209, R8A77980_CLK_MSO),
+ DEF_MOD("msiof1", 210, R8A77980_CLK_MSO),
+ DEF_MOD("msiof0", 211, R8A77980_CLK_MSO),
+ DEF_MOD("sys-dmac2", 217, R8A77980_CLK_S0D3),
+ DEF_MOD("sys-dmac1", 218, R8A77980_CLK_S0D3),
+ DEF_MOD("tpu0", 304, R8A77980_CLK_S3D4),
+ DEF_MOD("sdif", 314, R8A77980_CLK_SD0),
+ DEF_MOD("pciec0", 319, R8A77980_CLK_S3D1),
+ DEF_MOD("intc-ex", 407, R8A77980_CLK_CP),
+ DEF_MOD("intc-ap", 408, R8A77980_CLK_S0D3),
+ DEF_MOD("hscif3", 517, R8A77980_CLK_S3D1),
+ DEF_MOD("hscif2", 518, R8A77980_CLK_S3D1),
+ DEF_MOD("hscif1", 519, R8A77980_CLK_S3D1),
+ DEF_MOD("hscif0", 520, R8A77980_CLK_S3D1),
+ DEF_MOD("imp4", 521, R8A77980_CLK_S1D1),
+ DEF_MOD("thermal", 522, R8A77980_CLK_CP),
+ DEF_MOD("pwm", 523, R8A77980_CLK_S0D12),
+ DEF_MOD("impdma1", 526, R8A77980_CLK_S1D1),
+ DEF_MOD("impdma0", 527, R8A77980_CLK_S1D1),
+ DEF_MOD("imp-ocv4", 528, R8A77980_CLK_S1D1),
+ DEF_MOD("imp-ocv3", 529, R8A77980_CLK_S1D1),
+ DEF_MOD("imp-ocv2", 531, R8A77980_CLK_S1D1),
+ DEF_MOD("fcpvd0", 603, R8A77980_CLK_S3D1),
+ DEF_MOD("vspd0", 623, R8A77980_CLK_S3D1),
+ DEF_MOD("csi41", 715, R8A77980_CLK_CSI0),
+ DEF_MOD("csi40", 716, R8A77980_CLK_CSI0),
+ DEF_MOD("du0", 724, R8A77980_CLK_S2D1),
+ DEF_MOD("lvds", 727, R8A77980_CLK_S2D1),
+ DEF_MOD("etheravb", 812, R8A77980_CLK_S3D2),
+ DEF_MOD("gether", 813, R8A77980_CLK_S3D2),
+ DEF_MOD("imp3", 824, R8A77980_CLK_S1D1),
+ DEF_MOD("imp2", 825, R8A77980_CLK_S1D1),
+ DEF_MOD("imp1", 826, R8A77980_CLK_S1D1),
+ DEF_MOD("imp0", 827, R8A77980_CLK_S1D1),
+ DEF_MOD("imp-ocv1", 828, R8A77980_CLK_S1D1),
+ DEF_MOD("imp-ocv0", 829, R8A77980_CLK_S1D1),
+ DEF_MOD("impram", 830, R8A77980_CLK_S1D1),
+ DEF_MOD("impcnn", 831, R8A77980_CLK_S1D1),
+ DEF_MOD("gpio5", 907, R8A77980_CLK_CP),
+ DEF_MOD("gpio4", 908, R8A77980_CLK_CP),
+ DEF_MOD("gpio3", 909, R8A77980_CLK_CP),
+ DEF_MOD("gpio2", 910, R8A77980_CLK_CP),
+ DEF_MOD("gpio1", 911, R8A77980_CLK_CP),
+ DEF_MOD("gpio0", 912, R8A77980_CLK_CP),
+ DEF_MOD("can-fd", 914, R8A77980_CLK_S3D2),
+ DEF_MOD("i2c4", 927, R8A77980_CLK_S0D6),
+ DEF_MOD("i2c3", 928, R8A77980_CLK_S0D6),
+ DEF_MOD("i2c2", 929, R8A77980_CLK_S3D2),
+ DEF_MOD("i2c1", 930, R8A77980_CLK_S3D2),
+ DEF_MOD("i2c0", 931, R8A77980_CLK_S3D2),
+};
+
+static const unsigned int r8a77980_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-AP (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD EXTAL PLL2 PLL1 PLL3
+ * 14 13 (MHz)
+ * --------------------------------------------------
+ * 0 0 16.66 x 1 x240 x192 x192
+ * 0 1 20 x 1 x200 x160 x160
+ * 1 0 27 x 1 x148 x118 x118
+ * 1 1 33.33 / 2 x240 x192 x192
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
+ (((md) & BIT(13)) >> 13))
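+
+/*
+ * Example (values from the table above, for illustration only): a board
+ * strapped with MD14 = 1 and MD13 = 1 gives CPG_PLL_CONFIG_INDEX() == 3,
+ * i.e. the last entry below: a 33.33 MHz EXTAL divided by 2, with PLL1
+ * and PLL3 multiplied by 192.
+ */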
+
+static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[4] __initconst = {
+ /* EXTAL div PLL1 mult/div PLL3 mult/div */
+ { 1, 192, 1, 192, 1, },
+ { 1, 160, 1, 160, 1, },
+ { 1, 118, 1, 118, 1, },
+ { 2, 192, 1, 192, 1, },
+};
+
+static int __init r8a77980_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+ return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a77980_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a77980_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a77980_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a77980_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a77980_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a77980_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a77980_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a77980_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen3_cpg_clk_register,
+};
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 0904886f5501..628b63b85d3f 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -13,6 +13,7 @@
*/
#include <linux/bug.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
@@ -62,6 +63,140 @@ static void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
}
/*
+ * Z Clock & Z2 Clock
+ *
+ * Traits of this clock:
+ * prepare - clk_prepare only ensures that parents are prepared
+ * enable - clk_enable only ensures that parents are enabled
+ * rate - rate is adjustable. clk->rate = (parent->rate * mult / 32 ) / 2
+ * parent - fixed parent. No clk_set_parent support
+ */
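+/*
+ * Worked example (illustrative numbers only): with a 1500 MHz parent and
+ * the 5-bit ZFC field programmed to 16, mult = 32 - 16 = 16 and the
+ * resulting Z clock rate is (1500 MHz * 16 / 32) / 2 = 375 MHz.
+ */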
+#define CPG_FRQCRB 0x00000004
+#define CPG_FRQCRB_KICK BIT(31)
+#define CPG_FRQCRC 0x000000e0
+#define CPG_FRQCRC_ZFC_MASK GENMASK(12, 8)
+#define CPG_FRQCRC_Z2FC_MASK GENMASK(4, 0)
+
+struct cpg_z_clk {
+ struct clk_hw hw;
+ void __iomem *reg;
+ void __iomem *kick_reg;
+ unsigned long mask;
+};
+
+#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
+
+static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int mult;
+ u32 val;
+
+ val = readl(zclk->reg) & zclk->mask;
+ mult = 32 - (val >> __ffs(zclk->mask));
+
+ /* Factor of 2 is for fixed divider */
+ return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, 32 * 2);
+}
+
+static long cpg_z_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /* Factor of 2 is for fixed divider */
+ unsigned long prate = *parent_rate / 2;
+ unsigned int mult;
+
+ mult = div_u64(rate * 32ULL, prate);
+ mult = clamp(mult, 1U, 32U);
+
+ return (u64)prate * mult / 32;
+}
+
+static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int mult;
+ unsigned int i;
+ u32 val, kick;
+
+ /* Factor of 2 is for fixed divider */
+ mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL * 2, parent_rate);
+ mult = clamp(mult, 1U, 32U);
+
+ if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
+ return -EBUSY;
+
+ val = readl(zclk->reg) & ~zclk->mask;
+ val |= ((32 - mult) << __ffs(zclk->mask)) & zclk->mask;
+ writel(val, zclk->reg);
+
+ /*
+ * Set KICK bit in FRQCRB to update hardware setting and wait for
+ * clock change completion.
+ */
+ kick = readl(zclk->kick_reg);
+ kick |= CPG_FRQCRB_KICK;
+ writel(kick, zclk->kick_reg);
+
+ /*
+	 * Note: There is no HW information about the worst-case latency.
+	 *
+	 * Experimental measurements suggest that no more than ~10
+	 * iterations are needed, independently of the CPU rate.
+	 * Since this value might depend on the external xtal rate, the pll1
+	 * rate or even the other emulation clocks' rates, use 1000 as a
+	 * "super" safe value.
+ */
+ for (i = 1000; i; i--) {
+ if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
+ return 0;
+
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
+static const struct clk_ops cpg_z_clk_ops = {
+ .recalc_rate = cpg_z_clk_recalc_rate,
+ .round_rate = cpg_z_clk_round_rate,
+ .set_rate = cpg_z_clk_set_rate,
+};
+
+static struct clk * __init cpg_z_clk_register(const char *name,
+ const char *parent_name,
+ void __iomem *reg,
+ unsigned long mask)
+{
+ struct clk_init_data init;
+ struct cpg_z_clk *zclk;
+ struct clk *clk;
+
+ zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
+ if (!zclk)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &cpg_z_clk_ops;
+ init.flags = 0;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ zclk->reg = reg + CPG_FRQCRC;
+ zclk->kick_reg = reg + CPG_FRQCRB;
+ zclk->hw.init = &init;
+ zclk->mask = mask;
+
+ clk = clk_register(NULL, &zclk->hw);
+ if (IS_ERR(clk))
+ kfree(zclk);
+
+ return clk;
+}
+
+/*
* SDn Clock
*/
#define CPG_SD_STP_HCK BIT(9)
@@ -420,6 +555,14 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
mult = 1;
break;
+ case CLK_TYPE_GEN3_Z:
+ return cpg_z_clk_register(core->name, __clk_get_name(parent),
+ base, CPG_FRQCRC_ZFC_MASK);
+
+ case CLK_TYPE_GEN3_Z2:
+ return cpg_z_clk_register(core->name, __clk_get_name(parent),
+ base, CPG_FRQCRC_Z2FC_MASK);
+
default:
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index 2e4284399f53..ea4f8fc3c4c9 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -21,6 +21,8 @@ enum rcar_gen3_clk_types {
CLK_TYPE_GEN3_SD,
CLK_TYPE_GEN3_R,
CLK_TYPE_GEN3_PE,
+ CLK_TYPE_GEN3_Z,
+ CLK_TYPE_GEN3_Z2,
};
#define DEF_GEN3_SD(_name, _id, _parent, _offset) \
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index e3cc72c81311..4e88e980fb76 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -693,12 +693,24 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a7796_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A77965
+ {
+ .compatible = "renesas,r8a77965-cpg-mssr",
+ .data = &r8a77965_cpg_mssr_info,
+ },
+#endif
#ifdef CONFIG_CLK_R8A77970
{
.compatible = "renesas,r8a77970-cpg-mssr",
.data = &r8a77970_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A77980
+ {
+ .compatible = "renesas,r8a77980-cpg-mssr",
+ .data = &r8a77980_cpg_mssr_info,
+ },
+#endif
#ifdef CONFIG_CLK_R8A77995
{
.compatible = "renesas,r8a77995-cpg-mssr",
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 0745b0930308..97ccb093c10f 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -139,7 +139,9 @@ extern const struct cpg_mssr_info r8a7792_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7794_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7795_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7796_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a77965_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77970_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a77980_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77995_cpg_mssr_info;
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index 077fcdc7908b..026a26bb702d 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -25,6 +25,8 @@ struct rockchip_mmc_clock {
void __iomem *reg;
int id;
int shift;
+ int cached_phase;
+ struct notifier_block clk_rate_change_nb;
};
#define to_mmc_clock(_hw) container_of(_hw, struct rockchip_mmc_clock, hw)
@@ -58,6 +60,12 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
u16 degrees;
u32 delay_num = 0;
+ /* See the comment for rockchip_mmc_set_phase below */
+ if (!rate) {
+ pr_err("%s: invalid clk rate\n", __func__);
+ return -EINVAL;
+ }
+
raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
@@ -84,6 +92,23 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
u32 raw_value;
u32 delay;
+ /*
+	 * The calculation below is based on the output clock from the
+	 * MMC host to the card, which expects the phase clock to inherit
+	 * the clock rate from its parent, namely the output clock
+	 * provider of the MMC host. However, things may go wrong if
+	 * (1) it is an orphan, or
+	 * (2) it is assigned to the wrong parent.
+	 *
+	 * This check helps debug case (1), which seems to be the most
+	 * common problem and makes unstable mmc tuning results difficult
+	 * to debug.
+ */
+ if (!rate) {
+ pr_err("%s: invalid clk rate\n", __func__);
+ return -EINVAL;
+ }
+
nineties = degrees / 90;
remainder = (degrees % 90);
@@ -139,6 +164,41 @@ static const struct clk_ops rockchip_mmc_clk_ops = {
.set_phase = rockchip_mmc_set_phase,
};
+#define to_rockchip_mmc_clock(x) \
+ container_of(x, struct rockchip_mmc_clock, clk_rate_change_nb)
+static int rockchip_mmc_clk_rate_notify(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct rockchip_mmc_clock *mmc_clock = to_rockchip_mmc_clock(nb);
+ struct clk_notifier_data *ndata = data;
+
+ /*
+	 * rockchip_mmc_clk is mostly used by mmc controllers to sample the
+	 * input data, which expects a fixed phase after the tuning process.
+	 * However, if the clock rate is changed, the phase becomes stale and
+	 * may break the data sampling. So here we try to restore the phase for
+	 * that case, except when
+	 * (1) cached_phase is invalid, since we inevitably cached it when the
+	 * clock provider was reparented from orphan to its real parent in the
+	 * first place. Otherwise we may mess up the initialization of MMC cards,
+	 * since we only set the default sample phase and drive phase later on.
+	 * (2) the new rate is higher than the old one, since the mmc driver sets
+	 * max-frequency to match the board's ability and we must not exceed it,
+	 * otherwise the tests smoke out the issue.
+ */
+ if (ndata->old_rate <= ndata->new_rate)
+ return NOTIFY_DONE;
+
+ if (event == PRE_RATE_CHANGE)
+ mmc_clock->cached_phase =
+ rockchip_mmc_get_phase(&mmc_clock->hw);
+ else if (mmc_clock->cached_phase != -EINVAL &&
+ event == POST_RATE_CHANGE)
+ rockchip_mmc_set_phase(&mmc_clock->hw, mmc_clock->cached_phase);
+
+ return NOTIFY_DONE;
+}
+
struct clk *rockchip_clk_register_mmc(const char *name,
const char *const *parent_names, u8 num_parents,
void __iomem *reg, int shift)
@@ -146,6 +206,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
struct clk_init_data init;
struct rockchip_mmc_clock *mmc_clock;
struct clk *clk;
+ int ret;
mmc_clock = kmalloc(sizeof(*mmc_clock), GFP_KERNEL);
if (!mmc_clock)
@@ -162,8 +223,21 @@ struct clk *rockchip_clk_register_mmc(const char *name,
mmc_clock->shift = shift;
clk = clk_register(NULL, &mmc_clock->hw);
- if (IS_ERR(clk))
- kfree(mmc_clock);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto err_register;
+ }
+
+ mmc_clock->clk_rate_change_nb.notifier_call =
+ &rockchip_mmc_clk_rate_notify;
+ ret = clk_notifier_register(clk, &mmc_clock->clk_rate_change_nb);
+ if (ret)
+ goto err_notifier;
return clk;
+err_notifier:
+ clk_unregister(clk);
+err_register:
+ kfree(mmc_clock);
+ return ERR_PTR(ret);
}
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index 11e7f2d1c054..7af48184b022 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -387,7 +387,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(23), 5, 2, MFLAGS, 0, 6, DFLAGS,
RK2928_CLKGATE_CON(2), 15, GFLAGS),
- COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
+ COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0,
RK2928_CLKSEL_CON(11), 8, 2, MFLAGS, 0, 8, DFLAGS,
RK2928_CLKGATE_CON(2), 11, GFLAGS),
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index b04f29774ee7..252366a5231f 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -304,7 +304,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "aclk_core", "armclk", CLK_IGNORE_UNUSED,
RK3328_CLKSEL_CON(1), 4, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3328_CLKGATE_CON(7), 1, GFLAGS),
- GATE(0, "aclk_core_niu", "aclk_core", CLK_IGNORE_UNUSED,
+ GATE(0, "aclk_core_niu", "aclk_core", 0,
RK3328_CLKGATE_CON(13), 0, GFLAGS),
GATE(0, "aclk_gic400", "aclk_core", CLK_IGNORE_UNUSED,
RK3328_CLKGATE_CON(13), 1, GFLAGS),
@@ -318,7 +318,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKGATE_CON(6), 6, GFLAGS),
GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", CLK_SET_RATE_PARENT,
RK3328_CLKGATE_CON(14), 0, GFLAGS),
- GATE(0, "aclk_gpu_niu", "aclk_gpu_pre", CLK_IGNORE_UNUSED,
+ GATE(0, "aclk_gpu_niu", "aclk_gpu_pre", 0,
RK3328_CLKGATE_CON(14), 1, GFLAGS),
/* PD_DDR */
@@ -513,9 +513,9 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKGATE_CON(24), 0, GFLAGS),
GATE(HCLK_RKVDEC, "hclk_rkvdec", "hclk_rkvdec_pre", CLK_SET_RATE_PARENT,
RK3328_CLKGATE_CON(24), 1, GFLAGS),
- GATE(0, "aclk_rkvdec_niu", "aclk_rkvdec_pre", CLK_IGNORE_UNUSED,
+ GATE(0, "aclk_rkvdec_niu", "aclk_rkvdec_pre", 0,
RK3328_CLKGATE_CON(24), 2, GFLAGS),
- GATE(0, "hclk_rkvdec_niu", "hclk_rkvdec_pre", CLK_IGNORE_UNUSED,
+ GATE(0, "hclk_rkvdec_niu", "hclk_rkvdec_pre", 0,
RK3328_CLKGATE_CON(24), 3, GFLAGS),
COMPOSITE(SCLK_VDEC_CABAC, "sclk_vdec_cabac", mux_4plls_p, 0,
@@ -535,9 +535,9 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKGATE_CON(23), 0, GFLAGS),
GATE(HCLK_VPU, "hclk_vpu", "hclk_vpu_pre", CLK_SET_RATE_PARENT,
RK3328_CLKGATE_CON(23), 1, GFLAGS),
- GATE(0, "aclk_vpu_niu", "aclk_vpu_pre", CLK_IGNORE_UNUSED,
+ GATE(0, "aclk_vpu_niu", "aclk_vpu_pre", 0,
RK3328_CLKGATE_CON(23), 2, GFLAGS),
- GATE(0, "hclk_vpu_niu", "hclk_vpu_pre", CLK_IGNORE_UNUSED,
+ GATE(0, "hclk_vpu_niu", "hclk_vpu_pre", 0,
RK3328_CLKGATE_CON(23), 3, GFLAGS),
COMPOSITE(ACLK_RKVENC, "aclk_rkvenc", mux_4plls_p, 0,
@@ -545,9 +545,9 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKGATE_CON(6), 3, GFLAGS),
FACTOR_GATE(HCLK_RKVENC, "hclk_rkvenc", "aclk_rkvenc", 0, 1, 4,
RK3328_CLKGATE_CON(11), 4, GFLAGS),
- GATE(0, "aclk_rkvenc_niu", "aclk_rkvenc", CLK_IGNORE_UNUSED,
+ GATE(0, "aclk_rkvenc_niu", "aclk_rkvenc", 0,
RK3328_CLKGATE_CON(25), 0, GFLAGS),
- GATE(0, "hclk_rkvenc_niu", "hclk_rkvenc", CLK_IGNORE_UNUSED,
+ GATE(0, "hclk_rkvenc_niu", "hclk_rkvenc", 0,
RK3328_CLKGATE_CON(25), 1, GFLAGS),
GATE(ACLK_H265, "aclk_h265", "aclk_rkvenc", 0,
RK3328_CLKGATE_CON(25), 0, GFLAGS),
@@ -588,7 +588,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
COMPOSITE(ACLK_VOP_PRE, "aclk_vop_pre", mux_4plls_p, 0,
RK3328_CLKSEL_CON(39), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3328_CLKGATE_CON(5), 5, GFLAGS),
- GATE(0, "clk_hdmi_sfc", "xin24m", 0,
+ GATE(SCLK_HDMI_SFC, "sclk_hdmi_sfc", "xin24m", 0,
RK3328_CLKGATE_CON(5), 4, GFLAGS),
COMPOSITE_NODIV(0, "clk_cif_src", mux_2plls_p, 0,
@@ -602,7 +602,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKGATE_CON(5), 6, GFLAGS),
DIV(DCLK_HDMIPHY, "dclk_hdmiphy", "dclk_lcdc_src", 0,
RK3328_CLKSEL_CON(40), 3, 3, DFLAGS),
- MUX(DCLK_LCDC, "dclk_lcdc", mux_dclk_lcdc_p, 0,
+ MUX(DCLK_LCDC, "dclk_lcdc", mux_dclk_lcdc_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
RK3328_CLKSEL_CON(40), 1, 1, MFLAGS),
/*
@@ -709,14 +709,14 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
/* PD_VOP */
GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 0, RK3328_CLKGATE_CON(21), 10, GFLAGS),
- GATE(0, "aclk_rga_niu", "aclk_rga_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(22), 3, GFLAGS),
+ GATE(0, "aclk_rga_niu", "aclk_rga_pre", 0, RK3328_CLKGATE_CON(22), 3, GFLAGS),
GATE(ACLK_VOP, "aclk_vop", "aclk_vop_pre", 0, RK3328_CLKGATE_CON(21), 2, GFLAGS),
- GATE(0, "aclk_vop_niu", "aclk_vop_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(21), 4, GFLAGS),
+ GATE(0, "aclk_vop_niu", "aclk_vop_pre", 0, RK3328_CLKGATE_CON(21), 4, GFLAGS),
GATE(ACLK_IEP, "aclk_iep", "aclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 6, GFLAGS),
GATE(ACLK_CIF, "aclk_cif", "aclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 8, GFLAGS),
GATE(ACLK_HDCP, "aclk_hdcp", "aclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 15, GFLAGS),
- GATE(0, "aclk_vio_niu", "aclk_vio_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(22), 2, GFLAGS),
+ GATE(0, "aclk_vio_niu", "aclk_vio_pre", 0, RK3328_CLKGATE_CON(22), 2, GFLAGS),
GATE(HCLK_VOP, "hclk_vop", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 3, GFLAGS),
GATE(0, "hclk_vop_niu", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 5, GFLAGS),
@@ -724,10 +724,10 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
GATE(HCLK_CIF, "hclk_cif", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 9, GFLAGS),
GATE(HCLK_RGA, "hclk_rga", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 11, GFLAGS),
GATE(0, "hclk_ahb1tom", "hclk_vio_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(21), 12, GFLAGS),
- GATE(0, "pclk_vio_h2p", "hclk_vio_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(21), 13, GFLAGS),
- GATE(0, "hclk_vio_h2p", "hclk_vio_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(21), 14, GFLAGS),
+ GATE(0, "pclk_vio_h2p", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 13, GFLAGS),
+ GATE(0, "hclk_vio_h2p", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(21), 14, GFLAGS),
GATE(HCLK_HDCP, "hclk_hdcp", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(22), 0, GFLAGS),
- GATE(HCLK_VIO, "hclk_vio", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(22), 1, GFLAGS),
+ GATE(0, "hclk_vio_niu", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(22), 1, GFLAGS),
GATE(PCLK_HDMI, "pclk_hdmi", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(22), 4, GFLAGS),
GATE(PCLK_HDCP, "pclk_hdcp", "hclk_vio_pre", 0, RK3328_CLKGATE_CON(22), 5, GFLAGS),
@@ -743,19 +743,19 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
GATE(HCLK_HOST0_ARB, "hclk_host0_arb", "hclk_peri", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(19), 7, GFLAGS),
GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 8, GFLAGS),
GATE(HCLK_OTG_PMU, "hclk_otg_pmu", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 9, GFLAGS),
- GATE(0, "hclk_peri_niu", "hclk_peri", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(19), 12, GFLAGS),
- GATE(0, "pclk_peri_niu", "hclk_peri", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(19), 13, GFLAGS),
+ GATE(0, "hclk_peri_niu", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 12, GFLAGS),
+ GATE(0, "pclk_peri_niu", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 13, GFLAGS),
/* PD_GMAC */
GATE(ACLK_MAC2PHY, "aclk_mac2phy", "aclk_gmac", 0, RK3328_CLKGATE_CON(26), 0, GFLAGS),
GATE(ACLK_MAC2IO, "aclk_mac2io", "aclk_gmac", 0, RK3328_CLKGATE_CON(26), 2, GFLAGS),
- GATE(0, "aclk_gmac_niu", "aclk_gmac", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(26), 4, GFLAGS),
+ GATE(0, "aclk_gmac_niu", "aclk_gmac", 0, RK3328_CLKGATE_CON(26), 4, GFLAGS),
GATE(PCLK_MAC2PHY, "pclk_mac2phy", "pclk_gmac", 0, RK3328_CLKGATE_CON(26), 1, GFLAGS),
GATE(PCLK_MAC2IO, "pclk_mac2io", "pclk_gmac", 0, RK3328_CLKGATE_CON(26), 3, GFLAGS),
- GATE(0, "pclk_gmac_niu", "pclk_gmac", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(26), 5, GFLAGS),
+ GATE(0, "pclk_gmac_niu", "pclk_gmac", 0, RK3328_CLKGATE_CON(26), 5, GFLAGS),
/* PD_BUS */
- GATE(0, "aclk_bus_niu", "aclk_bus_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(15), 12, GFLAGS),
+ GATE(0, "aclk_bus_niu", "aclk_bus_pre", 0, RK3328_CLKGATE_CON(15), 12, GFLAGS),
GATE(ACLK_DCF, "aclk_dcf", "aclk_bus_pre", 0, RK3328_CLKGATE_CON(15), 11, GFLAGS),
GATE(ACLK_TSP, "aclk_tsp", "aclk_bus_pre", 0, RK3328_CLKGATE_CON(17), 12, GFLAGS),
GATE(0, "aclk_intmem", "aclk_bus_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(15), 0, GFLAGS),
@@ -769,10 +769,10 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
GATE(HCLK_TSP, "hclk_tsp", "hclk_bus_pre", 0, RK3328_CLKGATE_CON(17), 11, GFLAGS),
GATE(HCLK_CRYPTO_MST, "hclk_crypto_mst", "hclk_bus_pre", 0, RK3328_CLKGATE_CON(15), 7, GFLAGS),
GATE(HCLK_CRYPTO_SLV, "hclk_crypto_slv", "hclk_bus_pre", 0, RK3328_CLKGATE_CON(15), 8, GFLAGS),
- GATE(0, "hclk_bus_niu", "hclk_bus_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(15), 13, GFLAGS),
+ GATE(0, "hclk_bus_niu", "hclk_bus_pre", 0, RK3328_CLKGATE_CON(15), 13, GFLAGS),
GATE(HCLK_PDM, "hclk_pdm", "hclk_bus_pre", 0, RK3328_CLKGATE_CON(28), 0, GFLAGS),
- GATE(0, "pclk_bus_niu", "pclk_bus", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(15), 14, GFLAGS),
+ GATE(0, "pclk_bus_niu", "pclk_bus", 0, RK3328_CLKGATE_CON(15), 14, GFLAGS),
GATE(0, "pclk_efuse", "pclk_bus", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(15), 9, GFLAGS),
GATE(0, "pclk_otp", "pclk_bus", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(28), 4, GFLAGS),
GATE(PCLK_I2C0, "pclk_i2c0", "pclk_bus", 0, RK3328_CLKGATE_CON(15), 10, GFLAGS),
@@ -807,37 +807,42 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
GATE(0, "pclk_acodecphy", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 5, GFLAGS),
GATE(PCLK_HDMIPHY, "pclk_hdmiphy", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 7, GFLAGS),
GATE(0, "pclk_vdacphy", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 8, GFLAGS),
- GATE(0, "pclk_phy_niu", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(15), 15, GFLAGS),
+ GATE(0, "pclk_phy_niu", "pclk_phy_pre", 0, RK3328_CLKGATE_CON(15), 15, GFLAGS),
/* PD_MMC */
- MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc",
+ MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "clk_sdmmc",
RK3328_SDMMC_CON0, 1),
- MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc",
+ MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "clk_sdmmc",
RK3328_SDMMC_CON1, 1),
- MMC(SCLK_SDIO_DRV, "sdio_drv", "sclk_sdio",
+ MMC(SCLK_SDIO_DRV, "sdio_drv", "clk_sdio",
RK3328_SDIO_CON0, 1),
- MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "sclk_sdio",
+ MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "clk_sdio",
RK3328_SDIO_CON1, 1),
- MMC(SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc",
+ MMC(SCLK_EMMC_DRV, "emmc_drv", "clk_emmc",
RK3328_EMMC_CON0, 1),
- MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc",
+ MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "clk_emmc",
RK3328_EMMC_CON1, 1),
- MMC(SCLK_SDMMC_EXT_DRV, "sdmmc_ext_drv", "sclk_sdmmc_ext",
+ MMC(SCLK_SDMMC_EXT_DRV, "sdmmc_ext_drv", "clk_sdmmc_ext",
RK3328_SDMMC_EXT_CON0, 1),
- MMC(SCLK_SDMMC_EXT_SAMPLE, "sdmmc_ext_sample", "sclk_sdmmc_ext",
+ MMC(SCLK_SDMMC_EXT_SAMPLE, "sdmmc_ext_sample", "clk_sdmmc_ext",
RK3328_SDMMC_EXT_CON1, 1),
};
static const char *const rk3328_critical_clocks[] __initconst = {
"aclk_bus",
+ "aclk_bus_niu",
"pclk_bus",
+ "pclk_bus_niu",
"hclk_bus",
+ "hclk_bus_niu",
"aclk_peri",
"hclk_peri",
+ "hclk_peri_niu",
"pclk_peri",
+ "pclk_peri_niu",
"pclk_dbg",
"aclk_core_niu",
"aclk_gic400",
@@ -861,6 +866,20 @@ static const char *const rk3328_critical_clocks[] __initconst = {
"aclk_rga_niu",
"pclk_vio_h2p",
"hclk_vio_h2p",
+ "aclk_vio_niu",
+ "hclk_vio_niu",
+ "aclk_vop_niu",
+ "hclk_vop_niu",
+ "aclk_gpu_niu",
+ "aclk_rkvdec_niu",
+ "hclk_rkvdec_niu",
+ "aclk_vpu_niu",
+ "hclk_vpu_niu",
+ "aclk_rkvenc_niu",
+ "hclk_rkvenc_niu",
+ "aclk_gmac_niu",
+ "pclk_gmac_niu",
+ "pclk_phy_niu",
};
static void __init rk3328_clk_init(struct device_node *np)
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index 6847120b61cd..bca10d618f0a 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -57,6 +57,7 @@ static struct rockchip_pll_rate_table rk3399_pll_rates[] = {
RK3036_PLL_RATE(1656000000, 1, 69, 1, 1, 1, 0),
RK3036_PLL_RATE(1632000000, 1, 68, 1, 1, 1, 0),
RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1600000000, 3, 200, 1, 1, 1, 0),
RK3036_PLL_RATE(1584000000, 1, 66, 1, 1, 1, 0),
RK3036_PLL_RATE(1560000000, 1, 65, 1, 1, 1, 0),
RK3036_PLL_RATE(1536000000, 1, 64, 1, 1, 1, 0),
@@ -670,7 +671,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
RK3399_CLKGATE_CON(9), 7, GFLAGS,
&rk3399_uart3_fracmux),
- COMPOSITE(0, "pclk_ddr", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED,
+ COMPOSITE(PCLK_DDR, "pclk_ddr", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED,
RK3399_CLKSEL_CON(6), 15, 1, MFLAGS, 8, 5, DFLAGS,
RK3399_CLKGATE_CON(3), 4, GFLAGS),
@@ -886,7 +887,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
RK3399_CLKGATE_CON(31), 8, GFLAGS),
/* sdio & sdmmc */
- COMPOSITE(0, "hclk_sd", mux_pll_src_cpll_gpll_p, 0,
+ COMPOSITE(HCLK_SD, "hclk_sd", mux_pll_src_cpll_gpll_p, 0,
RK3399_CLKSEL_CON(13), 15, 1, MFLAGS, 8, 5, DFLAGS,
RK3399_CLKGATE_CON(12), 13, GFLAGS),
GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_sd", 0,
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 35dbd63c2f49..3cd8ad59e0b7 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -57,6 +57,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
struct clk_divider *div = NULL;
const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
*gate_ops = NULL;
+ int ret;
if (num_parents > 1) {
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
@@ -74,8 +75,10 @@ static struct clk *rockchip_clk_register_branch(const char *name,
if (gate_offset >= 0) {
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
- if (!gate)
+ if (!gate) {
+ ret = -ENOMEM;
goto err_gate;
+ }
gate->flags = gate_flags;
gate->reg = base + gate_offset;
@@ -86,8 +89,10 @@ static struct clk *rockchip_clk_register_branch(const char *name,
if (div_width > 0) {
div = kzalloc(sizeof(*div), GFP_KERNEL);
- if (!div)
+ if (!div) {
+ ret = -ENOMEM;
goto err_div;
+ }
div->flags = div_flags;
div->reg = base + muxdiv_offset;
@@ -106,12 +111,19 @@ static struct clk *rockchip_clk_register_branch(const char *name,
gate ? &gate->hw : NULL, gate_ops,
flags);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto err_composite;
+ }
+
return clk;
+err_composite:
+ kfree(div);
err_div:
kfree(gate);
err_gate:
kfree(mux);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
}
struct rockchip_clk_frac {
@@ -291,8 +303,10 @@ static struct clk *rockchip_clk_register_frac_branch(
init.num_parents = child->num_parents;
mux_clk = clk_register(NULL, &frac_mux->hw);
- if (IS_ERR(mux_clk))
+ if (IS_ERR(mux_clk)) {
+ kfree(frac);
return clk;
+ }
rockchip_clk_add_lookup(ctx, mux_clk, child->id);
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index ef8900bc077f..513826393158 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -8,9 +8,11 @@ obj-$(CONFIG_SOC_EXYNOS3250) += clk-exynos3250.o
obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4.o
obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4412-isp.o
obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
+obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5-subcmu.o
obj-$(CONFIG_SOC_EXYNOS5260) += clk-exynos5260.o
obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o
obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o
+obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5-subcmu.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos5433.o
obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o
obj-$(CONFIG_EXYNOS_AUDSS_CLK_CON) += clk-exynos-audss.o
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 5bfc92ee3129..b4b057c7301c 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -143,10 +143,8 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(reg_base)) {
- dev_err(dev, "failed to map audss registers\n");
+ if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
- }
epll = ERR_PTR(-ENODEV);
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index 1b81e283f605..27c9d23657b3 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -670,73 +670,73 @@ static const struct samsung_gate_clock gate_clks[] __initconst = {
/* APLL & MPLL & BPLL & UPLL */
static const struct samsung_pll_rate_table exynos3250_pll_rates[] __initconst = {
- PLL_35XX_RATE(1200000000, 400, 4, 1),
- PLL_35XX_RATE(1100000000, 275, 3, 1),
- PLL_35XX_RATE(1066000000, 533, 6, 1),
- PLL_35XX_RATE(1000000000, 250, 3, 1),
- PLL_35XX_RATE( 960000000, 320, 4, 1),
- PLL_35XX_RATE( 900000000, 300, 4, 1),
- PLL_35XX_RATE( 850000000, 425, 6, 1),
- PLL_35XX_RATE( 800000000, 200, 3, 1),
- PLL_35XX_RATE( 700000000, 175, 3, 1),
- PLL_35XX_RATE( 667000000, 667, 12, 1),
- PLL_35XX_RATE( 600000000, 400, 4, 2),
- PLL_35XX_RATE( 533000000, 533, 6, 2),
- PLL_35XX_RATE( 520000000, 260, 3, 2),
- PLL_35XX_RATE( 500000000, 250, 3, 2),
- PLL_35XX_RATE( 400000000, 200, 3, 2),
- PLL_35XX_RATE( 200000000, 200, 3, 3),
- PLL_35XX_RATE( 100000000, 200, 3, 4),
+ PLL_35XX_RATE(24 * MHZ, 1200000000, 400, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 1100000000, 275, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 1066000000, 533, 6, 1),
+ PLL_35XX_RATE(24 * MHZ, 1000000000, 250, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 960000000, 320, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 900000000, 300, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 850000000, 425, 6, 1),
+ PLL_35XX_RATE(24 * MHZ, 800000000, 200, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 700000000, 175, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 667000000, 667, 12, 1),
+ PLL_35XX_RATE(24 * MHZ, 600000000, 400, 4, 2),
+ PLL_35XX_RATE(24 * MHZ, 533000000, 533, 6, 2),
+ PLL_35XX_RATE(24 * MHZ, 520000000, 260, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 500000000, 250, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 400000000, 200, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 200000000, 200, 3, 3),
+ PLL_35XX_RATE(24 * MHZ, 100000000, 200, 3, 4),
{ /* sentinel */ }
};
/* EPLL */
static const struct samsung_pll_rate_table exynos3250_epll_rates[] __initconst = {
- PLL_36XX_RATE(800000000, 200, 3, 1, 0),
- PLL_36XX_RATE(288000000, 96, 2, 2, 0),
- PLL_36XX_RATE(192000000, 128, 2, 3, 0),
- PLL_36XX_RATE(144000000, 96, 2, 3, 0),
- PLL_36XX_RATE( 96000000, 128, 2, 4, 0),
- PLL_36XX_RATE( 84000000, 112, 2, 4, 0),
- PLL_36XX_RATE( 80000004, 106, 2, 4, 43691),
- PLL_36XX_RATE( 73728000, 98, 2, 4, 19923),
- PLL_36XX_RATE( 67737598, 270, 3, 5, 62285),
- PLL_36XX_RATE( 65535999, 174, 2, 5, 49982),
- PLL_36XX_RATE( 50000000, 200, 3, 5, 0),
- PLL_36XX_RATE( 49152002, 131, 2, 5, 4719),
- PLL_36XX_RATE( 48000000, 128, 2, 5, 0),
- PLL_36XX_RATE( 45158401, 180, 3, 5, 41524),
+ PLL_36XX_RATE(24 * MHZ, 800000000, 200, 3, 1, 0),
+ PLL_36XX_RATE(24 * MHZ, 288000000, 96, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 192000000, 128, 2, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 144000000, 96, 2, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 96000000, 128, 2, 4, 0),
+ PLL_36XX_RATE(24 * MHZ, 84000000, 112, 2, 4, 0),
+ PLL_36XX_RATE(24 * MHZ, 80000003, 106, 2, 4, 43691),
+ PLL_36XX_RATE(24 * MHZ, 73728000, 98, 2, 4, 19923),
+ PLL_36XX_RATE(24 * MHZ, 67737598, 270, 3, 5, 62285),
+ PLL_36XX_RATE(24 * MHZ, 65535999, 174, 2, 5, 49982),
+ PLL_36XX_RATE(24 * MHZ, 50000000, 200, 3, 5, 0),
+ PLL_36XX_RATE(24 * MHZ, 49152002, 131, 2, 5, 4719),
+ PLL_36XX_RATE(24 * MHZ, 48000000, 128, 2, 5, 0),
+ PLL_36XX_RATE(24 * MHZ, 45158401, 180, 3, 5, 41524),
{ /* sentinel */ }
};
/* VPLL */
static const struct samsung_pll_rate_table exynos3250_vpll_rates[] __initconst = {
- PLL_36XX_RATE(600000000, 100, 2, 1, 0),
- PLL_36XX_RATE(533000000, 266, 3, 2, 32768),
- PLL_36XX_RATE(519230987, 173, 2, 2, 5046),
- PLL_36XX_RATE(500000000, 250, 3, 2, 0),
- PLL_36XX_RATE(445500000, 148, 2, 2, 32768),
- PLL_36XX_RATE(445055007, 148, 2, 2, 23047),
- PLL_36XX_RATE(400000000, 200, 3, 2, 0),
- PLL_36XX_RATE(371250000, 123, 2, 2, 49152),
- PLL_36XX_RATE(370878997, 185, 3, 2, 28803),
- PLL_36XX_RATE(340000000, 170, 3, 2, 0),
- PLL_36XX_RATE(335000015, 111, 2, 2, 43691),
- PLL_36XX_RATE(333000000, 111, 2, 2, 0),
- PLL_36XX_RATE(330000000, 110, 2, 2, 0),
- PLL_36XX_RATE(320000015, 106, 2, 2, 43691),
- PLL_36XX_RATE(300000000, 100, 2, 2, 0),
- PLL_36XX_RATE(275000000, 275, 3, 3, 0),
- PLL_36XX_RATE(222750000, 148, 2, 3, 32768),
- PLL_36XX_RATE(222528007, 148, 2, 3, 23069),
- PLL_36XX_RATE(160000000, 160, 3, 3, 0),
- PLL_36XX_RATE(148500000, 99, 2, 3, 0),
- PLL_36XX_RATE(148352005, 98, 2, 3, 59070),
- PLL_36XX_RATE(108000000, 144, 2, 4, 0),
- PLL_36XX_RATE( 74250000, 99, 2, 4, 0),
- PLL_36XX_RATE( 74176002, 98, 3, 4, 59070),
- PLL_36XX_RATE( 54054000, 216, 3, 5, 14156),
- PLL_36XX_RATE( 54000000, 144, 2, 5, 0),
+ PLL_36XX_RATE(24 * MHZ, 600000000, 100, 2, 1, 0),
+ PLL_36XX_RATE(24 * MHZ, 533000000, 266, 3, 2, 32768),
+ PLL_36XX_RATE(24 * MHZ, 519230987, 173, 2, 2, 5046),
+ PLL_36XX_RATE(24 * MHZ, 500000000, 250, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 445500000, 148, 2, 2, 32768),
+ PLL_36XX_RATE(24 * MHZ, 445055007, 148, 2, 2, 23047),
+ PLL_36XX_RATE(24 * MHZ, 400000000, 200, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 371250000, 123, 2, 2, 49152),
+ PLL_36XX_RATE(24 * MHZ, 370878997, 185, 3, 2, 28803),
+ PLL_36XX_RATE(24 * MHZ, 340000000, 170, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 335000015, 111, 2, 2, 43691),
+ PLL_36XX_RATE(24 * MHZ, 333000000, 111, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 330000000, 110, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 320000015, 106, 2, 2, 43691),
+ PLL_36XX_RATE(24 * MHZ, 300000000, 100, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 275000000, 275, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 222750000, 148, 2, 3, 32768),
+ PLL_36XX_RATE(24 * MHZ, 222528007, 148, 2, 3, 23069),
+ PLL_36XX_RATE(24 * MHZ, 160000000, 160, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 148500000, 99, 2, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 148352005, 98, 2, 3, 59070),
+ PLL_36XX_RATE(24 * MHZ, 108000000, 144, 2, 4, 0),
+ PLL_36XX_RATE(24 * MHZ, 74250000, 99, 2, 4, 0),
+ PLL_36XX_RATE(24 * MHZ, 74176002, 98, 2, 4, 59070),
+ PLL_36XX_RATE(24 * MHZ, 54054000, 216, 3, 5, 14156),
+ PLL_36XX_RATE(24 * MHZ, 54000000, 144, 2, 5, 0),
{ /* sentinel */ }
};
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 134f25f2a913..0421960eb963 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -1266,77 +1266,78 @@ static const struct of_device_id ext_clk_match[] __initconst = {
/* PLLs PMS values */
static const struct samsung_pll_rate_table exynos4210_apll_rates[] __initconst = {
- PLL_45XX_RATE(1200000000, 150, 3, 1, 28),
- PLL_45XX_RATE(1000000000, 250, 6, 1, 28),
- PLL_45XX_RATE( 800000000, 200, 6, 1, 28),
- PLL_45XX_RATE( 666857142, 389, 14, 1, 13),
- PLL_45XX_RATE( 600000000, 100, 4, 1, 13),
- PLL_45XX_RATE( 533000000, 533, 24, 1, 5),
- PLL_45XX_RATE( 500000000, 250, 6, 2, 28),
- PLL_45XX_RATE( 400000000, 200, 6, 2, 28),
- PLL_45XX_RATE( 200000000, 200, 6, 3, 28),
+ PLL_4508_RATE(24 * MHZ, 1200000000, 150, 3, 1, 28),
+ PLL_4508_RATE(24 * MHZ, 1000000000, 250, 6, 1, 28),
+ PLL_4508_RATE(24 * MHZ, 800000000, 200, 6, 1, 28),
+ PLL_4508_RATE(24 * MHZ, 666857142, 389, 14, 1, 13),
+ PLL_4508_RATE(24 * MHZ, 600000000, 100, 4, 1, 13),
+ PLL_4508_RATE(24 * MHZ, 533000000, 533, 24, 1, 5),
+ PLL_4508_RATE(24 * MHZ, 500000000, 250, 6, 2, 28),
+ PLL_4508_RATE(24 * MHZ, 400000000, 200, 6, 2, 28),
+ PLL_4508_RATE(24 * MHZ, 200000000, 200, 6, 3, 28),
{ /* sentinel */ }
};
static const struct samsung_pll_rate_table exynos4210_epll_rates[] __initconst = {
- PLL_4600_RATE(192000000, 48, 3, 1, 0, 0),
- PLL_4600_RATE(180633605, 45, 3, 1, 10381, 0),
- PLL_4600_RATE(180000000, 45, 3, 1, 0, 0),
- PLL_4600_RATE( 73727996, 73, 3, 3, 47710, 1),
- PLL_4600_RATE( 67737602, 90, 4, 3, 20762, 1),
- PLL_4600_RATE( 49151992, 49, 3, 3, 9961, 0),
- PLL_4600_RATE( 45158401, 45, 3, 3, 10381, 0),
+ PLL_4600_RATE(24 * MHZ, 192000000, 48, 3, 1, 0, 0),
+ PLL_4600_RATE(24 * MHZ, 180633605, 45, 3, 1, 10381, 0),
+ PLL_4600_RATE(24 * MHZ, 180000000, 45, 3, 1, 0, 0),
+ PLL_4600_RATE(24 * MHZ, 73727996, 73, 3, 3, 47710, 1),
+ PLL_4600_RATE(24 * MHZ, 67737602, 90, 4, 3, 20762, 1),
+ PLL_4600_RATE(24 * MHZ, 49151992, 49, 3, 3, 9961, 0),
+ PLL_4600_RATE(24 * MHZ, 45158401, 45, 3, 3, 10381, 0),
{ /* sentinel */ }
};
static const struct samsung_pll_rate_table exynos4210_vpll_rates[] __initconst = {
- PLL_4650_RATE(360000000, 44, 3, 0, 1024, 0, 14, 0),
- PLL_4650_RATE(324000000, 53, 2, 1, 1024, 1, 1, 1),
- PLL_4650_RATE(259617187, 63, 3, 1, 1950, 0, 20, 1),
- PLL_4650_RATE(110000000, 53, 3, 2, 2048, 0, 17, 0),
- PLL_4650_RATE( 55360351, 53, 3, 3, 2417, 0, 17, 0),
+ PLL_4650_RATE(24 * MHZ, 360000000, 44, 3, 0, 1024, 0, 14, 0),
+ PLL_4650_RATE(24 * MHZ, 324000000, 53, 2, 1, 1024, 1, 1, 1),
+ PLL_4650_RATE(24 * MHZ, 259617187, 63, 3, 1, 1950, 0, 20, 1),
+ PLL_4650_RATE(24 * MHZ, 110000000, 53, 3, 2, 2048, 0, 17, 0),
+ PLL_4650_RATE(24 * MHZ, 55360351, 53, 3, 3, 2417, 0, 17, 0),
{ /* sentinel */ }
};
static const struct samsung_pll_rate_table exynos4x12_apll_rates[] __initconst = {
- PLL_35XX_RATE(1704000000, 213, 3, 0),
- PLL_35XX_RATE(1600000000, 200, 3, 0),
- PLL_35XX_RATE(1500000000, 250, 4, 0),
- PLL_35XX_RATE(1400000000, 175, 3, 0),
- PLL_35XX_RATE(1300000000, 325, 6, 0),
- PLL_35XX_RATE(1200000000, 200, 4, 0),
- PLL_35XX_RATE(1100000000, 275, 6, 0),
- PLL_35XX_RATE(1000000000, 125, 3, 0),
- PLL_35XX_RATE( 900000000, 150, 4, 0),
- PLL_35XX_RATE( 800000000, 100, 3, 0),
- PLL_35XX_RATE( 700000000, 175, 3, 1),
- PLL_35XX_RATE( 600000000, 200, 4, 1),
- PLL_35XX_RATE( 500000000, 125, 3, 1),
- PLL_35XX_RATE( 400000000, 100, 3, 1),
- PLL_35XX_RATE( 300000000, 200, 4, 2),
- PLL_35XX_RATE( 200000000, 100, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 1704000000, 213, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1600000000, 200, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1500000000, 250, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 1400000000, 175, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1300000000, 325, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1200000000, 200, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 1100000000, 275, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1000000000, 125, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 900000000, 150, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 800000000, 100, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 700000000, 175, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 600000000, 200, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 500000000, 125, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 400000000, 100, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 300000000, 200, 4, 2),
+ PLL_35XX_RATE(24 * MHZ, 200000000, 100, 3, 2),
{ /* sentinel */ }
};
static const struct samsung_pll_rate_table exynos4x12_epll_rates[] __initconst = {
- PLL_36XX_RATE(192000000, 48, 3, 1, 0),
- PLL_36XX_RATE(180633605, 45, 3, 1, 10381),
- PLL_36XX_RATE(180000000, 45, 3, 1, 0),
- PLL_36XX_RATE( 73727996, 73, 3, 3, 47710),
- PLL_36XX_RATE( 67737602, 90, 4, 3, 20762),
- PLL_36XX_RATE( 49151992, 49, 3, 3, 9961),
- PLL_36XX_RATE( 45158401, 45, 3, 3, 10381),
+ PLL_36XX_RATE(24 * MHZ, 196608001, 197, 3, 3, -25690),
+ PLL_36XX_RATE(24 * MHZ, 192000000, 48, 3, 1, 0),
+ PLL_36XX_RATE(24 * MHZ, 180633605, 45, 3, 1, 10381),
+ PLL_36XX_RATE(24 * MHZ, 180000000, 45, 3, 1, 0),
+ PLL_36XX_RATE(24 * MHZ, 73727996, 73, 3, 3, 47710),
+ PLL_36XX_RATE(24 * MHZ, 67737602, 90, 4, 3, 20762),
+ PLL_36XX_RATE(24 * MHZ, 49151992, 49, 3, 3, 9961),
+ PLL_36XX_RATE(24 * MHZ, 45158401, 45, 3, 3, 10381),
{ /* sentinel */ }
};
static const struct samsung_pll_rate_table exynos4x12_vpll_rates[] __initconst = {
- PLL_36XX_RATE(533000000, 133, 3, 1, 16384),
- PLL_36XX_RATE(440000000, 110, 3, 1, 0),
- PLL_36XX_RATE(350000000, 175, 3, 2, 0),
- PLL_36XX_RATE(266000000, 133, 3, 2, 0),
- PLL_36XX_RATE(160000000, 160, 3, 3, 0),
- PLL_36XX_RATE(106031250, 53, 3, 2, 1024),
- PLL_36XX_RATE( 53015625, 53, 3, 3, 1024),
+ PLL_36XX_RATE(24 * MHZ, 533000000, 133, 3, 1, 16384),
+ PLL_36XX_RATE(24 * MHZ, 440000000, 110, 3, 1, 0),
+ PLL_36XX_RATE(24 * MHZ, 350000000, 175, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 266000000, 133, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 160000000, 160, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 106031250, 53, 3, 2, 1024),
+ PLL_36XX_RATE(24 * MHZ, 53015625, 53, 3, 3, 1024),
{ /* sentinel */ }
};
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
new file mode 100644
index 000000000000..93306283d764
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 Samsung Electronics Co., Ltd.
+// Author: Marek Szyprowski <m.szyprowski@samsung.com>
+// Common Clock Framework support for Exynos5 power-domain dependent clocks
+
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+
+#include "clk.h"
+#include "clk-exynos5-subcmu.h"
+
+static struct samsung_clk_provider *ctx;
+static const struct exynos5_subcmu_info *cmu;
+static int nr_cmus;
+
+static void exynos5_subcmu_clk_save(void __iomem *base,
+ struct exynos5_subcmu_reg_dump *rd,
+ unsigned int num_regs)
+{
+ for (; num_regs > 0; --num_regs, ++rd) {
+ rd->save = readl(base + rd->offset);
+ writel((rd->save & ~rd->mask) | rd->value, base + rd->offset);
+ rd->save &= rd->mask;
+ }
+}
+
+static void exynos5_subcmu_clk_restore(void __iomem *base,
+ struct exynos5_subcmu_reg_dump *rd,
+ unsigned int num_regs)
+{
+ for (; num_regs > 0; --num_regs, ++rd)
+ writel((readl(base + rd->offset) & ~rd->mask) | rd->save,
+ base + rd->offset);
+}
+
+static void exynos5_subcmu_defer_gate(struct samsung_clk_provider *ctx,
+ const struct samsung_gate_clock *list, int nr_clk)
+{
+ while (nr_clk--)
+ samsung_clk_add_lookup(ctx, ERR_PTR(-EPROBE_DEFER), list++->id);
+}
+
+/*
+ * Pass the needed clock provider context and register sub-CMU clocks
+ *
+ * NOTE: This function has to be called from the main, OF_CLK_DECLARE-
+ * initialized clock provider driver. This happens very early during the
+ * boot process. Then this driver, during core_initcall, registers two
+ * platform drivers: one which binds to the same device-tree node as the
+ * OF_CLK_DECLARE driver, and a second one for handling its per-domain
+ * child devices. Those platform drivers are bound to their devices a bit
+ * later, in arch_initcall, when the OF core populates all device-tree nodes.
+ */
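+/*
+ * Typical usage (see the Exynos5250 driver changes later in this patch):
+ *
+ *	exynos5_subcmus_init(ctx, 1, &exynos5250_disp_subcmu);
+ */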
+void exynos5_subcmus_init(struct samsung_clk_provider *_ctx, int _nr_cmus,
+ const struct exynos5_subcmu_info *_cmu)
+{
+ ctx = _ctx;
+ cmu = _cmu;
+ nr_cmus = _nr_cmus;
+
+ for (; _nr_cmus--; _cmu++) {
+ exynos5_subcmu_defer_gate(ctx, _cmu->gate_clks,
+ _cmu->nr_gate_clks);
+ exynos5_subcmu_clk_save(ctx->reg_base, _cmu->suspend_regs,
+ _cmu->nr_suspend_regs);
+ }
+}
+
+static int __maybe_unused exynos5_subcmu_suspend(struct device *dev)
+{
+ struct exynos5_subcmu_info *info = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ exynos5_subcmu_clk_save(ctx->reg_base, info->suspend_regs,
+ info->nr_suspend_regs);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
+ return 0;
+}
+
+static int __maybe_unused exynos5_subcmu_resume(struct device *dev)
+{
+ struct exynos5_subcmu_info *info = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ exynos5_subcmu_clk_restore(ctx->reg_base, info->suspend_regs,
+ info->nr_suspend_regs);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
+ return 0;
+}
+
+static int __init exynos5_subcmu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos5_subcmu_info *info = dev_get_drvdata(dev);
+
+ pm_runtime_set_suspended(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get(dev);
+
+ ctx->dev = dev;
+ samsung_clk_register_div(ctx, info->div_clks, info->nr_div_clks);
+ samsung_clk_register_gate(ctx, info->gate_clks, info->nr_gate_clks);
+ ctx->dev = NULL;
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops exynos5_subcmu_pm_ops = {
+ SET_RUNTIME_PM_OPS(exynos5_subcmu_suspend,
+ exynos5_subcmu_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static struct platform_driver exynos5_subcmu_driver __refdata = {
+ .driver = {
+ .name = "exynos5-subcmu",
+ .suppress_bind_attrs = true,
+ .pm = &exynos5_subcmu_pm_ops,
+ },
+ .probe = exynos5_subcmu_probe,
+};
+
+static int __init exynos5_clk_register_subcmu(struct device *parent,
+ const struct exynos5_subcmu_info *info,
+ struct device_node *pd_node)
+{
+ struct of_phandle_args genpdspec = { .np = pd_node };
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc(info->pd_name, -1);
+ pdev->dev.parent = parent;
+ pdev->driver_override = "exynos5-subcmu";
+ platform_set_drvdata(pdev, (void *)info);
+ of_genpd_add_device(&genpdspec, &pdev->dev);
+ platform_device_add(pdev);
+
+ return 0;
+}
+
+static int __init exynos5_clk_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ const char *name;
+ int i;
+
+ for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") {
+ if (of_property_read_string(np, "label", &name) < 0)
+ continue;
+ for (i = 0; i < nr_cmus; i++)
+ if (strcmp(cmu[i].pd_name, name) == 0)
+ exynos5_clk_register_subcmu(&pdev->dev,
+ &cmu[i], np);
+ }
+ return 0;
+}
+
+static const struct of_device_id exynos5_clk_of_match[] = {
+ { .compatible = "samsung,exynos5250-clock", },
+ { .compatible = "samsung,exynos5420-clock", },
+ { .compatible = "samsung,exynos5800-clock", },
+ { },
+};
+
+static struct platform_driver exynos5_clk_driver __refdata = {
+ .driver = {
+ .name = "exynos5-clock",
+ .of_match_table = exynos5_clk_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = exynos5_clk_probe,
+};
+
+static int __init exynos5_clk_drv_init(void)
+{
+ platform_driver_register(&exynos5_clk_driver);
+ platform_driver_register(&exynos5_subcmu_driver);
+ return 0;
+}
+core_initcall(exynos5_clk_drv_init);
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.h b/drivers/clk/samsung/clk-exynos5-subcmu.h
new file mode 100644
index 000000000000..755ee8aaa3de
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __CLK_EXYNOS5_SUBCMU_H
+#define __CLK_EXYNOS5_SUBCMU_H
+
+struct exynos5_subcmu_reg_dump {
+ u32 offset;
+ u32 value;
+ u32 mask;
+ u32 save;
+};
+
+struct exynos5_subcmu_info {
+ const struct samsung_div_clock *div_clks;
+ unsigned int nr_div_clks;
+ const struct samsung_gate_clock *gate_clks;
+ unsigned int nr_gate_clks;
+ struct exynos5_subcmu_reg_dump *suspend_regs;
+ unsigned int nr_suspend_regs;
+ const char *pd_name;
+};
+
+void exynos5_subcmus_init(struct samsung_clk_provider *ctx, int nr_cmus,
+ const struct exynos5_subcmu_info *cmu);
+
+#endif
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 9b073c98a891..347fd80c351b 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -18,6 +18,7 @@
#include "clk.h"
#include "clk-cpu.h"
+#include "clk-exynos5-subcmu.h"
#define APLL_LOCK 0x0
#define APLL_CON0 0x100
@@ -560,6 +561,8 @@ static const struct samsung_gate_clock exynos5250_gate_clks[] __initconst = {
0),
GATE(CLK_GSCL3, "gscl3", "mout_aclk266_gscl_sub", GATE_IP_GSCL, 3, 0,
0),
+ GATE(CLK_CAMIF_TOP, "camif_top", "mout_aclk266_gscl_sub",
+ GATE_IP_GSCL, 4, 0, 0),
GATE(CLK_GSCL_WA, "gscl_wa", "div_gscl_wa", GATE_IP_GSCL, 5, 0, 0),
GATE(CLK_GSCL_WB, "gscl_wb", "div_gscl_wb", GATE_IP_GSCL, 6, 0, 0),
GATE(CLK_SMMU_GSCL0, "smmu_gscl0", "mout_aclk266_gscl_sub",
@@ -570,18 +573,11 @@ static const struct samsung_gate_clock exynos5250_gate_clks[] __initconst = {
GATE_IP_GSCL, 9, 0, 0),
GATE(CLK_SMMU_GSCL3, "smmu_gscl3", "mout_aclk266_gscl_sub",
GATE_IP_GSCL, 10, 0, 0),
+ GATE(CLK_SMMU_FIMC_LITE0, "smmu_fimc_lite0", "mout_aclk266_gscl_sub",
+ GATE_IP_GSCL, 11, 0, 0),
+ GATE(CLK_SMMU_FIMC_LITE1, "smmu_fimc_lite1", "mout_aclk266_gscl_sub",
+ GATE_IP_GSCL, 12, 0, 0),
- GATE(CLK_FIMD1, "fimd1", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 0, 0,
- 0),
- GATE(CLK_MIE1, "mie1", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 1, 0,
- 0),
- GATE(CLK_DSIM0, "dsim0", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 3, 0,
- 0),
- GATE(CLK_DP, "dp", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 4, 0, 0),
- GATE(CLK_MIXER, "mixer", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 5, 0,
- 0),
- GATE(CLK_HDMI, "hdmi", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 6, 0,
- 0),
GATE(CLK_MFC, "mfc", "mout_aclk333_sub", GATE_IP_MFC, 0, 0, 0),
GATE(CLK_SMMU_MFCR, "smmu_mfcr", "mout_aclk333_sub", GATE_IP_MFC, 1, 0,
@@ -671,10 +667,6 @@ static const struct samsung_gate_clock exynos5250_gate_clks[] __initconst = {
GATE(CLK_WDT, "wdt", "div_aclk66", GATE_IP_PERIS, 19, 0, 0),
GATE(CLK_RTC, "rtc", "div_aclk66", GATE_IP_PERIS, 20, 0, 0),
GATE(CLK_TMU, "tmu", "div_aclk66", GATE_IP_PERIS, 21, 0, 0),
- GATE(CLK_SMMU_TV, "smmu_tv", "mout_aclk200_disp1_sub",
- GATE_IP_DISP1, 9, 0, 0),
- GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "mout_aclk200_disp1_sub",
- GATE_IP_DISP1, 8, 0, 0),
GATE(CLK_SMMU_2D, "smmu_2d", "div_aclk200", GATE_IP_ACP, 7, 0, 0),
GATE(CLK_SMMU_FIMC_ISP, "smmu_fimc_isp", "mout_aclk_266_isp_sub",
GATE_IP_ISP0, 8, 0, 0),
@@ -698,48 +690,80 @@ static const struct samsung_gate_clock exynos5250_gate_clks[] __initconst = {
GATE_IP_ISP1, 7, 0, 0),
};
+static const struct samsung_gate_clock exynos5250_disp_gate_clks[] __initconst = {
+ GATE(CLK_FIMD1, "fimd1", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 0, 0,
+ 0),
+ GATE(CLK_MIE1, "mie1", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 1, 0,
+ 0),
+ GATE(CLK_DSIM0, "dsim0", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 3, 0,
+ 0),
+ GATE(CLK_DP, "dp", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 4, 0, 0),
+ GATE(CLK_MIXER, "mixer", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 5, 0,
+ 0),
+ GATE(CLK_HDMI, "hdmi", "mout_aclk200_disp1_sub", GATE_IP_DISP1, 6, 0,
+ 0),
+ GATE(CLK_SMMU_TV, "smmu_tv", "mout_aclk200_disp1_sub",
+ GATE_IP_DISP1, 9, 0, 0),
+ GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "mout_aclk200_disp1_sub",
+ GATE_IP_DISP1, 8, 0, 0),
+};
+
+static struct exynos5_subcmu_reg_dump exynos5250_disp_suspend_regs[] = {
+ { GATE_IP_DISP1, 0xffffffff, 0xffffffff }, /* DISP1 gates */
+ { SRC_TOP3, 0, BIT(4) }, /* MUX mout_aclk200_disp1_sub */
+ { SRC_TOP3, 0, BIT(6) }, /* MUX mout_aclk300_disp1_sub */
+};
+
+static const struct exynos5_subcmu_info exynos5250_disp_subcmu = {
+ .gate_clks = exynos5250_disp_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos5250_disp_gate_clks),
+ .suspend_regs = exynos5250_disp_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(exynos5250_disp_suspend_regs),
+ .pd_name = "DISP1",
+};
+
static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = {
/* sorted in descending order */
/* PLL_36XX_RATE(rate, m, p, s, k) */
- PLL_36XX_RATE(266000000, 266, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 266000000, 266, 3, 3, 0),
/* Not in UM, but need for eDP on snow */
- PLL_36XX_RATE(70500000, 94, 2, 4, 0),
+ PLL_36XX_RATE(24 * MHZ, 70500000, 94, 2, 4, 0),
{ },
};
static const struct samsung_pll_rate_table epll_24mhz_tbl[] __initconst = {
/* sorted in descending order */
/* PLL_36XX_RATE(rate, m, p, s, k) */
- PLL_36XX_RATE(192000000, 64, 2, 2, 0),
- PLL_36XX_RATE(180633600, 90, 3, 2, 20762),
- PLL_36XX_RATE(180000000, 90, 3, 2, 0),
- PLL_36XX_RATE(73728000, 98, 2, 4, 19923),
- PLL_36XX_RATE(67737600, 90, 2, 4, 20762),
- PLL_36XX_RATE(49152000, 98, 3, 4, 19923),
- PLL_36XX_RATE(45158400, 90, 3, 4, 20762),
- PLL_36XX_RATE(32768000, 131, 3, 5, 4719),
+ PLL_36XX_RATE(24 * MHZ, 192000000, 64, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 180633605, 90, 3, 2, 20762),
+ PLL_36XX_RATE(24 * MHZ, 180000000, 90, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 73728000, 98, 2, 4, 19923),
+ PLL_36XX_RATE(24 * MHZ, 67737602, 90, 2, 4, 20762),
+ PLL_36XX_RATE(24 * MHZ, 49152000, 98, 3, 4, 19923),
+ PLL_36XX_RATE(24 * MHZ, 45158401, 90, 3, 4, 20762),
+ PLL_36XX_RATE(24 * MHZ, 32768001, 131, 3, 5, 4719),
{ },
};
static const struct samsung_pll_rate_table apll_24mhz_tbl[] __initconst = {
/* sorted in descending order */
- /* PLL_35XX_RATE(rate, m, p, s) */
- PLL_35XX_RATE(1700000000, 425, 6, 0),
- PLL_35XX_RATE(1600000000, 200, 3, 0),
- PLL_35XX_RATE(1500000000, 250, 4, 0),
- PLL_35XX_RATE(1400000000, 175, 3, 0),
- PLL_35XX_RATE(1300000000, 325, 6, 0),
- PLL_35XX_RATE(1200000000, 200, 4, 0),
- PLL_35XX_RATE(1100000000, 275, 6, 0),
- PLL_35XX_RATE(1000000000, 125, 3, 0),
- PLL_35XX_RATE(900000000, 150, 4, 0),
- PLL_35XX_RATE(800000000, 100, 3, 0),
- PLL_35XX_RATE(700000000, 175, 3, 1),
- PLL_35XX_RATE(600000000, 200, 4, 1),
- PLL_35XX_RATE(500000000, 125, 3, 1),
- PLL_35XX_RATE(400000000, 100, 3, 1),
- PLL_35XX_RATE(300000000, 200, 4, 2),
- PLL_35XX_RATE(200000000, 100, 3, 2),
+ /* PLL_35XX_RATE(fin, rate, m, p, s) */
+ PLL_35XX_RATE(24 * MHZ, 1700000000, 425, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1600000000, 200, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1500000000, 250, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 1400000000, 175, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1300000000, 325, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1200000000, 200, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 1100000000, 275, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1000000000, 125, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 900000000, 150, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 800000000, 100, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 700000000, 175, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 600000000, 200, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 500000000, 125, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 400000000, 100, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 300000000, 200, 4, 2),
+ PLL_35XX_RATE(24 * MHZ, 200000000, 100, 3, 2),
};
static struct samsung_pll_clock exynos5250_plls[nr_plls] __initdata = {
@@ -859,10 +883,11 @@ static void __init exynos5250_clk_init(struct device_node *np)
__raw_writel(tmp, reg_base + PWR_CTRL2);
exynos5250_clk_sleep_init();
+ exynos5_subcmus_init(ctx, 1, &exynos5250_disp_subcmu);
samsung_clk_of_add_provider(np, ctx);
pr_info("Exynos5250: clock setup completed, armclk=%ld\n",
_get_rate("div_arm2"));
}
-CLK_OF_DECLARE(exynos5250_clk, "samsung,exynos5250-clock", exynos5250_clk_init);
+CLK_OF_DECLARE_DRIVER(exynos5250_clk, "samsung,exynos5250-clock", exynos5250_clk_init);
diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
index fd1d9bfc151b..2cc2583abd87 100644
--- a/drivers/clk/samsung/clk-exynos5260.c
+++ b/drivers/clk/samsung/clk-exynos5260.c
@@ -23,57 +23,57 @@
* DISP_PLL, EGL_PLL, KFC_PLL, MEM_PLL, BUS_PLL, MEDIA_PLL, G3D_PLL.
*/
static const struct samsung_pll_rate_table pll2550_24mhz_tbl[] __initconst = {
- PLL_35XX_RATE(1700000000, 425, 6, 0),
- PLL_35XX_RATE(1600000000, 200, 3, 0),
- PLL_35XX_RATE(1500000000, 250, 4, 0),
- PLL_35XX_RATE(1400000000, 175, 3, 0),
- PLL_35XX_RATE(1300000000, 325, 6, 0),
- PLL_35XX_RATE(1200000000, 400, 4, 1),
- PLL_35XX_RATE(1100000000, 275, 3, 1),
- PLL_35XX_RATE(1000000000, 250, 3, 1),
- PLL_35XX_RATE(933000000, 311, 4, 1),
- PLL_35XX_RATE(900000000, 300, 4, 1),
- PLL_35XX_RATE(800000000, 200, 3, 1),
- PLL_35XX_RATE(733000000, 733, 12, 1),
- PLL_35XX_RATE(700000000, 175, 3, 1),
- PLL_35XX_RATE(667000000, 667, 12, 1),
- PLL_35XX_RATE(633000000, 211, 4, 1),
- PLL_35XX_RATE(620000000, 310, 3, 2),
- PLL_35XX_RATE(600000000, 400, 4, 2),
- PLL_35XX_RATE(543000000, 362, 4, 2),
- PLL_35XX_RATE(533000000, 533, 6, 2),
- PLL_35XX_RATE(500000000, 250, 3, 2),
- PLL_35XX_RATE(450000000, 300, 4, 2),
- PLL_35XX_RATE(400000000, 200, 3, 2),
- PLL_35XX_RATE(350000000, 175, 3, 2),
- PLL_35XX_RATE(300000000, 400, 4, 3),
- PLL_35XX_RATE(266000000, 266, 3, 3),
- PLL_35XX_RATE(200000000, 200, 3, 3),
- PLL_35XX_RATE(160000000, 160, 3, 3),
+ PLL_35XX_RATE(24 * MHZ, 1700000000, 425, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1600000000, 200, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1500000000, 250, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 1400000000, 175, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1300000000, 325, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1200000000, 400, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 1100000000, 275, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 1000000000, 250, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 933000000, 311, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 900000000, 300, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 800000000, 200, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 733000000, 733, 12, 1),
+ PLL_35XX_RATE(24 * MHZ, 700000000, 175, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 667000000, 667, 12, 1),
+ PLL_35XX_RATE(24 * MHZ, 633000000, 211, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 620000000, 310, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 600000000, 400, 4, 2),
+ PLL_35XX_RATE(24 * MHZ, 543000000, 362, 4, 2),
+ PLL_35XX_RATE(24 * MHZ, 533000000, 533, 6, 2),
+ PLL_35XX_RATE(24 * MHZ, 500000000, 250, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 450000000, 300, 4, 2),
+ PLL_35XX_RATE(24 * MHZ, 400000000, 200, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 350000000, 175, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 300000000, 400, 4, 3),
+ PLL_35XX_RATE(24 * MHZ, 266000000, 266, 3, 3),
+ PLL_35XX_RATE(24 * MHZ, 200000000, 200, 3, 3),
+ PLL_35XX_RATE(24 * MHZ, 160000000, 160, 3, 3),
};
/*
* Applicable for 2650 Type PLL for AUD_PLL.
*/
static const struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initconst = {
- PLL_36XX_RATE(1600000000, 200, 3, 0, 0),
- PLL_36XX_RATE(1200000000, 100, 2, 0, 0),
- PLL_36XX_RATE(1000000000, 250, 3, 1, 0),
- PLL_36XX_RATE(800000000, 200, 3, 1, 0),
- PLL_36XX_RATE(600000000, 100, 2, 1, 0),
- PLL_36XX_RATE(532000000, 266, 3, 2, 0),
- PLL_36XX_RATE(480000000, 160, 2, 2, 0),
- PLL_36XX_RATE(432000000, 144, 2, 2, 0),
- PLL_36XX_RATE(400000000, 200, 3, 2, 0),
- PLL_36XX_RATE(394073130, 459, 7, 2, 49282),
- PLL_36XX_RATE(333000000, 111, 2, 2, 0),
- PLL_36XX_RATE(300000000, 100, 2, 2, 0),
- PLL_36XX_RATE(266000000, 266, 3, 3, 0),
- PLL_36XX_RATE(200000000, 200, 3, 3, 0),
- PLL_36XX_RATE(166000000, 166, 3, 3, 0),
- PLL_36XX_RATE(133000000, 266, 3, 4, 0),
- PLL_36XX_RATE(100000000, 200, 3, 4, 0),
- PLL_36XX_RATE(66000000, 176, 2, 5, 0),
+ PLL_36XX_RATE(24 * MHZ, 1600000000, 200, 3, 0, 0),
+ PLL_36XX_RATE(24 * MHZ, 1200000000, 100, 2, 0, 0),
+ PLL_36XX_RATE(24 * MHZ, 1000000000, 250, 3, 1, 0),
+ PLL_36XX_RATE(24 * MHZ, 800000000, 200, 3, 1, 0),
+ PLL_36XX_RATE(24 * MHZ, 600000000, 100, 2, 1, 0),
+ PLL_36XX_RATE(24 * MHZ, 532000000, 266, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 480000000, 160, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 432000000, 144, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 400000000, 200, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 394073128, 459, 7, 2, 49282),
+ PLL_36XX_RATE(24 * MHZ, 333000000, 111, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 300000000, 100, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 266000000, 266, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 200000000, 200, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 166000000, 166, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 133000000, 266, 3, 4, 0),
+ PLL_36XX_RATE(24 * MHZ, 100000000, 200, 3, 4, 0),
+ PLL_36XX_RATE(24 * MHZ, 66000000, 176, 2, 5, 0),
};
/* CMU_AUD */
diff --git a/drivers/clk/samsung/clk-exynos5410.c b/drivers/clk/samsung/clk-exynos5410.c
index fc471a49e8f4..0a0b09591e6f 100644
--- a/drivers/clk/samsung/clk-exynos5410.c
+++ b/drivers/clk/samsung/clk-exynos5410.c
@@ -226,16 +226,16 @@ static const struct samsung_gate_clock exynos5410_gate_clks[] __initconst = {
};
static const struct samsung_pll_rate_table exynos5410_pll2550x_24mhz_tbl[] __initconst = {
- PLL_36XX_RATE(400000000U, 200, 3, 2, 0),
- PLL_36XX_RATE(333000000U, 111, 2, 2, 0),
- PLL_36XX_RATE(300000000U, 100, 2, 2, 0),
- PLL_36XX_RATE(266000000U, 266, 3, 3, 0),
- PLL_36XX_RATE(200000000U, 200, 3, 3, 0),
- PLL_36XX_RATE(192000000U, 192, 3, 3, 0),
- PLL_36XX_RATE(166000000U, 166, 3, 3, 0),
- PLL_36XX_RATE(133000000U, 266, 3, 4, 0),
- PLL_36XX_RATE(100000000U, 200, 3, 4, 0),
- PLL_36XX_RATE(66000000U, 176, 2, 5, 0),
+ PLL_36XX_RATE(24 * MHZ, 400000000U, 200, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 333000000U, 111, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 300000000U, 100, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 266000000U, 266, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 200000000U, 200, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 192000000U, 192, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 166000000U, 166, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 133000000U, 266, 3, 4, 0),
+ PLL_36XX_RATE(24 * MHZ, 100000000U, 200, 3, 4, 0),
+ PLL_36XX_RATE(24 * MHZ, 66000000U, 176, 2, 5, 0),
};
static struct samsung_pll_clock exynos5410_plls[nr_plls] __initdata = {
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 45d34f601e9e..95e1bf69449b 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -19,6 +19,7 @@
#include "clk.h"
#include "clk-cpu.h"
+#include "clk-exynos5-subcmu.h"
#define APLL_LOCK 0x0
#define APLL_CON0 0x100
@@ -620,7 +621,8 @@ static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
MUX(CLK_MOUT_MX_MSPLL_CCORE, "mout_mx_mspll_ccore",
mout_group5_5800_p, SRC_TOP7, 16, 2),
- MUX(0, "mout_mau_epll_clk", mout_mau_epll_clk_p, SRC_TOP7, 20, 2),
+ MUX_F(0, "mout_mau_epll_clk", mout_mau_epll_clk_p, SRC_TOP7, 20, 2,
+ CLK_SET_RATE_PARENT, 0),
MUX(0, "mout_fimd1", mout_group3_p, SRC_DISP10, 4, 1),
};
@@ -863,7 +865,6 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = {
DIV(0, "dout_mipi1", "mout_mipi1", DIV_DISP10, 16, 8),
DIV(0, "dout_dp1", "mout_dp1", DIV_DISP10, 24, 4),
DIV(CLK_DOUT_PIXEL, "dout_hdmi_pixel", "mout_pixel", DIV_DISP10, 28, 4),
- DIV(0, "dout_disp1_blk", "aclk200_disp1", DIV2_RATIO0, 16, 2),
DIV(CLK_DOUT_ACLK400_DISP1, "dout_aclk400_disp1",
"mout_aclk400_disp1", DIV_TOP2, 4, 3),
@@ -912,8 +913,6 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = {
DIV(0, "dout_spi1", "mout_spi1", DIV_PERIC1, 24, 4),
DIV(0, "dout_spi2", "mout_spi2", DIV_PERIC1, 28, 4),
- /* Mfc Block */
- DIV(0, "dout_mfc_blk", "mout_user_aclk333", DIV4_RATIO, 0, 2),
/* PCM */
DIV(0, "dout_pcm1", "dout_audio1", DIV_PERIC2, 16, 8),
@@ -932,8 +931,6 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = {
DIV(0, "dout_spi2_pre", "dout_spi2", DIV_PERIC4, 24, 8),
/* GSCL Block */
- DIV(0, "dout_gscl_blk_300", "mout_user_aclk300_gscl",
- DIV2_RATIO0, 4, 2),
DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2),
/* MSCL Block */
@@ -1190,8 +1187,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(CLK_SCLK_GSCL_WB, "sclk_gscl_wb", "mout_user_aclk333_432_gscl",
GATE_TOP_SCLK_GSCL, 7, 0, 0),
- GATE(CLK_GSCL0, "gscl0", "aclk300_gscl", GATE_IP_GSCL0, 0, 0, 0),
- GATE(CLK_GSCL1, "gscl1", "aclk300_gscl", GATE_IP_GSCL0, 1, 0, 0),
GATE(CLK_FIMC_3AA, "fimc_3aa", "aclk333_432_gscl",
GATE_IP_GSCL0, 4, 0, 0),
GATE(CLK_FIMC_LITE0, "fimc_lite0", "aclk333_432_gscl",
@@ -1205,10 +1200,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE_IP_GSCL1, 3, 0, 0),
GATE(CLK_SMMU_FIMCL1, "smmu_fimcl1", "dout_gscl_blk_333",
GATE_IP_GSCL1, 4, 0, 0),
- GATE(CLK_SMMU_GSCL0, "smmu_gscl0", "dout_gscl_blk_300",
- GATE_IP_GSCL1, 6, 0, 0),
- GATE(CLK_SMMU_GSCL1, "smmu_gscl1", "dout_gscl_blk_300",
- GATE_IP_GSCL1, 7, 0, 0),
GATE(CLK_GSCL_WA, "gscl_wa", "sclk_gscl_wa", GATE_IP_GSCL1, 12, 0, 0),
GATE(CLK_GSCL_WB, "gscl_wb", "sclk_gscl_wb", GATE_IP_GSCL1, 13, 0, 0),
GATE(CLK_SMMU_FIMCL3, "smmu_fimcl3,", "dout_gscl_blk_333",
@@ -1227,18 +1218,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
GATE_IP_MSCL, 10, 0, 0),
- GATE(CLK_FIMD1, "fimd1", "aclk300_disp1", GATE_IP_DISP1, 0, 0, 0),
- GATE(CLK_DSIM1, "dsim1", "aclk200_disp1", GATE_IP_DISP1, 3, 0, 0),
- GATE(CLK_DP1, "dp1", "aclk200_disp1", GATE_IP_DISP1, 4, 0, 0),
- GATE(CLK_MIXER, "mixer", "aclk200_disp1", GATE_IP_DISP1, 5, 0, 0),
- GATE(CLK_HDMI, "hdmi", "aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
- GATE(CLK_SMMU_FIMD1M0, "smmu_fimd1m0", "dout_disp1_blk",
- GATE_IP_DISP1, 7, 0, 0),
- GATE(CLK_SMMU_FIMD1M1, "smmu_fimd1m1", "dout_disp1_blk",
- GATE_IP_DISP1, 8, 0, 0),
- GATE(CLK_SMMU_MIXER, "smmu_mixer", "aclk200_disp1",
- GATE_IP_DISP1, 9, 0, 0),
-
/* ISP */
GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp",
GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0),
@@ -1255,48 +1234,138 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(CLK_SCLK_ISP_SENSOR2, "sclk_isp_sensor2", "dout_isp_sensor2",
GATE_TOP_SCLK_ISP, 12, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0),
+};
+
+static const struct samsung_div_clock exynos5x_disp_div_clks[] __initconst = {
+ DIV(0, "dout_disp1_blk", "aclk200_disp1", DIV2_RATIO0, 16, 2),
+};
+
+static const struct samsung_gate_clock exynos5x_disp_gate_clks[] __initconst = {
+ GATE(CLK_FIMD1, "fimd1", "aclk300_disp1", GATE_IP_DISP1, 0, 0, 0),
+ GATE(CLK_DSIM1, "dsim1", "aclk200_disp1", GATE_IP_DISP1, 3, 0, 0),
+ GATE(CLK_DP1, "dp1", "aclk200_disp1", GATE_IP_DISP1, 4, 0, 0),
+ GATE(CLK_MIXER, "mixer", "aclk200_disp1", GATE_IP_DISP1, 5, 0, 0),
+ GATE(CLK_HDMI, "hdmi", "aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
+ GATE(CLK_SMMU_FIMD1M0, "smmu_fimd1m0", "dout_disp1_blk",
+ GATE_IP_DISP1, 7, 0, 0),
+ GATE(CLK_SMMU_FIMD1M1, "smmu_fimd1m1", "dout_disp1_blk",
+ GATE_IP_DISP1, 8, 0, 0),
+ GATE(CLK_SMMU_MIXER, "smmu_mixer", "aclk200_disp1",
+ GATE_IP_DISP1, 9, 0, 0),
+};
+
+static struct exynos5_subcmu_reg_dump exynos5x_disp_suspend_regs[] = {
+ { GATE_IP_DISP1, 0xffffffff, 0xffffffff }, /* DISP1 gates */
+ { SRC_TOP5, 0, BIT(0) }, /* MUX mout_user_aclk400_disp1 */
+ { SRC_TOP5, 0, BIT(24) }, /* MUX mout_user_aclk300_disp1 */
+ { SRC_TOP3, 0, BIT(8) }, /* MUX mout_user_aclk200_disp1 */
+ { DIV2_RATIO0, 0, 0x30000 }, /* DIV dout_disp1_blk */
+};
+
+static const struct samsung_div_clock exynos5x_gsc_div_clks[] __initconst = {
+ DIV(0, "dout_gscl_blk_300", "mout_user_aclk300_gscl",
+ DIV2_RATIO0, 4, 2),
+};
+
+static const struct samsung_gate_clock exynos5x_gsc_gate_clks[] __initconst = {
+ GATE(CLK_GSCL0, "gscl0", "aclk300_gscl", GATE_IP_GSCL0, 0, 0, 0),
+ GATE(CLK_GSCL1, "gscl1", "aclk300_gscl", GATE_IP_GSCL0, 1, 0, 0),
+ GATE(CLK_SMMU_GSCL0, "smmu_gscl0", "dout_gscl_blk_300",
+ GATE_IP_GSCL1, 6, 0, 0),
+ GATE(CLK_SMMU_GSCL1, "smmu_gscl1", "dout_gscl_blk_300",
+ GATE_IP_GSCL1, 7, 0, 0),
+};
+
+static struct exynos5_subcmu_reg_dump exynos5x_gsc_suspend_regs[] = {
+ { GATE_IP_GSCL0, 0x3, 0x3 }, /* GSC gates */
+ { GATE_IP_GSCL1, 0xc0, 0xc0 }, /* GSC gates */
+ { SRC_TOP5, 0, BIT(28) }, /* MUX mout_user_aclk300_gscl */
+ { DIV2_RATIO0, 0, 0x30 }, /* DIV dout_gscl_blk_300 */
+};
+
+static const struct samsung_div_clock exynos5x_mfc_div_clks[] __initconst = {
+ DIV(0, "dout_mfc_blk", "mout_user_aclk333", DIV4_RATIO, 0, 2),
+};
+
+static const struct samsung_gate_clock exynos5x_mfc_gate_clks[] __initconst = {
GATE(CLK_MFC, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0),
GATE(CLK_SMMU_MFCL, "smmu_mfcl", "dout_mfc_blk", GATE_IP_MFC, 1, 0, 0),
GATE(CLK_SMMU_MFCR, "smmu_mfcr", "dout_mfc_blk", GATE_IP_MFC, 2, 0, 0),
+};
- GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0),
+static struct exynos5_subcmu_reg_dump exynos5x_mfc_suspend_regs[] = {
+ { GATE_IP_MFC, 0xffffffff, 0xffffffff }, /* MFC gates */
+ { SRC_TOP4, 0, BIT(28) }, /* MUX mout_user_aclk333 */
+ { DIV4_RATIO, 0, 0x3 }, /* DIV dout_mfc_blk */
+};
+
+static const struct exynos5_subcmu_info exynos5x_subcmus[] = {
+ {
+ .div_clks = exynos5x_disp_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks),
+ .gate_clks = exynos5x_disp_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks),
+ .suspend_regs = exynos5x_disp_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs),
+ .pd_name = "DISP",
+ }, {
+ .div_clks = exynos5x_gsc_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks),
+ .gate_clks = exynos5x_gsc_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks),
+ .suspend_regs = exynos5x_gsc_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs),
+ .pd_name = "GSC",
+ }, {
+ .div_clks = exynos5x_mfc_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks),
+ .gate_clks = exynos5x_mfc_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks),
+ .suspend_regs = exynos5x_mfc_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs),
+ .pd_name = "MFC",
+ },
};
static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = {
- PLL_35XX_RATE(2000000000, 250, 3, 0),
- PLL_35XX_RATE(1900000000, 475, 6, 0),
- PLL_35XX_RATE(1800000000, 225, 3, 0),
- PLL_35XX_RATE(1700000000, 425, 6, 0),
- PLL_35XX_RATE(1600000000, 200, 3, 0),
- PLL_35XX_RATE(1500000000, 250, 4, 0),
- PLL_35XX_RATE(1400000000, 175, 3, 0),
- PLL_35XX_RATE(1300000000, 325, 6, 0),
- PLL_35XX_RATE(1200000000, 200, 2, 1),
- PLL_35XX_RATE(1100000000, 275, 3, 1),
- PLL_35XX_RATE(1000000000, 250, 3, 1),
- PLL_35XX_RATE(900000000, 150, 2, 1),
- PLL_35XX_RATE(800000000, 200, 3, 1),
- PLL_35XX_RATE(700000000, 175, 3, 1),
- PLL_35XX_RATE(600000000, 200, 2, 2),
- PLL_35XX_RATE(500000000, 250, 3, 2),
- PLL_35XX_RATE(400000000, 200, 3, 2),
- PLL_35XX_RATE(300000000, 200, 2, 3),
- PLL_35XX_RATE(200000000, 200, 3, 3),
+ PLL_35XX_RATE(24 * MHZ, 2000000000, 250, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1900000000, 475, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1800000000, 225, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1700000000, 425, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1600000000, 200, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1500000000, 250, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 1400000000, 175, 3, 0),
+ PLL_35XX_RATE(24 * MHZ, 1300000000, 325, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1200000000, 200, 2, 1),
+ PLL_35XX_RATE(24 * MHZ, 1100000000, 275, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 1000000000, 250, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 900000000, 150, 2, 1),
+ PLL_35XX_RATE(24 * MHZ, 800000000, 200, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 700000000, 175, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 600000000, 200, 2, 2),
+ PLL_35XX_RATE(24 * MHZ, 500000000, 250, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 400000000, 200, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 300000000, 200, 2, 3),
+ PLL_35XX_RATE(24 * MHZ, 200000000, 200, 3, 3),
};
static const struct samsung_pll_rate_table exynos5420_epll_24mhz_tbl[] = {
- PLL_36XX_RATE(600000000U, 100, 2, 1, 0),
- PLL_36XX_RATE(400000000U, 200, 3, 2, 0),
- PLL_36XX_RATE(393216003U, 197, 3, 2, -25690),
- PLL_36XX_RATE(361267218U, 301, 5, 2, 3671),
- PLL_36XX_RATE(200000000U, 200, 3, 3, 0),
- PLL_36XX_RATE(196608001U, 197, 3, 3, -25690),
- PLL_36XX_RATE(180633609U, 301, 5, 3, 3671),
- PLL_36XX_RATE(131072006U, 131, 3, 3, 4719),
- PLL_36XX_RATE(100000000U, 200, 3, 4, 0),
- PLL_36XX_RATE( 65536003U, 131, 3, 4, 4719),
- PLL_36XX_RATE( 49152000U, 197, 3, 5, -25690),
- PLL_36XX_RATE( 32768001U, 131, 3, 5, 4719),
+ PLL_36XX_RATE(24 * MHZ, 600000000U, 100, 2, 1, 0),
+ PLL_36XX_RATE(24 * MHZ, 400000000U, 200, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 393216003U, 197, 3, 2, -25690),
+ PLL_36XX_RATE(24 * MHZ, 361267218U, 301, 5, 2, 3671),
+ PLL_36XX_RATE(24 * MHZ, 200000000U, 200, 3, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 196608001U, 197, 3, 3, -25690),
+ PLL_36XX_RATE(24 * MHZ, 180633609U, 301, 5, 3, 3671),
+ PLL_36XX_RATE(24 * MHZ, 131072006U, 131, 3, 3, 4719),
+ PLL_36XX_RATE(24 * MHZ, 100000000U, 200, 3, 4, 0),
+ PLL_36XX_RATE(24 * MHZ, 73728000U, 98, 2, 4, 19923),
+ PLL_36XX_RATE(24 * MHZ, 67737602U, 90, 2, 4, 20762),
+ PLL_36XX_RATE(24 * MHZ, 65536003U, 131, 3, 4, 4719),
+ PLL_36XX_RATE(24 * MHZ, 49152000U, 197, 3, 5, -25690),
+ PLL_36XX_RATE(24 * MHZ, 45158401U, 90, 3, 4, 20762),
+ PLL_36XX_RATE(24 * MHZ, 32768001U, 131, 3, 5, 4719),
};
static struct samsung_pll_clock exynos5x_plls[nr_plls] __initdata = {
@@ -1472,6 +1541,8 @@ static void __init exynos5x_clk_init(struct device_node *np,
exynos5420_kfcclk_d, ARRAY_SIZE(exynos5420_kfcclk_d), 0);
exynos5420_clk_sleep_init();
+ exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
+ exynos5x_subcmus);
samsung_clk_of_add_provider(np, ctx);
}
@@ -1480,10 +1551,12 @@ static void __init exynos5420_clk_init(struct device_node *np)
{
exynos5x_clk_init(np, EXYNOS5420);
}
-CLK_OF_DECLARE(exynos5420_clk, "samsung,exynos5420-clock", exynos5420_clk_init);
+CLK_OF_DECLARE_DRIVER(exynos5420_clk, "samsung,exynos5420-clock",
+ exynos5420_clk_init);
static void __init exynos5800_clk_init(struct device_node *np)
{
exynos5x_clk_init(np, EXYNOS5800);
}
-CLK_OF_DECLARE(exynos5800_clk, "samsung,exynos5800-clock", exynos5800_clk_init);
+CLK_OF_DECLARE_DRIVER(exynos5800_clk, "samsung,exynos5800-clock",
+ exynos5800_clk_init);
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index db270908037a..5305ace514b2 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -703,68 +703,69 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
* & MPHY_PLL & G3D_PLL & DISP_PLL & ISP_PLL
*/
static const struct samsung_pll_rate_table exynos5433_pll_rates[] __initconst = {
- PLL_35XX_RATE(2500000000U, 625, 6, 0),
- PLL_35XX_RATE(2400000000U, 500, 5, 0),
- PLL_35XX_RATE(2300000000U, 575, 6, 0),
- PLL_35XX_RATE(2200000000U, 550, 6, 0),
- PLL_35XX_RATE(2100000000U, 350, 4, 0),
- PLL_35XX_RATE(2000000000U, 500, 6, 0),
- PLL_35XX_RATE(1900000000U, 475, 6, 0),
- PLL_35XX_RATE(1800000000U, 375, 5, 0),
- PLL_35XX_RATE(1700000000U, 425, 6, 0),
- PLL_35XX_RATE(1600000000U, 400, 6, 0),
- PLL_35XX_RATE(1500000000U, 250, 4, 0),
- PLL_35XX_RATE(1400000000U, 350, 6, 0),
- PLL_35XX_RATE(1332000000U, 222, 4, 0),
- PLL_35XX_RATE(1300000000U, 325, 6, 0),
- PLL_35XX_RATE(1200000000U, 500, 5, 1),
- PLL_35XX_RATE(1100000000U, 550, 6, 1),
- PLL_35XX_RATE(1086000000U, 362, 4, 1),
- PLL_35XX_RATE(1066000000U, 533, 6, 1),
- PLL_35XX_RATE(1000000000U, 500, 6, 1),
- PLL_35XX_RATE(933000000U, 311, 4, 1),
- PLL_35XX_RATE(921000000U, 307, 4, 1),
- PLL_35XX_RATE(900000000U, 375, 5, 1),
- PLL_35XX_RATE(825000000U, 275, 4, 1),
- PLL_35XX_RATE(800000000U, 400, 6, 1),
- PLL_35XX_RATE(733000000U, 733, 12, 1),
- PLL_35XX_RATE(700000000U, 175, 3, 1),
- PLL_35XX_RATE(667000000U, 222, 4, 1),
- PLL_35XX_RATE(633000000U, 211, 4, 1),
- PLL_35XX_RATE(600000000U, 500, 5, 2),
- PLL_35XX_RATE(552000000U, 460, 5, 2),
- PLL_35XX_RATE(550000000U, 550, 6, 2),
- PLL_35XX_RATE(543000000U, 362, 4, 2),
- PLL_35XX_RATE(533000000U, 533, 6, 2),
- PLL_35XX_RATE(500000000U, 500, 6, 2),
- PLL_35XX_RATE(444000000U, 370, 5, 2),
- PLL_35XX_RATE(420000000U, 350, 5, 2),
- PLL_35XX_RATE(400000000U, 400, 6, 2),
- PLL_35XX_RATE(350000000U, 350, 6, 2),
- PLL_35XX_RATE(333000000U, 222, 4, 2),
- PLL_35XX_RATE(300000000U, 500, 5, 3),
- PLL_35XX_RATE(278000000U, 556, 6, 3),
- PLL_35XX_RATE(266000000U, 532, 6, 3),
- PLL_35XX_RATE(250000000U, 500, 6, 3),
- PLL_35XX_RATE(200000000U, 400, 6, 3),
- PLL_35XX_RATE(166000000U, 332, 6, 3),
- PLL_35XX_RATE(160000000U, 320, 6, 3),
- PLL_35XX_RATE(133000000U, 532, 6, 4),
- PLL_35XX_RATE(100000000U, 400, 6, 4),
+ PLL_35XX_RATE(24 * MHZ, 2500000000U, 625, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 2400000000U, 500, 5, 0),
+ PLL_35XX_RATE(24 * MHZ, 2300000000U, 575, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 2200000000U, 550, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 2100000000U, 350, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 2000000000U, 500, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1900000000U, 475, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1800000000U, 375, 5, 0),
+ PLL_35XX_RATE(24 * MHZ, 1700000000U, 425, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1600000000U, 400, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1500000000U, 250, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 1400000000U, 350, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1332000000U, 222, 4, 0),
+ PLL_35XX_RATE(24 * MHZ, 1300000000U, 325, 6, 0),
+ PLL_35XX_RATE(24 * MHZ, 1200000000U, 500, 5, 1),
+ PLL_35XX_RATE(24 * MHZ, 1100000000U, 550, 6, 1),
+ PLL_35XX_RATE(24 * MHZ, 1086000000U, 362, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 1066000000U, 533, 6, 1),
+ PLL_35XX_RATE(24 * MHZ, 1000000000U, 500, 6, 1),
+ PLL_35XX_RATE(24 * MHZ, 933000000U, 311, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 921000000U, 307, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 900000000U, 375, 5, 1),
+ PLL_35XX_RATE(24 * MHZ, 825000000U, 275, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 800000000U, 400, 6, 1),
+ PLL_35XX_RATE(24 * MHZ, 733000000U, 733, 12, 1),
+ PLL_35XX_RATE(24 * MHZ, 700000000U, 175, 3, 1),
+ PLL_35XX_RATE(24 * MHZ, 666000000U, 222, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 633000000U, 211, 4, 1),
+ PLL_35XX_RATE(24 * MHZ, 600000000U, 500, 5, 2),
+ PLL_35XX_RATE(24 * MHZ, 552000000U, 460, 5, 2),
+ PLL_35XX_RATE(24 * MHZ, 550000000U, 550, 6, 2),
+ PLL_35XX_RATE(24 * MHZ, 543000000U, 362, 4, 2),
+ PLL_35XX_RATE(24 * MHZ, 533000000U, 533, 6, 2),
+ PLL_35XX_RATE(24 * MHZ, 500000000U, 500, 6, 2),
+ PLL_35XX_RATE(24 * MHZ, 444000000U, 370, 5, 2),
+ PLL_35XX_RATE(24 * MHZ, 420000000U, 350, 5, 2),
+ PLL_35XX_RATE(24 * MHZ, 400000000U, 400, 6, 2),
+ PLL_35XX_RATE(24 * MHZ, 350000000U, 350, 6, 2),
+ PLL_35XX_RATE(24 * MHZ, 333000000U, 222, 4, 2),
+ PLL_35XX_RATE(24 * MHZ, 300000000U, 500, 5, 3),
+ PLL_35XX_RATE(24 * MHZ, 278000000U, 556, 6, 3),
+ PLL_35XX_RATE(24 * MHZ, 266000000U, 532, 6, 3),
+ PLL_35XX_RATE(24 * MHZ, 250000000U, 500, 6, 3),
+ PLL_35XX_RATE(24 * MHZ, 200000000U, 400, 6, 3),
+ PLL_35XX_RATE(24 * MHZ, 166000000U, 332, 6, 3),
+ PLL_35XX_RATE(24 * MHZ, 160000000U, 320, 6, 3),
+ PLL_35XX_RATE(24 * MHZ, 133000000U, 532, 6, 4),
+ PLL_35XX_RATE(24 * MHZ, 100000000U, 400, 6, 4),
{ /* sentinel */ }
};
/* AUD_PLL */
static const struct samsung_pll_rate_table exynos5433_aud_pll_rates[] __initconst = {
- PLL_36XX_RATE(400000000U, 200, 3, 2, 0),
- PLL_36XX_RATE(393216000U, 197, 3, 2, -25690),
- PLL_36XX_RATE(384000000U, 128, 2, 2, 0),
- PLL_36XX_RATE(368640000U, 246, 4, 2, -15729),
- PLL_36XX_RATE(361507200U, 181, 3, 2, -16148),
- PLL_36XX_RATE(338688000U, 113, 2, 2, -6816),
- PLL_36XX_RATE(294912000U, 98, 1, 3, 19923),
- PLL_36XX_RATE(288000000U, 96, 1, 3, 0),
- PLL_36XX_RATE(252000000U, 84, 1, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 400000000U, 200, 3, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 393216003U, 197, 3, 2, -25690),
+ PLL_36XX_RATE(24 * MHZ, 384000000U, 128, 2, 2, 0),
+ PLL_36XX_RATE(24 * MHZ, 368639991U, 246, 4, 2, -15729),
+ PLL_36XX_RATE(24 * MHZ, 361507202U, 181, 3, 2, -16148),
+ PLL_36XX_RATE(24 * MHZ, 338687988U, 113, 2, 2, -6816),
+ PLL_36XX_RATE(24 * MHZ, 294912002U, 98, 1, 3, 19923),
+ PLL_36XX_RATE(24 * MHZ, 288000000U, 96, 1, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 252000000U, 84, 1, 3, 0),
+ PLL_36XX_RATE(24 * MHZ, 196608001U, 197, 3, 3, -25690),
{ /* sentinel */ }
};
@@ -1672,7 +1673,7 @@ static const struct samsung_gate_clock peric_gate_clks[] __initconst = {
ENABLE_SCLK_PERIC, 11, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_IOCLK_I2S1_BCLK, "sclk_ioclk_i2s1_bclk",
"ioclk_i2s1_bclk_in", ENABLE_SCLK_PERIC, 10,
- CLK_SET_RATE_PARENT, 0),
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
GATE(CLK_SCLK_SPDIF, "sclk_spdif", "sclk_spdif_peric",
ENABLE_SCLK_PERIC, 8, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_PCM1, "sclk_pcm1", "sclk_pcm1_peric",
@@ -5513,10 +5514,8 @@ static int __init exynos5433_cmu_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(reg_base)) {
- dev_err(dev, "failed to map registers\n");
+ if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
- }
for (i = 0; i < info->nr_clk_ids; ++i)
ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
index 5931a4140c3d..492d51691080 100644
--- a/drivers/clk/samsung/clk-exynos7.c
+++ b/drivers/clk/samsung/clk-exynos7.c
@@ -140,7 +140,7 @@ static const struct samsung_div_clock topc_div_clks[] __initconst = {
};
static const struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initconst = {
- PLL_36XX_RATE(491520000, 20, 1, 0, 31457),
+ PLL_36XX_RATE(24 * MHZ, 491519897, 20, 1, 0, 31457),
{},
};
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index 61eb8abbfd9c..ca57b3dfa814 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -41,35 +41,62 @@ enum samsung_pll_type {
pll_1460x,
};
-#define PLL_35XX_RATE(_rate, _m, _p, _s) \
+#define PLL_RATE(_fin, _m, _p, _s, _k, _ks) \
+ ((u64)(_fin) * (BIT(_ks) * (_m) + (_k)) / BIT(_ks) / ((_p) << (_s)))
+#define PLL_VALID_RATE(_fin, _fout, _m, _p, _s, _k, _ks) ((_fout) + \
+ BUILD_BUG_ON_ZERO(PLL_RATE(_fin, _m, _p, _s, _k, _ks) != (_fout)))
+
+#define PLL_35XX_RATE(_fin, _rate, _m, _p, _s) \
+ { \
+ .rate = PLL_VALID_RATE(_fin, _rate, \
+ _m, _p, _s, 0, 16), \
+ .mdiv = (_m), \
+ .pdiv = (_p), \
+ .sdiv = (_s), \
+ }
+
+#define PLL_S3C2410_MPLL_RATE(_fin, _rate, _m, _p, _s) \
+ { \
+ .rate = PLL_VALID_RATE(_fin, _rate, \
+ _m + 8, _p + 2, _s, 0, 16), \
+ .mdiv = (_m), \
+ .pdiv = (_p), \
+ .sdiv = (_s), \
+ }
+
+#define PLL_S3C2440_MPLL_RATE(_fin, _rate, _m, _p, _s) \
{ \
- .rate = (_rate), \
+ .rate = PLL_VALID_RATE(_fin, _rate, \
+ 2 * (_m + 8), _p + 2, _s, 0, 16), \
.mdiv = (_m), \
.pdiv = (_p), \
.sdiv = (_s), \
}
-#define PLL_36XX_RATE(_rate, _m, _p, _s, _k) \
+#define PLL_36XX_RATE(_fin, _rate, _m, _p, _s, _k) \
{ \
- .rate = (_rate), \
+ .rate = PLL_VALID_RATE(_fin, _rate, \
+ _m, _p, _s, _k, 16), \
.mdiv = (_m), \
.pdiv = (_p), \
.sdiv = (_s), \
.kdiv = (_k), \
}
-#define PLL_45XX_RATE(_rate, _m, _p, _s, _afc) \
+#define PLL_4508_RATE(_fin, _rate, _m, _p, _s, _afc) \
{ \
- .rate = (_rate), \
+ .rate = PLL_VALID_RATE(_fin, _rate, \
+ _m, _p, _s - 1, 0, 16), \
.mdiv = (_m), \
.pdiv = (_p), \
.sdiv = (_s), \
.afc = (_afc), \
}
-#define PLL_4600_RATE(_rate, _m, _p, _s, _k, _vsel) \
+#define PLL_4600_RATE(_fin, _rate, _m, _p, _s, _k, _vsel) \
{ \
- .rate = (_rate), \
+ .rate = PLL_VALID_RATE(_fin, _rate, \
+ _m, _p, _s, _k, 16), \
.mdiv = (_m), \
.pdiv = (_p), \
.sdiv = (_s), \
@@ -77,9 +104,10 @@ enum samsung_pll_type {
.vsel = (_vsel), \
}
-#define PLL_4650_RATE(_rate, _m, _p, _s, _k, _mfr, _mrr, _vsel) \
+#define PLL_4650_RATE(_fin, _rate, _m, _p, _s, _k, _mfr, _mrr, _vsel) \
{ \
- .rate = (_rate), \
+ .rate = PLL_VALID_RATE(_fin, _rate, \
+ _m, _p, _s, _k, 10), \
.mdiv = (_m), \
.pdiv = (_p), \
.sdiv = (_s), \
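
The reworked macros above do more than add a parameter: PLL_RATE() folds the input clock, feedback divider, pre-divider, post-divider and the optional fractional K value into one expression, and PLL_VALID_RATE() wraps it in BUILD_BUG_ON_ZERO() so a table entry whose listed rate does not match the rate computed from its divider values now breaks the build (which is presumably why several table rates in this patch were adjusted to the exact computed values). A minimal user-space sketch of the same arithmetic, with illustrative names and three entries taken from the tables in this patch:

#include <stdio.h>

/* fout = fin * (2^ks * m + k) / 2^ks / (p << s), as in the new PLL_RATE() */
static unsigned long long pll_rate(unsigned long long fin, unsigned int m,
				   unsigned int p, unsigned int s,
				   int k, unsigned int ks)
{
	long long num = ((long long)1 << ks) * m + k;

	return fin * num / (1ULL << ks) / ((unsigned long long)p << s);
}

int main(void)
{
	/* PLL_35XX_RATE(24 * MHZ, 1700000000, 425, 6, 0) -> 1700000000 */
	printf("%llu\n", pll_rate(24000000ULL, 425, 6, 0, 0, 16));
	/* PLL_36XX_RATE(24 * MHZ, 393216003U, 197, 3, 2, -25690) -> 393216003 */
	printf("%llu\n", pll_rate(24000000ULL, 197, 3, 2, -25690, 16));
	/* PLL_S3C2440_MPLL_RATE(12 * MHZ, 400000000, 0x5c, 1, 1): the macro
	 * feeds 2 * (m + 8) and p + 2 into the formula -> 400000000 */
	printf("%llu\n", pll_rate(12000000ULL, 2 * (0x5c + 8), 1 + 2, 1, 0, 16));
	return 0;
}
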
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
index e0650c33863b..a9c887475054 100644
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ b/drivers/clk/samsung/clk-s3c2410.c
@@ -95,7 +95,7 @@ static void __init s3c2410_clk_sleep_init(void) {}
PNAME(fclk_p) = { "mpll", "div_slow" };
-struct samsung_mux_clock s3c2410_common_muxes[] __initdata = {
+static struct samsung_mux_clock s3c2410_common_muxes[] __initdata = {
MUX(FCLK, "fclk", fclk_p, CLKSLOW, 4, 1),
};
@@ -111,12 +111,12 @@ static struct clk_div_table divslow_d[] = {
{ /* sentinel */ },
};
-struct samsung_div_clock s3c2410_common_dividers[] __initdata = {
+static struct samsung_div_clock s3c2410_common_dividers[] __initdata = {
DIV_T(0, "div_slow", "xti", CLKSLOW, 0, 3, divslow_d),
DIV(PCLK, "pclk", "hclk", CLKDIVN, 0, 1),
};
-struct samsung_gate_clock s3c2410_common_gates[] __initdata = {
+static struct samsung_gate_clock s3c2410_common_gates[] __initdata = {
GATE(PCLK_SPI, "spi", "pclk", CLKCON, 18, 0, 0),
GATE(PCLK_I2S, "i2s", "pclk", CLKCON, 17, 0, 0),
GATE(PCLK_I2C, "i2c", "pclk", CLKCON, 16, 0, 0),
@@ -135,7 +135,7 @@ struct samsung_gate_clock s3c2410_common_gates[] __initdata = {
};
/* should be added _after_ the soc-specific clocks are created */
-struct samsung_clock_alias s3c2410_common_aliases[] __initdata = {
+static struct samsung_clock_alias s3c2410_common_aliases[] __initdata = {
ALIAS(PCLK_I2C, "s3c2410-i2c.0", "i2c"),
ALIAS(PCLK_ADC, NULL, "adc"),
ALIAS(PCLK_RTC, NULL, "rtc"),
@@ -162,34 +162,34 @@ struct samsung_clock_alias s3c2410_common_aliases[] __initdata = {
static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = {
/* sorted in descending order */
/* 2410A extras */
- PLL_35XX_RATE(270000000, 127, 1, 1),
- PLL_35XX_RATE(268000000, 126, 1, 1),
- PLL_35XX_RATE(266000000, 125, 1, 1),
- PLL_35XX_RATE(226000000, 105, 1, 1),
- PLL_35XX_RATE(210000000, 132, 2, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 270000000, 127, 1, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 268000000, 126, 1, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 266000000, 125, 1, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 226000000, 105, 1, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 210000000, 132, 2, 1),
/* 2410 common */
- PLL_35XX_RATE(203000000, 161, 3, 1),
- PLL_35XX_RATE(192000000, 88, 1, 1),
- PLL_35XX_RATE(186000000, 85, 1, 1),
- PLL_35XX_RATE(180000000, 82, 1, 1),
- PLL_35XX_RATE(170000000, 77, 1, 1),
- PLL_35XX_RATE(158000000, 71, 1, 1),
- PLL_35XX_RATE(152000000, 68, 1, 1),
- PLL_35XX_RATE(147000000, 90, 2, 1),
- PLL_35XX_RATE(135000000, 82, 2, 1),
- PLL_35XX_RATE(124000000, 116, 1, 2),
- PLL_35XX_RATE(118000000, 150, 2, 2),
- PLL_35XX_RATE(113000000, 105, 1, 2),
- PLL_35XX_RATE(101000000, 127, 2, 2),
- PLL_35XX_RATE(90000000, 112, 2, 2),
- PLL_35XX_RATE(85000000, 105, 2, 2),
- PLL_35XX_RATE(79000000, 71, 1, 2),
- PLL_35XX_RATE(68000000, 82, 2, 2),
- PLL_35XX_RATE(56000000, 142, 2, 3),
- PLL_35XX_RATE(48000000, 120, 2, 3),
- PLL_35XX_RATE(51000000, 161, 3, 3),
- PLL_35XX_RATE(45000000, 82, 1, 3),
- PLL_35XX_RATE(34000000, 82, 2, 3),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 202800000, 161, 3, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 192000000, 88, 1, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 186000000, 85, 1, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 180000000, 82, 1, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 170000000, 77, 1, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 158000000, 71, 1, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 152000000, 68, 1, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 147000000, 90, 2, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 135000000, 82, 2, 1),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 124000000, 116, 1, 2),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 118500000, 150, 2, 2),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 113000000, 105, 1, 2),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 101250000, 127, 2, 2),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 90000000, 112, 2, 2),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 84750000, 105, 2, 2),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 79000000, 71, 1, 2),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 67500000, 82, 2, 2),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 56250000, 142, 2, 3),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 48000000, 120, 2, 3),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 50700000, 161, 3, 3),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 45000000, 82, 1, 3),
+ PLL_S3C2410_MPLL_RATE(12 * MHZ, 33750000, 82, 2, 3),
{ /* sentinel */ },
};
@@ -200,11 +200,11 @@ static struct samsung_pll_clock s3c2410_plls[] __initdata = {
LOCKTIME, UPLLCON, NULL),
};
-struct samsung_div_clock s3c2410_dividers[] __initdata = {
+static struct samsung_div_clock s3c2410_dividers[] __initdata = {
DIV(HCLK, "hclk", "mpll", CLKDIVN, 1, 1),
};
-struct samsung_fixed_factor_clock s3c2410_ffactor[] __initdata = {
+static struct samsung_fixed_factor_clock s3c2410_ffactor[] __initdata = {
/*
* armclk is directly supplied by the fclk, without
* switching possibility like on the s3c244x below.
@@ -215,7 +215,7 @@ struct samsung_fixed_factor_clock s3c2410_ffactor[] __initdata = {
FFACTOR(UCLK, "uclk", "upll", 1, 1, 0),
};
-struct samsung_clock_alias s3c2410_aliases[] __initdata = {
+static struct samsung_clock_alias s3c2410_aliases[] __initdata = {
ALIAS(PCLK_UART0, "s3c2410-uart.0", "uart"),
ALIAS(PCLK_UART1, "s3c2410-uart.1", "uart"),
ALIAS(PCLK_UART2, "s3c2410-uart.2", "uart"),
@@ -229,33 +229,33 @@ struct samsung_clock_alias s3c2410_aliases[] __initdata = {
static struct samsung_pll_rate_table pll_s3c244x_12mhz_tbl[] __initdata = {
/* sorted in descending order */
- PLL_35XX_RATE(400000000, 0x5c, 1, 1),
- PLL_35XX_RATE(390000000, 0x7a, 2, 1),
- PLL_35XX_RATE(380000000, 0x57, 1, 1),
- PLL_35XX_RATE(370000000, 0xb1, 4, 1),
- PLL_35XX_RATE(360000000, 0x70, 2, 1),
- PLL_35XX_RATE(350000000, 0xa7, 4, 1),
- PLL_35XX_RATE(340000000, 0x4d, 1, 1),
- PLL_35XX_RATE(330000000, 0x66, 2, 1),
- PLL_35XX_RATE(320000000, 0x98, 4, 1),
- PLL_35XX_RATE(310000000, 0x93, 4, 1),
- PLL_35XX_RATE(300000000, 0x75, 3, 1),
- PLL_35XX_RATE(240000000, 0x70, 1, 2),
- PLL_35XX_RATE(230000000, 0x6b, 1, 2),
- PLL_35XX_RATE(220000000, 0x66, 1, 2),
- PLL_35XX_RATE(210000000, 0x84, 2, 2),
- PLL_35XX_RATE(200000000, 0x5c, 1, 2),
- PLL_35XX_RATE(190000000, 0x57, 1, 2),
- PLL_35XX_RATE(180000000, 0x70, 2, 2),
- PLL_35XX_RATE(170000000, 0x4d, 1, 2),
- PLL_35XX_RATE(160000000, 0x98, 4, 2),
- PLL_35XX_RATE(150000000, 0x75, 3, 2),
- PLL_35XX_RATE(120000000, 0x70, 1, 3),
- PLL_35XX_RATE(110000000, 0x66, 1, 3),
- PLL_35XX_RATE(100000000, 0x5c, 1, 3),
- PLL_35XX_RATE(90000000, 0x70, 2, 3),
- PLL_35XX_RATE(80000000, 0x98, 4, 3),
- PLL_35XX_RATE(75000000, 0x75, 3, 3),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 400000000, 0x5c, 1, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 390000000, 0x7a, 2, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 380000000, 0x57, 1, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 370000000, 0xb1, 4, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 360000000, 0x70, 2, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 350000000, 0xa7, 4, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 340000000, 0x4d, 1, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 330000000, 0x66, 2, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 320000000, 0x98, 4, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 310000000, 0x93, 4, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 300000000, 0x75, 3, 1),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 240000000, 0x70, 1, 2),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 230000000, 0x6b, 1, 2),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 220000000, 0x66, 1, 2),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 210000000, 0x84, 2, 2),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 200000000, 0x5c, 1, 2),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 190000000, 0x57, 1, 2),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 180000000, 0x70, 2, 2),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 170000000, 0x4d, 1, 2),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 160000000, 0x98, 4, 2),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 150000000, 0x75, 3, 2),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 120000000, 0x70, 1, 3),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 110000000, 0x66, 1, 3),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 100000000, 0x5c, 1, 3),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 90000000, 0x70, 2, 3),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 80000000, 0x98, 4, 3),
+ PLL_S3C2440_MPLL_RATE(12 * MHZ, 75000000, 0x75, 3, 3),
{ /* sentinel */ },
};
@@ -269,12 +269,12 @@ static struct samsung_pll_clock s3c244x_common_plls[] __initdata = {
PNAME(hclk_p) = { "fclk", "div_hclk_2", "div_hclk_4", "div_hclk_3" };
PNAME(armclk_p) = { "fclk", "hclk" };
-struct samsung_mux_clock s3c244x_common_muxes[] __initdata = {
+static struct samsung_mux_clock s3c244x_common_muxes[] __initdata = {
MUX(HCLK, "hclk", hclk_p, CLKDIVN, 1, 2),
MUX(ARMCLK, "armclk", armclk_p, CAMDIVN, 12, 1),
};
-struct samsung_fixed_factor_clock s3c244x_common_ffactor[] __initdata = {
+static struct samsung_fixed_factor_clock s3c244x_common_ffactor[] __initdata = {
FFACTOR(0, "div_hclk_2", "fclk", 1, 2, 0),
FFACTOR(0, "ff_cam", "div_cam", 2, 1, CLK_SET_RATE_PARENT),
};
@@ -291,7 +291,7 @@ static struct clk_div_table div_hclk_3_d[] = {
{ /* sentinel */ },
};
-struct samsung_div_clock s3c244x_common_dividers[] __initdata = {
+static struct samsung_div_clock s3c244x_common_dividers[] __initdata = {
DIV(UCLK, "uclk", "upll", CLKDIVN, 3, 1),
DIV(0, "div_hclk", "fclk", CLKDIVN, 1, 1),
DIV_T(0, "div_hclk_4", "fclk", CAMDIVN, 9, 1, div_hclk_4_d),
@@ -299,11 +299,11 @@ struct samsung_div_clock s3c244x_common_dividers[] __initdata = {
DIV(0, "div_cam", "upll", CAMDIVN, 0, 3),
};
-struct samsung_gate_clock s3c244x_common_gates[] __initdata = {
+static struct samsung_gate_clock s3c244x_common_gates[] __initdata = {
GATE(HCLK_CAM, "cam", "hclk", CLKCON, 19, 0, 0),
};
-struct samsung_clock_alias s3c244x_common_aliases[] __initdata = {
+static struct samsung_clock_alias s3c244x_common_aliases[] __initdata = {
ALIAS(PCLK_UART0, "s3c2440-uart.0", "uart"),
ALIAS(PCLK_UART1, "s3c2440-uart.1", "uart"),
ALIAS(PCLK_UART2, "s3c2440-uart.2", "uart"),
@@ -318,23 +318,23 @@ struct samsung_clock_alias s3c244x_common_aliases[] __initdata = {
PNAME(s3c2440_camif_p) = { "upll", "ff_cam" };
-struct samsung_mux_clock s3c2440_muxes[] __initdata = {
+static struct samsung_mux_clock s3c2440_muxes[] __initdata = {
MUX(CAMIF, "camif", s3c2440_camif_p, CAMDIVN, 4, 1),
};
-struct samsung_gate_clock s3c2440_gates[] __initdata = {
+static struct samsung_gate_clock s3c2440_gates[] __initdata = {
GATE(PCLK_AC97, "ac97", "pclk", CLKCON, 20, 0, 0),
};
/* S3C2442 specific clocks */
-struct samsung_fixed_factor_clock s3c2442_ffactor[] __initdata = {
+static struct samsung_fixed_factor_clock s3c2442_ffactor[] __initdata = {
FFACTOR(0, "upll_3", "upll", 1, 3, 0),
};
PNAME(s3c2442_camif_p) = { "upll", "ff_cam", "upll", "upll_3" };
-struct samsung_mux_clock s3c2442_muxes[] __initdata = {
+static struct samsung_mux_clock s3c2442_muxes[] __initdata = {
MUX(CAMIF, "camif", s3c2442_camif_p, CAMDIVN, 4, 2),
};
@@ -343,7 +343,7 @@ struct samsung_mux_clock s3c2442_muxes[] __initdata = {
* Only necessary until the devicetree-move is complete
*/
#define XTI 1
-struct samsung_fixed_rate_clock s3c2410_common_frate_clks[] __initdata = {
+static struct samsung_fixed_rate_clock s3c2410_common_frate_clks[] __initdata = {
FRATE(XTI, "xti", NULL, 0, 0),
};
@@ -468,18 +468,18 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
static void __init s3c2410_clk_init(struct device_node *np)
{
- s3c2410_common_clk_init(np, 0, S3C2410, 0);
+ s3c2410_common_clk_init(np, 0, S3C2410, NULL);
}
CLK_OF_DECLARE(s3c2410_clk, "samsung,s3c2410-clock", s3c2410_clk_init);
static void __init s3c2440_clk_init(struct device_node *np)
{
- s3c2410_common_clk_init(np, 0, S3C2440, 0);
+ s3c2410_common_clk_init(np, 0, S3C2440, NULL);
}
CLK_OF_DECLARE(s3c2440_clk, "samsung,s3c2440-clock", s3c2440_clk_init);
static void __init s3c2442_clk_init(struct device_node *np)
{
- s3c2410_common_clk_init(np, 0, S3C2442, 0);
+ s3c2410_common_clk_init(np, 0, S3C2442, NULL);
}
CLK_OF_DECLARE(s3c2442_clk, "samsung,s3c2442-clock", s3c2442_clk_init);
diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c
index b8340a49921b..6bc94d3aff78 100644
--- a/drivers/clk/samsung/clk-s3c2412.c
+++ b/drivers/clk/samsung/clk-s3c2412.c
@@ -27,11 +27,6 @@
#define CLKSRC 0x1c
#define SWRST 0x30
-/* list of PLLs to be registered */
-enum s3c2412_plls {
- mpll, upll,
-};
-
static void __iomem *reg_base;
#ifdef CONFIG_PM_SLEEP
@@ -98,7 +93,7 @@ static struct clk_div_table divxti_d[] = {
{ /* sentinel */ },
};
-struct samsung_div_clock s3c2412_dividers[] __initdata = {
+static struct samsung_div_clock s3c2412_dividers[] __initdata = {
DIV_T(0, "div_xti", "xti", CLKSRC, 0, 3, divxti_d),
DIV(0, "div_cam", "mux_cam", CLKDIVN, 16, 4),
DIV(0, "div_i2s", "mux_i2s", CLKDIVN, 12, 4),
@@ -110,7 +105,7 @@ struct samsung_div_clock s3c2412_dividers[] __initdata = {
DIV(HCLK, "hclk", "armdiv", CLKDIVN, 0, 2),
};
-struct samsung_fixed_factor_clock s3c2412_ffactor[] __initdata = {
+static struct samsung_fixed_factor_clock s3c2412_ffactor[] __initdata = {
FFACTOR(0, "ff_hclk", "hclk", 2, 1, CLK_SET_RATE_PARENT),
};
@@ -130,7 +125,7 @@ PNAME(msysclk_p) = { "mdivclk", "mpll" };
PNAME(mdivclk_p) = { "xti", "div_xti" };
PNAME(armclk_p) = { "armdiv", "hclk" };
-struct samsung_mux_clock s3c2412_muxes[] __initdata = {
+static struct samsung_mux_clock s3c2412_muxes[] __initdata = {
MUX(0, "erefclk", erefclk_p, CLKSRC, 14, 2),
MUX(0, "urefclk", urefclk_p, CLKSRC, 12, 2),
MUX(0, "mux_cam", camclk_p, CLKSRC, 11, 1),
@@ -144,13 +139,11 @@ struct samsung_mux_clock s3c2412_muxes[] __initdata = {
};
static struct samsung_pll_clock s3c2412_plls[] __initdata = {
- [mpll] = PLL(pll_s3c2440_mpll, MPLL, "mpll", "xti",
- LOCKTIME, MPLLCON, NULL),
- [upll] = PLL(pll_s3c2410_upll, UPLL, "upll", "urefclk",
- LOCKTIME, UPLLCON, NULL),
+ PLL(pll_s3c2440_mpll, MPLL, "mpll", "xti", LOCKTIME, MPLLCON, NULL),
+ PLL(pll_s3c2410_upll, UPLL, "upll", "urefclk", LOCKTIME, UPLLCON, NULL),
};
-struct samsung_gate_clock s3c2412_gates[] __initdata = {
+static struct samsung_gate_clock s3c2412_gates[] __initdata = {
GATE(PCLK_WDT, "wdt", "pclk", CLKCON, 28, 0, 0),
GATE(PCLK_SPI, "spi", "pclk", CLKCON, 27, 0, 0),
GATE(PCLK_I2S, "i2s", "pclk", CLKCON, 26, 0, 0),
@@ -181,7 +174,7 @@ struct samsung_gate_clock s3c2412_gates[] __initdata = {
GATE(HCLK_DMA0, "dma0", "hclk", CLKCON, 0, CLK_IGNORE_UNUSED, 0),
};
-struct samsung_clock_alias s3c2412_aliases[] __initdata = {
+static struct samsung_clock_alias s3c2412_aliases[] __initdata = {
ALIAS(PCLK_UART0, "s3c2412-uart.0", "uart"),
ALIAS(PCLK_UART1, "s3c2412-uart.1", "uart"),
ALIAS(PCLK_UART2, "s3c2412-uart.2", "uart"),
@@ -231,7 +224,7 @@ static struct notifier_block s3c2412_restart_handler = {
* Only necessary until the devicetree-move is complete
*/
#define XTI 1
-struct samsung_fixed_rate_clock s3c2412_common_frate_clks[] __initdata = {
+static struct samsung_fixed_rate_clock s3c2412_common_frate_clks[] __initdata = {
FRATE(XTI, "xti", NULL, 0, 0),
FRATE(0, "ext", NULL, 0, 0),
};
@@ -296,6 +289,6 @@ void __init s3c2412_common_clk_init(struct device_node *np, unsigned long xti_f,
static void __init s3c2412_clk_init(struct device_node *np)
{
- s3c2412_common_clk_init(np, 0, 0, 0);
+ s3c2412_common_clk_init(np, 0, 0, NULL);
}
CLK_OF_DECLARE(s3c2412_clk, "samsung,s3c2412-clock", s3c2412_clk_init);
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index d94b85a42356..c46e6d5bc9bc 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -41,11 +41,6 @@ enum supported_socs {
S3C2450,
};
-/* list of PLLs to be registered */
-enum s3c2443_plls {
- mpll, epll,
-};
-
static void __iomem *reg_base;
#ifdef CONFIG_PM_SLEEP
@@ -113,7 +108,7 @@ PNAME(msysclk_p) = { "mpllref", "mpll" };
PNAME(armclk_p) = { "armdiv" , "hclk" };
PNAME(i2s0_p) = { "div_i2s0", "ext_i2s", "epllref", "epllref" };
-struct samsung_mux_clock s3c2443_common_muxes[] __initdata = {
+static struct samsung_mux_clock s3c2443_common_muxes[] __initdata = {
MUX(0, "epllref", epllref_p, CLKSRC, 7, 2),
MUX(ESYSCLK, "esysclk", esysclk_p, CLKSRC, 6, 1),
MUX(0, "mpllref", mpllref_p, CLKSRC, 3, 1),
@@ -141,7 +136,7 @@ static struct clk_div_table mdivclk_d[] = {
{ /* sentinel */ },
};
-struct samsung_div_clock s3c2443_common_dividers[] __initdata = {
+static struct samsung_div_clock s3c2443_common_dividers[] __initdata = {
DIV_T(0, "mdivclk", "xti", CLKDIV0, 6, 3, mdivclk_d),
DIV(0, "prediv", "msysclk", CLKDIV0, 4, 2),
DIV_T(HCLK, "hclk", "prediv", CLKDIV0, 0, 2, hclk_d),
@@ -154,7 +149,7 @@ struct samsung_div_clock s3c2443_common_dividers[] __initdata = {
DIV(0, "div_usbhost", "esysclk", CLKDIV1, 4, 2),
};
-struct samsung_gate_clock s3c2443_common_gates[] __initdata = {
+static struct samsung_gate_clock s3c2443_common_gates[] __initdata = {
GATE(SCLK_HSMMC_EXT, "sclk_hsmmcext", "ext", SCLKCON, 13, 0, 0),
GATE(SCLK_HSMMC1, "sclk_hsmmc1", "div_hsmmc1", SCLKCON, 12, 0, 0),
GATE(SCLK_FIMD, "sclk_fimd", "div_fimd", SCLKCON, 10, 0, 0),
@@ -188,7 +183,7 @@ struct samsung_gate_clock s3c2443_common_gates[] __initdata = {
GATE(PCLK_UART0, "uart0", "pclk", PCLKCON, 0, 0, 0),
};
-struct samsung_clock_alias s3c2443_common_aliases[] __initdata = {
+static struct samsung_clock_alias s3c2443_common_aliases[] __initdata = {
ALIAS(MSYSCLK, NULL, "msysclk"),
ALIAS(ARMCLK, NULL, "armclk"),
ALIAS(MPLL, NULL, "mpll"),
@@ -225,10 +220,8 @@ struct samsung_clock_alias s3c2443_common_aliases[] __initdata = {
/* S3C2416 specific clocks */
static struct samsung_pll_clock s3c2416_pll_clks[] __initdata = {
- [mpll] = PLL(pll_6552_s3c2416, MPLL, "mpll", "mpllref",
- LOCKCON0, MPLLCON, NULL),
- [epll] = PLL(pll_6553, EPLL, "epll", "epllref",
- LOCKCON1, EPLLCON, NULL),
+ PLL(pll_6552_s3c2416, MPLL, "mpll", "mpllref", LOCKCON0, MPLLCON, NULL),
+ PLL(pll_6553, EPLL, "epll", "epllref", LOCKCON1, EPLLCON, NULL),
};
PNAME(s3c2416_hsmmc0_p) = { "sclk_hsmmc0", "sclk_hsmmcext" };
@@ -245,19 +238,19 @@ static struct clk_div_table armdiv_s3c2416_d[] = {
{ /* sentinel */ },
};
-struct samsung_div_clock s3c2416_dividers[] __initdata = {
+static struct samsung_div_clock s3c2416_dividers[] __initdata = {
DIV_T(ARMDIV, "armdiv", "msysclk", CLKDIV0, 9, 3, armdiv_s3c2416_d),
DIV(0, "div_hsspi0_mpll", "msysclk", CLKDIV2, 0, 4),
DIV(0, "div_hsmmc0", "esysclk", CLKDIV2, 6, 2),
};
-struct samsung_mux_clock s3c2416_muxes[] __initdata = {
+static struct samsung_mux_clock s3c2416_muxes[] __initdata = {
MUX(MUX_HSMMC0, "mux_hsmmc0", s3c2416_hsmmc0_p, CLKSRC, 16, 1),
MUX(MUX_HSMMC1, "mux_hsmmc1", s3c2416_hsmmc1_p, CLKSRC, 17, 1),
MUX(MUX_HSSPI0, "mux_hsspi0", s3c2416_hsspi0_p, CLKSRC, 18, 1),
};
-struct samsung_gate_clock s3c2416_gates[] __initdata = {
+static struct samsung_gate_clock s3c2416_gates[] __initdata = {
GATE(0, "hsspi0_mpll", "div_hsspi0_mpll", SCLKCON, 19, 0, 0),
GATE(0, "hsspi0_epll", "div_hsspi0_epll", SCLKCON, 14, 0, 0),
GATE(0, "sclk_hsmmc0", "div_hsmmc0", SCLKCON, 6, 0, 0),
@@ -267,7 +260,7 @@ struct samsung_gate_clock s3c2416_gates[] __initdata = {
GATE(PCLK_PCM, "pcm", "pclk", PCLKCON, 19, 0, 0),
};
-struct samsung_clock_alias s3c2416_aliases[] __initdata = {
+static struct samsung_clock_alias s3c2416_aliases[] __initdata = {
ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "hsmmc"),
ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "mmc_busclk.0"),
ALIAS(MUX_HSMMC0, "s3c-sdhci.0", "mmc_busclk.2"),
@@ -279,10 +272,8 @@ struct samsung_clock_alias s3c2416_aliases[] __initdata = {
/* S3C2443 specific clocks */
static struct samsung_pll_clock s3c2443_pll_clks[] __initdata = {
- [mpll] = PLL(pll_3000, MPLL, "mpll", "mpllref",
- LOCKCON0, MPLLCON, NULL),
- [epll] = PLL(pll_2126, EPLL, "epll", "epllref",
- LOCKCON1, EPLLCON, NULL),
+ PLL(pll_3000, MPLL, "mpll", "mpllref", LOCKCON0, MPLLCON, NULL),
+ PLL(pll_2126, EPLL, "epll", "epllref", LOCKCON1, EPLLCON, NULL),
};
static struct clk_div_table armdiv_s3c2443_d[] = {
@@ -297,12 +288,12 @@ static struct clk_div_table armdiv_s3c2443_d[] = {
{ /* sentinel */ },
};
-struct samsung_div_clock s3c2443_dividers[] __initdata = {
+static struct samsung_div_clock s3c2443_dividers[] __initdata = {
DIV_T(ARMDIV, "armdiv", "msysclk", CLKDIV0, 9, 4, armdiv_s3c2443_d),
DIV(0, "div_cam", "esysclk", CLKDIV1, 26, 4),
};
-struct samsung_gate_clock s3c2443_gates[] __initdata = {
+static struct samsung_gate_clock s3c2443_gates[] __initdata = {
GATE(SCLK_HSSPI0, "sclk_hsspi0", "div_hsspi0_epll", SCLKCON, 14, 0, 0),
GATE(SCLK_CAM, "sclk_cam", "div_cam", SCLKCON, 11, 0, 0),
GATE(HCLK_CFC, "cfc", "hclk", HCLKCON, 17, CLK_IGNORE_UNUSED, 0),
@@ -311,7 +302,7 @@ struct samsung_gate_clock s3c2443_gates[] __initdata = {
GATE(PCLK_SDI, "sdi", "pclk", PCLKCON, 5, 0, 0),
};
-struct samsung_clock_alias s3c2443_aliases[] __initdata = {
+static struct samsung_clock_alias s3c2443_aliases[] __initdata = {
ALIAS(SCLK_HSSPI0, "s3c2443-spi.0", "spi_busclk2"),
ALIAS(SCLK_HSMMC1, "s3c-sdhci.1", "mmc_busclk.2"),
ALIAS(SCLK_CAM, NULL, "camif-upll"),
@@ -327,20 +318,20 @@ PNAME(s3c2450_cam_p) = { "div_cam", "hclk" };
PNAME(s3c2450_hsspi1_p) = { "hsspi1_epll", "hsspi1_mpll" };
PNAME(i2s1_p) = { "div_i2s1", "ext_i2s", "epllref", "epllref" };
-struct samsung_div_clock s3c2450_dividers[] __initdata = {
+static struct samsung_div_clock s3c2450_dividers[] __initdata = {
DIV(0, "div_cam", "esysclk", CLKDIV1, 26, 4),
DIV(0, "div_hsspi1_epll", "esysclk", CLKDIV2, 24, 2),
DIV(0, "div_hsspi1_mpll", "msysclk", CLKDIV2, 16, 4),
DIV(0, "div_i2s1", "esysclk", CLKDIV2, 12, 4),
};
-struct samsung_mux_clock s3c2450_muxes[] __initdata = {
+static struct samsung_mux_clock s3c2450_muxes[] __initdata = {
MUX(0, "mux_cam", s3c2450_cam_p, CLKSRC, 20, 1),
MUX(MUX_HSSPI1, "mux_hsspi1", s3c2450_hsspi1_p, CLKSRC, 19, 1),
MUX(0, "mux_i2s1", i2s1_p, CLKSRC, 12, 2),
};
-struct samsung_gate_clock s3c2450_gates[] __initdata = {
+static struct samsung_gate_clock s3c2450_gates[] __initdata = {
GATE(SCLK_I2S1, "sclk_i2s1", "div_i2s1", SCLKCON, 5, 0, 0),
GATE(HCLK_CFC, "cfc", "hclk", HCLKCON, 17, 0, 0),
GATE(HCLK_CAM, "cam", "hclk", HCLKCON, 8, 0, 0),
@@ -351,7 +342,7 @@ struct samsung_gate_clock s3c2450_gates[] __initdata = {
GATE(PCLK_SPI1, "spi1", "pclk", PCLKCON, 14, 0, 0),
};
-struct samsung_clock_alias s3c2450_aliases[] __initdata = {
+static struct samsung_clock_alias s3c2450_aliases[] __initdata = {
ALIAS(PCLK_SPI1, "s3c2443-spi.1", "spi"),
ALIAS(PCLK_SPI1, "s3c2443-spi.1", "spi_busclk0"),
ALIAS(MUX_HSSPI1, "s3c2443-spi.1", "spi_busclk2"),
@@ -374,7 +365,7 @@ static struct notifier_block s3c2443_restart_handler = {
* fixed rate clocks generated outside the soc
* Only necessary until the devicetree-move is complete
*/
-struct samsung_fixed_rate_clock s3c2443_common_frate_clks[] __initdata = {
+static struct samsung_fixed_rate_clock s3c2443_common_frate_clks[] __initdata = {
FRATE(0, "xti", NULL, 0, 0),
FRATE(0, "ext", NULL, 0, 0),
FRATE(0, "ext_i2s", NULL, 0, 0),
@@ -470,18 +461,18 @@ void __init s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f,
static void __init s3c2416_clk_init(struct device_node *np)
{
- s3c2443_common_clk_init(np, 0, S3C2416, 0);
+ s3c2443_common_clk_init(np, 0, S3C2416, NULL);
}
CLK_OF_DECLARE(s3c2416_clk, "samsung,s3c2416-clock", s3c2416_clk_init);
static void __init s3c2443_clk_init(struct device_node *np)
{
- s3c2443_common_clk_init(np, 0, S3C2443, 0);
+ s3c2443_common_clk_init(np, 0, S3C2443, NULL);
}
CLK_OF_DECLARE(s3c2443_clk, "samsung,s3c2443-clock", s3c2443_clk_init);
static void __init s3c2450_clk_init(struct device_node *np)
{
- s3c2443_common_clk_init(np, 0, S3C2450, 0);
+ s3c2443_common_clk_init(np, 0, S3C2450, NULL);
}
CLK_OF_DECLARE(s3c2450_clk, "samsung,s3c2450-clock", s3c2450_clk_init);
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index 7306867a0ab8..6db01cf5ab83 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -56,11 +56,6 @@
#define GATE_ON(_id, cname, pname, o, b) \
GATE(_id, cname, pname, o, b, CLK_IGNORE_UNUSED, 0)
-/* list of PLLs to be registered */
-enum s3c64xx_plls {
- apll, mpll, epll,
-};
-
static void __iomem *reg_base;
static bool is_s3c6400;
@@ -364,12 +359,12 @@ GATE_CLOCKS(s3c6410_gate_clks) __initdata = {
/* List of PLL clocks. */
static struct samsung_pll_clock s3c64xx_pll_clks[] __initdata = {
- [apll] = PLL(pll_6552, FOUT_APLL, "fout_apll", "fin_pll",
- APLL_LOCK, APLL_CON, NULL),
- [mpll] = PLL(pll_6552, FOUT_MPLL, "fout_mpll", "fin_pll",
- MPLL_LOCK, MPLL_CON, NULL),
- [epll] = PLL(pll_6553, FOUT_EPLL, "fout_epll", "fin_pll",
- EPLL_LOCK, EPLL_CON0, NULL),
+ PLL(pll_6552, FOUT_APLL, "fout_apll", "fin_pll",
+ APLL_LOCK, APLL_CON, NULL),
+ PLL(pll_6552, FOUT_MPLL, "fout_mpll", "fin_pll",
+ MPLL_LOCK, MPLL_CON, NULL),
+ PLL(pll_6553, FOUT_EPLL, "fout_epll", "fin_pll",
+ EPLL_LOCK, EPLL_CON0, NULL),
};
/* Aliases for common s3c64xx clocks. */
diff --git a/drivers/clk/socfpga/Makefile b/drivers/clk/socfpga/Makefile
index 9146c20fe21f..ce5aa7802eb8 100644
--- a/drivers/clk/socfpga/Makefile
+++ b/drivers/clk/socfpga/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += clk.o
-obj-y += clk-gate.o
-obj-y += clk-pll.o
-obj-y += clk-periph.o
-obj-y += clk-pll-a10.o clk-periph-a10.o clk-gate-a10.o
+obj-$(CONFIG_ARCH_SOCFPGA) += clk.o clk-gate.o clk-pll.o clk-periph.o
+obj-$(CONFIG_ARCH_SOCFPGA) += clk-pll-a10.o clk-periph-a10.o clk-gate-a10.o
+obj-$(CONFIG_ARCH_STRATIX10) += clk-s10.o
+obj-$(CONFIG_ARCH_STRATIX10) += clk-pll-s10.o clk-periph-s10.o clk-gate-s10.o
diff --git a/drivers/clk/socfpga/clk-gate-s10.c b/drivers/clk/socfpga/clk-gate-s10.c
new file mode 100644
index 000000000000..eee2d48ab656
--- /dev/null
+++ b/drivers/clk/socfpga/clk-gate-s10.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017, Intel Corporation
+ */
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include "stratix10-clk.h"
+#include "clk.h"
+
+#define SOCFPGA_CS_PDBG_CLK "cs_pdbg_clk"
+#define to_socfpga_gate_clk(p) container_of(p, struct socfpga_gate_clk, hw.hw)
+
+static unsigned long socfpga_gate_clk_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
+ u32 div = 1, val;
+
+ if (socfpgaclk->fixed_div) {
+ div = socfpgaclk->fixed_div;
+ } else if (socfpgaclk->div_reg) {
+ val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
+ val &= GENMASK(socfpgaclk->width - 1, 0);
+ div = (1 << val);
+ }
+ return parent_rate / div;
+}
+
+static unsigned long socfpga_dbg_clk_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
+ u32 div = 1, val;
+
+ val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
+ val &= GENMASK(socfpgaclk->width - 1, 0);
+ div = (1 << val);
+ div = div ? 4 : 1;
+
+ return parent_rate / div;
+}
+
+static u8 socfpga_gate_get_parent(struct clk_hw *hwclk)
+{
+ struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
+ u32 mask;
+ u8 parent = 0;
+
+ if (socfpgaclk->bypass_reg) {
+ mask = (0x1 << socfpgaclk->bypass_shift);
+ parent = ((readl(socfpgaclk->bypass_reg) & mask) >>
+ socfpgaclk->bypass_shift);
+ }
+ return parent;
+}
+
+static struct clk_ops gateclk_ops = {
+ .recalc_rate = socfpga_gate_clk_recalc_rate,
+ .get_parent = socfpga_gate_get_parent,
+};
+
+static const struct clk_ops dbgclk_ops = {
+ .recalc_rate = socfpga_dbg_clk_recalc_rate,
+ .get_parent = socfpga_gate_get_parent,
+};
+
+struct clk *s10_register_gate(const char *name, const char *parent_name,
+ const char * const *parent_names,
+ u8 num_parents, unsigned long flags,
+ void __iomem *regbase, unsigned long gate_reg,
+ unsigned long gate_idx, unsigned long div_reg,
+ unsigned long div_offset, u8 div_width,
+ unsigned long bypass_reg, u8 bypass_shift,
+ u8 fixed_div)
+{
+ struct clk *clk;
+ struct socfpga_gate_clk *socfpga_clk;
+ struct clk_init_data init;
+
+ socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
+ if (!socfpga_clk)
+ return NULL;
+
+ socfpga_clk->hw.reg = regbase + gate_reg;
+ socfpga_clk->hw.bit_idx = gate_idx;
+
+ gateclk_ops.enable = clk_gate_ops.enable;
+ gateclk_ops.disable = clk_gate_ops.disable;
+
+ socfpga_clk->fixed_div = fixed_div;
+
+ if (div_reg)
+ socfpga_clk->div_reg = regbase + div_reg;
+ else
+ socfpga_clk->div_reg = NULL;
+
+ socfpga_clk->width = div_width;
+ socfpga_clk->shift = div_offset;
+
+ if (bypass_reg)
+ socfpga_clk->bypass_reg = regbase + bypass_reg;
+ else
+ socfpga_clk->bypass_reg = NULL;
+ socfpga_clk->bypass_shift = bypass_shift;
+
+ if (streq(name, "cs_pdbg_clk"))
+ init.ops = &dbgclk_ops;
+ else
+ init.ops = &gateclk_ops;
+
+ init.name = name;
+ init.flags = flags;
+
+ init.num_parents = num_parents;
+ init.parent_names = parent_names ? parent_names : &parent_name;
+ socfpga_clk->hw.hw.init = &init;
+
+ clk = clk_register(NULL, &socfpga_clk->hw.hw);
+ if (WARN_ON(IS_ERR(clk))) {
+ kfree(socfpga_clk);
+ return NULL;
+ }
+
+ return clk;
+}
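
s10_register_gate() above is only a registration helper; the clock tables that call it live in clk-s10.c, which is not part of the hunks shown here. A hypothetical single-parent call, with the clock name, parent name and register offset invented purely for illustration, would look like:

/* Hypothetical example only: "example_gate", "osc1" and the 0xa4 offset are
 * illustrative, not taken from the Stratix10 clock manager register map.
 * regbase is assumed to be the ioremapped clock-manager base address. */
struct clk *clk = s10_register_gate("example_gate", "osc1", NULL, 1,
				    0,		/* flags */
				    regbase,	/* ioremapped clock-manager base */
				    0xa4, 0,	/* gate register offset and bit */
				    0, 0, 0,	/* no divider field */
				    0, 0,	/* no bypass mux */
				    0);		/* no fixed divider */
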
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
new file mode 100644
index 000000000000..568f59b58ddf
--- /dev/null
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017, Intel Corporation
+ */
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+
+#include "stratix10-clk.h"
+#include "clk.h"
+
+#define CLK_MGR_FREE_SHIFT 16
+#define CLK_MGR_FREE_MASK 0x7
+#define SWCTRLBTCLKSEN_SHIFT 8
+
+#define to_periph_clk(p) container_of(p, struct socfpga_periph_clk, hw.hw)
+
+static unsigned long clk_peri_c_clk_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_periph_clk *socfpgaclk = to_periph_clk(hwclk);
+ unsigned long div = 1;
+ u32 val;
+
+ val = readl(socfpgaclk->hw.reg);
+ val &= GENMASK(SWCTRLBTCLKSEN_SHIFT - 1, 0);
+ parent_rate /= val;
+
+ return parent_rate / div;
+}
+
+static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_periph_clk *socfpgaclk = to_periph_clk(hwclk);
+ unsigned long div = 1;
+
+ if (socfpgaclk->fixed_div) {
+ div = socfpgaclk->fixed_div;
+ } else {
+ if (!socfpgaclk->bypass_reg)
+ div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
+ }
+
+ return parent_rate / div;
+}
+
+static u8 clk_periclk_get_parent(struct clk_hw *hwclk)
+{
+ struct socfpga_periph_clk *socfpgaclk = to_periph_clk(hwclk);
+ u32 clk_src, mask;
+ u8 parent;
+
+ if (socfpgaclk->bypass_reg) {
+ mask = (0x1 << socfpgaclk->bypass_shift);
+ parent = ((readl(socfpgaclk->bypass_reg) & mask) >>
+ socfpgaclk->bypass_shift);
+ } else {
+ clk_src = readl(socfpgaclk->hw.reg);
+ parent = (clk_src >> CLK_MGR_FREE_SHIFT) &
+ CLK_MGR_FREE_MASK;
+ }
+ return parent;
+}
+
+static const struct clk_ops peri_c_clk_ops = {
+ .recalc_rate = clk_peri_c_clk_recalc_rate,
+ .get_parent = clk_periclk_get_parent,
+};
+
+static const struct clk_ops peri_cnt_clk_ops = {
+ .recalc_rate = clk_peri_cnt_clk_recalc_rate,
+ .get_parent = clk_periclk_get_parent,
+};
+
+struct clk *s10_register_periph(const char *name, const char *parent_name,
+ const char * const *parent_names,
+ u8 num_parents, unsigned long flags,
+ void __iomem *reg, unsigned long offset)
+{
+ struct clk *clk;
+ struct socfpga_periph_clk *periph_clk;
+ struct clk_init_data init;
+
+ periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL);
+ if (WARN_ON(!periph_clk))
+ return NULL;
+
+ periph_clk->hw.reg = reg + offset;
+
+ init.name = name;
+ init.ops = &peri_c_clk_ops;
+ init.flags = flags;
+
+ init.num_parents = num_parents;
+ init.parent_names = parent_names ? parent_names : &parent_name;
+
+ periph_clk->hw.hw.init = &init;
+
+ clk = clk_register(NULL, &periph_clk->hw.hw);
+ if (WARN_ON(IS_ERR(clk))) {
+ kfree(periph_clk);
+ return NULL;
+ }
+ return clk;
+}
+
+struct clk *s10_register_cnt_periph(const char *name, const char *parent_name,
+ const char * const *parent_names,
+ u8 num_parents, unsigned long flags,
+ void __iomem *regbase, unsigned long offset,
+ u8 fixed_divider, unsigned long bypass_reg,
+ unsigned long bypass_shift)
+{
+ struct clk *clk;
+ struct socfpga_periph_clk *periph_clk;
+ struct clk_init_data init;
+
+ periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL);
+ if (WARN_ON(!periph_clk))
+ return NULL;
+
+ if (offset)
+ periph_clk->hw.reg = regbase + offset;
+ else
+ periph_clk->hw.reg = NULL;
+
+ if (bypass_reg)
+ periph_clk->bypass_reg = regbase + bypass_reg;
+ else
+ periph_clk->bypass_reg = NULL;
+ periph_clk->bypass_shift = bypass_shift;
+ periph_clk->fixed_div = fixed_divider;
+
+ init.name = name;
+ init.ops = &peri_cnt_clk_ops;
+ init.flags = flags;
+
+ init.num_parents = num_parents;
+ init.parent_names = parent_names ? parent_names : &parent_name;
+
+ periph_clk->hw.hw.init = &init;
+
+ clk = clk_register(NULL, &periph_clk->hw.hw);
+ if (WARN_ON(IS_ERR(clk))) {
+ kfree(periph_clk);
+ return NULL;
+ }
+ return clk;
+}
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
new file mode 100644
index 000000000000..2d5d8b43727e
--- /dev/null
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017, Intel Corporation
+ */
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+
+#include "stratix10-clk.h"
+#include "clk.h"
+
+/* Clock Manager offsets */
+#define CLK_MGR_PLL_CLK_SRC_SHIFT 16
+#define CLK_MGR_PLL_CLK_SRC_MASK 0x3
+
+/* PLL Clock enable bits */
+#define SOCFPGA_PLL_POWER 0
+#define SOCFPGA_PLL_RESET_MASK 0x2
+#define SOCFPGA_PLL_REFDIV_MASK 0x00003F00
+#define SOCFPGA_PLL_REFDIV_SHIFT 8
+#define SOCFPGA_PLL_MDIV_MASK 0xFF000000
+#define SOCFPGA_PLL_MDIV_SHIFT 24
+#define SWCTRLBTCLKSEL_MASK 0x200
+#define SWCTRLBTCLKSEL_SHIFT 9
+
+#define SOCFPGA_BOOT_CLK "boot_clk"
+
+#define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw)
+
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
+ unsigned long mdiv;
+ unsigned long refdiv;
+ unsigned long reg;
+ unsigned long long vco_freq;
+
+ /* read VCO1 reg for numerator and denominator */
+ reg = readl(socfpgaclk->hw.reg);
+ refdiv = (reg & SOCFPGA_PLL_REFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT;
+ vco_freq = (unsigned long long)parent_rate / refdiv;
+
+ /* Read mdiv and fdiv from the fdbck register */
+ reg = readl(socfpgaclk->hw.reg + 0x4);
+ mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT;
+ vco_freq = (unsigned long long)vco_freq * (mdiv + 6);
+
+ return (unsigned long)vco_freq;
+}
+
+static unsigned long clk_boot_clk_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
+ u32 div = 1;
+
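+ /* The boot clock select bit gives a divide-by-1 or divide-by-2 */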
+ div = ((readl(socfpgaclk->hw.reg) &
+ SWCTRLBTCLKSEL_MASK) >>
+ SWCTRLBTCLKSEL_SHIFT);
+ div += 1;
+ return parent_rate / div;
+}
+
+
+static u8 clk_pll_get_parent(struct clk_hw *hwclk)
+{
+ struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
+ u32 pll_src;
+
+ pll_src = readl(socfpgaclk->hw.reg);
+ return (pll_src >> CLK_MGR_PLL_CLK_SRC_SHIFT) &
+ CLK_MGR_PLL_CLK_SRC_MASK;
+}
+
+static u8 clk_boot_get_parent(struct clk_hw *hwclk)
+{
+ struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
+ u32 pll_src;
+
+ pll_src = readl(socfpgaclk->hw.reg);
+ return (pll_src & SWCTRLBTCLKSEL_MASK) >>
+ SWCTRLBTCLKSEL_SHIFT;
+}
+
+static int clk_pll_prepare(struct clk_hw *hwclk)
+{
+ struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
+ u32 reg;
+
+ /* Bring PLL out of reset */
+ reg = readl(socfpgaclk->hw.reg);
+ reg |= SOCFPGA_PLL_RESET_MASK;
+ writel(reg, socfpgaclk->hw.reg);
+
+ return 0;
+}
+
+static struct clk_ops clk_pll_ops = {
+ .recalc_rate = clk_pll_recalc_rate,
+ .get_parent = clk_pll_get_parent,
+ .prepare = clk_pll_prepare,
+};
+
+static struct clk_ops clk_boot_ops = {
+ .recalc_rate = clk_boot_clk_recalc_rate,
+ .get_parent = clk_boot_get_parent,
+ .prepare = clk_pll_prepare,
+};
+
+struct clk *s10_register_pll(const char *name, const char * const *parent_names,
+ u8 num_parents, unsigned long flags,
+ void __iomem *reg, unsigned long offset)
+{
+ struct clk *clk;
+ struct socfpga_pll *pll_clk;
+ struct clk_init_data init;
+
+ pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
+ if (WARN_ON(!pll_clk))
+ return NULL;
+
+ pll_clk->hw.reg = reg + offset;
+
+ if (streq(name, SOCFPGA_BOOT_CLK))
+ init.ops = &clk_boot_ops;
+ else
+ init.ops = &clk_pll_ops;
+
+ init.name = name;
+ init.flags = flags;
+
+ init.num_parents = num_parents;
+ init.parent_names = parent_names;
+ pll_clk->hw.hw.init = &init;
+
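+ /* The PLL power bit is toggled through the generic clk_gate enable/disable ops */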
+ pll_clk->hw.bit_idx = SOCFPGA_PLL_POWER;
+ clk_pll_ops.enable = clk_gate_ops.enable;
+ clk_pll_ops.disable = clk_gate_ops.disable;
+
+ clk = clk_register(NULL, &pll_clk->hw.hw);
+ if (WARN_ON(IS_ERR(clk))) {
+ kfree(pll_clk);
+ return NULL;
+ }
+ return clk;
+}
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
new file mode 100644
index 000000000000..3a11c382a663
--- /dev/null
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017, Intel Corporation
+ */
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/stratix10-clock.h>
+
+#include "stratix10-clk.h"
+
+static const char * const pll_mux[] = { "osc1", "cb_intosc_hs_div2_clk",
+ "f2s_free_clk",};
+static const char * const cntr_mux[] = { "main_pll", "periph_pll",
+ "osc1", "cb_intosc_hs_div2_clk",
+ "f2s_free_clk"};
+static const char * const boot_mux[] = { "osc1", "cb_intosc_hs_div2_clk",};
+
+static const char * const noc_free_mux[] = {"main_noc_base_clk",
+ "peri_noc_base_clk",
+ "osc1", "cb_intosc_hs_div2_clk",
+ "f2s_free_clk"};
+
+static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"};
+static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"};
+static const char * const emac_ptp_free_mux[] = {"peri_emac_ptp_clk", "boot_clk"};
+static const char * const gpio_db_free_mux[] = {"peri_gpio_db_clk", "boot_clk"};
+static const char * const sdmmc_free_mux[] = {"peri_sdmmc_clk", "boot_clk"};
+static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk"};
+static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"};
+static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",};
+
+static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"};
+static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"};
+static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"};
+
+/* clocks in AO (always on) controller */
+static const struct stratix10_pll_clock s10_pll_clks[] = {
+ { STRATIX10_BOOT_CLK, "boot_clk", boot_mux, ARRAY_SIZE(boot_mux), 0,
+ 0x0},
+ { STRATIX10_MAIN_PLL_CLK, "main_pll", pll_mux, ARRAY_SIZE(pll_mux),
+ 0, 0x74},
+ { STRATIX10_PERIPH_PLL_CLK, "periph_pll", pll_mux, ARRAY_SIZE(pll_mux),
+ 0, 0xe4},
+};
+
+static const struct stratix10_perip_c_clock s10_main_perip_c_clks[] = {
+ { STRATIX10_MAIN_MPU_BASE_CLK, "main_mpu_base_clk", "main_pll", NULL, 1, 0, 0x84},
+ { STRATIX10_MAIN_NOC_BASE_CLK, "main_noc_base_clk", "main_pll", NULL, 1, 0, 0x88},
+ { STRATIX10_PERI_MPU_BASE_CLK, "peri_mpu_base_clk", "periph_pll", NULL, 1, 0,
+ 0xF4},
+ { STRATIX10_PERI_NOC_BASE_CLK, "peri_noc_base_clk", "periph_pll", NULL, 1, 0,
+ 0xF8},
+};
+
+static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
+ { STRATIX10_MPU_FREE_CLK, "mpu_free_clk", NULL, cntr_mux, ARRAY_SIZE(cntr_mux),
+ 0, 0x48, 0, 0, 0},
+ { STRATIX10_NOC_FREE_CLK, "noc_free_clk", NULL, noc_free_mux, ARRAY_SIZE(noc_free_mux),
+ 0, 0x4C, 0, 0, 0},
+ { STRATIX10_MAIN_EMACA_CLK, "main_emaca_clk", "main_noc_base_clk", NULL, 1, 0,
+ 0x50, 0, 0, 0},
+ { STRATIX10_MAIN_EMACB_CLK, "main_emacb_clk", "main_noc_base_clk", NULL, 1, 0,
+ 0x54, 0, 0, 0},
+ { STRATIX10_MAIN_EMAC_PTP_CLK, "main_emac_ptp_clk", "main_noc_base_clk", NULL, 1, 0,
+ 0x58, 0, 0, 0},
+ { STRATIX10_MAIN_GPIO_DB_CLK, "main_gpio_db_clk", "main_noc_base_clk", NULL, 1, 0,
+ 0x5C, 0, 0, 0},
+ { STRATIX10_MAIN_SDMMC_CLK, "main_sdmmc_clk", "main_noc_base_clk", NULL, 1, 0,
+ 0x60, 0, 0, 0},
+ { STRATIX10_MAIN_S2F_USR0_CLK, "main_s2f_usr0_clk", NULL, cntr_mux, ARRAY_SIZE(cntr_mux),
+ 0, 0x64, 0, 0, 0},
+ { STRATIX10_MAIN_S2F_USR1_CLK, "main_s2f_usr1_clk", "main_noc_base_clk", NULL, 1, 0,
+ 0x68, 0, 0, 0},
+ { STRATIX10_MAIN_PSI_REF_CLK, "main_psi_ref_clk", "main_noc_base_clk", NULL, 1, 0,
+ 0x6C, 0, 0, 0},
+ { STRATIX10_PERI_EMACA_CLK, "peri_emaca_clk", NULL, cntr_mux, ARRAY_SIZE(cntr_mux),
+ 0, 0xBC, 0, 0, 0},
+ { STRATIX10_PERI_EMACB_CLK, "peri_emacb_clk", NULL, cntr_mux, ARRAY_SIZE(cntr_mux),
+ 0, 0xC0, 0, 0, 0},
+ { STRATIX10_PERI_EMAC_PTP_CLK, "peri_emac_ptp_clk", NULL, cntr_mux, ARRAY_SIZE(cntr_mux),
+ 0, 0xC4, 0, 0, 0},
+ { STRATIX10_PERI_GPIO_DB_CLK, "peri_gpio_db_clk", NULL, cntr_mux, ARRAY_SIZE(cntr_mux),
+ 0, 0xC8, 0, 0, 0},
+ { STRATIX10_PERI_SDMMC_CLK, "peri_sdmmc_clk", NULL, cntr_mux, ARRAY_SIZE(cntr_mux),
+ 0, 0xCC, 0, 0, 0},
+ { STRATIX10_PERI_S2F_USR0_CLK, "peri_s2f_usr0_clk", "peri_noc_base_clk", NULL, 1, 0,
+ 0xD0, 0, 0, 0},
+ { STRATIX10_PERI_S2F_USR1_CLK, "peri_s2f_usr1_clk", NULL, cntr_mux, ARRAY_SIZE(cntr_mux),
+ 0, 0xD4, 0, 0, 0},
+ { STRATIX10_PERI_PSI_REF_CLK, "peri_psi_ref_clk", "peri_noc_base_clk", NULL, 1, 0,
+ 0xD8, 0, 0, 0},
+ { STRATIX10_L4_SYS_FREE_CLK, "l4_sys_free_clk", "noc_free_clk", NULL, 1, 0,
+ 0, 4, 0, 0},
+ { STRATIX10_NOC_CLK, "noc_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux),
+ 0, 0, 0, 0x3C, 1},
+ { STRATIX10_EMAC_A_FREE_CLK, "emaca_free_clk", NULL, emaca_free_mux, ARRAY_SIZE(emaca_free_mux),
+ 0, 0, 4, 0xB0, 0},
+ { STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
+ 0, 0, 4, 0xB0, 1},
+ { STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux,
+ ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2},
+ { STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux,
+ ARRAY_SIZE(gpio_db_free_mux), 0, 0, 0, 0xB0, 3},
+ { STRATIX10_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux,
+ ARRAY_SIZE(sdmmc_free_mux), 0, 0, 0, 0xB0, 4},
+ { STRATIX10_S2F_USER1_FREE_CLK, "s2f_user1_free_clk", NULL, s2f_usr1_free_mux,
+ ARRAY_SIZE(s2f_usr1_free_mux), 0, 0, 0, 0xB0, 5},
+ { STRATIX10_PSI_REF_FREE_CLK, "psi_ref_free_clk", NULL, psi_ref_free_mux,
+ ARRAY_SIZE(psi_ref_free_mux), 0, 0, 0, 0xB0, 6},
+};
+
+static const struct stratix10_gate_clock s10_gate_clks[] = {
+ { STRATIX10_MPU_CLK, "mpu_clk", NULL, mpu_mux, ARRAY_SIZE(mpu_mux), 0, 0x30,
+ 0, 0, 0, 0, 0x3C, 0, 0},
+ { STRATIX10_MPU_PERIPH_CLK, "mpu_periph_clk", "mpu_clk", NULL, 1, 0, 0x30,
+ 0, 0, 0, 0, 0, 0, 4},
+ { STRATIX10_MPU_L2RAM_CLK, "mpu_l2ram_clk", "mpu_clk", NULL, 1, 0, 0x30,
+ 0, 0, 0, 0, 0, 0, 2},
+ { STRATIX10_L4_MAIN_CLK, "l4_main_clk", "noc_clk", NULL, 1, 0, 0x30,
+ 1, 0x70, 0, 2, 0, 0, 0},
+ { STRATIX10_L4_MP_CLK, "l4_mp_clk", "noc_clk", NULL, 1, 0, 0x30,
+ 2, 0x70, 8, 2, 0, 0, 0},
+ { STRATIX10_L4_SP_CLK, "l4_sp_clk", "noc_clk", NULL, 1, CLK_IS_CRITICAL, 0x30,
+ 3, 0x70, 16, 2, 0, 0, 0},
+ { STRATIX10_CS_AT_CLK, "cs_at_clk", "noc_clk", NULL, 1, 0, 0x30,
+ 4, 0x70, 24, 2, 0, 0, 0},
+ { STRATIX10_CS_TRACE_CLK, "cs_trace_clk", "noc_clk", NULL, 1, 0, 0x30,
+ 4, 0x70, 26, 2, 0, 0, 0},
+ { STRATIX10_CS_PDBG_CLK, "cs_pdbg_clk", "cs_at_clk", NULL, 1, 0, 0x30,
+ 4, 0x70, 28, 1, 0, 0, 0},
+ { STRATIX10_CS_TIMER_CLK, "cs_timer_clk", "noc_clk", NULL, 1, 0, 0x30,
+ 5, 0, 0, 0, 0, 0, 0},
+ { STRATIX10_S2F_USER0_CLK, "s2f_user0_clk", NULL, s2f_usr0_mux, ARRAY_SIZE(s2f_usr0_mux), 0, 0x30,
+ 6, 0, 0, 0, 0, 0, 0},
+ { STRATIX10_EMAC0_CLK, "emac0_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0xA4,
+ 0, 0, 0, 0, 0xDC, 26, 0},
+ { STRATIX10_EMAC1_CLK, "emac1_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0xA4,
+ 1, 0, 0, 0, 0xDC, 27, 0},
+ { STRATIX10_EMAC2_CLK, "emac2_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0xA4,
+ 2, 0, 0, 0, 0xDC, 28, 0},
+ { STRATIX10_EMAC_PTP_CLK, "emac_ptp_clk", "emac_ptp_free_clk", NULL, 1, 0, 0xA4,
+ 3, 0, 0, 0, 0, 0, 0},
+ { STRATIX10_GPIO_DB_CLK, "gpio_db_clk", "gpio_db_free_clk", NULL, 1, 0, 0xA4,
+ 4, 0xE0, 0, 16, 0, 0, 0},
+ { STRATIX10_SDMMC_CLK, "sdmmc_clk", "sdmmc_free_clk", NULL, 1, 0, 0xA4,
+ 5, 0, 0, 0, 0, 0, 4},
+ { STRATIX10_S2F_USER1_CLK, "s2f_user1_clk", "s2f_user1_free_clk", NULL, 1, 0, 0xA4,
+ 6, 0, 0, 0, 0, 0, 0},
+ { STRATIX10_PSI_REF_CLK, "psi_ref_clk", "psi_ref_free_clk", NULL, 1, 0, 0xA4,
+ 7, 0, 0, 0, 0, 0, 0},
+ { STRATIX10_USB_CLK, "usb_clk", "l4_mp_clk", NULL, 1, 0, 0xA4,
+ 8, 0, 0, 0, 0, 0, 0},
+ { STRATIX10_SPI_M_CLK, "spi_m_clk", "l4_mp_clk", NULL, 1, 0, 0xA4,
+ 9, 0, 0, 0, 0, 0, 0},
+ { STRATIX10_NAND_CLK, "nand_clk", "l4_main_clk", NULL, 1, 0, 0xA4,
+ 10, 0, 0, 0, 0, 0, 0},
+};
+
+static int s10_clk_register_c_perip(const struct stratix10_perip_c_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk *clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = s10_register_periph(clks[i].name, clks[i].parent_name,
+ clks[i].parent_names, clks[i].num_parents,
+ clks[i].flags, base, clks[i].offset);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+ return 0;
+}
+
+static int s10_clk_register_cnt_perip(const struct stratix10_perip_cnt_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk *clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = s10_register_cnt_periph(clks[i].name, clks[i].parent_name,
+ clks[i].parent_names,
+ clks[i].num_parents,
+ clks[i].flags, base,
+ clks[i].offset,
+ clks[i].fixed_divider,
+ clks[i].bypass_reg,
+ clks[i].bypass_shift);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+
+ return 0;
+}
+
+static int s10_clk_register_gate(const struct stratix10_gate_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk *clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = s10_register_gate(clks[i].name, clks[i].parent_name,
+ clks[i].parent_names,
+ clks[i].num_parents,
+ clks[i].flags, base,
+ clks[i].gate_reg,
+ clks[i].gate_idx, clks[i].div_reg,
+ clks[i].div_offset, clks[i].div_width,
+ clks[i].bypass_reg,
+ clks[i].bypass_shift,
+ clks[i].fixed_div);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+
+ return 0;
+}
+
+static int s10_clk_register_pll(const struct stratix10_pll_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk *clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = s10_register_pll(clks[i].name, clks[i].parent_names,
+ clks[i].num_parents,
+ clks[i].flags, base,
+ clks[i].offset);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+
+ return 0;
+}
+
+static struct stratix10_clock_data *__socfpga_s10_clk_init(struct device_node *np,
+ int nr_clks)
+{
+ struct stratix10_clock_data *clk_data;
+ struct clk **clk_table;
+ void __iomem *base;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_err("%s: failed to map clock registers\n", __func__);
+ goto err;
+ }
+
+ clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ goto err;
+
+ clk_data->base = base;
+ clk_table = kcalloc(nr_clks, sizeof(*clk_table), GFP_KERNEL);
+ if (!clk_table)
+ goto err_data;
+
+ clk_data->clk_data.clks = clk_table;
+ clk_data->clk_data.clk_num = nr_clks;
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data->clk_data);
+ return clk_data;
+
+err_data:
+ kfree(clk_data);
+err:
+ return NULL;
+}
+
+static int s10_clkmgr_init(struct device_node *np)
+{
+ struct stratix10_clock_data *clk_data;
+
+ clk_data = __socfpga_s10_clk_init(np, STRATIX10_NUM_CLKS);
+ if (!clk_data)
+ return -ENOMEM;
+
+ s10_clk_register_pll(s10_pll_clks, ARRAY_SIZE(s10_pll_clks), clk_data);
+
+ s10_clk_register_c_perip(s10_main_perip_c_clks,
+ ARRAY_SIZE(s10_main_perip_c_clks), clk_data);
+
+ s10_clk_register_cnt_perip(s10_main_perip_cnt_clks,
+ ARRAY_SIZE(s10_main_perip_cnt_clks),
+ clk_data);
+
+ s10_clk_register_gate(s10_gate_clks, ARRAY_SIZE(s10_gate_clks),
+ clk_data);
+ return 0;
+}
+
+static int s10_clkmgr_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ s10_clkmgr_init(np);
+
+ return 0;
+}
+
+static const struct of_device_id stratix10_clkmgr_match_table[] = {
+ { .compatible = "intel,stratix10-clkmgr",
+ .data = s10_clkmgr_init },
+ { }
+};
+
+static struct platform_driver stratix10_clkmgr_driver = {
+ .probe = s10_clkmgr_probe,
+ .driver = {
+ .name = "stratix10-clkmgr",
+ .of_match_table = stratix10_clkmgr_match_table,
+ },
+};
+
+static int __init s10_clk_init(void)
+{
+ return platform_driver_register(&stratix10_clkmgr_driver);
+}
+core_initcall(s10_clk_init);
diff --git a/drivers/clk/socfpga/clk.h b/drivers/clk/socfpga/clk.h
index 9cf1230115b1..26c3a265cf78 100644
--- a/drivers/clk/socfpga/clk.h
+++ b/drivers/clk/socfpga/clk.h
@@ -54,9 +54,11 @@ struct socfpga_gate_clk {
char *parent_name;
u32 fixed_div;
void __iomem *div_reg;
+ void __iomem *bypass_reg;
struct regmap *sys_mgr_base_addr;
u32 width; /* only valid if div_reg != 0 */
u32 shift; /* only valid if div_reg != 0 */
+ u32 bypass_shift; /* only valid if bypass_reg != 0 */
u32 clk_phase[2];
};
@@ -65,8 +67,10 @@ struct socfpga_periph_clk {
char *parent_name;
u32 fixed_div;
void __iomem *div_reg;
+ void __iomem *bypass_reg;
u32 width; /* only valid if div_reg != 0 */
u32 shift; /* only valid if div_reg != 0 */
+ u32 bypass_shift; /* only valid if bypass_reg != 0 */
};
#endif /* SOCFPGA_CLK_H */
diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h
new file mode 100644
index 000000000000..e8e121907952
--- /dev/null
+++ b/drivers/clk/socfpga/stratix10-clk.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017, Intel Corporation
+ */
+
+#ifndef __STRATIX10_CLK_H
+#define __STRATIX10_CLK_H
+
+struct stratix10_clock_data {
+ struct clk_onecell_data clk_data;
+ void __iomem *base;
+};
+
+struct stratix10_pll_clock {
+ unsigned int id;
+ const char *name;
+ const char *const *parent_names;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned long offset;
+};
+
+struct stratix10_perip_c_clock {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ const char *const *parent_names;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned long offset;
+};
+
+struct stratix10_perip_cnt_clock {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ const char *const *parent_names;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned long offset;
+ u8 fixed_divider;
+ unsigned long bypass_reg;
+ unsigned long bypass_shift;
+};
+
+struct stratix10_gate_clock {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ const char *const *parent_names;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned long gate_reg;
+ u8 gate_idx;
+ unsigned long div_reg;
+ u8 div_offset;
+ u8 div_width;
+ unsigned long bypass_reg;
+ u8 bypass_shift;
+ u8 fixed_div;
+};
+
+struct clk *s10_register_pll(const char *, const char *const *, u8,
+ unsigned long, void __iomem *, unsigned long);
+
+struct clk *s10_register_periph(const char *, const char *,
+ const char * const *, u8, unsigned long,
+ void __iomem *, unsigned long);
+struct clk *s10_register_cnt_periph(const char *, const char *,
+ const char * const *, u8,
+ unsigned long, void __iomem *,
+ unsigned long, u8, unsigned long,
+ unsigned long);
+struct clk *s10_register_gate(const char *, const char *,
+ const char * const *, u8,
+ unsigned long, void __iomem *,
+ unsigned long, unsigned long,
+ unsigned long, unsigned long, u8,
+ unsigned long, u8, u8);
+#endif /* __STRATIX10_CLK_H */
diff --git a/drivers/clk/sprd/sc9860-clk.c b/drivers/clk/sprd/sc9860-clk.c
index ed5c027df0f4..9980ab55271b 100644
--- a/drivers/clk/sprd/sc9860-clk.c
+++ b/drivers/clk/sprd/sc9860-clk.c
@@ -959,6 +959,44 @@ static SPRD_SC_GATE_CLK(sdio2_2x_en, "sdio2-2x-en", "aon-apb", 0x13c,
0x1000, BIT(6), 0, 0);
static SPRD_SC_GATE_CLK(emmc_2x_en, "emmc-2x-en", "aon-apb", 0x13c,
0x1000, BIT(9), 0, 0);
+static SPRD_SC_GATE_CLK(arch_rtc_eb, "arch-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(kpb_rtc_eb, "kpb-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(1), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(aon_syst_rtc_eb, "aon-syst-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(2), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(ap_syst_rtc_eb, "ap-syst-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(3), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(aon_tmr_rtc_eb, "aon-tmr-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(4), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(ap_tmr0_rtc_eb, "ap-tmr0-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(5), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(eic_rtc_eb, "eic-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(6), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(eic_rtcdv5_eb, "eic-rtcdv5-eb", "aon-apb", 0x10,
+ 0x1000, BIT(7), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(ap_wdg_rtc_eb, "ap-wdg-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(9), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(ap_tmr1_rtc_eb, "ap-tmr1-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(15), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(ap_tmr2_rtc_eb, "ap-tmr2-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(16), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(dcxo_tmr_rtc_eb, "dcxo-tmr-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(17), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(bb_cal_rtc_eb, "bb-cal-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(18), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(avs_big_rtc_eb, "avs-big-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(20), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(avs_lit_rtc_eb, "avs-lit-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(21), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(avs_gpu0_rtc_eb, "avs-gpu0-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(22), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(avs_gpu1_rtc_eb, "avs-gpu1-rtc-eb", "aon-apb", 0x10,
+ 0x1000, BIT(23), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(gpu_ts_eb, "gpu-ts-eb", "aon-apb", 0x10,
+ 0x1000, BIT(24), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK(rtcdv10_eb, "rtcdv10-eb", "aon-apb", 0x10,
+ 0x1000, BIT(27), CLK_IGNORE_UNUSED, 0);
static struct sprd_clk_common *sc9860_aon_gate[] = {
/* address base is 0x402e0000 */
@@ -1030,6 +1068,25 @@ static struct sprd_clk_common *sc9860_aon_gate[] = {
&sdio1_2x_en.common,
&sdio2_2x_en.common,
&emmc_2x_en.common,
+ &arch_rtc_eb.common,
+ &kpb_rtc_eb.common,
+ &aon_syst_rtc_eb.common,
+ &ap_syst_rtc_eb.common,
+ &aon_tmr_rtc_eb.common,
+ &ap_tmr0_rtc_eb.common,
+ &eic_rtc_eb.common,
+ &eic_rtcdv5_eb.common,
+ &ap_wdg_rtc_eb.common,
+ &ap_tmr1_rtc_eb.common,
+ &ap_tmr2_rtc_eb.common,
+ &dcxo_tmr_rtc_eb.common,
+ &bb_cal_rtc_eb.common,
+ &avs_big_rtc_eb.common,
+ &avs_lit_rtc_eb.common,
+ &avs_gpu0_rtc_eb.common,
+ &avs_gpu1_rtc_eb.common,
+ &gpu_ts_eb.common,
+ &rtcdv10_eb.common,
};
static struct clk_hw_onecell_data sc9860_aon_gate_hws = {
@@ -1102,6 +1159,25 @@ static struct clk_hw_onecell_data sc9860_aon_gate_hws = {
[CLK_SDIO1_2X_EN] = &sdio1_2x_en.common.hw,
[CLK_SDIO2_2X_EN] = &sdio2_2x_en.common.hw,
[CLK_EMMC_2X_EN] = &emmc_2x_en.common.hw,
+ [CLK_ARCH_RTC_EB] = &arch_rtc_eb.common.hw,
+ [CLK_KPB_RTC_EB] = &kpb_rtc_eb.common.hw,
+ [CLK_AON_SYST_RTC_EB] = &aon_syst_rtc_eb.common.hw,
+ [CLK_AP_SYST_RTC_EB] = &ap_syst_rtc_eb.common.hw,
+ [CLK_AON_TMR_RTC_EB] = &aon_tmr_rtc_eb.common.hw,
+ [CLK_AP_TMR0_RTC_EB] = &ap_tmr0_rtc_eb.common.hw,
+ [CLK_EIC_RTC_EB] = &eic_rtc_eb.common.hw,
+ [CLK_EIC_RTCDV5_EB] = &eic_rtcdv5_eb.common.hw,
+ [CLK_AP_WDG_RTC_EB] = &ap_wdg_rtc_eb.common.hw,
+ [CLK_AP_TMR1_RTC_EB] = &ap_tmr1_rtc_eb.common.hw,
+ [CLK_AP_TMR2_RTC_EB] = &ap_tmr2_rtc_eb.common.hw,
+ [CLK_DCXO_TMR_RTC_EB] = &dcxo_tmr_rtc_eb.common.hw,
+ [CLK_BB_CAL_RTC_EB] = &bb_cal_rtc_eb.common.hw,
+ [CLK_AVS_BIG_RTC_EB] = &avs_big_rtc_eb.common.hw,
+ [CLK_AVS_LIT_RTC_EB] = &avs_lit_rtc_eb.common.hw,
+ [CLK_AVS_GPU0_RTC_EB] = &avs_gpu0_rtc_eb.common.hw,
+ [CLK_AVS_GPU1_RTC_EB] = &avs_gpu1_rtc_eb.common.hw,
+ [CLK_GPU_TS_EB] = &gpu_ts_eb.common.hw,
+ [CLK_RTCDV10_EB] = &rtcdv10_eb.common.hw,
},
.num = CLK_AON_GATE_NUM,
};
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 6427d0ebe2de..79dfd296c3d1 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -11,15 +11,13 @@ config SUN50I_A64_CCU
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+config SUN50I_H6_CCU
+ bool "Support for the Allwinner H6 CCU"
+ default ARM64 && ARCH_SUNXI
+ depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+
config SUN4I_A10_CCU
bool "Support for the Allwinner A10/A20 CCU"
- select SUNXI_CCU_DIV
- select SUNXI_CCU_MULT
- select SUNXI_CCU_NK
- select SUNXI_CCU_NKM
- select SUNXI_CCU_NM
- select SUNXI_CCU_MP
- select SUNXI_CCU_PHASE
default MACH_SUN4I
default MACH_SUN7I
depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 4141c3fe08ae..128a40ee5c5e 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -22,6 +22,7 @@ lib-$(CONFIG_SUNXI_CCU) += ccu_mp.o
# SoC support
obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o
+obj-$(CONFIG_SUN50I_H6_CCU) += ccu-sun50i-h6.o
obj-$(CONFIG_SUN4I_A10_CCU) += ccu-sun4i-a10.o
obj-$(CONFIG_SUN5I_CCU) += ccu-sun5i.o
obj-$(CONFIG_SUN6I_A31_CCU) += ccu-sun6i-a31.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
new file mode 100644
index 000000000000..bdbfe78fe133
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -0,0 +1,1211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017 Icenowy Zheng <icenowy@aosc.io>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nk.h"
+#include "ccu_nkm.h"
+#include "ccu_nkmp.h"
+#include "ccu_nm.h"
+
+#include "ccu-sun50i-h6.h"
+
+/*
+ * The CPU PLL is actually an NP clock, with P being /1, /2 or /4. However,
+ * P should only be used for output frequencies lower than 288 MHz.
+ *
+ * For now we can just model it as a multiplier clock, and force P to /1.
+ *
+ * The M factor is present in the register's description, but not in the
+ * frequency formula, and it's documented as "M is only used for backdoor
+ * testing", so it's not modelled and then force to 0.
+ */
+#define SUN50I_H6_PLL_CPUX_REG 0x000
+static struct ccu_mult pll_cpux_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .mult = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .common = {
+ .reg = 0x000,
+ .hw.init = CLK_HW_INIT("pll-cpux", "osc24M",
+ &ccu_mult_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/* Some PLLs are input * N / div1 / P. Model them as NKMP with no K */
+#define SUN50I_H6_PLL_DDR0_REG 0x010
+static struct ccu_nkmp pll_ddr0_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x010,
+ .hw.init = CLK_HW_INIT("pll-ddr0", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H6_PLL_PERIPH0_REG 0x020
+static struct ccu_nkmp pll_periph0_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .fixed_post_div = 4,
+ .common = {
+ .reg = 0x020,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-periph0", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H6_PLL_PERIPH1_REG 0x028
+static struct ccu_nkmp pll_periph1_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .fixed_post_div = 4,
+ .common = {
+ .reg = 0x028,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-periph1", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H6_PLL_GPU_REG 0x030
+static struct ccu_nkmp pll_gpu_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x030,
+ .hw.init = CLK_HW_INIT("pll-gpu", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/*
+ * For Video PLLs, the output divider is described as "used for testing"
+ * in the user manual. So it's not modelled and forced to 0.
+ */
+#define SUN50I_H6_PLL_VIDEO0_REG 0x040
+static struct ccu_nm pll_video0_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .fixed_post_div = 4,
+ .common = {
+ .reg = 0x040,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-video0", "osc24M",
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H6_PLL_VIDEO1_REG 0x048
+static struct ccu_nm pll_video1_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .fixed_post_div = 4,
+ .common = {
+ .reg = 0x048,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-video1", "osc24M",
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H6_PLL_VE_REG 0x058
+static struct ccu_nkmp pll_ve_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x058,
+ .hw.init = CLK_HW_INIT("pll-ve", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H6_PLL_DE_REG 0x060
+static struct ccu_nkmp pll_de_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x060,
+ .hw.init = CLK_HW_INIT("pll-de", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_H6_PLL_HSIC_REG 0x070
+static struct ccu_nkmp pll_hsic_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x070,
+ .hw.init = CLK_HW_INIT("pll-hsic", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/*
+ * The Audio PLL is supposed to have 3 outputs: 2 fixed factors from
+ * the base (2x and 4x), and one variable divider (the one true pll audio).
+ *
+ * We don't have any need for the variable divider for now, so we just
+ * hardcode it to match the clock names.
+ */
+#define SUN50I_H6_PLL_AUDIO_REG 0x078
+static struct ccu_nm pll_audio_base_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x078,
+ .hw.init = CLK_HW_INIT("pll-audio-base", "osc24M",
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static const char * const cpux_parents[] = { "osc24M", "osc32k",
+ "iosc", "pll-cpux" };
+static SUNXI_CCU_MUX(cpux_clk, "cpux", cpux_parents,
+ 0x500, 24, 2, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+static SUNXI_CCU_M(axi_clk, "axi", "cpux", 0x500, 0, 2, 0);
+static SUNXI_CCU_M(cpux_apb_clk, "cpux-apb", "cpux", 0x500, 8, 2, 0);
+
+static const char * const psi_ahb1_ahb2_parents[] = { "osc24M", "osc32k",
+ "iosc", "pll-periph0" };
+static SUNXI_CCU_MP_WITH_MUX(psi_ahb1_ahb2_clk, "psi-ahb1-ahb2",
+ psi_ahb1_ahb2_parents,
+ 0x510,
+ 0, 5, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static const char * const ahb3_apb1_apb2_parents[] = { "osc24M", "osc32k",
+ "psi-ahb1-ahb2",
+ "pll-periph0" };
+static SUNXI_CCU_MP_WITH_MUX(ahb3_clk, "ahb3", ahb3_apb1_apb2_parents, 0x51c,
+ 0, 5, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", ahb3_apb1_apb2_parents, 0x520,
+ 0, 5, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", ahb3_apb1_apb2_parents, 0x524,
+ 0, 5, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static const char * const mbus_parents[] = { "osc24M", "pll-periph0-2x",
+ "pll-ddr0", "pll-periph0-4x" };
+static SUNXI_CCU_M_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents, 0x540,
+ 0, 3, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ CLK_IS_CRITICAL);
+
+static const char * const de_parents[] = { "pll-de", "pll-periph0-2x" };
+static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents, 0x600,
+ 0, 4, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_de_clk, "bus-de", "psi-ahb1-ahb2",
+ 0x60c, BIT(0), 0);
+
+static const char * const deinterlace_parents[] = { "pll-periph0",
+ "pll-periph1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(deinterlace_clk, "deinterlace",
+ deinterlace_parents,
+ 0x620,
+ 0, 4, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_deinterlace_clk, "bus-deinterlace", "psi-ahb1-ahb2",
+ 0x62c, BIT(0), 0);
+
+static const char * const gpu_parents[] = { "pll-gpu" };
+static SUNXI_CCU_M_WITH_MUX_GATE(gpu_clk, "gpu", gpu_parents, 0x670,
+ 0, 3, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "psi-ahb1-ahb2",
+ 0x67c, BIT(0), 0);
+
+/* Also applies to EMCE */
+static const char * const ce_parents[] = { "osc24M", "pll-periph0-2x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ce_clk, "ce", ce_parents, 0x680,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 1, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_ce_clk, "bus-ce", "psi-ahb1-ahb2",
+ 0x68c, BIT(0), 0);
+
+static const char * const ve_parents[] = { "pll-ve" };
+static SUNXI_CCU_M_WITH_MUX_GATE(ve_clk, "ve", ve_parents, 0x690,
+ 0, 3, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "psi-ahb1-ahb2",
+ 0x69c, BIT(0), 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(emce_clk, "emce", ce_parents, 0x6b0,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 1, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_emce_clk, "bus-emce", "psi-ahb1-ahb2",
+ 0x6bc, BIT(0), 0);
+
+static const char * const vp9_parents[] = { "pll-ve", "pll-periph0-2x" };
+static SUNXI_CCU_M_WITH_MUX_GATE(vp9_clk, "vp9", vp9_parents, 0x6c0,
+ 0, 3, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_vp9_clk, "bus-vp9", "psi-ahb1-ahb2",
+ 0x6cc, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_dma_clk, "bus-dma", "psi-ahb1-ahb2",
+ 0x70c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_msgbox_clk, "bus-msgbox", "psi-ahb1-ahb2",
+ 0x71c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_spinlock_clk, "bus-spinlock", "psi-ahb1-ahb2",
+ 0x72c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_hstimer_clk, "bus-hstimer", "psi-ahb1-ahb2",
+ 0x73c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M", 0x740, BIT(31), 0);
+
+static SUNXI_CCU_GATE(bus_dbg_clk, "bus-dbg", "psi-ahb1-ahb2",
+ 0x78c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_psi_clk, "bus-psi", "psi-ahb1-ahb2",
+ 0x79c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_pwm_clk, "bus-pwm", "apb1", 0x79c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_iommu_clk, "bus-iommu", "apb1", 0x7bc, BIT(0), 0);
+
+static const char * const dram_parents[] = { "pll-ddr0" };
+static struct ccu_div dram_clk = {
+ .div = _SUNXI_CCU_DIV(0, 2),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0x800,
+ .hw.init = CLK_HW_INIT_PARENTS("dram",
+ dram_parents,
+ &ccu_div_ops,
+ CLK_IS_CRITICAL),
+ },
+};
+
+static SUNXI_CCU_GATE(mbus_dma_clk, "mbus-dma", "mbus",
+ 0x804, BIT(0), 0);
+static SUNXI_CCU_GATE(mbus_ve_clk, "mbus-ve", "mbus",
+ 0x804, BIT(1), 0);
+static SUNXI_CCU_GATE(mbus_ce_clk, "mbus-ce", "mbus",
+ 0x804, BIT(2), 0);
+static SUNXI_CCU_GATE(mbus_ts_clk, "mbus-ts", "mbus",
+ 0x804, BIT(3), 0);
+static SUNXI_CCU_GATE(mbus_nand_clk, "mbus-nand", "mbus",
+ 0x804, BIT(5), 0);
+static SUNXI_CCU_GATE(mbus_csi_clk, "mbus-csi", "mbus",
+ 0x804, BIT(8), 0);
+static SUNXI_CCU_GATE(mbus_deinterlace_clk, "mbus-deinterlace", "mbus",
+ 0x804, BIT(11), 0);
+
+static SUNXI_CCU_GATE(bus_dram_clk, "bus-dram", "psi-ahb1-ahb2",
+ 0x80c, BIT(0), CLK_IS_CRITICAL);
+
+static const char * const nand_spi_parents[] = { "osc24M", "pll-periph0",
+ "pll-periph1", "pll-periph0-2x",
+ "pll-periph1-2x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(nand0_clk, "nand0", nand_spi_parents, 0x810,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(nand1_clk, "nand1", nand_spi_parents, 0x814,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_nand_clk, "bus-nand", "ahb3", 0x82c, BIT(0), 0);
+
+static const char * const mmc_parents[] = { "osc24M", "pll-periph0-2x",
+ "pll-periph1-2x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc_parents, 0x830,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc_parents, 0x834,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc_parents, 0x838,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb3", 0x84c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb3", 0x84c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_mmc2_clk, "bus-mmc2", "ahb3", 0x84c, BIT(2), 0);
+
+static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2", 0x90c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2", 0x90c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_uart2_clk, "bus-uart2", "apb2", 0x90c, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_uart3_clk, "bus-uart3", "apb2", 0x90c, BIT(3), 0);
+
+static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2", 0x91c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2", 0x91c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2", 0x91c, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_i2c3_clk, "bus-i2c3", "apb2", 0x91c, BIT(3), 0);
+
+static SUNXI_CCU_GATE(bus_scr0_clk, "bus-scr0", "apb2", 0x93c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_scr1_clk, "bus-scr1", "apb2", 0x93c, BIT(1), 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", nand_spi_parents, 0x940,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", nand_spi_parents, 0x944,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 3, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_spi0_clk, "bus-spi0", "ahb3", 0x96c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_spi1_clk, "bus-spi1", "ahb3", 0x96c, BIT(1), 0);
+
+static SUNXI_CCU_GATE(bus_emac_clk, "bus-emac", "ahb3", 0x97c, BIT(0), 0);
+
+static const char * const ts_parents[] = { "osc24M", "pll-periph0" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ts_clk, "ts", ts_parents, 0x9b0,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 1, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_ts_clk, "bus-ts", "ahb3", 0x9bc, BIT(0), 0);
+
+static const char * const ir_tx_parents[] = { "osc32k", "osc24M" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ir_tx_clk, "ir-tx", ir_tx_parents, 0x9c0,
+ 0, 4, /* M */
+ 8, 2, /* N */
+ 24, 1, /* mux */
+ BIT(31),/* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_ir_tx_clk, "bus-ir-tx", "apb1", 0x9cc, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_ths_clk, "bus-ths", "apb1", 0x9fc, BIT(0), 0);
+
+static const char * const audio_parents[] = { "pll-audio", "pll-audio-2x", "pll-audio-4x" };
+static struct ccu_div i2s3_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa0c,
+ .hw.init = CLK_HW_INIT_PARENTS("i2s3",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct ccu_div i2s0_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa10,
+ .hw.init = CLK_HW_INIT_PARENTS("i2s0",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct ccu_div i2s1_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa14,
+ .hw.init = CLK_HW_INIT_PARENTS("i2s1",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct ccu_div i2s2_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa18,
+ .hw.init = CLK_HW_INIT_PARENTS("i2s2",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_i2s0_clk, "bus-i2s0", "apb1", 0xa1c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_i2s1_clk, "bus-i2s1", "apb1", 0xa1c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_i2s2_clk, "bus-i2s2", "apb1", 0xa1c, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_i2s3_clk, "bus-i2s3", "apb1", 0xa1c, BIT(3), 0);
+
+static struct ccu_div spdif_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa20,
+ .hw.init = CLK_HW_INIT_PARENTS("spdif",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_spdif_clk, "bus-spdif", "apb1", 0xa2c, BIT(0), 0);
+
+static struct ccu_div dmic_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa40,
+ .hw.init = CLK_HW_INIT_PARENTS("dmic",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_dmic_clk, "bus-dmic", "apb1", 0xa4c, BIT(0), 0);
+
+static struct ccu_div audio_hub_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa60,
+ .hw.init = CLK_HW_INIT_PARENTS("audio-hub",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_audio_hub_clk, "bus-audio-hub", "apb1", 0xa6c, BIT(0), 0);
+
+/*
+ * There are OHCI 12M clock source selection bits for 2 USB 2.0 ports.
+ * We will force them to 0 (12M divided from 48M).
+ */
+#define SUN50I_H6_USB0_CLK_REG 0xa70
+#define SUN50I_H6_USB3_CLK_REG 0xa7c
+
+static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "osc12M", 0xa70, BIT(31), 0);
+static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M", 0xa70, BIT(29), 0);
+
+static SUNXI_CCU_GATE(usb_phy1_clk, "usb-phy1", "osc24M", 0xa74, BIT(29), 0);
+
+static SUNXI_CCU_GATE(usb_ohci3_clk, "usb-ohci3", "osc12M", 0xa7c, BIT(31), 0);
+static SUNXI_CCU_GATE(usb_phy3_clk, "usb-phy3", "osc12M", 0xa7c, BIT(29), 0);
+static SUNXI_CCU_GATE(usb_hsic_12m_clk, "usb-hsic-12M", "osc12M", 0xa7c, BIT(27), 0);
+static SUNXI_CCU_GATE(usb_hsic_clk, "usb-hsic", "pll-hsic", 0xa7c, BIT(26), 0);
+
+static SUNXI_CCU_GATE(bus_ohci0_clk, "bus-ohci0", "ahb3", 0xa8c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_ohci3_clk, "bus-ohci3", "ahb3", 0xa8c, BIT(3), 0);
+static SUNXI_CCU_GATE(bus_ehci0_clk, "bus-ehci0", "ahb3", 0xa8c, BIT(4), 0);
+static SUNXI_CCU_GATE(bus_xhci_clk, "bus-xhci", "ahb3", 0xa8c, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_ehci3_clk, "bus-ehci3", "ahb3", 0xa8c, BIT(7), 0);
+static SUNXI_CCU_GATE(bus_otg_clk, "bus-otg", "ahb3", 0xa8c, BIT(8), 0);
+
+static CLK_FIXED_FACTOR(pcie_ref_100m_clk, "pcie-ref-100M",
+ "pll-periph0-4x", 24, 1, 0);
+static SUNXI_CCU_GATE(pcie_ref_clk, "pcie-ref", "pcie-ref-100M",
+ 0xab0, BIT(31), 0);
+static SUNXI_CCU_GATE(pcie_ref_out_clk, "pcie-ref-out", "pcie-ref",
+ 0xab0, BIT(30), 0);
+
+static SUNXI_CCU_M_WITH_GATE(pcie_maxi_clk, "pcie-maxi",
+ "pll-periph0", 0xab4,
+ 0, 4, /* M */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_M_WITH_GATE(pcie_aux_clk, "pcie-aux", "osc24M", 0xab8,
+ 0, 5, /* M */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_pcie_clk, "bus-pcie", "psi-ahb1-ahb2",
+ 0xabc, BIT(0), 0);
+
+static const char * const hdmi_parents[] = { "pll-video0", "pll-video1",
+ "pll-video1-4x" };
+static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", hdmi_parents, 0xb00,
+ 0, 4, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(hdmi_slow_clk, "hdmi-slow", "osc24M", 0xb04, BIT(31), 0);
+
+static const char * const hdmi_cec_parents[] = { "osc32k", "pll-periph0-2x" };
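+/* The fixed prediv brings pll-periph0-2x down close to the 32 kHz CEC rate */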
+static const struct ccu_mux_fixed_prediv hdmi_cec_predivs[] = {
+ { .index = 1, .div = 36621 },
+};
+static struct ccu_mux hdmi_cec_clk = {
+ .enable = BIT(31),
+
+ .mux = {
+ .shift = 24,
+ .width = 2,
+
+ .fixed_predivs = hdmi_cec_predivs,
+ .n_predivs = ARRAY_SIZE(hdmi_cec_predivs),
+ },
+
+ .common = {
+ .reg = 0xb10,
+ .features = CCU_FEATURE_VARIABLE_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("hdmi-cec",
+ hdmi_cec_parents,
+ &ccu_mux_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_hdmi_clk, "bus-hdmi", "ahb3", 0xb1c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_tcon_top_clk, "bus-tcon-top", "ahb3",
+ 0xb5c, BIT(0), 0);
+
+static const char * const tcon_lcd0_parents[] = { "pll-video0",
+ "pll-video0-4x",
+ "pll-video1" };
+static SUNXI_CCU_MUX_WITH_GATE(tcon_lcd0_clk, "tcon-lcd0",
+ tcon_lcd0_parents, 0xb60,
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_tcon_lcd0_clk, "bus-tcon-lcd0", "ahb3",
+ 0xb7c, BIT(0), 0);
+
+static const char * const tcon_tv0_parents[] = { "pll-video0",
+ "pll-video0-4x",
+ "pll-video1",
+ "pll-video1-4x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(tcon_tv0_clk, "tcon-tv0",
+ tcon_tv0_parents, 0xb80,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_tcon_tv0_clk, "bus-tcon-tv0", "ahb3",
+ 0xb9c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(csi_cci_clk, "csi-cci", "osc24M", 0xc00, BIT(0), 0);
+
+static const char * const csi_top_parents[] = { "pll-video0", "pll-ve",
+ "pll-periph0" };
+static const u8 csi_top_table[] = { 0, 2, 3 };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi_top_clk, "csi-top",
+ csi_top_parents, csi_top_table, 0xc04,
+ 0, 4, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char * const csi_mclk_parents[] = { "osc24M", "pll-video0",
+ "pll-periph0", "pll-periph1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(csi_mclk_clk, "csi-mclk",
+ csi_mclk_parents, 0xc08,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_csi_clk, "bus-csi", "ahb3", 0xc2c, BIT(0), 0);
+
+static const char * const hdcp_parents[] = { "pll-periph0", "pll-periph1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(hdcp_clk, "hdcp", hdcp_parents, 0xc40,
+ 0, 4, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_hdcp_clk, "bus-hdcp", "ahb3", 0xc4c, BIT(0), 0);
+
+/* Fixed factor clocks */
+static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 2, 1, 0);
+
+/*
+ * The divider of pll-audio is fixed to 8 for now, as pll-audio-4x has a
+ * fixed post-divider of 2.
+ */
+static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
+ "pll-audio-base", 8, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
+ "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
+ "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
+
+static CLK_FIXED_FACTOR(pll_periph0_4x_clk, "pll-periph0-4x",
+ "pll-periph0", 1, 4, 0);
+static CLK_FIXED_FACTOR(pll_periph0_2x_clk, "pll-periph0-2x",
+ "pll-periph0", 1, 2, 0);
+
+static CLK_FIXED_FACTOR(pll_periph1_4x_clk, "pll-periph1-4x",
+ "pll-periph1", 1, 4, 0);
+static CLK_FIXED_FACTOR(pll_periph1_2x_clk, "pll-periph1-2x",
+ "pll-periph1", 1, 2, 0);
+
+static CLK_FIXED_FACTOR(pll_video0_4x_clk, "pll-video0-4x",
+ "pll-video0", 1, 4, CLK_SET_RATE_PARENT);
+
+static CLK_FIXED_FACTOR(pll_video1_4x_clk, "pll-video1-4x",
+ "pll-video1", 1, 4, CLK_SET_RATE_PARENT);
+
+static struct ccu_common *sun50i_h6_ccu_clks[] = {
+ &pll_cpux_clk.common,
+ &pll_ddr0_clk.common,
+ &pll_periph0_clk.common,
+ &pll_periph1_clk.common,
+ &pll_gpu_clk.common,
+ &pll_video0_clk.common,
+ &pll_video1_clk.common,
+ &pll_ve_clk.common,
+ &pll_de_clk.common,
+ &pll_hsic_clk.common,
+ &pll_audio_base_clk.common,
+ &cpux_clk.common,
+ &axi_clk.common,
+ &cpux_apb_clk.common,
+ &psi_ahb1_ahb2_clk.common,
+ &ahb3_clk.common,
+ &apb1_clk.common,
+ &apb2_clk.common,
+ &mbus_clk.common,
+ &de_clk.common,
+ &bus_de_clk.common,
+ &deinterlace_clk.common,
+ &bus_deinterlace_clk.common,
+ &gpu_clk.common,
+ &bus_gpu_clk.common,
+ &ce_clk.common,
+ &bus_ce_clk.common,
+ &ve_clk.common,
+ &bus_ve_clk.common,
+ &emce_clk.common,
+ &bus_emce_clk.common,
+ &vp9_clk.common,
+ &bus_vp9_clk.common,
+ &bus_dma_clk.common,
+ &bus_msgbox_clk.common,
+ &bus_spinlock_clk.common,
+ &bus_hstimer_clk.common,
+ &avs_clk.common,
+ &bus_dbg_clk.common,
+ &bus_psi_clk.common,
+ &bus_pwm_clk.common,
+ &bus_iommu_clk.common,
+ &dram_clk.common,
+ &mbus_dma_clk.common,
+ &mbus_ve_clk.common,
+ &mbus_ce_clk.common,
+ &mbus_ts_clk.common,
+ &mbus_nand_clk.common,
+ &mbus_csi_clk.common,
+ &mbus_deinterlace_clk.common,
+ &bus_dram_clk.common,
+ &nand0_clk.common,
+ &nand1_clk.common,
+ &bus_nand_clk.common,
+ &mmc0_clk.common,
+ &mmc1_clk.common,
+ &mmc2_clk.common,
+ &bus_mmc0_clk.common,
+ &bus_mmc1_clk.common,
+ &bus_mmc2_clk.common,
+ &bus_uart0_clk.common,
+ &bus_uart1_clk.common,
+ &bus_uart2_clk.common,
+ &bus_uart3_clk.common,
+ &bus_i2c0_clk.common,
+ &bus_i2c1_clk.common,
+ &bus_i2c2_clk.common,
+ &bus_i2c3_clk.common,
+ &bus_scr0_clk.common,
+ &bus_scr1_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &bus_spi0_clk.common,
+ &bus_spi1_clk.common,
+ &bus_emac_clk.common,
+ &ts_clk.common,
+ &bus_ts_clk.common,
+ &ir_tx_clk.common,
+ &bus_ir_tx_clk.common,
+ &bus_ths_clk.common,
+ &i2s3_clk.common,
+ &i2s0_clk.common,
+ &i2s1_clk.common,
+ &i2s2_clk.common,
+ &bus_i2s0_clk.common,
+ &bus_i2s1_clk.common,
+ &bus_i2s2_clk.common,
+ &bus_i2s3_clk.common,
+ &spdif_clk.common,
+ &bus_spdif_clk.common,
+ &dmic_clk.common,
+ &bus_dmic_clk.common,
+ &audio_hub_clk.common,
+ &bus_audio_hub_clk.common,
+ &usb_ohci0_clk.common,
+ &usb_phy0_clk.common,
+ &usb_phy1_clk.common,
+ &usb_ohci3_clk.common,
+ &usb_phy3_clk.common,
+ &usb_hsic_12m_clk.common,
+ &usb_hsic_clk.common,
+ &bus_ohci0_clk.common,
+ &bus_ohci3_clk.common,
+ &bus_ehci0_clk.common,
+ &bus_xhci_clk.common,
+ &bus_ehci3_clk.common,
+ &bus_otg_clk.common,
+ &pcie_ref_clk.common,
+ &pcie_ref_out_clk.common,
+ &pcie_maxi_clk.common,
+ &pcie_aux_clk.common,
+ &bus_pcie_clk.common,
+ &hdmi_clk.common,
+ &hdmi_slow_clk.common,
+ &hdmi_cec_clk.common,
+ &bus_hdmi_clk.common,
+ &bus_tcon_top_clk.common,
+ &tcon_lcd0_clk.common,
+ &bus_tcon_lcd0_clk.common,
+ &tcon_tv0_clk.common,
+ &bus_tcon_tv0_clk.common,
+ &csi_cci_clk.common,
+ &csi_top_clk.common,
+ &csi_mclk_clk.common,
+ &bus_csi_clk.common,
+ &hdcp_clk.common,
+ &bus_hdcp_clk.common,
+};
+
+static struct clk_hw_onecell_data sun50i_h6_hw_clks = {
+ .hws = {
+ [CLK_OSC12M] = &osc12M_clk.hw,
+ [CLK_PLL_CPUX] = &pll_cpux_clk.common.hw,
+ [CLK_PLL_DDR0] = &pll_ddr0_clk.common.hw,
+ [CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw,
+ [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.hw,
+ [CLK_PLL_PERIPH0_4X] = &pll_periph0_4x_clk.hw,
+ [CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw,
+ [CLK_PLL_PERIPH1_2X] = &pll_periph1_2x_clk.hw,
+ [CLK_PLL_PERIPH1_4X] = &pll_periph1_4x_clk.hw,
+ [CLK_PLL_GPU] = &pll_gpu_clk.common.hw,
+ [CLK_PLL_VIDEO0] = &pll_video0_clk.common.hw,
+ [CLK_PLL_VIDEO0_4X] = &pll_video0_4x_clk.hw,
+ [CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw,
+ [CLK_PLL_VIDEO1_4X] = &pll_video1_4x_clk.hw,
+ [CLK_PLL_VE] = &pll_ve_clk.common.hw,
+ [CLK_PLL_DE] = &pll_de_clk.common.hw,
+ [CLK_PLL_HSIC] = &pll_hsic_clk.common.hw,
+ [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw,
+ [CLK_PLL_AUDIO] = &pll_audio_clk.hw,
+ [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw,
+ [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw,
+ [CLK_CPUX] = &cpux_clk.common.hw,
+ [CLK_AXI] = &axi_clk.common.hw,
+ [CLK_CPUX_APB] = &cpux_apb_clk.common.hw,
+ [CLK_PSI_AHB1_AHB2] = &psi_ahb1_ahb2_clk.common.hw,
+ [CLK_AHB3] = &ahb3_clk.common.hw,
+ [CLK_APB1] = &apb1_clk.common.hw,
+ [CLK_APB2] = &apb2_clk.common.hw,
+ [CLK_MBUS] = &mbus_clk.common.hw,
+ [CLK_DE] = &de_clk.common.hw,
+ [CLK_BUS_DE] = &bus_de_clk.common.hw,
+ [CLK_DEINTERLACE] = &deinterlace_clk.common.hw,
+ [CLK_BUS_DEINTERLACE] = &bus_deinterlace_clk.common.hw,
+ [CLK_GPU] = &gpu_clk.common.hw,
+ [CLK_BUS_GPU] = &bus_gpu_clk.common.hw,
+ [CLK_CE] = &ce_clk.common.hw,
+ [CLK_BUS_CE] = &bus_ce_clk.common.hw,
+ [CLK_VE] = &ve_clk.common.hw,
+ [CLK_BUS_VE] = &bus_ve_clk.common.hw,
+ [CLK_EMCE] = &emce_clk.common.hw,
+ [CLK_BUS_EMCE] = &bus_emce_clk.common.hw,
+ [CLK_VP9] = &vp9_clk.common.hw,
+ [CLK_BUS_VP9] = &bus_vp9_clk.common.hw,
+ [CLK_BUS_DMA] = &bus_dma_clk.common.hw,
+ [CLK_BUS_MSGBOX] = &bus_msgbox_clk.common.hw,
+ [CLK_BUS_SPINLOCK] = &bus_spinlock_clk.common.hw,
+ [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw,
+ [CLK_AVS] = &avs_clk.common.hw,
+ [CLK_BUS_DBG] = &bus_dbg_clk.common.hw,
+ [CLK_BUS_PSI] = &bus_psi_clk.common.hw,
+ [CLK_BUS_PWM] = &bus_pwm_clk.common.hw,
+ [CLK_BUS_IOMMU] = &bus_iommu_clk.common.hw,
+ [CLK_DRAM] = &dram_clk.common.hw,
+ [CLK_MBUS_DMA] = &mbus_dma_clk.common.hw,
+ [CLK_MBUS_VE] = &mbus_ve_clk.common.hw,
+ [CLK_MBUS_CE] = &mbus_ce_clk.common.hw,
+ [CLK_MBUS_TS] = &mbus_ts_clk.common.hw,
+ [CLK_MBUS_NAND] = &mbus_nand_clk.common.hw,
+ [CLK_MBUS_CSI] = &mbus_csi_clk.common.hw,
+ [CLK_MBUS_DEINTERLACE] = &mbus_deinterlace_clk.common.hw,
+ [CLK_BUS_DRAM] = &bus_dram_clk.common.hw,
+ [CLK_NAND0] = &nand0_clk.common.hw,
+ [CLK_NAND1] = &nand1_clk.common.hw,
+ [CLK_BUS_NAND] = &bus_nand_clk.common.hw,
+ [CLK_MMC0] = &mmc0_clk.common.hw,
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC2] = &mmc2_clk.common.hw,
+ [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw,
+ [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw,
+ [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw,
+ [CLK_BUS_UART0] = &bus_uart0_clk.common.hw,
+ [CLK_BUS_UART1] = &bus_uart1_clk.common.hw,
+ [CLK_BUS_UART2] = &bus_uart2_clk.common.hw,
+ [CLK_BUS_UART3] = &bus_uart3_clk.common.hw,
+ [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw,
+ [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw,
+ [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw,
+ [CLK_BUS_I2C3] = &bus_i2c3_clk.common.hw,
+ [CLK_BUS_SCR0] = &bus_scr0_clk.common.hw,
+ [CLK_BUS_SCR1] = &bus_scr1_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw,
+ [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw,
+ [CLK_BUS_EMAC] = &bus_emac_clk.common.hw,
+ [CLK_TS] = &ts_clk.common.hw,
+ [CLK_BUS_TS] = &bus_ts_clk.common.hw,
+ [CLK_IR_TX] = &ir_tx_clk.common.hw,
+ [CLK_BUS_IR_TX] = &bus_ir_tx_clk.common.hw,
+ [CLK_BUS_THS] = &bus_ths_clk.common.hw,
+ [CLK_I2S3] = &i2s3_clk.common.hw,
+ [CLK_I2S0] = &i2s0_clk.common.hw,
+ [CLK_I2S1] = &i2s1_clk.common.hw,
+ [CLK_I2S2] = &i2s2_clk.common.hw,
+ [CLK_BUS_I2S0] = &bus_i2s0_clk.common.hw,
+ [CLK_BUS_I2S1] = &bus_i2s1_clk.common.hw,
+ [CLK_BUS_I2S2] = &bus_i2s2_clk.common.hw,
+ [CLK_BUS_I2S3] = &bus_i2s3_clk.common.hw,
+ [CLK_SPDIF] = &spdif_clk.common.hw,
+ [CLK_BUS_SPDIF] = &bus_spdif_clk.common.hw,
+ [CLK_DMIC] = &dmic_clk.common.hw,
+ [CLK_BUS_DMIC] = &bus_dmic_clk.common.hw,
+ [CLK_AUDIO_HUB] = &audio_hub_clk.common.hw,
+ [CLK_BUS_AUDIO_HUB] = &bus_audio_hub_clk.common.hw,
+ [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw,
+ [CLK_USB_PHY0] = &usb_phy0_clk.common.hw,
+ [CLK_USB_PHY1] = &usb_phy1_clk.common.hw,
+ [CLK_USB_OHCI3] = &usb_ohci3_clk.common.hw,
+ [CLK_USB_PHY3] = &usb_phy3_clk.common.hw,
+ [CLK_USB_HSIC_12M] = &usb_hsic_12m_clk.common.hw,
+ [CLK_USB_HSIC] = &usb_hsic_clk.common.hw,
+ [CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw,
+ [CLK_BUS_OHCI3] = &bus_ohci3_clk.common.hw,
+ [CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw,
+ [CLK_BUS_XHCI] = &bus_xhci_clk.common.hw,
+ [CLK_BUS_EHCI3] = &bus_ehci3_clk.common.hw,
+ [CLK_BUS_OTG] = &bus_otg_clk.common.hw,
+ [CLK_PCIE_REF_100M] = &pcie_ref_100m_clk.hw,
+ [CLK_PCIE_REF] = &pcie_ref_clk.common.hw,
+ [CLK_PCIE_REF_OUT] = &pcie_ref_out_clk.common.hw,
+ [CLK_PCIE_MAXI] = &pcie_maxi_clk.common.hw,
+ [CLK_PCIE_AUX] = &pcie_aux_clk.common.hw,
+ [CLK_BUS_PCIE] = &bus_pcie_clk.common.hw,
+ [CLK_HDMI] = &hdmi_clk.common.hw,
+ [CLK_HDMI_SLOW] = &hdmi_slow_clk.common.hw,
+ [CLK_HDMI_CEC] = &hdmi_cec_clk.common.hw,
+ [CLK_BUS_HDMI] = &bus_hdmi_clk.common.hw,
+ [CLK_BUS_TCON_TOP] = &bus_tcon_top_clk.common.hw,
+ [CLK_TCON_LCD0] = &tcon_lcd0_clk.common.hw,
+ [CLK_BUS_TCON_LCD0] = &bus_tcon_lcd0_clk.common.hw,
+ [CLK_TCON_TV0] = &tcon_tv0_clk.common.hw,
+ [CLK_BUS_TCON_TV0] = &bus_tcon_tv0_clk.common.hw,
+ [CLK_CSI_CCI] = &csi_cci_clk.common.hw,
+ [CLK_CSI_TOP] = &csi_top_clk.common.hw,
+ [CLK_CSI_MCLK] = &csi_mclk_clk.common.hw,
+ [CLK_BUS_CSI] = &bus_csi_clk.common.hw,
+ [CLK_HDCP] = &hdcp_clk.common.hw,
+ [CLK_BUS_HDCP] = &bus_hdcp_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
+static struct ccu_reset_map sun50i_h6_ccu_resets[] = {
+ [RST_MBUS] = { 0x540, BIT(30) },
+
+ [RST_BUS_DE] = { 0x60c, BIT(16) },
+ [RST_BUS_DEINTERLACE] = { 0x62c, BIT(16) },
+ [RST_BUS_GPU] = { 0x67c, BIT(16) },
+ [RST_BUS_CE] = { 0x68c, BIT(16) },
+ [RST_BUS_VE] = { 0x69c, BIT(16) },
+ [RST_BUS_EMCE] = { 0x6bc, BIT(16) },
+ [RST_BUS_VP9] = { 0x6cc, BIT(16) },
+ [RST_BUS_DMA] = { 0x70c, BIT(16) },
+ [RST_BUS_MSGBOX] = { 0x71c, BIT(16) },
+ [RST_BUS_SPINLOCK] = { 0x72c, BIT(16) },
+ [RST_BUS_HSTIMER] = { 0x73c, BIT(16) },
+ [RST_BUS_DBG] = { 0x78c, BIT(16) },
+ [RST_BUS_PSI] = { 0x79c, BIT(16) },
+ [RST_BUS_PWM] = { 0x7ac, BIT(16) },
+ [RST_BUS_IOMMU] = { 0x7bc, BIT(16) },
+ [RST_BUS_DRAM] = { 0x80c, BIT(16) },
+ [RST_BUS_NAND] = { 0x82c, BIT(16) },
+ [RST_BUS_MMC0] = { 0x84c, BIT(16) },
+ [RST_BUS_MMC1] = { 0x84c, BIT(17) },
+ [RST_BUS_MMC2] = { 0x84c, BIT(18) },
+ [RST_BUS_UART0] = { 0x90c, BIT(16) },
+ [RST_BUS_UART1] = { 0x90c, BIT(17) },
+ [RST_BUS_UART2] = { 0x90c, BIT(18) },
+ [RST_BUS_UART3] = { 0x90c, BIT(19) },
+ [RST_BUS_I2C0] = { 0x91c, BIT(16) },
+ [RST_BUS_I2C1] = { 0x91c, BIT(17) },
+ [RST_BUS_I2C2] = { 0x91c, BIT(18) },
+ [RST_BUS_I2C3] = { 0x91c, BIT(19) },
+ [RST_BUS_SCR0] = { 0x93c, BIT(16) },
+ [RST_BUS_SCR1] = { 0x93c, BIT(17) },
+ [RST_BUS_SPI0] = { 0x96c, BIT(16) },
+ [RST_BUS_SPI1] = { 0x96c, BIT(17) },
+ [RST_BUS_EMAC] = { 0x97c, BIT(16) },
+ [RST_BUS_TS] = { 0x9bc, BIT(16) },
+ [RST_BUS_IR_TX] = { 0x9cc, BIT(16) },
+ [RST_BUS_THS] = { 0x9fc, BIT(16) },
+ [RST_BUS_I2S0] = { 0xa1c, BIT(16) },
+ [RST_BUS_I2S1] = { 0xa1c, BIT(17) },
+ [RST_BUS_I2S2] = { 0xa1c, BIT(18) },
+ [RST_BUS_I2S3] = { 0xa1c, BIT(19) },
+ [RST_BUS_SPDIF] = { 0xa2c, BIT(16) },
+ [RST_BUS_DMIC] = { 0xa4c, BIT(16) },
+ [RST_BUS_AUDIO_HUB] = { 0xa6c, BIT(16) },
+
+ [RST_USB_PHY0] = { 0xa70, BIT(30) },
+ [RST_USB_PHY1] = { 0xa74, BIT(30) },
+ [RST_USB_PHY3] = { 0xa7c, BIT(30) },
+ [RST_USB_HSIC] = { 0xa7c, BIT(28) },
+
+ [RST_BUS_OHCI0] = { 0xa8c, BIT(16) },
+ [RST_BUS_OHCI3] = { 0xa8c, BIT(19) },
+ [RST_BUS_EHCI0] = { 0xa8c, BIT(20) },
+ [RST_BUS_XHCI] = { 0xa8c, BIT(21) },
+ [RST_BUS_EHCI3] = { 0xa8c, BIT(23) },
+ [RST_BUS_OTG] = { 0xa8c, BIT(24) },
+ [RST_BUS_PCIE] = { 0xabc, BIT(16) },
+
+ [RST_PCIE_POWERUP] = { 0xabc, BIT(17) },
+
+ [RST_BUS_HDMI] = { 0xb1c, BIT(16) },
+ [RST_BUS_HDMI_SUB] = { 0xb1c, BIT(17) },
+ [RST_BUS_TCON_TOP] = { 0xb5c, BIT(16) },
+ [RST_BUS_TCON_LCD0] = { 0xb7c, BIT(16) },
+ [RST_BUS_TCON_TV0] = { 0xb9c, BIT(16) },
+ [RST_BUS_CSI] = { 0xc2c, BIT(16) },
+ [RST_BUS_HDCP] = { 0xc4c, BIT(16) },
+};
+
+static const struct sunxi_ccu_desc sun50i_h6_ccu_desc = {
+ .ccu_clks = sun50i_h6_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun50i_h6_ccu_clks),
+
+ .hw_clks = &sun50i_h6_hw_clks,
+
+ .resets = sun50i_h6_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun50i_h6_ccu_resets),
+};
+
+static const u32 pll_regs[] = {
+ SUN50I_H6_PLL_CPUX_REG,
+ SUN50I_H6_PLL_DDR0_REG,
+ SUN50I_H6_PLL_PERIPH0_REG,
+ SUN50I_H6_PLL_PERIPH1_REG,
+ SUN50I_H6_PLL_GPU_REG,
+ SUN50I_H6_PLL_VIDEO0_REG,
+ SUN50I_H6_PLL_VIDEO1_REG,
+ SUN50I_H6_PLL_VE_REG,
+ SUN50I_H6_PLL_DE_REG,
+ SUN50I_H6_PLL_HSIC_REG,
+ SUN50I_H6_PLL_AUDIO_REG,
+};
+
+static const u32 pll_video_regs[] = {
+ SUN50I_H6_PLL_VIDEO0_REG,
+ SUN50I_H6_PLL_VIDEO1_REG,
+};
+
+static const u32 usb2_clk_regs[] = {
+ SUN50I_H6_USB0_CLK_REG,
+ SUN50I_H6_USB3_CLK_REG,
+};
+
+static int sun50i_h6_ccu_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ void __iomem *reg;
+ u32 val;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ /* Enable the lock bits on all PLLs */
+ for (i = 0; i < ARRAY_SIZE(pll_regs); i++) {
+ val = readl(reg + pll_regs[i]);
+ val |= BIT(29);
+ writel(val, reg + pll_regs[i]);
+ }
+
+ /*
+ * Force the output divider of video PLLs to 0.
+ *
+ * See the comment before pll-video0 definition for the reason.
+ */
+ for (i = 0; i < ARRAY_SIZE(pll_video_regs); i++) {
+ val = readl(reg + pll_video_regs[i]);
+ val &= ~BIT(0);
+ writel(val, reg + pll_video_regs[i]);
+ }
+
+ /*
+ * Force OHCI 12M clock sources to 00 (12 MHz divided from 48 MHz).
+ *
+ * This clock mux is still mysterious; the code here just forces it
+ * to a valid clock parent.
+ */
+ for (i = 0; i < ARRAY_SIZE(usb2_clk_regs); i++) {
+ val = readl(reg + usb2_clk_regs[i]);
+ val &= ~GENMASK(25, 24);
+ writel(val, reg + usb2_clk_regs[i]);
+ }
+
+ /*
+ * Force the post-divider of pll-audio to 8 and its output divider
+ * to 1, so that the clock name represents the real frequency.
+ */
+ val = readl(reg + SUN50I_H6_PLL_AUDIO_REG);
+ val &= ~(GENMASK(21, 16) | BIT(0));
+ writel(val | (7 << 16), reg + SUN50I_H6_PLL_AUDIO_REG);
+
+ return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_h6_ccu_desc);
+}
+
+static const struct of_device_id sun50i_h6_ccu_ids[] = {
+ { .compatible = "allwinner,sun50i-h6-ccu" },
+ { }
+};
+
+static struct platform_driver sun50i_h6_ccu_driver = {
+ .probe = sun50i_h6_ccu_probe,
+ .driver = {
+ .name = "sun50i-h6-ccu",
+ .of_match_table = sun50i_h6_ccu_ids,
+ },
+};
+builtin_platform_driver(sun50i_h6_ccu_driver);
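As a side note on the pll-audio fixup in the probe function above, the register encoding implied by the comment is the usual sunxi-ng "field value + 1" scheme; a small decoding sketch, for illustration only and not part of the patch:

	u32 val = 7 << 16;					/* bits [21:16] = 7, bit 0 = 0 */
	unsigned long post_div = ((val >> 16) & 0x3f) + 1;	/* 7 + 1 = 8 */
	unsigned long out_div  = (val & BIT(0)) + 1;		/* 0 + 1 = 1 */
	/* rate("pll-audio") = VCO rate / post_div / out_div = VCO / 8 */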
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.h b/drivers/clk/sunxi-ng/ccu-sun50i-h6.h
new file mode 100644
index 000000000000..2ccfe4428260
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.h
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2016 Icenowy Zheng <icenowy@aosc.io>
+ */
+
+#ifndef _CCU_SUN50I_H6_H_
+#define _CCU_SUN50I_H6_H_
+
+#include <dt-bindings/clock/sun50i-h6-ccu.h>
+#include <dt-bindings/reset/sun50i-h6-ccu.h>
+
+#define CLK_OSC12M 0
+#define CLK_PLL_CPUX 1
+#define CLK_PLL_DDR0 2
+
+/* PLL_PERIPH0 exported for PRCM */
+
+#define CLK_PLL_PERIPH0_2X 4
+#define CLK_PLL_PERIPH0_4X 5
+#define CLK_PLL_PERIPH1 6
+#define CLK_PLL_PERIPH1_2X 7
+#define CLK_PLL_PERIPH1_4X 8
+#define CLK_PLL_GPU 9
+#define CLK_PLL_VIDEO0 10
+#define CLK_PLL_VIDEO0_4X 11
+#define CLK_PLL_VIDEO1 12
+#define CLK_PLL_VIDEO1_4X 13
+#define CLK_PLL_VE 14
+#define CLK_PLL_DE 15
+#define CLK_PLL_HSIC 16
+#define CLK_PLL_AUDIO_BASE 17
+#define CLK_PLL_AUDIO 18
+#define CLK_PLL_AUDIO_2X 19
+#define CLK_PLL_AUDIO_4X 20
+
+/* CPUX clock exported for DVFS */
+
+#define CLK_AXI 22
+#define CLK_CPUX_APB 23
+#define CLK_PSI_AHB1_AHB2 24
+#define CLK_AHB3 25
+
+/* APB1 clock exported for PIO */
+
+#define CLK_APB2 27
+#define CLK_MBUS 28
+
+/* All module clocks and bus gates are exported except DRAM */
+
+#define CLK_DRAM 52
+
+#define CLK_BUS_DRAM 60
+
+#define CLK_NUMBER (CLK_BUS_HDCP + 1)
+
+#endif /* _CCU_SUN50I_H6_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 29bc0566b776..77ed0b0ba681 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -69,17 +69,18 @@ static SUNXI_CCU_NM_WITH_SDM_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
-static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video_clk, "pll-video",
- "osc24M", 0x0010,
- 8, 7, /* N */
- 0, 4, /* M */
- BIT(24), /* frac enable */
- BIT(25), /* frac select */
- 270000000, /* frac rate 0 */
- 297000000, /* frac rate 1 */
- BIT(31), /* gate */
- BIT(28), /* lock */
- CLK_SET_RATE_UNGATE);
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN(pll_video_clk, "pll-video",
+ "osc24M", 0x0010,
+ 192000000, /* Minimum rate */
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
"osc24M", 0x0018,
@@ -451,11 +452,13 @@ static SUNXI_CCU_GATE(dram_ts_clk, "dram-ts", "dram",
static const char * const de_parents[] = { "pll-periph0-2x", "pll-de" };
static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
- 0x104, 0, 4, 24, 3, BIT(31), 0);
+ 0x104, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
static const char * const tcon_parents[] = { "pll-video" };
static SUNXI_CCU_M_WITH_MUX_GATE(tcon_clk, "tcon", tcon_parents,
- 0x118, 0, 4, 24, 3, BIT(31), 0);
+ 0x118, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
static const char * const tve_parents[] = { "pll-de", "pll-periph1" };
static SUNXI_CCU_M_WITH_MUX_GATE(tve_clk, "tve", tve_parents,
@@ -486,7 +489,8 @@ static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
static const char * const hdmi_parents[] = { "pll-video" };
static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", hdmi_parents,
- 0x150, 0, 4, 24, 2, BIT(31), 0);
+ 0x150, 0, 4, 24, 2, BIT(31),
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M",
0x154, BIT(31), 0);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
index 1b4baea37d81..73d7392c968c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
@@ -26,7 +26,9 @@
#define CLK_PLL_AUDIO_2X 3
#define CLK_PLL_AUDIO_4X 4
#define CLK_PLL_AUDIO_8X 5
-#define CLK_PLL_VIDEO 6
+
+/* PLL_VIDEO is exported */
+
#define CLK_PLL_VE 7
#define CLK_PLL_DDR 8
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index e58c95787f94..ebd9436d2c7c 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -20,6 +20,18 @@ struct _ccu_nkmp {
unsigned long p, min_p, max_p;
};
+static unsigned long ccu_nkmp_calc_rate(unsigned long parent,
+ unsigned long n, unsigned long k,
+ unsigned long m, unsigned long p)
+{
+ u64 rate = parent;
+
+ rate *= n * k;
+ do_div(rate, m * p);
+
+ return rate;
+}
+
static void ccu_nkmp_find_best(unsigned long parent, unsigned long rate,
struct _ccu_nkmp *nkmp)
{
@@ -33,7 +45,9 @@ static void ccu_nkmp_find_best(unsigned long parent, unsigned long rate,
for (_p = nkmp->min_p; _p <= nkmp->max_p; _p <<= 1) {
unsigned long tmp_rate;
- tmp_rate = parent * _n * _k / (_m * _p);
+ tmp_rate = ccu_nkmp_calc_rate(parent,
+ _n, _k,
+ _m, _p);
if (tmp_rate > rate)
continue;
@@ -81,7 +95,7 @@ static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
- unsigned long n, m, k, p;
+ unsigned long n, m, k, p, rate;
u32 reg;
reg = readl(nkmp->common.base + nkmp->common.reg);
@@ -107,7 +121,11 @@ static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
p = reg >> nkmp->p.shift;
p &= (1 << nkmp->p.width) - 1;
- return (parent_rate * n * k >> p) / m;
+ rate = ccu_nkmp_calc_rate(parent_rate, n, k, m, 1 << p);
+ if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate /= nkmp->fixed_post_div;
+
+ return rate;
}
static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -116,6 +134,9 @@ static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
struct _ccu_nkmp _nkmp;
+ if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate *= nkmp->fixed_post_div;
+
_nkmp.min_n = nkmp->n.min ?: 1;
_nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width;
_nkmp.min_k = nkmp->k.min ?: 1;
@@ -127,17 +148,26 @@ static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
ccu_nkmp_find_best(*parent_rate, rate, &_nkmp);
- return *parent_rate * _nkmp.n * _nkmp.k / (_nkmp.m * _nkmp.p);
+ rate = ccu_nkmp_calc_rate(*parent_rate, _nkmp.n, _nkmp.k,
+ _nkmp.m, _nkmp.p);
+ if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate = rate / nkmp->fixed_post_div;
+
+ return rate;
}
static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
+ u32 n_mask, k_mask, m_mask, p_mask;
struct _ccu_nkmp _nkmp;
unsigned long flags;
u32 reg;
+ if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate = rate * nkmp->fixed_post_div;
+
_nkmp.min_n = nkmp->n.min ?: 1;
_nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width;
_nkmp.min_k = nkmp->k.min ?: 1;
@@ -149,18 +179,20 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
ccu_nkmp_find_best(parent_rate, rate, &_nkmp);
+ n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1, nkmp->n.shift);
+ k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1, nkmp->k.shift);
+ m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1, nkmp->m.shift);
+ p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1, nkmp->p.shift);
+
spin_lock_irqsave(nkmp->common.lock, flags);
reg = readl(nkmp->common.base + nkmp->common.reg);
- reg &= ~GENMASK(nkmp->n.width + nkmp->n.shift - 1, nkmp->n.shift);
- reg &= ~GENMASK(nkmp->k.width + nkmp->k.shift - 1, nkmp->k.shift);
- reg &= ~GENMASK(nkmp->m.width + nkmp->m.shift - 1, nkmp->m.shift);
- reg &= ~GENMASK(nkmp->p.width + nkmp->p.shift - 1, nkmp->p.shift);
-
- reg |= (_nkmp.n - nkmp->n.offset) << nkmp->n.shift;
- reg |= (_nkmp.k - nkmp->k.offset) << nkmp->k.shift;
- reg |= (_nkmp.m - nkmp->m.offset) << nkmp->m.shift;
- reg |= ilog2(_nkmp.p) << nkmp->p.shift;
+ reg &= ~(n_mask | k_mask | m_mask | p_mask);
+
+ reg |= ((_nkmp.n - nkmp->n.offset) << nkmp->n.shift) & n_mask;
+ reg |= ((_nkmp.k - nkmp->k.offset) << nkmp->k.shift) & k_mask;
+ reg |= ((_nkmp.m - nkmp->m.offset) << nkmp->m.shift) & m_mask;
+ reg |= (ilog2(_nkmp.p) << nkmp->p.shift) & p_mask;
writel(reg, nkmp->common.base + nkmp->common.reg);
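The new ccu_nkmp_calc_rate() helper earlier in this file exists because the old expression multiplied first in native unsigned long arithmetic, which can wrap when unsigned long is 32 bits wide; a worked example with hypothetical factor values:

	/*
	 *   parent = 24000000, n = 180, k = 1, m = 1, p = 1
	 *
	 *   32-bit: 24000000 * 180 = 4320000000 -> wraps (max is 4294967295)
	 *   64-bit: (u64)24000000 * 180 / (1 * 1) = 4320000000 -> 4.32 GHz
	 */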
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.h b/drivers/clk/sunxi-ng/ccu_nkmp.h
index a82facbc6144..6940503e7fc4 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.h
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.h
@@ -34,6 +34,8 @@ struct ccu_nkmp {
struct ccu_div_internal m;
struct ccu_div_internal p;
+ unsigned int fixed_post_div;
+
struct ccu_common common;
};
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index a16de092bf94..4e2073307f34 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -117,6 +117,13 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate *= nm->fixed_post_div;
+ if (rate < nm->min_rate) {
+ rate = nm->min_rate;
+ if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate /= nm->fixed_post_div;
+ return rate;
+ }
+
if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate /= nm->fixed_post_div;
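With the new min_rate handling above, a request below the floor is simply rounded up to it; for instance with the sun8i-h3 pll-video converted earlier in this series (min_rate = 192 MHz, no fixed post-divider):

	/*
	 *   clk_round_rate(pll-video, 150000000) -> 192000000
	 *   clk_round_rate(pll-video, 250000000) -> normal N/M (or fractional)
	 *                                           rounding, unaffected
	 */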
diff --git a/drivers/clk/sunxi-ng/ccu_nm.h b/drivers/clk/sunxi-ng/ccu_nm.h
index eba586b4c7d0..1d8b459c50b7 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.h
+++ b/drivers/clk/sunxi-ng/ccu_nm.h
@@ -37,6 +37,7 @@ struct ccu_nm {
struct ccu_sdm_internal sdm;
unsigned int fixed_post_div;
+ unsigned int min_rate;
struct ccu_common common;
};
@@ -88,6 +89,32 @@ struct ccu_nm {
}, \
}
+#define SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN(_struct, _name, _parent, \
+ _reg, _min_rate, \
+ _nshift, _nwidth, \
+ _mshift, _mwidth, \
+ _frac_en, _frac_sel, \
+ _frac_rate_0, _frac_rate_1,\
+ _gate, _lock, _flags) \
+ struct ccu_nm _struct = { \
+ .enable = _gate, \
+ .lock = _lock, \
+ .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .frac = _SUNXI_CCU_FRAC(_frac_en, _frac_sel, \
+ _frac_rate_0, \
+ _frac_rate_1), \
+ .min_rate = _min_rate, \
+ .common = { \
+ .reg = _reg, \
+ .features = CCU_FEATURE_FRACTIONAL, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &ccu_nm_ops, \
+ _flags), \
+ }, \
+ }
+
#define SUNXI_CCU_NM_WITH_GATE_LOCK(_struct, _name, _parent, _reg, \
_nshift, _nwidth, \
_mshift, _mwidth, \
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index 11a5066e5c27..5234acd30e89 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -515,7 +515,7 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
init.name = "emc";
init.ops = &tegra_clk_emc_ops;
- init.flags = 0;
+ init.flags = CLK_IS_CRITICAL;
init.parent_names = emc_parent_clk_names;
init.num_parents = ARRAY_SIZE(emc_parent_clk_names);
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 7c369e21c91c..830d1c87fa7c 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -1151,6 +1151,8 @@ static const struct clk_ops tegra_clk_pllu_ops = {
.enable = clk_pllu_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
+ .round_rate = clk_pll_round_rate,
+ .set_rate = clk_pll_set_rate,
};
static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params,
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index c02711927d79..2acba2986bc6 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -830,7 +830,7 @@ static struct tegra_periph_init_data gate_clks[] = {
GATE("xusb_host", "xusb_host_src", 89, 0, tegra_clk_xusb_host, 0),
GATE("xusb_ss", "xusb_ss_src", 156, 0, tegra_clk_xusb_ss, 0),
GATE("xusb_dev", "xusb_dev_src", 95, 0, tegra_clk_xusb_dev, 0),
- GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IGNORE_UNUSED),
+ GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IS_CRITICAL),
GATE("sata_cold", "clk_m", 129, TEGRA_PERIPH_ON_APB, tegra_clk_sata_cold, 0),
GATE("ispa", "isp", 23, 0, tegra_clk_ispa, 0),
GATE("ispb", "isp", 3, 0, tegra_clk_ispb, 0),
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index 10047107c1dc..89d6b47a27a8 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -125,7 +125,8 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
/* SCLK */
dt_clk = tegra_lookup_dt_id(tegra_clk_sclk, tegra_clks);
if (dt_clk) {
- clk = clk_register_divider(NULL, "sclk", "sclk_mux", 0,
+ clk = clk_register_divider(NULL, "sclk", "sclk_mux",
+ CLK_IS_CRITICAL,
clk_base + SCLK_DIVIDER, 0, 8,
0, &sysrate_lock);
*dt_clk = clk;
@@ -137,7 +138,8 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
clk = tegra_clk_register_super_mux("sclk",
gen_info->sclk_parents,
gen_info->num_sclk_parents,
- CLK_SET_RATE_PARENT,
+ CLK_SET_RATE_PARENT |
+ CLK_IS_CRITICAL,
clk_base + SCLK_BURST_POLICY,
0, 4, 0, 0, NULL);
*dt_clk = clk;
@@ -151,7 +153,7 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
clk_base + SYSTEM_CLK_RATE, 4, 2, 0,
&sysrate_lock);
clk = clk_register_gate(NULL, "hclk", "hclk_div",
- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
clk_base + SYSTEM_CLK_RATE,
7, CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
*dt_clk = clk;
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 63087d17c3e2..5d5a22d529f5 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -955,8 +955,7 @@ static void __init tegra114_pll_init(void __iomem *clk_base,
/* PLLM */
clk = tegra_clk_register_pllm("pll_m", "pll_ref", clk_base, pmc,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
- &pll_m_params, NULL);
+ CLK_SET_RATE_GATE, &pll_m_params, NULL);
clks[TEGRA114_CLK_PLL_M] = clk;
/* PLLM_OUT1 */
@@ -1190,6 +1189,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA114_CLK_XUSB_HS_SRC, TEGRA114_CLK_XUSB_SS_DIV2, 61200000, 0 },
{ TEGRA114_CLK_XUSB_FALCON_SRC, TEGRA114_CLK_PLL_P, 204000000, 0 },
{ TEGRA114_CLK_XUSB_HOST_SRC, TEGRA114_CLK_PLL_P, 102000000, 0 },
+ { TEGRA114_CLK_VDE, TEGRA114_CLK_CLK_MAX, 600000000, 0 },
/* must be the last entry */
{ TEGRA114_CLK_CLK_MAX, TEGRA114_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index e81ea5b11577..50088e976611 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1089,8 +1089,7 @@ static void __init tegra124_pll_init(void __iomem *clk_base,
/* PLLM */
clk = tegra_clk_register_pllm("pll_m", "pll_ref", clk_base, pmc,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
- &pll_m_params, NULL);
+ CLK_SET_RATE_GATE, &pll_m_params, NULL);
clk_register_clkdev(clk, "pll_m", NULL);
clks[TEGRA124_CLK_PLL_M] = clk;
@@ -1099,7 +1098,7 @@ static void __init tegra124_pll_init(void __iomem *clk_base,
clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
8, 8, 1, NULL);
clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
- clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ clk_base + PLLM_OUT, 1, 0,
CLK_SET_RATE_PARENT, 0, NULL);
clk_register_clkdev(clk, "pll_m_out1", NULL);
clks[TEGRA124_CLK_PLL_M_OUT1] = clk;
@@ -1268,11 +1267,11 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
{ TEGRA124_CLK_I2S2, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA124_CLK_I2S3, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA124_CLK_I2S4, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
- { TEGRA124_CLK_VDE, TEGRA124_CLK_PLL_P, 0, 0 },
+ { TEGRA124_CLK_VDE, TEGRA124_CLK_CLK_MAX, 600000000, 0 },
{ TEGRA124_CLK_HOST1X, TEGRA124_CLK_PLL_P, 136000000, 1 },
{ TEGRA124_CLK_DSIALP, TEGRA124_CLK_PLL_P, 68000000, 0 },
{ TEGRA124_CLK_DSIBLP, TEGRA124_CLK_PLL_P, 68000000, 0 },
- { TEGRA124_CLK_SCLK, TEGRA124_CLK_PLL_P_OUT2, 102000000, 1 },
+ { TEGRA124_CLK_SCLK, TEGRA124_CLK_PLL_P_OUT2, 102000000, 0 },
{ TEGRA124_CLK_DFLL_SOC, TEGRA124_CLK_PLL_P, 51000000, 1 },
{ TEGRA124_CLK_DFLL_REF, TEGRA124_CLK_PLL_P, 51000000, 1 },
{ TEGRA124_CLK_PLL_C, TEGRA124_CLK_CLK_MAX, 768000000, 0 },
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index cbd5a2e5c569..0ee56dd04cec 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -576,6 +576,7 @@ static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = {
[tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true },
[tegra_clk_fuse] = { .dt_id = TEGRA20_CLK_FUSE, .present = true },
[tegra_clk_kfuse] = { .dt_id = TEGRA20_CLK_KFUSE, .present = true },
+ [tegra_clk_emc] = { .dt_id = TEGRA20_CLK_EMC, .present = true },
};
static unsigned long tegra20_clk_measure_input_freq(void)
@@ -651,8 +652,7 @@ static void tegra20_pll_init(void)
/* PLLM */
clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, NULL,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
- &pll_m_params, NULL);
+ CLK_SET_RATE_GATE, &pll_m_params, NULL);
clks[TEGRA20_CLK_PLL_M] = clk;
/* PLLM_OUT1 */
@@ -660,7 +660,7 @@ static void tegra20_pll_init(void)
clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
8, 8, 1, NULL);
clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
- clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ clk_base + PLLM_OUT, 1, 0,
CLK_SET_RATE_PARENT, 0, NULL);
clks[TEGRA20_CLK_PLL_M_OUT1] = clk;
@@ -723,7 +723,8 @@ static void tegra20_super_clk_init(void)
/* SCLK */
clk = tegra_clk_register_super_mux("sclk", sclk_parents,
- ARRAY_SIZE(sclk_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(sclk_parents),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
clk_base + SCLK_BURST_POLICY, 0, 4, 0, 0, NULL);
clks[TEGRA20_CLK_SCLK] = clk;
@@ -814,9 +815,6 @@ static void __init tegra20_periph_clk_init(void)
CLK_SET_RATE_NO_REPARENT,
clk_base + CLK_SOURCE_EMC,
30, 2, 0, &emc_lock);
- clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
- 57, periph_clk_enb_refcnt);
- clks[TEGRA20_CLK_EMC] = clk;
clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
&emc_lock);
@@ -1019,13 +1017,12 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA20_CLK_PLL_P_OUT2, TEGRA20_CLK_CLK_MAX, 48000000, 1 },
{ TEGRA20_CLK_PLL_P_OUT3, TEGRA20_CLK_CLK_MAX, 72000000, 1 },
{ TEGRA20_CLK_PLL_P_OUT4, TEGRA20_CLK_CLK_MAX, 24000000, 1 },
- { TEGRA20_CLK_PLL_C, TEGRA20_CLK_CLK_MAX, 600000000, 1 },
- { TEGRA20_CLK_PLL_C_OUT1, TEGRA20_CLK_CLK_MAX, 216000000, 1 },
- { TEGRA20_CLK_SCLK, TEGRA20_CLK_PLL_C_OUT1, 0, 1 },
- { TEGRA20_CLK_HCLK, TEGRA20_CLK_CLK_MAX, 0, 1 },
- { TEGRA20_CLK_PCLK, TEGRA20_CLK_CLK_MAX, 60000000, 1 },
+ { TEGRA20_CLK_PLL_C, TEGRA20_CLK_CLK_MAX, 600000000, 0 },
+ { TEGRA20_CLK_PLL_C_OUT1, TEGRA20_CLK_CLK_MAX, 240000000, 0 },
+ { TEGRA20_CLK_SCLK, TEGRA20_CLK_PLL_C_OUT1, 240000000, 0 },
+ { TEGRA20_CLK_HCLK, TEGRA20_CLK_CLK_MAX, 240000000, 0 },
+ { TEGRA20_CLK_PCLK, TEGRA20_CLK_CLK_MAX, 60000000, 0 },
{ TEGRA20_CLK_CSITE, TEGRA20_CLK_CLK_MAX, 0, 1 },
- { TEGRA20_CLK_EMC, TEGRA20_CLK_CLK_MAX, 0, 1 },
{ TEGRA20_CLK_CCLK, TEGRA20_CLK_CLK_MAX, 0, 1 },
{ TEGRA20_CLK_UARTA, TEGRA20_CLK_PLL_P, 0, 0 },
{ TEGRA20_CLK_UARTB, TEGRA20_CLK_PLL_P, 0, 0 },
@@ -1051,6 +1048,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA20_CLK_DISP2, TEGRA20_CLK_PLL_P, 600000000, 0 },
{ TEGRA20_CLK_GR2D, TEGRA20_CLK_PLL_C, 300000000, 0 },
{ TEGRA20_CLK_GR3D, TEGRA20_CLK_PLL_C, 300000000, 0 },
+ { TEGRA20_CLK_VDE, TEGRA20_CLK_CLK_MAX, 300000000, 0 },
/* must be the last entry */
{ TEGRA20_CLK_CLK_MAX, TEGRA20_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 9e6260869eb9..9fb5d51ccce4 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -22,10 +22,12 @@
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/export.h>
+#include <linux/mutex.h>
#include <linux/clk/tegra.h>
#include <dt-bindings/clock/tegra210-car.h>
#include <dt-bindings/reset/tegra210-car.h>
#include <linux/iopoll.h>
+#include <soc/tegra/pmc.h>
#include "clk.h"
#include "clk-id.h"
@@ -41,6 +43,7 @@
#define CLK_SOURCE_CSITE 0x1d4
#define CLK_SOURCE_EMC 0x19c
#define CLK_SOURCE_SOR1 0x410
+#define CLK_SOURCE_LA 0x1f8
#define PLLC_BASE 0x80
#define PLLC_OUT 0x84
@@ -231,6 +234,30 @@
#define CLK_RST_CONTROLLER_RST_DEV_Y_SET 0x2a8
#define CLK_RST_CONTROLLER_RST_DEV_Y_CLR 0x2ac
+#define LVL2_CLK_GATE_OVRA 0xf8
+#define LVL2_CLK_GATE_OVRC 0x3a0
+#define LVL2_CLK_GATE_OVRD 0x3a4
+#define LVL2_CLK_GATE_OVRE 0x554
+
+/* I2S registers to handle during APE MBIST WAR */
+#define TEGRA210_I2S_BASE 0x1000
+#define TEGRA210_I2S_SIZE 0x100
+#define TEGRA210_I2S_CTRLS 5
+#define TEGRA210_I2S_CG 0x88
+#define TEGRA210_I2S_CTRL 0xa0
+
+/* DISPA registers to handle during MBIST WAR */
+#define DC_CMD_DISPLAY_COMMAND 0xc8
+#define DC_COM_DSC_TOP_CTL 0xcf8
+
+/* VIC register to handle during MBIST WAR */
+#define NV_PVIC_THI_SLCG_OVERRIDE_LOW 0x8c
+
+/* APE, DISPA and VIC base addresses needed for MBIST WAR */
+#define TEGRA210_AHUB_BASE 0x702d0000
+#define TEGRA210_DISPA_BASE 0x54200000
+#define TEGRA210_VIC_BASE 0x54340000
+
/*
* SDM fractional divisor is 16-bit 2's complement signed number within
* (-2^12 ... 2^12-1) range. Represented in PLL data structure as unsigned
@@ -255,8 +282,22 @@ static struct cpu_clk_suspend_context {
} tegra210_cpu_clk_sctx;
#endif
+struct tegra210_domain_mbist_war {
+ void (*handle_lvl2_ovr)(struct tegra210_domain_mbist_war *mbist);
+ const u32 lvl2_offset;
+ const u32 lvl2_mask;
+ const unsigned int num_clks;
+ const unsigned int *clk_init_data;
+ struct clk_bulk_data *clks;
+};
+
+static struct clk **clks;
+
static void __iomem *clk_base;
static void __iomem *pmc_base;
+static void __iomem *ahub_base;
+static void __iomem *dispa_base;
+static void __iomem *vic_base;
static unsigned long osc_freq;
static unsigned long pll_ref_freq;
@@ -267,6 +308,7 @@ static DEFINE_SPINLOCK(pll_re_lock);
static DEFINE_SPINLOCK(pll_u_lock);
static DEFINE_SPINLOCK(sor1_lock);
static DEFINE_SPINLOCK(emc_lock);
+static DEFINE_MUTEX(lvl2_ovr_lock);
/* possible OSC frequencies in Hz */
static unsigned long tegra210_input_freq[] = {
@@ -310,6 +352,8 @@ static const char *mux_pllmcp_clkm[] = {
#define PLLA_MISC2_WRITE_MASK 0x06ffffff
/* PLLD */
+#define PLLD_BASE_CSI_CLKSOURCE (1 << 23)
+
#define PLLD_MISC0_EN_SDM (1 << 16)
#define PLLD_MISC0_LOCK_OVERRIDE (1 << 17)
#define PLLD_MISC0_LOCK_ENABLE (1 << 18)
@@ -513,6 +557,115 @@ void tegra210_set_sata_pll_seq_sw(bool state)
}
EXPORT_SYMBOL_GPL(tegra210_set_sata_pll_seq_sw);
+static void tegra210_generic_mbist_war(struct tegra210_domain_mbist_war *mbist)
+{
+ u32 val;
+
+ val = readl_relaxed(clk_base + mbist->lvl2_offset);
+ writel_relaxed(val | mbist->lvl2_mask, clk_base + mbist->lvl2_offset);
+ fence_udelay(1, clk_base);
+ writel_relaxed(val, clk_base + mbist->lvl2_offset);
+ fence_udelay(1, clk_base);
+}
+
+static void tegra210_venc_mbist_war(struct tegra210_domain_mbist_war *mbist)
+{
+ u32 csi_src, ovra, ovre;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&pll_d_lock, flags);
+
+ csi_src = readl_relaxed(clk_base + PLLD_BASE);
+ writel_relaxed(csi_src | PLLD_BASE_CSI_CLKSOURCE, clk_base + PLLD_BASE);
+ fence_udelay(1, clk_base);
+
+ ovra = readl_relaxed(clk_base + LVL2_CLK_GATE_OVRA);
+ writel_relaxed(ovra | BIT(15), clk_base + LVL2_CLK_GATE_OVRA);
+ ovre = readl_relaxed(clk_base + LVL2_CLK_GATE_OVRE);
+ writel_relaxed(ovre | BIT(3), clk_base + LVL2_CLK_GATE_OVRE);
+ fence_udelay(1, clk_base);
+
+ writel_relaxed(ovra, clk_base + LVL2_CLK_GATE_OVRA);
+ writel_relaxed(ovre, clk_base + LVL2_CLK_GATE_OVRE);
+ writel_relaxed(csi_src, clk_base + PLLD_BASE);
+ fence_udelay(1, clk_base);
+
+ spin_unlock_irqrestore(&pll_d_lock, flags);
+}
+
+static void tegra210_disp_mbist_war(struct tegra210_domain_mbist_war *mbist)
+{
+ u32 ovra, dsc_top_ctrl;
+
+ ovra = readl_relaxed(clk_base + LVL2_CLK_GATE_OVRA);
+ writel_relaxed(ovra | BIT(1), clk_base + LVL2_CLK_GATE_OVRA);
+ fence_udelay(1, clk_base);
+
+ dsc_top_ctrl = readl_relaxed(dispa_base + DC_COM_DSC_TOP_CTL);
+ writel_relaxed(dsc_top_ctrl | BIT(2), dispa_base + DC_COM_DSC_TOP_CTL);
+ readl_relaxed(dispa_base + DC_CMD_DISPLAY_COMMAND);
+ writel_relaxed(dsc_top_ctrl, dispa_base + DC_COM_DSC_TOP_CTL);
+ readl_relaxed(dispa_base + DC_CMD_DISPLAY_COMMAND);
+
+ writel_relaxed(ovra, clk_base + LVL2_CLK_GATE_OVRA);
+ fence_udelay(1, clk_base);
+}
+
+static void tegra210_vic_mbist_war(struct tegra210_domain_mbist_war *mbist)
+{
+ u32 ovre, val;
+
+ ovre = readl_relaxed(clk_base + LVL2_CLK_GATE_OVRE);
+ writel_relaxed(ovre | BIT(5), clk_base + LVL2_CLK_GATE_OVRE);
+ fence_udelay(1, clk_base);
+
+ val = readl_relaxed(vic_base + NV_PVIC_THI_SLCG_OVERRIDE_LOW);
+ writel_relaxed(val | BIT(0) | GENMASK(7, 2) | BIT(24),
+ vic_base + NV_PVIC_THI_SLCG_OVERRIDE_LOW);
+ fence_udelay(1, vic_base + NV_PVIC_THI_SLCG_OVERRIDE_LOW);
+
+ writel_relaxed(val, vic_base + NV_PVIC_THI_SLCG_OVERRIDE_LOW);
+ readl(vic_base + NV_PVIC_THI_SLCG_OVERRIDE_LOW);
+
+ writel_relaxed(ovre, clk_base + LVL2_CLK_GATE_OVRE);
+ fence_udelay(1, clk_base);
+}
+
+static void tegra210_ape_mbist_war(struct tegra210_domain_mbist_war *mbist)
+{
+ void __iomem *i2s_base;
+ unsigned int i;
+ u32 ovrc, ovre;
+
+ ovrc = readl_relaxed(clk_base + LVL2_CLK_GATE_OVRC);
+ ovre = readl_relaxed(clk_base + LVL2_CLK_GATE_OVRE);
+ writel_relaxed(ovrc | BIT(1), clk_base + LVL2_CLK_GATE_OVRC);
+ writel_relaxed(ovre | BIT(10) | BIT(11),
+ clk_base + LVL2_CLK_GATE_OVRE);
+ fence_udelay(1, clk_base);
+
+ i2s_base = ahub_base + TEGRA210_I2S_BASE;
+
+ for (i = 0; i < TEGRA210_I2S_CTRLS; i++) {
+ u32 i2s_ctrl;
+
+ i2s_ctrl = readl_relaxed(i2s_base + TEGRA210_I2S_CTRL);
+ writel_relaxed(i2s_ctrl | BIT(10),
+ i2s_base + TEGRA210_I2S_CTRL);
+ writel_relaxed(0, i2s_base + TEGRA210_I2S_CG);
+ readl(i2s_base + TEGRA210_I2S_CG);
+ writel_relaxed(1, i2s_base + TEGRA210_I2S_CG);
+ writel_relaxed(i2s_ctrl, i2s_base + TEGRA210_I2S_CTRL);
+ readl(i2s_base + TEGRA210_I2S_CTRL);
+
+ i2s_base += TEGRA210_I2S_SIZE;
+ }
+
+ writel_relaxed(ovrc, clk_base + LVL2_CLK_GATE_OVRC);
+ writel_relaxed(ovre, clk_base + LVL2_CLK_GATE_OVRE);
+ fence_udelay(1, clk_base);
+}
+
static inline void _pll_misc_chk_default(void __iomem *base,
struct tegra_clk_pll_params *params,
u8 misc_num, u32 default_val, u32 mask)
@@ -2411,13 +2564,150 @@ static struct tegra_audio_clk_info tegra210_audio_plls[] = {
{ "pll_a1", &pll_a1_params, tegra_clk_pll_a1, "pll_ref" },
};
-static struct clk **clks;
-
static const char * const aclk_parents[] = {
"pll_a1", "pll_c", "pll_p", "pll_a_out0", "pll_c2", "pll_c3",
"clk_m"
};
+static const unsigned int nvjpg_slcg_clkids[] = { TEGRA210_CLK_NVDEC };
+static const unsigned int nvdec_slcg_clkids[] = { TEGRA210_CLK_NVJPG };
+static const unsigned int sor_slcg_clkids[] = { TEGRA210_CLK_HDA2CODEC_2X,
+ TEGRA210_CLK_HDA2HDMI, TEGRA210_CLK_DISP1, TEGRA210_CLK_DISP2 };
+static const unsigned int disp_slcg_clkids[] = { TEGRA210_CLK_LA,
+ TEGRA210_CLK_HOST1X };
+static const unsigned int xusba_slcg_clkids[] = { TEGRA210_CLK_XUSB_HOST,
+ TEGRA210_CLK_XUSB_DEV };
+static const unsigned int xusbb_slcg_clkids[] = { TEGRA210_CLK_XUSB_HOST,
+ TEGRA210_CLK_XUSB_SS };
+static const unsigned int xusbc_slcg_clkids[] = { TEGRA210_CLK_XUSB_DEV,
+ TEGRA210_CLK_XUSB_SS };
+static const unsigned int venc_slcg_clkids[] = { TEGRA210_CLK_HOST1X,
+ TEGRA210_CLK_PLL_D };
+static const unsigned int ape_slcg_clkids[] = { TEGRA210_CLK_ACLK,
+ TEGRA210_CLK_I2S0, TEGRA210_CLK_I2S1, TEGRA210_CLK_I2S2,
+ TEGRA210_CLK_I2S3, TEGRA210_CLK_I2S4, TEGRA210_CLK_SPDIF_OUT,
+ TEGRA210_CLK_D_AUDIO };
+static const unsigned int vic_slcg_clkids[] = { TEGRA210_CLK_HOST1X };
+
+static struct tegra210_domain_mbist_war tegra210_pg_mbist_war[] = {
+ [TEGRA_POWERGATE_VENC] = {
+ .handle_lvl2_ovr = tegra210_venc_mbist_war,
+ .num_clks = ARRAY_SIZE(venc_slcg_clkids),
+ .clk_init_data = venc_slcg_clkids,
+ },
+ [TEGRA_POWERGATE_SATA] = {
+ .handle_lvl2_ovr = tegra210_generic_mbist_war,
+ .lvl2_offset = LVL2_CLK_GATE_OVRC,
+ .lvl2_mask = BIT(0) | BIT(17) | BIT(19),
+ },
+ [TEGRA_POWERGATE_MPE] = {
+ .handle_lvl2_ovr = tegra210_generic_mbist_war,
+ .lvl2_offset = LVL2_CLK_GATE_OVRE,
+ .lvl2_mask = BIT(2),
+ },
+ [TEGRA_POWERGATE_SOR] = {
+ .handle_lvl2_ovr = tegra210_generic_mbist_war,
+ .num_clks = ARRAY_SIZE(sor_slcg_clkids),
+ .clk_init_data = sor_slcg_clkids,
+ .lvl2_offset = LVL2_CLK_GATE_OVRA,
+ .lvl2_mask = BIT(1) | BIT(2),
+ },
+ [TEGRA_POWERGATE_DIS] = {
+ .handle_lvl2_ovr = tegra210_disp_mbist_war,
+ .num_clks = ARRAY_SIZE(disp_slcg_clkids),
+ .clk_init_data = disp_slcg_clkids,
+ },
+ [TEGRA_POWERGATE_DISB] = {
+ .num_clks = ARRAY_SIZE(disp_slcg_clkids),
+ .clk_init_data = disp_slcg_clkids,
+ .handle_lvl2_ovr = tegra210_generic_mbist_war,
+ .lvl2_offset = LVL2_CLK_GATE_OVRA,
+ .lvl2_mask = BIT(2),
+ },
+ [TEGRA_POWERGATE_XUSBA] = {
+ .num_clks = ARRAY_SIZE(xusba_slcg_clkids),
+ .clk_init_data = xusba_slcg_clkids,
+ .handle_lvl2_ovr = tegra210_generic_mbist_war,
+ .lvl2_offset = LVL2_CLK_GATE_OVRC,
+ .lvl2_mask = BIT(30) | BIT(31),
+ },
+ [TEGRA_POWERGATE_XUSBB] = {
+ .num_clks = ARRAY_SIZE(xusbb_slcg_clkids),
+ .clk_init_data = xusbb_slcg_clkids,
+ .handle_lvl2_ovr = tegra210_generic_mbist_war,
+ .lvl2_offset = LVL2_CLK_GATE_OVRC,
+ .lvl2_mask = BIT(30) | BIT(31),
+ },
+ [TEGRA_POWERGATE_XUSBC] = {
+ .num_clks = ARRAY_SIZE(xusbc_slcg_clkids),
+ .clk_init_data = xusbc_slcg_clkids,
+ .handle_lvl2_ovr = tegra210_generic_mbist_war,
+ .lvl2_offset = LVL2_CLK_GATE_OVRC,
+ .lvl2_mask = BIT(30) | BIT(31),
+ },
+ [TEGRA_POWERGATE_VIC] = {
+ .num_clks = ARRAY_SIZE(vic_slcg_clkids),
+ .clk_init_data = vic_slcg_clkids,
+ .handle_lvl2_ovr = tegra210_vic_mbist_war,
+ },
+ [TEGRA_POWERGATE_NVDEC] = {
+ .num_clks = ARRAY_SIZE(nvdec_slcg_clkids),
+ .clk_init_data = nvdec_slcg_clkids,
+ .handle_lvl2_ovr = tegra210_generic_mbist_war,
+ .lvl2_offset = LVL2_CLK_GATE_OVRC,
+ .lvl2_mask = BIT(9) | BIT(31),
+ },
+ [TEGRA_POWERGATE_NVJPG] = {
+ .num_clks = ARRAY_SIZE(nvjpg_slcg_clkids),
+ .clk_init_data = nvjpg_slcg_clkids,
+ .handle_lvl2_ovr = tegra210_generic_mbist_war,
+ .lvl2_offset = LVL2_CLK_GATE_OVRC,
+ .lvl2_mask = BIT(9) | BIT(31),
+ },
+ [TEGRA_POWERGATE_AUD] = {
+ .num_clks = ARRAY_SIZE(ape_slcg_clkids),
+ .clk_init_data = ape_slcg_clkids,
+ .handle_lvl2_ovr = tegra210_ape_mbist_war,
+ },
+ [TEGRA_POWERGATE_VE2] = {
+ .handle_lvl2_ovr = tegra210_generic_mbist_war,
+ .lvl2_offset = LVL2_CLK_GATE_OVRD,
+ .lvl2_mask = BIT(22),
+ },
+};
+
+int tegra210_clk_handle_mbist_war(unsigned int id)
+{
+ int err;
+ struct tegra210_domain_mbist_war *mbist_war;
+
+ if (id >= ARRAY_SIZE(tegra210_pg_mbist_war)) {
+ WARN(1, "unknown domain id in MBIST WAR handler\n");
+ return -EINVAL;
+ }
+
+ mbist_war = &tegra210_pg_mbist_war[id];
+ if (!mbist_war->handle_lvl2_ovr)
+ return 0;
+
+ if (mbist_war->num_clks && !mbist_war->clks)
+ return -ENODEV;
+
+ err = clk_bulk_prepare_enable(mbist_war->num_clks, mbist_war->clks);
+ if (err < 0)
+ return err;
+
+ mutex_lock(&lvl2_ovr_lock);
+
+ mbist_war->handle_lvl2_ovr(mbist_war);
+
+ mutex_unlock(&lvl2_ovr_lock);
+
+ clk_bulk_disable_unprepare(mbist_war->num_clks, mbist_war->clks);
+
+ return 0;
+}
+
void tegra210_put_utmipll_in_iddq(void)
{
u32 reg;
@@ -2654,6 +2944,13 @@ static struct tegra_periph_init_data tegra210_periph[] = {
sor1_parents_idx, 0, &sor1_lock),
};
+static const char * const la_parents[] = {
+ "pll_p", "pll_c2", "pll_c", "pll_c3", "pll_re_out1", "pll_a1", "clk_m", "pll_c4_out0"
+};
+
+static struct tegra_clk_periph tegra210_la =
+ TEGRA_CLK_PERIPH(29, 7, 9, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, 76, 0, NULL, 0);
+
static __init void tegra210_periph_clk_init(void __iomem *clk_base,
void __iomem *pmc_base)
{
@@ -2700,6 +2997,12 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base,
periph_clk_enb_refcnt);
clks[TEGRA210_CLK_DSIB] = clk;
+ /* la */
+ clk = tegra_clk_register_periph("la", la_parents,
+ ARRAY_SIZE(la_parents), &tegra210_la, clk_base,
+ CLK_SOURCE_LA, 0);
+ clks[TEGRA210_CLK_LA] = clk;
+
/* emc mux */
clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
ARRAY_SIZE(mux_pllmcp_clkm), 0,
@@ -3025,7 +3328,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA210_CLK_I2S4, TEGRA210_CLK_PLL_A_OUT0, 11289600, 0 },
{ TEGRA210_CLK_HOST1X, TEGRA210_CLK_PLL_P, 136000000, 1 },
{ TEGRA210_CLK_SCLK_MUX, TEGRA210_CLK_PLL_P, 0, 1 },
- { TEGRA210_CLK_SCLK, TEGRA210_CLK_CLK_MAX, 102000000, 1 },
+ { TEGRA210_CLK_SCLK, TEGRA210_CLK_CLK_MAX, 102000000, 0 },
{ TEGRA210_CLK_DFLL_SOC, TEGRA210_CLK_PLL_P, 51000000, 1 },
{ TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 },
{ TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 },
@@ -3040,7 +3343,6 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA210_CLK_XUSB_DEV_SRC, TEGRA210_CLK_PLL_P_OUT_XUSB, 102000000, 0 },
{ TEGRA210_CLK_SATA, TEGRA210_CLK_PLL_P, 104000000, 0 },
{ TEGRA210_CLK_SATA_OOB, TEGRA210_CLK_PLL_P, 204000000, 0 },
- { TEGRA210_CLK_EMC, TEGRA210_CLK_CLK_MAX, 0, 1 },
{ TEGRA210_CLK_MSELECT, TEGRA210_CLK_CLK_MAX, 0, 1 },
{ TEGRA210_CLK_CSITE, TEGRA210_CLK_CLK_MAX, 0, 1 },
/* TODO find a way to enable this on-demand */
@@ -3149,6 +3451,37 @@ static int tegra210_reset_deassert(unsigned long id)
return 0;
}
+static void tegra210_mbist_clk_init(void)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(tegra210_pg_mbist_war); i++) {
+ unsigned int num_clks = tegra210_pg_mbist_war[i].num_clks;
+ struct clk_bulk_data *clk_data;
+
+ if (!num_clks)
+ continue;
+
+ clk_data = kmalloc_array(num_clks, sizeof(*clk_data),
+ GFP_KERNEL);
+ if (WARN_ON(!clk_data))
+ return;
+
+ tegra210_pg_mbist_war[i].clks = clk_data;
+ for (j = 0; j < num_clks; j++) {
+ int clk_id = tegra210_pg_mbist_war[i].clk_init_data[j];
+ struct clk *clk = clks[clk_id];
+
+ if (WARN(IS_ERR(clk), "clk_id: %d\n", clk_id)) {
+ kfree(clk_data);
+ tegra210_pg_mbist_war[i].clks = NULL;
+ break;
+ }
+ clk_data[j].clk = clk;
+ }
+ }
+}
+
/**
* tegra210_clock_init - Tegra210-specific clock initialization
* @np: struct device_node * of the DT node for the SoC CAR IP block
@@ -3183,6 +3516,24 @@ static void __init tegra210_clock_init(struct device_node *np)
return;
}
+ ahub_base = ioremap(TEGRA210_AHUB_BASE, SZ_64K);
+ if (!ahub_base) {
+ pr_err("ioremap tegra210 APE failed\n");
+ return;
+ }
+
+ dispa_base = ioremap(TEGRA210_DISPA_BASE, SZ_256K);
+ if (!dispa_base) {
+ pr_err("ioremap tegra210 DISPA failed\n");
+ return;
+ }
+
+ vic_base = ioremap(TEGRA210_VIC_BASE, SZ_256K);
+ if (!vic_base) {
+ pr_err("ioremap tegra210 VIC failed\n");
+ return;
+ }
+
clks = tegra_clk_init(clk_base, TEGRA210_CLK_CLK_MAX,
TEGRA210_CAR_BANK_COUNT);
if (!clks)
@@ -3219,6 +3570,8 @@ static void __init tegra210_clock_init(struct device_node *np)
tegra_add_of_provider(np);
tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
+ tegra210_mbist_clk_init();
+
tegra_cpu_car_ops = &tegra210_cpu_car_ops;
}
CLK_OF_DECLARE(tegra210, "nvidia,tegra210-car", tegra210_clock_init);
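tegra210_clk_handle_mbist_war() is meant to be called from the power-ungate path rather than from within this file; a hypothetical caller sketch (the function name and flow below are illustrative only, the real hook sits in the Tegra PMC code outside this patch):

	/* Illustrative only: apply the MBIST work-around after ungating. */
	static int example_powergate_power_up(unsigned int partition_id)
	{
		int err;

		/* ... ungate the partition and remove the I/O clamps ... */

		err = tegra210_clk_handle_mbist_war(partition_id);
		if (err < 0)
			pr_err("MBIST WAR failed for partition %u: %d\n",
			       partition_id, err);

		return err;
	}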
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index bee84c554932..b316dfb6f6c7 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -819,6 +819,7 @@ static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = {
[tegra_clk_pll_a] = { .dt_id = TEGRA30_CLK_PLL_A, .present = true },
[tegra_clk_pll_a_out0] = { .dt_id = TEGRA30_CLK_PLL_A_OUT0, .present = true },
[tegra_clk_cec] = { .dt_id = TEGRA30_CLK_CEC, .present = true },
+ [tegra_clk_emc] = { .dt_id = TEGRA30_CLK_EMC, .present = true },
};
static const char *pll_e_parents[] = { "pll_ref", "pll_p" };
@@ -843,8 +844,7 @@ static void __init tegra30_pll_init(void)
/* PLLM */
clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, pmc_base,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
- &pll_m_params, NULL);
+ CLK_SET_RATE_GATE, &pll_m_params, NULL);
clks[TEGRA30_CLK_PLL_M] = clk;
/* PLLM_OUT1 */
@@ -852,7 +852,7 @@ static void __init tegra30_pll_init(void)
clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
8, 8, 1, NULL);
clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
- clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ clk_base + PLLM_OUT, 1, 0,
CLK_SET_RATE_PARENT, 0, NULL);
clks[TEGRA30_CLK_PLL_M_OUT1] = clk;
@@ -990,7 +990,7 @@ static void __init tegra30_super_clk_init(void)
/* SCLK */
clk = tegra_clk_register_super_mux("sclk", sclk_parents,
ARRAY_SIZE(sclk_parents),
- CLK_SET_RATE_PARENT,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
clk_base + SCLK_BURST_POLICY,
0, 4, 0, 0, NULL);
clks[TEGRA30_CLK_SCLK] = clk;
@@ -1060,9 +1060,6 @@ static void __init tegra30_periph_clk_init(void)
CLK_SET_RATE_NO_REPARENT,
clk_base + CLK_SOURCE_EMC,
30, 2, 0, &emc_lock);
- clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
- 57, periph_clk_enb_refcnt);
- clks[TEGRA30_CLK_EMC] = clk;
clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
&emc_lock);
@@ -1252,10 +1249,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA30_CLK_SDMMC1, TEGRA30_CLK_PLL_P, 48000000, 0 },
{ TEGRA30_CLK_SDMMC2, TEGRA30_CLK_PLL_P, 48000000, 0 },
{ TEGRA30_CLK_SDMMC3, TEGRA30_CLK_PLL_P, 48000000, 0 },
- { TEGRA30_CLK_PLL_M, TEGRA30_CLK_CLK_MAX, 0, 1 },
- { TEGRA30_CLK_PCLK, TEGRA30_CLK_CLK_MAX, 0, 1 },
{ TEGRA30_CLK_CSITE, TEGRA30_CLK_CLK_MAX, 0, 1 },
- { TEGRA30_CLK_EMC, TEGRA30_CLK_CLK_MAX, 0, 1 },
{ TEGRA30_CLK_MSELECT, TEGRA30_CLK_CLK_MAX, 0, 1 },
{ TEGRA30_CLK_SBC1, TEGRA30_CLK_PLL_P, 100000000, 0 },
{ TEGRA30_CLK_SBC2, TEGRA30_CLK_PLL_P, 100000000, 0 },
@@ -1272,6 +1266,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA30_CLK_GR3D, TEGRA30_CLK_PLL_C, 300000000, 0 },
{ TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0 },
{ TEGRA30_CLK_PLL_U, TEGRA30_CLK_CLK_MAX, 480000000, 0 },
+ { TEGRA30_CLK_VDE, TEGRA30_CLK_CLK_MAX, 600000000, 0 },
/* must be the last entry */
{ TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 3b2763df51c2..ba7e20e6a82b 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -812,4 +812,11 @@ int tegra_pll_wait_for_lock(struct tegra_clk_pll *pll);
u16 tegra_pll_get_fixed_mdiv(struct clk_hw *hw, unsigned long input_rate);
int tegra_pll_p_div_to_hw(struct tegra_clk_pll *pll, u8 p_div);
+/* Combined read fence with delay */
+#define fence_udelay(delay, reg) \
+ do { \
+ readl(reg); \
+ udelay(delay); \
+ } while (0)
+
#endif /* TEGRA_CLK_H */
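A typical use of the new fence_udelay() helper is right after a relaxed write, so that the dummy read flushes the posted write before the delay starts counting; a minimal sketch (SOME_REG is a placeholder, not a real register):

	writel_relaxed(val, clk_base + SOME_REG);
	fence_udelay(1, clk_base);	/* readl() forces the write out, then udelay(1) */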
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index f4d6802a8544..7d22e1af2247 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -55,6 +55,29 @@ static void clk_memmap_writel(u32 val, const struct clk_omap_reg *reg)
writel_relaxed(val, io->mem + reg->offset);
}
+static void _clk_rmw(u32 val, u32 mask, void __iomem *ptr)
+{
+ u32 v;
+
+ v = readl_relaxed(ptr);
+ v &= ~mask;
+ v |= val;
+ writel_relaxed(v, ptr);
+}
+
+static void clk_memmap_rmw(u32 val, u32 mask, const struct clk_omap_reg *reg)
+{
+ struct clk_iomap *io = clk_memmaps[reg->index];
+
+ if (reg->ptr) {
+ _clk_rmw(val, mask, reg->ptr);
+ } else if (io->regmap) {
+ regmap_update_bits(io->regmap, reg->offset, mask, val);
+ } else {
+ _clk_rmw(val, mask, io->mem + reg->offset);
+ }
+}
+
static u32 clk_memmap_readl(const struct clk_omap_reg *reg)
{
u32 val;
@@ -89,6 +112,7 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops)
ti_clk_ll_ops = ops;
ops->clk_readl = clk_memmap_readl;
ops->clk_writel = clk_memmap_writel;
+ ops->clk_rmw = clk_memmap_rmw;
return 0;
}
@@ -251,6 +275,20 @@ int ti_clk_get_reg_addr(struct device_node *node, int index,
return 0;
}
+void ti_clk_latch(struct clk_omap_reg *reg, s8 shift)
+{
+ u32 latch;
+
+ if (shift < 0)
+ return;
+
+ latch = 1 << shift;
+
+ ti_clk_ll_ops->clk_rmw(latch, latch, reg);
+ ti_clk_ll_ops->clk_rmw(0, latch, reg);
+ ti_clk_ll_ops->clk_readl(reg); /* OCP barrier */
+}
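In bus-access terms, the helper above, together with the divider and mux changes later in this patch, turns one clock update into a short pulse on the latch bit followed by a read-back; sketched for a hypothetical "ti,latch-bit = <26>" property:

	/*
	 * For a clock with "ti,latch-bit = <26>" (hypothetical value), a
	 * rate or parent change performs, in order:
	 *
	 *   clk_writel()  - new divider/mux selection written
	 *   clk_rmw()     - latch bit 26 set
	 *   clk_rmw()     - latch bit 26 cleared
	 *   clk_readl()   - OCP barrier read-back
	 */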
+
/**
* omap2_clk_provider_init - init master clock provider
* @parent: master node
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index d9b43bfc2532..b58278077226 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -22,6 +22,7 @@ struct clk_omap_divider {
u8 shift;
u8 width;
u8 flags;
+ s8 latch;
const struct clk_div_table *table;
};
@@ -33,6 +34,7 @@ struct clk_omap_mux {
u32 *table;
u32 mask;
u8 shift;
+ s8 latch;
u8 flags;
};
@@ -74,6 +76,11 @@ enum {
#define CLKF_CORE (1 << 9)
#define CLKF_J_TYPE (1 << 10)
+/* CLKCTRL flags */
+#define CLKF_SW_SUP BIT(5)
+#define CLKF_HW_SUP BIT(6)
+#define CLKF_NO_IDLEST BIT(7)
+
#define CLK(dev, con, ck) \
{ \
.lk = { \
@@ -183,10 +190,6 @@ extern const struct omap_clkctrl_data am438x_clkctrl_data[];
extern const struct omap_clkctrl_data dm814_clkctrl_data[];
extern const struct omap_clkctrl_data dm816_clkctrl_data[];
-#define CLKF_SW_SUP BIT(0)
-#define CLKF_HW_SUP BIT(1)
-#define CLKF_NO_IDLEST BIT(2)
-
typedef void (*ti_of_clk_init_cb_t)(void *, struct device_node *);
struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
@@ -194,6 +197,8 @@ struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con);
void ti_clk_add_aliases(void);
+void ti_clk_latch(struct clk_omap_reg *reg, s8 shift);
+
struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup);
int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 77f93f6d2806..aaa277dd6d99 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -263,6 +263,8 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
val |= value << divider->shift;
ti_clk_ll_ops->clk_writel(val, &divider->reg);
+ ti_clk_latch(&divider->reg, divider->latch);
+
return 0;
}
@@ -276,7 +278,8 @@ static struct clk *_register_divider(struct device *dev, const char *name,
const char *parent_name,
unsigned long flags,
struct clk_omap_reg *reg,
- u8 shift, u8 width, u8 clk_divider_flags,
+ u8 shift, u8 width, s8 latch,
+ u8 clk_divider_flags,
const struct clk_div_table *table)
{
struct clk_omap_divider *div;
@@ -305,6 +308,7 @@ static struct clk *_register_divider(struct device *dev, const char *name,
memcpy(&div->reg, reg, sizeof(*reg));
div->shift = shift;
div->width = width;
+ div->latch = latch;
div->flags = clk_divider_flags;
div->hw.init = &init;
div->table = table;
@@ -420,6 +424,7 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
div->table = _get_div_table_from_setup(setup, &div->width);
div->shift = setup->bit_shift;
+ div->latch = -EINVAL;
return &div->hw;
}
@@ -452,7 +457,7 @@ struct clk *ti_clk_register_divider(struct ti_clk *setup)
clk = _register_divider(NULL, setup->name, div->parent,
flags, &reg, div->bit_shift,
- width, div_flags, table);
+ width, -EINVAL, div_flags, table);
if (IS_ERR(clk))
kfree(table);
@@ -556,7 +561,7 @@ static int _get_divider_width(struct device_node *node,
static int __init ti_clk_divider_populate(struct device_node *node,
struct clk_omap_reg *reg, const struct clk_div_table **table,
- u32 *flags, u8 *div_flags, u8 *width, u8 *shift)
+ u32 *flags, u8 *div_flags, u8 *width, u8 *shift, s8 *latch)
{
u32 val;
int ret;
@@ -570,6 +575,13 @@ static int __init ti_clk_divider_populate(struct device_node *node,
else
*shift = 0;
+ if (latch) {
+ if (!of_property_read_u32(node, "ti,latch-bit", &val))
+ *latch = val;
+ else
+ *latch = -EINVAL;
+ }
+
*flags = 0;
*div_flags = 0;
@@ -606,17 +618,18 @@ static void __init of_ti_divider_clk_setup(struct device_node *node)
u8 clk_divider_flags = 0;
u8 width = 0;
u8 shift = 0;
+ s8 latch = -EINVAL;
const struct clk_div_table *table = NULL;
u32 flags = 0;
parent_name = of_clk_get_parent_name(node, 0);
if (ti_clk_divider_populate(node, &reg, &table, &flags,
- &clk_divider_flags, &width, &shift))
+ &clk_divider_flags, &width, &shift, &latch))
goto cleanup;
clk = _register_divider(NULL, node->name, parent_name, flags, &reg,
- shift, width, clk_divider_flags, table);
+ shift, width, latch, clk_divider_flags, table);
if (!IS_ERR(clk)) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
@@ -639,7 +652,8 @@ static void __init of_ti_composite_divider_clk_setup(struct device_node *node)
return;
if (ti_clk_divider_populate(node, &div->reg, &div->table, &val,
- &div->flags, &div->width, &div->shift) < 0)
+ &div->flags, &div->width, &div->shift,
+ NULL) < 0)
goto cleanup;
if (!ti_clk_add_component(node, &div->hw, CLK_COMPONENT_TYPE_DIVIDER))
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index d4705803f3d3..69a4308a5a98 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -86,6 +86,7 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
}
val |= index << mux->shift;
ti_clk_ll_ops->clk_writel(val, &mux->reg);
+ ti_clk_latch(&mux->reg, mux->latch);
return 0;
}
@@ -100,7 +101,7 @@ static struct clk *_register_mux(struct device *dev, const char *name,
const char * const *parent_names,
u8 num_parents, unsigned long flags,
struct clk_omap_reg *reg, u8 shift, u32 mask,
- u8 clk_mux_flags, u32 *table)
+ s8 latch, u8 clk_mux_flags, u32 *table)
{
struct clk_omap_mux *mux;
struct clk *clk;
@@ -121,6 +122,7 @@ static struct clk *_register_mux(struct device *dev, const char *name,
memcpy(&mux->reg, reg, sizeof(*reg));
mux->shift = shift;
mux->mask = mask;
+ mux->latch = latch;
mux->flags = clk_mux_flags;
mux->table = table;
mux->hw.init = &init;
@@ -160,7 +162,7 @@ struct clk *ti_clk_register_mux(struct ti_clk *setup)
flags |= CLK_SET_RATE_PARENT;
return _register_mux(NULL, setup->name, mux->parents, mux->num_parents,
- flags, &reg, mux->bit_shift, mask,
+ flags, &reg, mux->bit_shift, mask, -EINVAL,
mux_flags, NULL);
}
@@ -179,6 +181,7 @@ static void of_mux_clk_setup(struct device_node *node)
u8 clk_mux_flags = 0;
u32 mask = 0;
u32 shift = 0;
+ s32 latch = -EINVAL;
u32 flags = CLK_SET_RATE_NO_REPARENT;
num_parents = of_clk_get_parent_count(node);
@@ -197,6 +200,8 @@ static void of_mux_clk_setup(struct device_node *node)
of_property_read_u32(node, "ti,bit-shift", &shift);
+ of_property_read_u32(node, "ti,latch-bit", &latch);
+
if (of_property_read_bool(node, "ti,index-starts-at-one"))
clk_mux_flags |= CLK_MUX_INDEX_ONE;
@@ -211,7 +216,8 @@ static void of_mux_clk_setup(struct device_node *node)
mask = (1 << fls(mask)) - 1;
clk = _register_mux(NULL, node->name, parent_names, num_parents,
- flags, &reg, shift, mask, clk_mux_flags, NULL);
+ flags, &reg, shift, mask, latch, clk_mux_flags,
+ NULL);
if (!IS_ERR(clk))
of_clk_add_provider(node, of_clk_src_simple_get, clk);
@@ -234,6 +240,7 @@ struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup)
return ERR_PTR(-ENOMEM);
mux->shift = setup->bit_shift;
+ mux->latch = -EINVAL;
mux->reg.index = setup->module;
mux->reg.offset = setup->reg;
diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c
index d244e724e198..ebc78ab2df05 100644
--- a/drivers/clk/uniphier/clk-uniphier-sys.c
+++ b/drivers/clk/uniphier/clk-uniphier-sys.c
@@ -57,6 +57,14 @@
#define UNIPHIER_PRO4_SYS_CLK_USB3(idx, ch) \
UNIPHIER_CLK_GATE("usb3" #ch, (idx), NULL, 0x2104, 16 + (ch))
+#define UNIPHIER_PRO4_SYS_CLK_AIO(idx) \
+ UNIPHIER_CLK_FACTOR("aio-io200m", -1, "spll", 1, 8), \
+ UNIPHIER_CLK_GATE("aio", (idx), "aio-io200m", 0x2104, 13)
+
+#define UNIPHIER_PRO5_SYS_CLK_AIO(idx) \
+ UNIPHIER_CLK_FACTOR("aio-io200m", -1, "spll", 1, 12), \
+ UNIPHIER_CLK_GATE("aio", (idx), "aio-io200m", 0x2104, 13)
+
#define UNIPHIER_LD11_SYS_CLK_AIO(idx) \
UNIPHIER_CLK_FACTOR("aio-io200m", -1, "spll", 1, 10), \
UNIPHIER_CLK_GATE("aio", (idx), "aio-io200m", 0x2108, 0)
@@ -94,16 +102,22 @@ const struct uniphier_clk_data uniphier_pro4_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("upll", -1, "ref", 288, 25), /* 288 MHz */
UNIPHIER_CLK_FACTOR("a2pll", -1, "upll", 256, 125), /* 589.824 MHz */
UNIPHIER_CLK_FACTOR("vpll27a", -1, "ref", 270, 25), /* 270 MHz */
+ UNIPHIER_CLK_FACTOR("gpll", -1, "ref", 10, 1), /* 250 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "a2pll", 1, 8),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 32),
UNIPHIER_LD4_SYS_CLK_NAND(2),
UNIPHIER_LD4_SYS_CLK_SD,
UNIPHIER_CLK_FACTOR("usb2", -1, "upll", 1, 12),
UNIPHIER_PRO4_SYS_CLK_ETHER(6),
+ UNIPHIER_CLK_GATE("ether-gb", 7, "gpll", 0x2104, 5),
UNIPHIER_LD4_SYS_CLK_STDMAC(8), /* HSC, MIO, RLE */
+ UNIPHIER_CLK_GATE("ether-phy", 10, "ref", 0x2260, 0),
UNIPHIER_PRO4_SYS_CLK_GIO(12), /* Ether, SATA, USB3 */
UNIPHIER_PRO4_SYS_CLK_USB3(14, 0),
UNIPHIER_PRO4_SYS_CLK_USB3(15, 1),
+ UNIPHIER_CLK_GATE("sata0", 28, NULL, 0x2104, 18),
+ UNIPHIER_CLK_GATE("sata1", 29, NULL, 0x2104, 19),
+ UNIPHIER_PRO4_SYS_CLK_AIO(40),
{ /* sentinel */ }
};
@@ -132,6 +146,8 @@ const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = {
UNIPHIER_PRO4_SYS_CLK_GIO(12), /* PCIe, USB3 */
UNIPHIER_PRO4_SYS_CLK_USB3(14, 0),
UNIPHIER_PRO4_SYS_CLK_USB3(15, 1),
+ UNIPHIER_CLK_GATE("pcie", 24, NULL, 0x2108, 2),
+ UNIPHIER_PRO5_SYS_CLK_AIO(40),
{ /* sentinel */ }
};
@@ -149,6 +165,8 @@ const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[] = {
/* The document mentions 0x2104 bit 18, but not functional */
UNIPHIER_CLK_GATE("usb30-phy", 16, NULL, 0x2104, 19),
UNIPHIER_CLK_GATE("usb31-phy", 20, NULL, 0x2104, 20),
+ UNIPHIER_CLK_GATE("sata0", 28, NULL, 0x2104, 22),
+ UNIPHIER_PRO5_SYS_CLK_AIO(40),
{ /* sentinel */ }
};
@@ -205,6 +223,7 @@ const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = {
UNIPHIER_CLK_GATE("usb30", 14, NULL, 0x210c, 14),
UNIPHIER_CLK_GATE("usb30-phy0", 16, NULL, 0x210c, 12),
UNIPHIER_CLK_GATE("usb30-phy1", 17, NULL, 0x210c, 13),
+ UNIPHIER_CLK_GATE("pcie", 24, NULL, 0x210c, 4),
UNIPHIER_LD11_SYS_CLK_AIO(40),
UNIPHIER_LD11_SYS_CLK_EVEA(41),
UNIPHIER_LD11_SYS_CLK_EXIV(42),
@@ -233,6 +252,8 @@ const struct uniphier_clk_data uniphier_pxs3_sys_clk_data[] = {
UNIPHIER_LD20_SYS_CLK_SD,
UNIPHIER_LD11_SYS_CLK_NAND(2),
UNIPHIER_LD11_SYS_CLK_EMMC(4),
+ UNIPHIER_CLK_GATE("ether0", 6, NULL, 0x210c, 9),
+ UNIPHIER_CLK_GATE("ether1", 7, NULL, 0x210c, 10),
UNIPHIER_CLK_GATE("usb30", 12, NULL, 0x210c, 4), /* =GIO0 */
UNIPHIER_CLK_GATE("usb31-0", 13, NULL, 0x210c, 5), /* =GIO1 */
UNIPHIER_CLK_GATE("usb31-1", 14, NULL, 0x210c, 6), /* =GIO1-1 */
@@ -241,6 +262,10 @@ const struct uniphier_clk_data uniphier_pxs3_sys_clk_data[] = {
UNIPHIER_CLK_GATE("usb30-phy2", 18, NULL, 0x210c, 20),
UNIPHIER_CLK_GATE("usb31-phy0", 20, NULL, 0x210c, 17),
UNIPHIER_CLK_GATE("usb31-phy1", 21, NULL, 0x210c, 19),
+ UNIPHIER_CLK_GATE("pcie", 24, NULL, 0x210c, 3),
+ UNIPHIER_CLK_GATE("sata0", 28, NULL, 0x210c, 7),
+ UNIPHIER_CLK_GATE("sata1", 29, NULL, 0x210c, 8),
+ UNIPHIER_CLK_GATE("sata-phy", 30, NULL, 0x210c, 21),
/* CPU gears */
UNIPHIER_CLK_DIV4("cpll", 2, 3, 4, 8),
UNIPHIER_CLK_DIV4("spll", 2, 3, 4, 8),
diff --git a/drivers/clk/ux500/Makefile b/drivers/clk/ux500/Makefile
index fedc083dc8be..53fd29002401 100644
--- a/drivers/clk/ux500/Makefile
+++ b/drivers/clk/ux500/Makefile
@@ -10,8 +10,6 @@ obj-y += clk-sysctrl.o
# Clock definitions
obj-y += u8500_of_clk.o
-obj-y += u9540_clk.o
-obj-y += u8540_clk.o
# ABX500 clock driver
obj-y += abx500-clk.o
diff --git a/drivers/clk/ux500/abx500-clk.c b/drivers/clk/ux500/abx500-clk.c
index 2257d12ba988..5a86cd8fe5de 100644
--- a/drivers/clk/ux500/abx500-clk.c
+++ b/drivers/clk/ux500/abx500-clk.c
@@ -88,18 +88,6 @@ static int ab8500_reg_clks(struct device *dev)
return 0;
}
-/* Clock definitions for ab8540 */
-static int ab8540_reg_clks(struct device *dev)
-{
- return 0;
-}
-
-/* Clock definitions for ab9540 */
-static int ab9540_reg_clks(struct device *dev)
-{
- return 0;
-}
-
static int abx500_clk_probe(struct platform_device *pdev)
{
struct ab8500 *parent = dev_get_drvdata(pdev->dev.parent);
@@ -107,10 +95,6 @@ static int abx500_clk_probe(struct platform_device *pdev)
if (is_ab8500(parent) || is_ab8505(parent)) {
ret = ab8500_reg_clks(&pdev->dev);
- } else if (is_ab8540(parent)) {
- ret = ab8540_reg_clks(&pdev->dev);
- } else if (is_ab9540(parent)) {
- ret = ab9540_reg_clks(&pdev->dev);
} else {
dev_err(&pdev->dev, "non supported plf id\n");
return -ENODEV;
diff --git a/drivers/clk/ux500/u8540_clk.c b/drivers/clk/ux500/u8540_clk.c
deleted file mode 100644
index 133859f0e2bf..000000000000
--- a/drivers/clk/ux500/u8540_clk.c
+++ /dev/null
@@ -1,597 +0,0 @@
-/*
- * Clock definitions for u8540 platform.
- *
- * Copyright (C) 2012 ST-Ericsson SA
- * Author: Ulf Hansson <ulf.hansson@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/mfd/dbx500-prcmu.h>
-#include "clk.h"
-
-/* CLKRST4 is missing making it hard to index things */
-enum clkrst_index {
- CLKRST1_INDEX = 0,
- CLKRST2_INDEX,
- CLKRST3_INDEX,
- CLKRST5_INDEX,
- CLKRST6_INDEX,
- CLKRST_MAX,
-};
-
-static void u8540_clk_init(struct device_node *np)
-{
- struct clk *clk;
- u32 bases[CLKRST_MAX];
- int i;
-
- for (i = 0; i < ARRAY_SIZE(bases); i++) {
- struct resource r;
-
- if (of_address_to_resource(np, i, &r))
- /* Not much choice but to continue */
- pr_err("failed to get CLKRST %d base address\n",
- i + 1);
- bases[i] = r.start;
- }
-
- /* Clock sources. */
- /* Fixed ClockGen */
- clk = clk_reg_prcmu_gate("soc0_pll", NULL, PRCMU_PLLSOC0,
- CLK_IGNORE_UNUSED);
- clk_register_clkdev(clk, "soc0_pll", NULL);
-
- clk = clk_reg_prcmu_gate("soc1_pll", NULL, PRCMU_PLLSOC1,
- CLK_IGNORE_UNUSED);
- clk_register_clkdev(clk, "soc1_pll", NULL);
-
- clk = clk_reg_prcmu_gate("ddr_pll", NULL, PRCMU_PLLDDR,
- CLK_IGNORE_UNUSED);
- clk_register_clkdev(clk, "ddr_pll", NULL);
-
- clk = clk_register_fixed_rate(NULL, "rtc32k", NULL,
- CLK_IGNORE_UNUSED,
- 32768);
- clk_register_clkdev(clk, "clk32k", NULL);
- clk_register_clkdev(clk, "apb_pclk", "rtc-pl031");
-
- clk = clk_register_fixed_rate(NULL, "ulp38m4", NULL,
- CLK_IGNORE_UNUSED,
- 38400000);
-
- clk = clk_reg_prcmu_gate("uartclk", NULL, PRCMU_UARTCLK, 0);
- clk_register_clkdev(clk, NULL, "UART");
-
- /* msp02clk needs a abx500 clk as parent. Handle by abx500 clk driver */
- clk = clk_reg_prcmu_gate("msp02clk", "ab9540_sysclk12_b1",
- PRCMU_MSP02CLK, 0);
- clk_register_clkdev(clk, NULL, "MSP02");
-
- clk = clk_reg_prcmu_gate("msp1clk", NULL, PRCMU_MSP1CLK, 0);
- clk_register_clkdev(clk, NULL, "MSP1");
-
- clk = clk_reg_prcmu_gate("i2cclk", NULL, PRCMU_I2CCLK, 0);
- clk_register_clkdev(clk, NULL, "I2C");
-
- clk = clk_reg_prcmu_gate("slimclk", NULL, PRCMU_SLIMCLK, 0);
- clk_register_clkdev(clk, NULL, "slim");
-
- clk = clk_reg_prcmu_gate("per1clk", NULL, PRCMU_PER1CLK, 0);
- clk_register_clkdev(clk, NULL, "PERIPH1");
-
- clk = clk_reg_prcmu_gate("per2clk", NULL, PRCMU_PER2CLK, 0);
- clk_register_clkdev(clk, NULL, "PERIPH2");
-
- clk = clk_reg_prcmu_gate("per3clk", NULL, PRCMU_PER3CLK, 0);
- clk_register_clkdev(clk, NULL, "PERIPH3");
-
- clk = clk_reg_prcmu_gate("per5clk", NULL, PRCMU_PER5CLK, 0);
- clk_register_clkdev(clk, NULL, "PERIPH5");
-
- clk = clk_reg_prcmu_gate("per6clk", NULL, PRCMU_PER6CLK, 0);
- clk_register_clkdev(clk, NULL, "PERIPH6");
-
- clk = clk_reg_prcmu_gate("per7clk", NULL, PRCMU_PER7CLK, 0);
- clk_register_clkdev(clk, NULL, "PERIPH7");
-
- clk = clk_reg_prcmu_scalable("lcdclk", NULL, PRCMU_LCDCLK, 0,
- CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "lcd");
- clk_register_clkdev(clk, "lcd", "mcde");
-
- clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK, 0);
- clk_register_clkdev(clk, NULL, "bml");
-
- clk = clk_reg_prcmu_scalable("hsitxclk", NULL, PRCMU_HSITXCLK, 0,
- CLK_SET_RATE_GATE);
-
- clk = clk_reg_prcmu_scalable("hsirxclk", NULL, PRCMU_HSIRXCLK, 0,
- CLK_SET_RATE_GATE);
-
- clk = clk_reg_prcmu_scalable("hdmiclk", NULL, PRCMU_HDMICLK, 0,
- CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "hdmi");
- clk_register_clkdev(clk, "hdmi", "mcde");
-
- clk = clk_reg_prcmu_gate("apeatclk", NULL, PRCMU_APEATCLK, 0);
- clk_register_clkdev(clk, NULL, "apeat");
-
- clk = clk_reg_prcmu_gate("apetraceclk", NULL, PRCMU_APETRACECLK, 0);
- clk_register_clkdev(clk, NULL, "apetrace");
-
- clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, 0);
- clk_register_clkdev(clk, NULL, "mcde");
- clk_register_clkdev(clk, "mcde", "mcde");
- clk_register_clkdev(clk, NULL, "dsilink.0");
- clk_register_clkdev(clk, NULL, "dsilink.1");
- clk_register_clkdev(clk, NULL, "dsilink.2");
-
- clk = clk_reg_prcmu_opp_gate("ipi2cclk", NULL, PRCMU_IPI2CCLK, 0);
- clk_register_clkdev(clk, NULL, "ipi2");
-
- clk = clk_reg_prcmu_gate("dsialtclk", NULL, PRCMU_DSIALTCLK, 0);
- clk_register_clkdev(clk, NULL, "dsialt");
-
- clk = clk_reg_prcmu_gate("dmaclk", NULL, PRCMU_DMACLK, 0);
- clk_register_clkdev(clk, NULL, "dma40.0");
-
- clk = clk_reg_prcmu_gate("b2r2clk", NULL, PRCMU_B2R2CLK, 0);
- clk_register_clkdev(clk, NULL, "b2r2");
- clk_register_clkdev(clk, NULL, "b2r2_core");
- clk_register_clkdev(clk, NULL, "U8500-B2R2.0");
- clk_register_clkdev(clk, NULL, "b2r2_1_core");
-
- clk = clk_reg_prcmu_scalable("tvclk", NULL, PRCMU_TVCLK, 0,
- CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "tv");
- clk_register_clkdev(clk, "tv", "mcde");
-
- clk = clk_reg_prcmu_gate("sspclk", NULL, PRCMU_SSPCLK, 0);
- clk_register_clkdev(clk, NULL, "SSP");
-
- clk = clk_reg_prcmu_gate("rngclk", NULL, PRCMU_RNGCLK, 0);
- clk_register_clkdev(clk, NULL, "rngclk");
-
- clk = clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, 0);
- clk_register_clkdev(clk, NULL, "uicc");
-
- clk = clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, 0);
- clk_register_clkdev(clk, NULL, "mtu0");
- clk_register_clkdev(clk, NULL, "mtu1");
-
- clk = clk_reg_prcmu_opp_volt_scalable("sdmmcclk", NULL,
- PRCMU_SDMMCCLK, 100000000,
- CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdmmc");
-
- clk = clk_reg_prcmu_opp_volt_scalable("sdmmchclk", NULL,
- PRCMU_SDMMCHCLK, 400000000,
- CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdmmchclk");
-
- clk = clk_reg_prcmu_gate("hvaclk", NULL, PRCMU_HVACLK, 0);
- clk_register_clkdev(clk, NULL, "hva");
-
- clk = clk_reg_prcmu_gate("g1clk", NULL, PRCMU_G1CLK, 0);
- clk_register_clkdev(clk, NULL, "g1");
-
- clk = clk_reg_prcmu_scalable("spare1clk", NULL, PRCMU_SPARE1CLK, 0,
- CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "dsilcd", "mcde");
-
- clk = clk_reg_prcmu_scalable("dsi_pll", "hdmiclk",
- PRCMU_PLLDSI, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "dsihs2", "mcde");
- clk_register_clkdev(clk, "hs_clk", "dsilink.2");
-
- clk = clk_reg_prcmu_scalable("dsilcd_pll", "spare1clk",
- PRCMU_PLLDSI_LCD, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "dsilcd_pll", "mcde");
-
- clk = clk_reg_prcmu_scalable("dsi0clk", "dsi_pll",
- PRCMU_DSI0CLK, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "dsihs0", "mcde");
-
- clk = clk_reg_prcmu_scalable("dsi0lcdclk", "dsilcd_pll",
- PRCMU_DSI0CLK_LCD, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "dsihs0", "mcde");
- clk_register_clkdev(clk, "hs_clk", "dsilink.0");
-
- clk = clk_reg_prcmu_scalable("dsi1clk", "dsi_pll",
- PRCMU_DSI1CLK, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "dsihs1", "mcde");
-
- clk = clk_reg_prcmu_scalable("dsi1lcdclk", "dsilcd_pll",
- PRCMU_DSI1CLK_LCD, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "dsihs1", "mcde");
- clk_register_clkdev(clk, "hs_clk", "dsilink.1");
-
- clk = clk_reg_prcmu_scalable("dsi0escclk", "tvclk",
- PRCMU_DSI0ESCCLK, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "lp_clk", "dsilink.0");
- clk_register_clkdev(clk, "dsilp0", "mcde");
-
- clk = clk_reg_prcmu_scalable("dsi1escclk", "tvclk",
- PRCMU_DSI1ESCCLK, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "lp_clk", "dsilink.1");
- clk_register_clkdev(clk, "dsilp1", "mcde");
-
- clk = clk_reg_prcmu_scalable("dsi2escclk", "tvclk",
- PRCMU_DSI2ESCCLK, 0, CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, "lp_clk", "dsilink.2");
- clk_register_clkdev(clk, "dsilp2", "mcde");
-
- clk = clk_reg_prcmu_scalable_rate("armss", NULL,
- PRCMU_ARMSS, 0, CLK_IGNORE_UNUSED);
- clk_register_clkdev(clk, "armss", NULL);
-
- clk = clk_register_fixed_factor(NULL, "smp_twd", "armss",
- CLK_IGNORE_UNUSED, 1, 2);
- clk_register_clkdev(clk, NULL, "smp_twd");
-
- /* PRCC P-clocks */
- /* Peripheral 1 : PRCC P-clocks */
- clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", bases[CLKRST1_INDEX],
- BIT(0), 0);
- clk_register_clkdev(clk, "apb_pclk", "uart0");
-
- clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", bases[CLKRST1_INDEX],
- BIT(1), 0);
- clk_register_clkdev(clk, "apb_pclk", "uart1");
-
- clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", bases[CLKRST1_INDEX],
- BIT(2), 0);
- clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.1");
-
- clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", bases[CLKRST1_INDEX],
- BIT(3), 0);
- clk_register_clkdev(clk, "apb_pclk", "msp0");
- clk_register_clkdev(clk, "apb_pclk", "dbx5x0-msp-i2s.0");
-
- clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", bases[CLKRST1_INDEX],
- BIT(4), 0);
- clk_register_clkdev(clk, "apb_pclk", "msp1");
- clk_register_clkdev(clk, "apb_pclk", "dbx5x0-msp-i2s.1");
-
- clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", bases[CLKRST1_INDEX],
- BIT(5), 0);
- clk_register_clkdev(clk, "apb_pclk", "sdi0");
-
- clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", bases[CLKRST1_INDEX],
- BIT(6), 0);
- clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.2");
-
- clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", bases[CLKRST1_INDEX],
- BIT(7), 0);
- clk_register_clkdev(clk, NULL, "spi3");
-
- clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", bases[CLKRST1_INDEX],
- BIT(8), 0);
- clk_register_clkdev(clk, "apb_pclk", "slimbus0");
-
- clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", bases[CLKRST1_INDEX],
- BIT(9), 0);
- clk_register_clkdev(clk, NULL, "gpio.0");
- clk_register_clkdev(clk, NULL, "gpio.1");
- clk_register_clkdev(clk, NULL, "gpioblock0");
- clk_register_clkdev(clk, "apb_pclk", "ab85xx-codec.0");
-
- clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", bases[CLKRST1_INDEX],
- BIT(10), 0);
- clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.4");
-
- clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", bases[CLKRST1_INDEX],
- BIT(11), 0);
- clk_register_clkdev(clk, "apb_pclk", "msp3");
- clk_register_clkdev(clk, "apb_pclk", "dbx5x0-msp-i2s.3");
-
- /* Peripheral 2 : PRCC P-clocks */
- clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", bases[CLKRST2_INDEX],
- BIT(0), 0);
- clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.3");
-
- clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", bases[CLKRST2_INDEX],
- BIT(1), 0);
- clk_register_clkdev(clk, NULL, "spi2");
-
- clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", bases[CLKRST2_INDEX],
- BIT(2), 0);
- clk_register_clkdev(clk, NULL, "spi1");
-
- clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", bases[CLKRST2_INDEX],
- BIT(3), 0);
- clk_register_clkdev(clk, NULL, "pwl");
-
- clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", bases[CLKRST2_INDEX],
- BIT(4), 0);
- clk_register_clkdev(clk, "apb_pclk", "sdi4");
-
- clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", bases[CLKRST2_INDEX],
- BIT(5), 0);
- clk_register_clkdev(clk, "apb_pclk", "msp2");
- clk_register_clkdev(clk, "apb_pclk", "dbx5x0-msp-i2s.2");
-
- clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", bases[CLKRST2_INDEX],
- BIT(6), 0);
- clk_register_clkdev(clk, "apb_pclk", "sdi1");
-
- clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", bases[CLKRST2_INDEX],
- BIT(7), 0);
- clk_register_clkdev(clk, "apb_pclk", "sdi3");
-
- clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", bases[CLKRST2_INDEX],
- BIT(8), 0);
- clk_register_clkdev(clk, NULL, "spi0");
-
- clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", bases[CLKRST2_INDEX],
- BIT(9), 0);
- clk_register_clkdev(clk, "hsir_hclk", "ste_hsi.0");
-
- clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", bases[CLKRST2_INDEX],
- BIT(10), 0);
- clk_register_clkdev(clk, "hsit_hclk", "ste_hsi.0");
-
- clk = clk_reg_prcc_pclk("p2_pclk11", "per2clk", bases[CLKRST2_INDEX],
- BIT(11), 0);
- clk_register_clkdev(clk, NULL, "gpio.6");
- clk_register_clkdev(clk, NULL, "gpio.7");
- clk_register_clkdev(clk, NULL, "gpioblock1");
-
- clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", bases[CLKRST2_INDEX],
- BIT(12), 0);
- clk_register_clkdev(clk, "msp4-pclk", "ab85xx-codec.0");
-
- /* Peripheral 3 : PRCC P-clocks */
- clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", bases[CLKRST3_INDEX],
- BIT(0), 0);
- clk_register_clkdev(clk, NULL, "fsmc");
-
- clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", bases[CLKRST3_INDEX],
- BIT(1), 0);
- clk_register_clkdev(clk, "apb_pclk", "ssp0");
-
- clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", bases[CLKRST3_INDEX],
- BIT(2), 0);
- clk_register_clkdev(clk, "apb_pclk", "ssp1");
-
- clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", bases[CLKRST3_INDEX],
- BIT(3), 0);
- clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.0");
-
- clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", bases[CLKRST3_INDEX],
- BIT(4), 0);
- clk_register_clkdev(clk, "apb_pclk", "sdi2");
-
- clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", bases[CLKRST3_INDEX],
- BIT(5), 0);
- clk_register_clkdev(clk, "apb_pclk", "ske");
- clk_register_clkdev(clk, "apb_pclk", "nmk-ske-keypad");
-
- clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", bases[CLKRST3_INDEX],
- BIT(6), 0);
- clk_register_clkdev(clk, "apb_pclk", "uart2");
-
- clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", bases[CLKRST3_INDEX],
- BIT(7), 0);
- clk_register_clkdev(clk, "apb_pclk", "sdi5");
-
- clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", bases[CLKRST3_INDEX],
- BIT(8), 0);
- clk_register_clkdev(clk, NULL, "gpio.2");
- clk_register_clkdev(clk, NULL, "gpio.3");
- clk_register_clkdev(clk, NULL, "gpio.4");
- clk_register_clkdev(clk, NULL, "gpio.5");
- clk_register_clkdev(clk, NULL, "gpioblock2");
-
- clk = clk_reg_prcc_pclk("p3_pclk9", "per3clk", bases[CLKRST3_INDEX],
- BIT(9), 0);
- clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.5");
-
- clk = clk_reg_prcc_pclk("p3_pclk10", "per3clk", bases[CLKRST3_INDEX],
- BIT(10), 0);
- clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.6");
-
- clk = clk_reg_prcc_pclk("p3_pclk11", "per3clk", bases[CLKRST3_INDEX],
- BIT(11), 0);
- clk_register_clkdev(clk, "apb_pclk", "uart3");
-
- clk = clk_reg_prcc_pclk("p3_pclk12", "per3clk", bases[CLKRST3_INDEX],
- BIT(12), 0);
- clk_register_clkdev(clk, "apb_pclk", "uart4");
-
- /* Peripheral 5 : PRCC P-clocks */
- clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", bases[CLKRST5_INDEX],
- BIT(0), 0);
- clk_register_clkdev(clk, "usb", "musb-ux500.0");
- clk_register_clkdev(clk, "usbclk", "ab-iddet.0");
-
- clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", bases[CLKRST5_INDEX],
- BIT(1), 0);
- clk_register_clkdev(clk, NULL, "gpio.8");
- clk_register_clkdev(clk, NULL, "gpioblock3");
-
- /* Peripheral 6 : PRCC P-clocks */
- clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", bases[CLKRST6_INDEX],
- BIT(0), 0);
- clk_register_clkdev(clk, "apb_pclk", "rng");
-
- clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", bases[CLKRST6_INDEX],
- BIT(1), 0);
- clk_register_clkdev(clk, NULL, "cryp0");
- clk_register_clkdev(clk, NULL, "cryp1");
-
- clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", bases[CLKRST6_INDEX],
- BIT(2), 0);
- clk_register_clkdev(clk, NULL, "hash0");
-
- clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", bases[CLKRST6_INDEX],
- BIT(3), 0);
- clk_register_clkdev(clk, NULL, "pka");
-
- clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", bases[CLKRST6_INDEX],
- BIT(4), 0);
- clk_register_clkdev(clk, NULL, "db8540-hash1");
-
- clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", bases[CLKRST6_INDEX],
- BIT(5), 0);
- clk_register_clkdev(clk, NULL, "cfgreg");
-
- clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", bases[CLKRST6_INDEX],
- BIT(6), 0);
- clk_register_clkdev(clk, "apb_pclk", "mtu0");
-
- clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", bases[CLKRST6_INDEX],
- BIT(7), 0);
- clk_register_clkdev(clk, "apb_pclk", "mtu1");
-
- /*
- * PRCC K-clocks ==> see table PRCC_PCKEN/PRCC_KCKEN
- * This differs from the internal implementation:
- * We don't use the PERPIH[n| clock as parent, since those _should_
- * only be used as parents for the P-clocks.
- * TODO: "parentjoin" with corresponding P-clocks for all K-clocks.
- */
-
- /* Peripheral 1 : PRCC K-clocks */
- clk = clk_reg_prcc_kclk("p1_uart0_kclk", "uartclk",
- bases[CLKRST1_INDEX], BIT(0), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "uart0");
-
- clk = clk_reg_prcc_kclk("p1_uart1_kclk", "uartclk",
- bases[CLKRST1_INDEX], BIT(1), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "uart1");
-
- clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk",
- bases[CLKRST1_INDEX], BIT(2), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "nmk-i2c.1");
-
- clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk",
- bases[CLKRST1_INDEX], BIT(3), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "msp0");
- clk_register_clkdev(clk, NULL, "dbx5x0-msp-i2s.0");
-
- clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk",
- bases[CLKRST1_INDEX], BIT(4), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "msp1");
- clk_register_clkdev(clk, NULL, "dbx5x0-msp-i2s.1");
-
- clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmchclk",
- bases[CLKRST1_INDEX], BIT(5), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdi0");
-
- clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk",
- bases[CLKRST1_INDEX], BIT(6), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "nmk-i2c.2");
-
- clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk",
- bases[CLKRST1_INDEX], BIT(8), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "slimbus0");
-
- clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk",
- bases[CLKRST1_INDEX], BIT(9), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "nmk-i2c.4");
-
- clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk",
- bases[CLKRST1_INDEX], BIT(10), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "msp3");
- clk_register_clkdev(clk, NULL, "dbx5x0-msp-i2s.3");
-
- /* Peripheral 2 : PRCC K-clocks */
- clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk",
- bases[CLKRST2_INDEX], BIT(0), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "nmk-i2c.3");
-
- clk = clk_reg_prcc_kclk("p2_pwl_kclk", "rtc32k",
- bases[CLKRST2_INDEX], BIT(1), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "pwl");
-
- clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmchclk",
- bases[CLKRST2_INDEX], BIT(2), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdi4");
-
- clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk",
- bases[CLKRST2_INDEX], BIT(3), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "msp2");
- clk_register_clkdev(clk, NULL, "dbx5x0-msp-i2s.2");
-
- clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmchclk",
- bases[CLKRST2_INDEX], BIT(4), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdi1");
-
- clk = clk_reg_prcc_kclk("p2_sdi3_kclk", "sdmmcclk",
- bases[CLKRST2_INDEX], BIT(5), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdi3");
-
- clk = clk_reg_prcc_kclk("p2_ssirx_kclk", "hsirxclk",
- bases[CLKRST2_INDEX], BIT(6),
- CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
- clk_register_clkdev(clk, "hsir_hsirxclk", "ste_hsi.0");
-
- clk = clk_reg_prcc_kclk("p2_ssitx_kclk", "hsitxclk",
- bases[CLKRST2_INDEX], BIT(7),
- CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
- clk_register_clkdev(clk, "hsit_hsitxclk", "ste_hsi.0");
-
- /* Should only be 9540, but might be added for 85xx as well */
- clk = clk_reg_prcc_kclk("p2_msp4_kclk", "msp02clk",
- bases[CLKRST2_INDEX], BIT(9), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "msp4");
- clk_register_clkdev(clk, "msp4", "ab85xx-codec.0");
-
- /* Peripheral 3 : PRCC K-clocks */
- clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk",
- bases[CLKRST3_INDEX], BIT(1), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "ssp0");
-
- clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk",
- bases[CLKRST3_INDEX], BIT(2), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "ssp1");
-
- clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk",
- bases[CLKRST3_INDEX], BIT(3), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "nmk-i2c.0");
-
- clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmchclk",
- bases[CLKRST3_INDEX], BIT(4), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdi2");
-
- clk = clk_reg_prcc_kclk("p3_ske_kclk", "rtc32k",
- bases[CLKRST3_INDEX], BIT(5), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "ske");
- clk_register_clkdev(clk, NULL, "nmk-ske-keypad");
-
- clk = clk_reg_prcc_kclk("p3_uart2_kclk", "uartclk",
- bases[CLKRST3_INDEX], BIT(6), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "uart2");
-
- clk = clk_reg_prcc_kclk("p3_sdi5_kclk", "sdmmcclk",
- bases[CLKRST3_INDEX], BIT(7), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "sdi5");
-
- clk = clk_reg_prcc_kclk("p3_i2c5_kclk", "i2cclk",
- bases[CLKRST3_INDEX], BIT(8), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "nmk-i2c.5");
-
- clk = clk_reg_prcc_kclk("p3_i2c6_kclk", "i2cclk",
- bases[CLKRST3_INDEX], BIT(9), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "nmk-i2c.6");
-
- clk = clk_reg_prcc_kclk("p3_uart3_kclk", "uartclk",
- bases[CLKRST3_INDEX], BIT(10), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "uart3");
-
- clk = clk_reg_prcc_kclk("p3_uart4_kclk", "uartclk",
- bases[CLKRST3_INDEX], BIT(11), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "uart4");
-
- /* Peripheral 6 : PRCC K-clocks */
- clk = clk_reg_prcc_kclk("p6_rng_kclk", "rngclk",
- bases[CLKRST6_INDEX], BIT(0), CLK_SET_RATE_GATE);
- clk_register_clkdev(clk, NULL, "rng");
-}
-CLK_OF_DECLARE(u8540_clks, "stericsson,u8540-clks", u8540_clk_init);
diff --git a/drivers/clk/ux500/u9540_clk.c b/drivers/clk/ux500/u9540_clk.c
deleted file mode 100644
index 7b6bca49ce42..000000000000
--- a/drivers/clk/ux500/u9540_clk.c
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Clock definitions for u9540 platform.
- *
- * Copyright (C) 2012 ST-Ericsson SA
- * Author: Ulf Hansson <ulf.hansson@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#include <linux/clk-provider.h>
-#include <linux/mfd/dbx500-prcmu.h>
-#include "clk.h"
-
-static void u9540_clk_init(struct device_node *np)
-{
- /* register clocks here */
-}
-CLK_OF_DECLARE(u9540_clks, "stericsson,u9540-clks", u9540_clk_init);
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index e7a868b83fe5..dd08ecb498be 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -44,10 +44,10 @@ static long vexpress_osc_round_rate(struct clk_hw *hw, unsigned long rate,
{
struct vexpress_osc *osc = to_vexpress_osc(hw);
- if (WARN_ON(osc->rate_min && rate < osc->rate_min))
+ if (osc->rate_min && rate < osc->rate_min)
rate = osc->rate_min;
- if (WARN_ON(osc->rate_max && rate > osc->rate_max))
+ if (osc->rate_max && rate > osc->rate_max)
rate = osc->rate_max;
return rate;
@@ -104,6 +104,7 @@ static int vexpress_osc_probe(struct platform_device *pdev)
return PTR_ERR(clk);
of_clk_add_provider(pdev->dev.of_node, of_clk_src_simple_get, clk);
+ clk_hw_set_rate_range(&osc->hw, osc->rate_min, osc->rate_max);
dev_dbg(&pdev->dev, "Registered clock '%s'\n", init.name);
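The vexpress-osc change above drops the WARN_ON()s from round_rate() and instead tells the clk core about the hardware limits with clk_hw_set_rate_range(). A minimal sketch of that registration pattern, assuming a devm-managed provider and made-up names; it is not code from this driver:

    #include <linux/clk-provider.h>

    static int example_osc_register(struct device *dev, struct clk_hw *hw,
                                    unsigned long rate_min, unsigned long rate_max)
    {
            int ret;

            ret = devm_clk_hw_register(dev, hw);
            if (ret)
                    return ret;

            /* let the clk core enforce the hardware min/max on rate requests */
            clk_hw_set_rate_range(hw, rate_min, rate_max);

            return 0;
    }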
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index c6ebc88a7d8d..72a2975499db 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -202,6 +202,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
cur_frequency = clk_get_rate(clk);
if (!cur_frequency) {
dev_err(cpu_dev, "Failed to get clock rate for CPU\n");
+ clk_put(clk);
return -EINVAL;
}
@@ -210,6 +211,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
return -EINVAL;
armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
+ clk_put(clk);
for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
load_lvl++) {
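The two clk_put() calls added above restore the usual reference-counting rule: every successful clk_get() is balanced by clk_put() on both the error and the success path. A generic sketch of the pattern (illustrative only, not code from this driver):

    #include <linux/clk.h>
    #include <linux/err.h>

    static int example_read_cpu_clk_rate(struct device *cpu_dev, unsigned long *rate)
    {
            struct clk *clk;

            clk = clk_get(cpu_dev, NULL);
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            *rate = clk_get_rate(clk);
            clk_put(clk);               /* drop the reference on every exit path */

            return *rate ? 0 : -EINVAL;
    }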
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 8300a9fcb80c..bc5fc1630876 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -162,14 +162,23 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpu->perf_caps.highest_perf;
policy->cpuinfo.max_freq = cppc_dmi_max_khz;
- policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu_num);
policy->transition_delay_us = cppc_get_transition_latency(cpu_num) /
NSEC_PER_USEC;
policy->shared_type = cpu->shared_type;
- if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
+ int i;
+
cpumask_copy(policy->cpus, cpu->shared_cpu_map);
- else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
+
+ for_each_cpu(i, policy->cpus) {
+ if (unlikely(i == policy->cpu))
+ continue;
+
+ memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps,
+ sizeof(cpu->perf_caps));
+ }
+ } else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
/* Support only SW_ANY for now. */
pr_debug("Unsupported CPU co-ord type\n");
return -EFAULT;
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 10e119ae66dd..3a8cc99e6815 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -352,20 +352,6 @@ static int set_freq_table_sorted(struct cpufreq_policy *policy)
return 0;
}
-int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table)
-{
- int ret;
-
- ret = cpufreq_frequency_table_cpuinfo(policy, table);
- if (ret)
- return ret;
-
- policy->freq_table = table;
- return 0;
-}
-EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
-
int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy)
{
int ret;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6d084c61ee25..17e566afbb41 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -26,7 +26,6 @@
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
-#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 959a1dbe3835..b4dbc77459b6 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -159,13 +159,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
priv->domain_id = handle->perf_ops->device_domain_id(cpu_dev);
policy->driver_data = priv;
-
- ret = cpufreq_table_validate_and_show(policy, freq_table);
- if (ret) {
- dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
- ret);
- goto out_free_cpufreq_table;
- }
+ policy->freq_table = freq_table;
/* SCMI allows DVFS request for any domain from any CPU */
policy->dvfs_possible_from_any_cpu = true;
@@ -179,8 +173,6 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
policy->fast_switch_possible = true;
return 0;
-out_free_cpufreq_table:
- dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_priv:
kfree(priv);
out_free_opp:
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index a099b7bf74cd..6ba709b6f095 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -304,7 +304,7 @@ static struct platform_driver ti_cpufreq_driver = {
.name = "ti-cpufreq",
},
};
-module_platform_driver(ti_cpufreq_driver);
+builtin_platform_driver(ti_cpufreq_driver);
MODULE_DESCRIPTION("TI CPUFreq/OPP hw-supported driver");
MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 0003e9a02637..6df894d65d9e 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -272,12 +272,18 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
*
* @drv: the cpuidle driver
* @dev: the cpuidle device
+ * @stop_tick: indication of whether or not to stop the tick
*
* Returns the index of the idle state. The return value must not be negative.
+ *
+ * The memory location pointed to by @stop_tick is expected to be set to
+ * 'false' if the scheduler tick should not be stopped before entering the
+ * returned state.
*/
-int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ bool *stop_tick)
{
- return cpuidle_curr_governor->select(drv, dev);
+ return cpuidle_curr_governor->select(drv, dev, stop_tick);
}
/**
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 1ad8745fd6d6..b24883f85c99 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -63,9 +63,10 @@ static inline void ladder_do_selection(struct ladder_device *ldev,
* ladder_select_state - selects the next state to enter
* @drv: cpuidle driver
* @dev: the CPU
+ * @dummy: not used
*/
static int ladder_select_state(struct cpuidle_driver *drv,
- struct cpuidle_device *dev)
+ struct cpuidle_device *dev, bool *dummy)
{
struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
struct device *device = get_cpu_device(dev->cpu);
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index aa390404e85f..1bfe03ceb236 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -123,6 +123,7 @@
struct menu_device {
int last_state_idx;
int needs_update;
+ int tick_wakeup;
unsigned int next_timer_us;
unsigned int predicted_us;
@@ -279,8 +280,10 @@ again:
* menu_select - selects the next idle state to enter
* @drv: cpuidle driver containing state data
* @dev: the CPU
+ * @stop_tick: indication of whether or not to stop the tick
*/
-static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ bool *stop_tick)
{
struct menu_device *data = this_cpu_ptr(&menu_devices);
struct device *device = get_cpu_device(dev->cpu);
@@ -292,6 +295,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
unsigned int expected_interval;
unsigned long nr_iowaiters, cpu_load;
int resume_latency = dev_pm_qos_raw_read_value(device);
+ ktime_t delta_next;
if (data->needs_update) {
menu_update(drv, dev);
@@ -303,11 +307,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
latency_req = resume_latency;
/* Special case when user has set very strict latency requirement */
- if (unlikely(latency_req == 0))
+ if (unlikely(latency_req == 0)) {
+ *stop_tick = false;
return 0;
+ }
/* determine the expected residency time, round up */
- data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length());
+ data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));
get_iowait_load(&nr_iowaiters, &cpu_load);
data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);
@@ -346,14 +352,30 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
*/
data->predicted_us = min(data->predicted_us, expected_interval);
- /*
- * Use the performance multiplier and the user-configurable
- * latency_req to determine the maximum exit latency.
- */
- interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
- if (latency_req > interactivity_req)
- latency_req = interactivity_req;
+ if (tick_nohz_tick_stopped()) {
+ /*
+ * If the tick is already stopped, the cost of possible short
+ * idle duration misprediction is much higher, because the CPU
+ * may be stuck in a shallow idle state for a long time as a
+ * result of it. In that case say we might mispredict and try
+ * to force the CPU into a state for which we would have stopped
+ * the tick, unless a timer is going to expire really soon
+ * anyway.
+ */
+ if (data->predicted_us < TICK_USEC)
+ data->predicted_us = min_t(unsigned int, TICK_USEC,
+ ktime_to_us(delta_next));
+ } else {
+ /*
+ * Use the performance multiplier and the user-configurable
+ * latency_req to determine the maximum exit latency.
+ */
+ interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
+ if (latency_req > interactivity_req)
+ latency_req = interactivity_req;
+ }
+ expected_interval = data->predicted_us;
/*
* Find the idle state with the lowest power while satisfying
* our constraints.
@@ -369,15 +391,52 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
idx = i; /* first enabled state */
if (s->target_residency > data->predicted_us)
break;
- if (s->exit_latency > latency_req)
+ if (s->exit_latency > latency_req) {
+ /*
+ * If we break out of the loop for latency reasons, use
+ * the target residency of the selected state as the
+ * expected idle duration so that the tick is retained
+ * as long as that target residency is low enough.
+ */
+ expected_interval = drv->states[idx].target_residency;
break;
-
+ }
idx = i;
}
if (idx == -1)
idx = 0; /* No states enabled. Must use 0. */
+ /*
+ * Don't stop the tick if the selected state is a polling one or if the
+ * expected idle duration is shorter than the tick period length.
+ */
+ if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
+ expected_interval < TICK_USEC) {
+ unsigned int delta_next_us = ktime_to_us(delta_next);
+
+ *stop_tick = false;
+
+ if (!tick_nohz_tick_stopped() && idx > 0 &&
+ drv->states[idx].target_residency > delta_next_us) {
+ /*
+ * The tick is not going to be stopped and the target
+ * residency of the state to be returned is not within
+ * the time until the next timer event including the
+ * tick, so try to correct that.
+ */
+ for (i = idx - 1; i >= 0; i--) {
+ if (drv->states[i].disabled ||
+ dev->states_usage[i].disable)
+ continue;
+
+ idx = i;
+ if (drv->states[i].target_residency <= delta_next_us)
+ break;
+ }
+ }
+ }
+
data->last_state_idx = idx;
return data->last_state_idx;
@@ -397,6 +456,7 @@ static void menu_reflect(struct cpuidle_device *dev, int index)
data->last_state_idx = index;
data->needs_update = 1;
+ data->tick_wakeup = tick_nohz_idle_got_tick();
}
/**
@@ -427,14 +487,27 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* assume the state was never reached and the exit latency is 0.
*/
- /* measured value */
- measured_us = cpuidle_get_last_residency(dev);
-
- /* Deduct exit latency */
- if (measured_us > 2 * target->exit_latency)
- measured_us -= target->exit_latency;
- else
- measured_us /= 2;
+ if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
+ /*
+ * The nohz code said that there wouldn't be any events within
+ * the tick boundary (if the tick was stopped), but the idle
+ * duration predictor had a differing opinion. Since the CPU
+ * was woken up by a tick (that wasn't stopped after all), the
+ * predictor was not quite right, so assume that the CPU could
+ * have been idle long (but not forever) to help the idle
+ * duration predictor do a better job next time.
+ */
+ measured_us = 9 * MAX_INTERESTING / 10;
+ } else {
+ /* measured value */
+ measured_us = cpuidle_get_last_residency(dev);
+
+ /* Deduct exit latency */
+ if (measured_us > 2 * target->exit_latency)
+ measured_us -= target->exit_latency;
+ else
+ measured_us /= 2;
+ }
/* Make sure our coefficients do not exceed unity */
if (measured_us > data->next_timer_us)
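The menu governor changes above boil down to one extra decision: keep the tick running when stopping it cannot pay off. A standalone restatement of that rule, with made-up names and assuming HZ=1000 (so one tick period is 1000 us); this is only a sketch, not the governor's code:

    #include <linux/types.h>

    #define EXAMPLE_TICK_USEC       1000    /* one tick period at HZ=1000 */

    /* The tick is retained for polling states and for idle periods that are
     * expected to end within one tick period anyway. */
    static bool example_keep_tick(unsigned int expected_interval_us,
                                  bool polling_state)
    {
            return polling_state || expected_interval_us < EXAMPLE_TICK_USEC;
    }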
diff --git a/drivers/crypto/qat/qat_common/.gitignore b/drivers/crypto/qat/qat_common/.gitignore
deleted file mode 100644
index ee328374dba8..000000000000
--- a/drivers/crypto/qat/qat_common/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*-asn1.[ch]
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 6feeacbe4d97..54e66adef252 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -187,7 +187,7 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
char *s;
int is_ff = 1, is_00 = 1, i;
- if (dmi_ident[slot] || dm->length <= index + 16)
+ if (dmi_ident[slot] || dm->length < index + 16)
return;
d = (u8 *) dm + index;
@@ -211,9 +211,9 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
* says that this is the defacto standard.
*/
if (dmi_ver >= 0x020600)
- sprintf(s, "%pUL", d);
+ sprintf(s, "%pUl", d);
else
- sprintf(s, "%pUB", d);
+ sprintf(s, "%pUb", d);
dmi_ident[slot] = s;
}
@@ -792,7 +792,15 @@ static bool dmi_matches(const struct dmi_system_id *dmi)
int s = dmi->matches[i].slot;
if (s == DMI_NONE)
break;
- if (dmi_ident[s]) {
+ if (s == DMI_OEM_STRING) {
+ /* DMI_OEM_STRING must be an exact match */
+ const struct dmi_device *valid;
+
+ valid = dmi_find_device(DMI_DEV_TYPE_OEM_STRING,
+ dmi->matches[i].substr, NULL);
+ if (valid)
+ continue;
+ } else if (dmi_ident[s]) {
if (dmi->matches[i].exact_match) {
if (!strcmp(dmi_ident[s],
dmi->matches[i].substr))
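The dmi_scan UUID change above only switches the %pU printk extension from upper-case to lower-case output: the trailing letter selects both byte order ('l' = little-endian field order, 'b' = raw/big-endian order) and case (lower vs. upper). A small kernel-style illustration with a made-up UUID:

    #include <linux/printk.h>
    #include <linux/types.h>

    static const u8 example_uuid[16] = {
            0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
            0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
    };

    static void example_print_uuid(void)
    {
            pr_info("%pUl\n", example_uuid);    /* little-endian order, lower-case hex */
            pr_info("%pUb\n", example_uuid);    /* raw byte order, lower-case hex */
    }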
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index f44a83ab2bf4..c8b605f3dc05 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -890,6 +890,7 @@ struct amdgpu_gfx_funcs {
void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields);
void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst);
void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst);
+ void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue);
};
struct amdgpu_ngg_buf {
@@ -1378,6 +1379,7 @@ enum amd_hw_ip_block_type {
ATHUB_HWIP,
NBIO_HWIP,
MP0_HWIP,
+ MP1_HWIP,
UVD_HWIP,
VCN_HWIP = UVD_HWIP,
VCE_HWIP,
@@ -1387,6 +1389,7 @@ enum amd_hw_ip_block_type {
SMUIO_HWIP,
PWR_HWIP,
NBIF_HWIP,
+ THM_HWIP,
MAX_HWIP
};
@@ -1812,6 +1815,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
+#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q))
/* Common functions */
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 369beb5041a2..448d69fe3756 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -64,16 +64,21 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
#if defined(CONFIG_DEBUG_FS)
-static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
+
+static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
+ char __user *buf, size_t size, loff_t *pos)
{
struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
- bool pm_pg_lock, use_bank;
- unsigned instance_bank, sh_bank, se_bank;
+ bool pm_pg_lock, use_bank, use_ring;
+ unsigned instance_bank, sh_bank, se_bank, me, pipe, queue;
- if (size & 0x3 || *pos & 0x3)
+ pm_pg_lock = use_bank = use_ring = false;
+ instance_bank = sh_bank = se_bank = me = pipe = queue = 0;
+
+ if (size & 0x3 || *pos & 0x3 ||
+ ((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
return -EINVAL;
/* are we reading registers for which a PG lock is necessary? */
@@ -91,8 +96,15 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
if (instance_bank == 0x3FF)
instance_bank = 0xFFFFFFFF;
use_bank = 1;
+ } else if (*pos & (1ULL << 61)) {
+
+ me = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
+
+ use_ring = 1;
} else {
- use_bank = 0;
+ use_bank = use_ring = 0;
}
*pos &= (1UL << 22) - 1;
@@ -104,6 +116,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se_bank,
sh_bank, instance_bank);
+ } else if (use_ring) {
+ mutex_lock(&adev->srbm_mutex);
+ amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue);
}
if (pm_pg_lock)
@@ -115,8 +130,14 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
if (*pos > adev->rmmio_size)
goto end;
- value = RREG32(*pos >> 2);
- r = put_user(value, (uint32_t *)buf);
+ if (read) {
+ value = RREG32(*pos >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ } else {
+ r = get_user(value, (uint32_t *)buf);
+ if (!r)
+ WREG32(*pos >> 2, value);
+ }
if (r) {
result = r;
goto end;
@@ -132,6 +153,9 @@ end:
if (use_bank) {
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
+ } else if (use_ring) {
+ amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
}
if (pm_pg_lock)
@@ -140,78 +164,17 @@ end:
return result;
}
+
+static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
+}
+
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
- bool pm_pg_lock, use_bank;
- unsigned instance_bank, sh_bank, se_bank;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- /* are we reading registers for which a PG lock is necessary? */
- pm_pg_lock = (*pos >> 23) & 1;
-
- if (*pos & (1ULL << 62)) {
- se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
- sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
- instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
-
- if (se_bank == 0x3FF)
- se_bank = 0xFFFFFFFF;
- if (sh_bank == 0x3FF)
- sh_bank = 0xFFFFFFFF;
- if (instance_bank == 0x3FF)
- instance_bank = 0xFFFFFFFF;
- use_bank = 1;
- } else {
- use_bank = 0;
- }
-
- *pos &= (1UL << 22) - 1;
-
- if (use_bank) {
- if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
- (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
- return -EINVAL;
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se_bank,
- sh_bank, instance_bank);
- }
-
- if (pm_pg_lock)
- mutex_lock(&adev->pm.mutex);
-
- while (size) {
- uint32_t value;
-
- if (*pos > adev->rmmio_size)
- return result;
-
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- WREG32(*pos >> 2, value);
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- if (use_bank) {
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- }
-
- if (pm_pg_lock)
- mutex_unlock(&adev->pm.mutex);
-
- return result;
+ return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
}
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
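After the rework above, the amdgpu register debugfs file packs extra selectors into the file offset: bits 0-21 hold the register byte offset, bit 23 requests the PM power-gating lock, bits 24-33/34-43/44-53 carry either se/sh/instance (bit 62 set) or me/pipe/queue (the new bit 61), and bits 61 and 62 are mutually exclusive. A hedged userspace sketch of reading one register of a specific queue through this interface; the debugfs path is an assumption and a 64-bit off_t is assumed:

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    static int example_read_queue_reg(uint32_t reg_byte_offset, uint32_t me,
                                      uint32_t pipe, uint32_t queue, uint32_t *val)
    {
            /* field packing mirrors the decoding in amdgpu_debugfs_process_reg_op() */
            uint64_t pos = (uint64_t)reg_byte_offset |      /* bits 0-21  */
                           ((uint64_t)me << 24) |           /* bits 24-33 */
                           ((uint64_t)pipe << 34) |         /* bits 34-43 */
                           ((uint64_t)queue << 44) |        /* bits 44-53 */
                           (1ULL << 61);                    /* per-queue banking */
            int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
            ssize_t ret;

            if (fd < 0)
                    return -1;

            ret = pread(fd, val, sizeof(*val), (off_t)pos);
            close(fd);

            return ret == (ssize_t)sizeof(*val) ? 0 : -1;
    }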
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 7379aa5a6849..0b19482b36b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -922,6 +922,11 @@ static int __init amdgpu_init(void)
{
int r;
+ if (vgacon_text_force()) {
+ DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
+ return -EINVAL;
+ }
+
r = amdgpu_sync_init();
if (r)
goto error_sync;
@@ -930,10 +935,6 @@ static int __init amdgpu_init(void)
if (r)
goto error_fence;
- if (vgacon_text_force()) {
- DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
- return -EINVAL;
- }
DRM_INFO("amdgpu kernel modesetting enabled.\n");
driver = &kms_driver;
pdriver = &amdgpu_kms_pci_driver;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 455a81e4c246..97449e06a242 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -410,6 +410,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
unsigned num_hw_submission)
{
+ long timeout;
int r;
/* Check that num_hw_submission is a power of two */
@@ -433,11 +434,16 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
/* No need to setup the GPU scheduler for KIQ ring */
if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
+ /* for the non-SR-IOV case, no timeout is enforced on compute rings */
+ if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ && !amdgpu_sriov_vf(ring->adev))
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ else
+ timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
+
r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
num_hw_submission, amdgpu_job_hang_limit,
- (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ?
- MAX_SCHEDULE_TIMEOUT : msecs_to_jiffies(amdgpu_lockup_timeout),
- ring->name);
+ timeout, ring->name);
if (r) {
DRM_ERROR("Failed to create scheduler on ring %s.\n",
ring->name);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 28c2706e48d7..46b9ea4e6103 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -56,11 +56,23 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
alignment = PAGE_SIZE;
}
+retry:
r = amdgpu_bo_create(adev, size, alignment, initial_domain,
flags, type, resv, &bo);
if (r) {
- DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
- size, initial_domain, alignment, r);
+ if (r != -ERESTARTSYS) {
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ goto retry;
+ }
+
+ if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+ goto retry;
+ }
+ DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
+ size, initial_domain, alignment, r);
+ }
return r;
}
*obj = &bo->gem_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index fac4b6067efd..6d08cde8443c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -356,7 +356,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
struct amdgpu_bo *bo;
unsigned long page_align;
size_t acc_size;
- u32 domains;
int r;
page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
@@ -418,23 +417,12 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
#endif
bo->tbo.bdev = &adev->mman.bdev;
- domains = bo->preferred_domains;
-retry:
- amdgpu_ttm_placement_from_domain(bo, domains);
+ amdgpu_ttm_placement_from_domain(bo, domain);
+
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, &ctx, acc_size,
NULL, resv, &amdgpu_ttm_bo_destroy);
-
- if (unlikely(r && r != -ERESTARTSYS)) {
- if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
- bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- goto retry;
- } else if (domains != bo->preferred_domains) {
- domains = bo->allowed_domains;
- goto retry;
- }
- }
- if (unlikely(r))
+ if (unlikely(r != 0))
return r;
if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 19e71f4a8ac2..c7d43e064fc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -505,6 +505,9 @@ failed:
int psp_gpu_reset(struct amdgpu_device *adev)
{
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ return 0;
+
return psp_mode1_reset(&adev->psp);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index f48ea0dad875..a7576255cc30 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -859,7 +859,7 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
- amdgpu_ring_write(ring, 0xfffffff); /* mask */
+ amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 0fff5b8cd318..cd6bf291a853 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -3061,11 +3061,18 @@ static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
+static void gfx_v6_0_select_me_pipe_q(struct amdgpu_device *adev,
+ u32 me, u32 pipe, u32 q)
+{
+ DRM_INFO("Not implemented\n");
+}
+
static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v6_0_select_se_sh,
.read_wave_data = &gfx_v6_0_read_wave_data,
.read_wave_sgprs = &gfx_v6_0_read_wave_sgprs,
+ .select_me_pipe_q = &gfx_v6_0_select_me_pipe_q
};
static int gfx_v6_0_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index e13d9d83767b..42b6144c1fd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4270,11 +4270,18 @@ static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
+static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev,
+ u32 me, u32 pipe, u32 q)
+{
+ cik_srbm_select(adev, me, pipe, q, 0);
+}
+
static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v7_0_select_se_sh,
.read_wave_data = &gfx_v7_0_read_wave_data,
.read_wave_sgprs = &gfx_v7_0_read_wave_sgprs,
+ .select_me_pipe_q = &gfx_v7_0_select_me_pipe_q
};
static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 27943e57681c..b0e591eaa71a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3475,6 +3475,12 @@ static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
WREG32(mmGRBM_GFX_INDEX, data);
}
+static void gfx_v8_0_select_me_pipe_q(struct amdgpu_device *adev,
+ u32 me, u32 pipe, u32 q)
+{
+ vi_srbm_select(adev, me, pipe, q, 0);
+}
+
static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
@@ -5442,6 +5448,7 @@ static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
.select_se_sh = &gfx_v8_0_select_se_sh,
.read_wave_data = &gfx_v8_0_read_wave_data,
.read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
+ .select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
};
static int gfx_v8_0_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 1ae3de1094f9..9d39fd5b1822 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -998,12 +998,19 @@ static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}
+static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
+ u32 me, u32 pipe, u32 q)
+{
+ soc15_grbm_select(adev, me, pipe, q, 0);
+}
+
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v9_0_select_se_sh,
.read_wave_data = &gfx_v9_0_read_wave_data,
.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
+ .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
};
static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -2757,6 +2764,45 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
return 0;
}
+static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int j;
+
+ /* disable the queue if it's active */
+ if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
+
+ WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
+
+ for (j = 0; j < adev->usec_timeout; j++) {
+ if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+
+ if (j == adev->usec_timeout) {
+ DRM_DEBUG("KIQ dequeue request failed.\n");
+
+ /* Manual disable if dequeue request times out */
+ WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
+ }
+
+ WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
+ 0);
+ }
+
+ WREG32_SOC15(GC, 0, mmCP_HQD_IQ_TIMER, 0);
+ WREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL, 0);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 0);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
+
+ return 0;
+}
+
static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -3010,7 +3056,6 @@ static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring
return r;
}
-
static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -3033,6 +3078,20 @@ static int gfx_v9_0_hw_fini(void *handle)
WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
return 0;
}
+
+ /* Use the de-initialization sequence from CAIL when unbinding the device
+ * from the driver, otherwise the KIQ hangs when binding it back
+ */
+ if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
+ adev->gfx.kiq.ring.pipe,
+ adev->gfx.kiq.ring.queue, 0);
+ gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
+ soc15_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ }
+
gfx_v9_0_cp_enable(adev, false);
gfx_v9_0_rlc_stop(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 6452101c7aab..c7190c39c4f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -837,7 +837,7 @@ static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
- amdgpu_ring_write(ring, 0xfffffff); /* mask */
+ amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index ecaef084dab1..be20a387d961 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1105,7 +1105,7 @@ static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
- amdgpu_ring_write(ring, 0xfffffff); /* mask */
+ amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 2a8184082cd1..399f876f9cad 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1121,7 +1121,7 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
- amdgpu_ring_write(ring, 0xfffffff); /* mask */
+ amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}
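The three SDMA hunks above make the same fix: the POLL_REGMEM packet emitted for a pipeline sync compares the fence value in memory against the full 32-bit sequence number, so a 28-bit mask of 0xfffffff silently drops the top nibble and the comparison breaks once sequence numbers grow past bit 27. A tiny user-space illustration of the mismatch, assuming the packet compares the masked memory value against the reference for equality:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t seq = 0x10000001u;	/* reference written into the packet */
		uint32_t mem = 0x10000001u;	/* fence value already in memory */

		/* 28-bit mask: the stored fence no longer compares equal */
		printf("0x%08x vs 0x%08x\n", (unsigned int)(mem & 0x0fffffffu), (unsigned int)seq);
		/* 32-bit mask: the comparison behaves as intended */
		printf("0x%08x vs 0x%08x\n", (unsigned int)(mem & 0xffffffffu), (unsigned int)seq);
		return 0;
	}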
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index b154667a8fd9..a675ec6d2811 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1252,6 +1252,71 @@ static void si_invalidate_hdp(struct amdgpu_device *adev,
}
}
+static int si_get_pcie_lanes(struct amdgpu_device *adev)
+{
+ u32 link_width_cntl;
+
+ if (adev->flags & AMD_IS_APU)
+ return 0;
+
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+
+ switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) {
+ case LC_LINK_WIDTH_X1:
+ return 1;
+ case LC_LINK_WIDTH_X2:
+ return 2;
+ case LC_LINK_WIDTH_X4:
+ return 4;
+ case LC_LINK_WIDTH_X8:
+ return 8;
+ case LC_LINK_WIDTH_X0:
+ case LC_LINK_WIDTH_X16:
+ default:
+ return 16;
+ }
+}
+
+static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes)
+{
+ u32 link_width_cntl, mask;
+
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ switch (lanes) {
+ case 0:
+ mask = LC_LINK_WIDTH_X0;
+ break;
+ case 1:
+ mask = LC_LINK_WIDTH_X1;
+ break;
+ case 2:
+ mask = LC_LINK_WIDTH_X2;
+ break;
+ case 4:
+ mask = LC_LINK_WIDTH_X4;
+ break;
+ case 8:
+ mask = LC_LINK_WIDTH_X8;
+ break;
+ case 16:
+ mask = LC_LINK_WIDTH_X16;
+ break;
+ default:
+ DRM_ERROR("invalid pcie lane request: %d\n", lanes);
+ return;
+ }
+
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~LC_LINK_WIDTH_MASK;
+ link_width_cntl |= mask << LC_LINK_WIDTH_SHIFT;
+ link_width_cntl |= (LC_RECONFIG_NOW |
+ LC_RECONFIG_ARC_MISSING_ESCAPE);
+
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+}
+
static const struct amdgpu_asic_funcs si_asic_funcs =
{
.read_disabled_bios = &si_read_disabled_bios,
@@ -1262,6 +1327,8 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
.get_xclk = &si_get_xclk,
.set_uvd_clocks = &si_set_uvd_clocks,
.set_vce_clocks = NULL,
+ .get_pcie_lanes = &si_get_pcie_lanes,
+ .set_pcie_lanes = &si_set_pcie_lanes,
.get_config_memsize = &si_get_config_memsize,
.flush_hdp = &si_flush_hdp,
.invalidate_hdp = &si_invalidate_hdp,
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 672eaffac0a5..797d505bf9ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -6372,9 +6372,9 @@ static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
{
u32 lane_width;
u32 new_lane_width =
- (amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+ ((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
u32 current_lane_width =
- (amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+ ((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
if (new_lane_width != current_lane_width) {
amdgpu_set_pcie_lanes(adev, new_lane_width);
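The hunk above implies that the PPLIB caps field stores the PCIe link width as (lane count - 1); adding 1 converts it to the actual lane count that amdgpu_set_pcie_lanes() passes down to si_set_pcie_lanes(). A one-line sketch of that conversion, under that assumption:

	/* Assumed encoding: the caps field holds lane_count - 1. */
	static inline unsigned int pplib_width_to_lanes(unsigned int field)
	{
		return field + 1;	/* e.g. 15 -> x16, 7 -> x8, 0 -> x1 */
	}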
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index 4c45db7f1157..45aafca7f315 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -38,6 +38,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));
adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i]));
adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
@@ -49,7 +50,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
adev->reg_offset[PWR_HWIP][i] = (uint32_t *)(&(PWR_BASE.instance[i]));
adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIF_BASE.instance[i]));
-
+ adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e42a28e3adc5..4e2f379ce217 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1403,6 +1403,28 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
return ret;
}
+
+static void register_backlight_device(struct amdgpu_display_manager *dm,
+ struct dc_link *link)
+{
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+ if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
+ link->type != dc_connection_none) {
+ /* Even if registration fails, we should continue with
+ * DM initialization, because not having backlight control
+ * is better than a black screen.
+ */
+ amdgpu_dm_register_backlight_device(dm);
+
+ if (dm->backlight_dev)
+ dm->backlight_link = link;
+ }
+#endif
+}
+
+
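register_backlight_device() above only creates a backlight device for eDP/LVDS links that actually have a sink attached, and it is now called after dc_link_detect() so that sink information is available. The guard condition, pulled out as a small predicate (a sketch; the types and enumerators are the DC ones used in the hunk above):

	/* True when the link is a panel (eDP/LVDS) with a real sink behind it. */
	static bool link_wants_backlight(const struct dc_link *link)
	{
		return (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
		       link->type != dc_connection_none;
	}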
/* In this architecture, the association
* connector -> encoder -> crtc
is not really required. The crtc and connector will hold the
@@ -1456,6 +1478,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
+ struct dc_link *link = NULL;
if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
DRM_ERROR(
@@ -1482,9 +1505,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
- if (dc_link_detect(dc_get_link_at_index(dm->dc, i),
- DETECT_REASON_BOOT))
+ link = dc_get_link_at_index(dm->dc, i);
+
+ if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector);
+ register_backlight_device(dm, link);
+ }
+
+
}
/* Software is initialized. Now we can register interrupt handlers. */
@@ -2685,7 +2713,8 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
- if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
+ if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
+ link->type != dc_connection_none) {
amdgpu_dm_register_backlight_device(dm);
if (dm->backlight_dev) {
@@ -3561,6 +3590,7 @@ create_i2c(struct ddc_service *ddc_service,
return i2c;
}
+
/* Note: this function assumes that dc_link_detect() was called for the
* dc_link which will be represented by this aconnector.
*/
@@ -3630,28 +3660,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
|| connector_type == DRM_MODE_CONNECTOR_eDP)
amdgpu_dm_initialize_dp_connector(dm, aconnector);
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
- defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-
- /* NOTE: this currently will create backlight device even if a panel
- * is not connected to the eDP/LVDS connector.
- *
- * This is less than ideal but we don't have sink information at this
- * stage since detection happens after. We can't do detection earlier
- * since MST detection needs connectors to be created first.
- */
- if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
- /* Event if registration failed, we should continue with
- * DM initialization because not having a backlight control
- * is better then a black screen.
- */
- amdgpu_dm_register_backlight_device(dm);
-
- if (dm->backlight_dev)
- dm->backlight_link = link;
- }
-#endif
-
out_free:
if (res) {
kfree(i2c);
@@ -4840,33 +4848,6 @@ static int dm_update_planes_state(struct dc *dc,
return ret;
}
-static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
- struct drm_crtc *crtc)
-{
- struct drm_plane *plane;
- struct drm_crtc_state *crtc_state;
-
- WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
-
- drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
- struct drm_plane_state *plane_state =
- drm_atomic_get_plane_state(state, plane);
-
- if (IS_ERR(plane_state))
- return -EDEADLK;
-
- crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
-
- if (crtc->primary == plane && crtc_state->active) {
- if (!plane_state->fb)
- return -EINVAL;
- }
- }
- return 0;
-}
-
static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -4890,10 +4871,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- ret = dm_atomic_check_plane_state_fb(state, crtc);
- if (ret)
- goto fail;
-
if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->color_mgmt_changed)
continue;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index eeb04471b2f5..6d1c4981a185 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1997,6 +1997,19 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
return true;
}
+bool dc_link_set_abm_disable(const struct dc_link *link)
+{
+ struct dc *core_dc = link->ctx->dc;
+ struct abm *abm = core_dc->res_pool->abm;
+
+ if ((abm == NULL) || (abm->funcs->set_abm_immediate_disable == NULL))
+ return false;
+
+ abm->funcs->set_abm_immediate_disable(abm);
+
+ return true;
+}
+
bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
{
struct dc *core_dc = link->ctx->dc;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index fb4d9eafdc6e..dc34515ef01f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -132,6 +132,8 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
uint32_t frame_ramp, const struct dc_stream_state *stream);
+bool dc_link_set_abm_disable(const struct dc_link *dc_link);
+
bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 444558ca6533..162f6a6c4208 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -735,6 +735,8 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
if (info_frame->avi.valid) {
const uint32_t *content =
(const uint32_t *) &info_frame->avi.sb[0];
+ /* we need to turn on the clock before programming the AFMT block */
+ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
REG_WRITE(AFMT_AVI_INFO0, content[0]);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index 775d3bf0bd39..9150d2694450 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -102,6 +102,43 @@ static uint32_t align_to_chunks_number_per_line(uint32_t pixels)
return 256 * ((pixels + 255) / 256);
}
+static void reset_lb_on_vblank(struct dc_context *ctx)
+{
+ uint32_t value, frame_count;
+ uint32_t retry = 0;
+ uint32_t status_pos =
+ dm_read_reg(ctx, mmCRTC_STATUS_POSITION);
+
+
+ /* Wait for one frame only if the CRTC is enabled and the counter is moving. */
+ if (status_pos != dm_read_reg(ctx, mmCRTC_STATUS_POSITION)) {
+ /* Resetting LB on VBlank */
+ value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
+ set_reg_field_value(value, 3, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
+ set_reg_field_value(value, 1, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
+ dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
+
+ frame_count = dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT);
+
+
+ for (retry = 100; retry > 0; retry--) {
+ if (frame_count != dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT))
+ break;
+ msleep(1);
+ }
+ if (!retry)
+ dm_error("Frame count did not increase for 100ms.\n");
+
+ /* Resetting LB on VBlank */
+ value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
+ set_reg_field_value(value, 2, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
+ set_reg_field_value(value, 0, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
+ dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
+
+ }
+
+}
+
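reset_lb_on_vblank() above uses a simple "wait for the CRTC frame counter to advance" loop: sample CRTC_STATUS_FRAME_COUNT, then re-read it once per millisecond for up to 100 ms. A minimal sketch of that pattern, with a hypothetical read_frame_count() in place of dm_read_reg():

	#include <linux/delay.h>
	#include <linux/types.h>

	u32 read_frame_count(void);	/* hypothetical stand-in for dm_read_reg() */

	/* Returns true once the frame counter has moved, false after ~timeout_ms. */
	static bool wait_one_frame(unsigned int timeout_ms)
	{
		u32 start = read_frame_count();
		unsigned int retry;

		for (retry = timeout_ms; retry > 0; retry--) {
			if (read_frame_count() != start)
				return true;	/* at least one vblank has passed */
			msleep(1);
		}
		return false;			/* CRTC stalled or disabled */
	}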
static void wait_for_fbc_state_changed(
struct dce110_compressor *cp110,
bool enabled)
@@ -232,19 +269,23 @@ void dce110_compressor_disable_fbc(struct compressor *compressor)
{
struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
- if (compressor->options.bits.FBC_SUPPORT &&
- dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
- uint32_t reg_data;
- /* Turn off compression */
- reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
- set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
- dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
-
- /* Reset enum controller_id to undefined */
- compressor->attached_inst = 0;
- compressor->is_enabled = false;
-
- wait_for_fbc_state_changed(cp110, false);
+ if (compressor->options.bits.FBC_SUPPORT) {
+ if (dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
+ uint32_t reg_data;
+ /* Turn off compression */
+ reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+ set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
+
+ /* Reset enum controller_id to undefined */
+ compressor->attached_inst = 0;
+ compressor->is_enabled = false;
+
+ wait_for_fbc_state_changed(cp110, false);
+ }
+
+ /* Sync line buffer - dce100/110 only */
+ reset_lb_on_vblank(compressor->ctx);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 30dd62f0f5fa..d0575999f172 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -453,10 +453,13 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
} else {
/* 10 segments
- * segment is from 2^-10 to 2^0
+ * segment is from 2^-10 to 2^1
+ * We include an extra segment for range [2^0, 2^1). This is to
+ * ensure that colors with normalized values of 1 don't miss the
+ * LUT.
*/
region_start = -10;
- region_end = 0;
+ region_end = 1;
seg_distr[0] = 4;
seg_distr[1] = 4;
@@ -468,7 +471,7 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
seg_distr[7] = 4;
seg_distr[8] = 4;
seg_distr[9] = 4;
- seg_distr[10] = -1;
+ seg_distr[10] = 0;
seg_distr[11] = -1;
seg_distr[12] = -1;
seg_distr[13] = -1;
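In the regamma setup above, each seg_distr[i] entry appears to be the log2 of the number of hardware points assigned to the segment [2^(region_start+i), 2^(region_start+i+1)), with -1 marking an unused segment. Extending region_end from 0 to 1 and setting seg_distr[10] = 0 therefore adds a single extra point covering [2^0, 2^1), which is what keeps a normalized input of 1.0 on the LUT. A tiny helper expressing that convention (a sketch of how the entries are interpreted, not the DC code itself):

	/* Points contributed by one segment entry under the convention above. */
	static int seg_points(int seg_distr_entry)
	{
		return seg_distr_entry < 0 ? 0 : 1 << seg_distr_entry;
	}
	/* seg_points(4) == 16, seg_points(0) == 1, seg_points(-1) == 0 */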
@@ -1016,8 +1019,10 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
- if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
+ if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
link->dc->hwss.edp_backlight_control(link, false);
+ dc_link_set_abm_disable(link);
+ }
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 3ae3da4e7c14..0f5ad54d3fd3 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1264,9 +1264,9 @@ struct atom_smc_dpm_info_v4_1
uint8_t ledpin2;
uint8_t padding8_4;
- uint8_t gfxclkspreadenabled;
- uint8_t gfxclkspreadpercent;
- uint16_t gfxclkspreadfreq;
+ uint8_t pllgfxclkspreadenabled;
+ uint8_t pllgfxclkspreadpercent;
+ uint16_t pllgfxclkspreadfreq;
uint8_t uclkspreadenabled;
uint8_t uclkspreadpercent;
@@ -1276,7 +1276,11 @@ struct atom_smc_dpm_info_v4_1
uint8_t socclkspreadpercent;
uint16_t socclkspreadfreq;
- uint32_t boardreserved[3];
+ uint8_t acggfxclkspreadenabled;
+ uint8_t acggfxclkspreadpercent;
+ uint16_t acggfxclkspreadfreq;
+
+ uint32_t boardreserved[10];
};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index faf9c880e4f7..210fb3ecd213 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -32,7 +32,7 @@ HARDWARE_MGR = hwmgr.o processpptables.o \
vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \
vega10_thermal.o smu10_hwmgr.o pp_psm.o\
vega12_processpptables.o vega12_hwmgr.o \
- vega12_powertune.o vega12_thermal.o \
+ vega12_thermal.o \
pp_overdriver.o smu_helper.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index 55f9b30513ff..ad42caac033e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -616,9 +616,9 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
param->ledpin1 = info->ledpin1;
param->ledpin2 = info->ledpin2;
- param->gfxclkspreadenabled = info->gfxclkspreadenabled;
- param->gfxclkspreadpercent = info->gfxclkspreadpercent;
- param->gfxclkspreadfreq = info->gfxclkspreadfreq;
+ param->pllgfxclkspreadenabled = info->pllgfxclkspreadenabled;
+ param->pllgfxclkspreadpercent = info->pllgfxclkspreadpercent;
+ param->pllgfxclkspreadfreq = info->pllgfxclkspreadfreq;
param->uclkspreadenabled = info->uclkspreadenabled;
param->uclkspreadpercent = info->uclkspreadpercent;
@@ -628,5 +628,9 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
param->socclkspreadpercent = info->socclkspreadpercent;
param->socclkspreadfreq = info->socclkspreadfreq;
+ param->acggfxclkspreadenabled = info->acggfxclkspreadenabled;
+ param->acggfxclkspreadpercent = info->acggfxclkspreadpercent;
+ param->acggfxclkspreadfreq = info->acggfxclkspreadfreq;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index a957d8f08029..8df1e84f27c9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -192,9 +192,9 @@ struct pp_atomfwctrl_smc_dpm_parameters
uint8_t ledpin1;
uint8_t ledpin2;
- uint8_t gfxclkspreadenabled;
- uint8_t gfxclkspreadpercent;
- uint16_t gfxclkspreadfreq;
+ uint8_t pllgfxclkspreadenabled;
+ uint8_t pllgfxclkspreadpercent;
+ uint16_t pllgfxclkspreadfreq;
uint8_t uclkspreadenabled;
uint8_t uclkspreadpercent;
@@ -203,6 +203,10 @@ struct pp_atomfwctrl_smc_dpm_parameters
uint8_t socclkspreadenabled;
uint8_t socclkspreadpercent;
uint16_t socclkspreadfreq;
+
+ uint8_t acggfxclkspreadenabled;
+ uint8_t acggfxclkspreadpercent;
+ uint16_t acggfxclkspreadfreq;
};
int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 2b0c366d6149..add90675fd2a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3374,7 +3374,8 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr,
"Failed to start pm status log!",
return -1);
- msleep_interruptible(20);
+ /* Sampling period from 50ms to 4sec */
+ msleep_interruptible(200);
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_PmStatusLogSample),
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 75a465f771f0..7b26607c646a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -319,13 +319,13 @@ static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
GetIndexIntoMasterTable(DATA, IntegratedSystemInfo),
&size, &frev, &crev);
- if (crev != 9) {
- pr_err("Unsupported IGP table: %d %d\n", frev, crev);
+ if (info == NULL) {
+ pr_err("Could not retrieve the Integrated System Info Table!\n");
return -EINVAL;
}
- if (info == NULL) {
- pr_err("Could not retrieve the Integrated System Info Table!\n");
+ if (crev != 9) {
+ pr_err("Unsupported IGP table: %d %d\n", frev, crev);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 15ce1e825021..200de46bd06b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -33,7 +33,6 @@
#include "ppatomfwctrl.h"
#include "atomfirmware.h"
#include "cgs_common.h"
-#include "vega12_powertune.h"
#include "vega12_inc.h"
#include "pp_soc15.h"
#include "pppcielanes.h"
@@ -893,6 +892,28 @@ static int vega12_odn_initialize_default_settings(
return 0;
}
+static int vega12_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
+ uint32_t adjust_percent)
+{
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
+}
+
+static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
+{
+ int adjust_percent, result = 0;
+
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
+ adjust_percent =
+ hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
+ hwmgr->platform_descriptor.TDPAdjustment :
+ (-1 * hwmgr->platform_descriptor.TDPAdjustment);
+ result = vega12_set_overdrive_target_percentage(hwmgr,
+ (uint32_t)adjust_percent);
+ }
+ return result;
+}
+
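vega12_power_control_set_level() above turns the platform's TDP adjustment into a signed overdrive percentage: TDPAdjustment holds the magnitude, TDPAdjustmentPolarity selects the sign, and the result is sent to the SMC via PPSMC_MSG_OverDriveSetPercentage. The sign handling, as a self-contained sketch:

	/* Magnitude plus polarity flag -> signed percentage, as in the code above. */
	static int tdp_adjust_percent(int magnitude, int polarity_positive)
	{
		return polarity_positive ? magnitude : -magnitude;
	}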
static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.c
deleted file mode 100644
index 76e60c0181ac..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.c
+++ /dev/null
@@ -1,1364 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "vega12_hwmgr.h"
-#include "vega12_powertune.h"
-#include "vega12_smumgr.h"
-#include "vega12_ppsmc.h"
-#include "vega12_inc.h"
-#include "pp_debug.h"
-#include "pp_soc15.h"
-
-static const struct vega12_didt_config_reg SEDiDtTuningCtrlConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* DIDT_SQ */
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853 },
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153 },
-
- /* DIDT_TD */
- { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde },
- { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde },
-
- /* DIDT_TCP */
- { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde },
- { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde },
-
- /* DIDT_DB */
- { ixDIDT_DB_TUNING_CTRL, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde },
- { ixDIDT_DB_TUNING_CTRL, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEDiDtCtrl3Config_vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /*DIDT_SQ_CTRL3 */
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_SQ_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__THROTTLE_POLICY_MASK, DIDT_SQ_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_SQ_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_SEL_MASK, DIDT_SQ_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_SQ_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
-
- /*DIDT_TCP_CTRL3 */
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_TCP_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__THROTTLE_POLICY_MASK, DIDT_TCP_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_TCP_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_SEL_MASK, DIDT_TCP_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_TCP_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
-
- /*DIDT_TD_CTRL3 */
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_TD_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__THROTTLE_POLICY_MASK, DIDT_TD_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_TD_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_SEL_MASK, DIDT_TD_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_TD_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
-
- /*DIDT_DB_CTRL3 */
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_DB_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__THROTTLE_POLICY_MASK, DIDT_DB_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_DB_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_SEL_MASK, DIDT_DB_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_DB_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEDiDtCtrl2Config_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* DIDT_SQ */
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853 },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000 },
-
- /* DIDT_TD */
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
-
- /* DIDT_TCP */
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
-
- /* DIDT_DB */
- { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__MAX_POWER_DELTA_MASK, DIDT_DB_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde },
- { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
- { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEDiDtCtrl1Config_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* DIDT_SQ */
- { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff },
- /* DIDT_TD */
- { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff },
- /* DIDT_TCP */
- { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff },
- /* DIDT_DB */
- { ixDIDT_DB_CTRL1, DIDT_DB_CTRL1__MIN_POWER_MASK, DIDT_DB_CTRL1__MIN_POWER__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL1, DIDT_DB_CTRL1__MAX_POWER_MASK, DIDT_DB_CTRL1__MAX_POWER__SHIFT, 0xffff },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-
-static const struct vega12_didt_config_reg SEDiDtWeightConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* DIDT_SQ */
- { ixDIDT_SQ_WEIGHT0_3, 0xFFFFFFFF, 0, 0x2B363B1A },
- { ixDIDT_SQ_WEIGHT4_7, 0xFFFFFFFF, 0, 0x270B2432 },
- { ixDIDT_SQ_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000018 },
-
- /* DIDT_TD */
- { ixDIDT_TD_WEIGHT0_3, 0xFFFFFFFF, 0, 0x2B1D220F },
- { ixDIDT_TD_WEIGHT4_7, 0xFFFFFFFF, 0, 0x00007558 },
- { ixDIDT_TD_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000000 },
-
- /* DIDT_TCP */
- { ixDIDT_TCP_WEIGHT0_3, 0xFFFFFFFF, 0, 0x5ACE160D },
- { ixDIDT_TCP_WEIGHT4_7, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TCP_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000000 },
-
- /* DIDT_DB */
- { ixDIDT_DB_WEIGHT0_3, 0xFFFFFFFF, 0, 0x0E152A0F },
- { ixDIDT_DB_WEIGHT4_7, 0xFFFFFFFF, 0, 0x09061813 },
- { ixDIDT_DB_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000013 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEDiDtCtrl0Config_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* DIDT_SQ */
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
- /* DIDT_TD */
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
- /* DIDT_TCP */
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
- /* DIDT_DB */
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__PHASE_OFFSET_MASK, DIDT_DB_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CTRL_RST_MASK, DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
- { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-
-static const struct vega12_didt_config_reg SEDiDtStallCtrlConfig_vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* DIDT_SQ */
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0004 },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0004 },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
-
- /* DIDT_TD */
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001 },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001 },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
-
- /* DIDT_TCP */
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001 },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001 },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
-
- /* DIDT_DB */
- { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0004 },
- { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0004 },
- { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
- { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEDiDtStallPatternConfig_vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* DIDT_SQ_STALL_PATTERN_1_2 */
- { ixDIDT_SQ_STALL_PATTERN_1_2, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
- { ixDIDT_SQ_STALL_PATTERN_1_2, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
-
- /* DIDT_SQ_STALL_PATTERN_3_4 */
- { ixDIDT_SQ_STALL_PATTERN_3_4, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
- { ixDIDT_SQ_STALL_PATTERN_3_4, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
-
- /* DIDT_SQ_STALL_PATTERN_5_6 */
- { ixDIDT_SQ_STALL_PATTERN_5_6, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
- { ixDIDT_SQ_STALL_PATTERN_5_6, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
-
- /* DIDT_SQ_STALL_PATTERN_7 */
- { ixDIDT_SQ_STALL_PATTERN_7, DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
-
- /* DIDT_TCP_STALL_PATTERN_1_2 */
- { ixDIDT_TCP_STALL_PATTERN_1_2, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
- { ixDIDT_TCP_STALL_PATTERN_1_2, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
-
- /* DIDT_TCP_STALL_PATTERN_3_4 */
- { ixDIDT_TCP_STALL_PATTERN_3_4, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
- { ixDIDT_TCP_STALL_PATTERN_3_4, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
-
- /* DIDT_TCP_STALL_PATTERN_5_6 */
- { ixDIDT_TCP_STALL_PATTERN_5_6, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
- { ixDIDT_TCP_STALL_PATTERN_5_6, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
-
- /* DIDT_TCP_STALL_PATTERN_7 */
- { ixDIDT_TCP_STALL_PATTERN_7, DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
-
- /* DIDT_TD_STALL_PATTERN_1_2 */
- { ixDIDT_TD_STALL_PATTERN_1_2, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
- { ixDIDT_TD_STALL_PATTERN_1_2, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
-
- /* DIDT_TD_STALL_PATTERN_3_4 */
- { ixDIDT_TD_STALL_PATTERN_3_4, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
- { ixDIDT_TD_STALL_PATTERN_3_4, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
-
- /* DIDT_TD_STALL_PATTERN_5_6 */
- { ixDIDT_TD_STALL_PATTERN_5_6, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
- { ixDIDT_TD_STALL_PATTERN_5_6, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
-
- /* DIDT_TD_STALL_PATTERN_7 */
- { ixDIDT_TD_STALL_PATTERN_7, DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
-
- /* DIDT_DB_STALL_PATTERN_1_2 */
- { ixDIDT_DB_STALL_PATTERN_1_2, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
- { ixDIDT_DB_STALL_PATTERN_1_2, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
-
- /* DIDT_DB_STALL_PATTERN_3_4 */
- { ixDIDT_DB_STALL_PATTERN_3_4, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
- { ixDIDT_DB_STALL_PATTERN_3_4, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
-
- /* DIDT_DB_STALL_PATTERN_5_6 */
- { ixDIDT_DB_STALL_PATTERN_5_6, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
- { ixDIDT_DB_STALL_PATTERN_5_6, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
-
- /* DIDT_DB_STALL_PATTERN_7 */
- { ixDIDT_DB_STALL_PATTERN_7, DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SELCacConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ */
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060021 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860021 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060021 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860021 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060021 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860021 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060021 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860021 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060021 },
- /* TD */
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0020 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0020 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0020 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0020 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0020 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x028E0020 },
- /* TCP */
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x001c0020 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x009c0020 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x011c0020 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x019c0020 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x021c0020 },
- /* DB */
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00200008 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00820008 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01020008 },
- { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01820008 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-
-static const struct vega12_didt_config_reg SEEDCStallPatternConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ */
- { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00030001 },
- { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x000F0007 },
- { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x003F001F },
- { ixDIDT_SQ_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x0000007F },
- /* TD */
- { ixDIDT_TD_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TD_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TD_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TD_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
- /* TCP */
- { ixDIDT_TCP_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TCP_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TCP_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TCP_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
- /* DB */
- { ixDIDT_DB_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_DB_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_DB_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_DB_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEEDCForceStallPatternConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ */
- { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000015 },
- { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_SQ_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
- /* TD */
- { ixDIDT_TD_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000015 },
- { ixDIDT_TD_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TD_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TD_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEEDCStallDelayConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ */
- { ixDIDT_SQ_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_SQ_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
- /* TD */
- { ixDIDT_TD_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TD_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
- /* TCP */
- { ixDIDT_TCP_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
- { ixDIDT_TCP_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
- /* DB */
- { ixDIDT_DB_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEEDCThresholdConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { ixDIDT_SQ_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0x0000010E },
- { ixDIDT_TD_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
- { ixDIDT_TCP_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
- { ixDIDT_DB_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEEDCCtrlResetConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ */
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEEDCCtrlConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ */
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0004 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0006 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg SEEDCCtrlForceStallConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ */
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0001 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000C },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
-
- /* TD */
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_EN_MASK, DIDT_TD_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK, DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 },
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0001 },
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000E },
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_EN_MASK, DIDT_TD_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg GCDiDtDroopCtrlConfig_vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN__SHIFT, 0x0000 },
- { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD__SHIFT, 0x0000 },
- { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX__SHIFT, 0x0000 },
- { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL_MASK, GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL__SHIFT, 0x0000 },
- { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg GCDiDtCtrl0Config_vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_CTRL_EN_MASK, GC_DIDT_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
- { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__PHASE_OFFSET_MASK, GC_DIDT_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
- { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_SW_RST_MASK, GC_DIDT_CTRL0__DIDT_SW_RST__SHIFT, 0x0000 },
- { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
- { 0xFFFFFFFF } /* End of list */
-};
-
-
-static const struct vega12_didt_config_reg PSMSEEDCStallPatternConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ EDC STALL PATTERNs */
- { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT, 0x0101 },
- { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT, 0x0101 },
- { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT, 0x1111 },
- { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT, 0x1111 },
-
- { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT, 0x1515 },
- { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT, 0x1515 },
-
- { ixDIDT_SQ_EDC_STALL_PATTERN_7, DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK, DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT, 0x5555 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg PSMSEEDCStallDelayConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ EDC STALL DELAYs */
- { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3__SHIFT, 0x0000 },
-
- { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg PSMSEEDCThresholdConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ EDC THRESHOLD */
- { ixDIDT_SQ_EDC_THRESHOLD, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD_MASK, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg PSMSEEDCCtrlResetConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ EDC CTRL */
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg PSMSEEDCCtrlConfig_Vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ EDC CTRL */
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000E },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0001 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0003 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
- { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg PSMGCEDCThresholdConfig_vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { mmGC_EDC_THRESHOLD, GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK, GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg PSMGCEDCDroopCtrlConfig_vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN__SHIFT, 0x0001 },
- { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD__SHIFT, 0x0384 },
- { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX__SHIFT, 0x0001 },
- { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__AVG_PSM_SEL_MASK, GC_EDC_DROOP_CTRL__AVG_PSM_SEL__SHIFT, 0x0001 },
- { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL_MASK, GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL__SHIFT, 0x0001 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg PSMGCEDCCtrlResetConfig_vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_EN_MASK, GC_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_SW_RST_MASK, GC_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_FORCE_STALL_MASK, GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg PSMGCEDCCtrlConfig_vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_EN_MASK, GC_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_SW_RST_MASK, GC_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_FORCE_STALL_MASK, GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
- { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg AvfsPSMResetConfig_vega12[]=
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { 0x16A02, 0xFFFFFFFF, 0x0, 0x0000005F },
- { 0x16A05, 0xFFFFFFFF, 0x0, 0x00000001 },
- { 0x16A06, 0x00000001, 0x0, 0x02000000 },
- { 0x16A01, 0xFFFFFFFF, 0x0, 0x00003027 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static const struct vega12_didt_config_reg AvfsPSMInitConfig_vega12[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { 0x16A05, 0xFFFFFFFF, 0x18, 0x00000001 },
- { 0x16A05, 0xFFFFFFFF, 0x8, 0x00000003 },
- { 0x16A05, 0xFFFFFFFF, 0xa, 0x00000006 },
- { 0x16A05, 0xFFFFFFFF, 0x7, 0x00000000 },
- { 0x16A06, 0xFFFFFFFF, 0x18, 0x00000001 },
- { 0x16A06, 0xFFFFFFFF, 0x19, 0x00000001 },
- { 0x16A01, 0xFFFFFFFF, 0x0, 0x00003027 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
-static int vega12_program_didt_config_registers(struct pp_hwmgr *hwmgr, const struct vega12_didt_config_reg *config_regs, enum vega12_didt_config_reg_type reg_type)
-{
- uint32_t data;
-
- PP_ASSERT_WITH_CODE((config_regs != NULL), "[vega12_program_didt_config_registers] Invalid config register table!", return -EINVAL);
-
- while (config_regs->offset != 0xFFFFFFFF) {
- switch (reg_type) {
- case VEGA12_CONFIGREG_DIDT:
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset);
- data &= ~config_regs->mask;
- data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data);
- break;
- case VEGA12_CONFIGREG_GCCAC:
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset);
- data &= ~config_regs->mask;
- data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data);
- break;
- case VEGA12_CONFIGREG_SECAC:
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_SE_CAC, config_regs->offset);
- data &= ~config_regs->mask;
- data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG_SE_CAC, config_regs->offset, data);
- break;
- default:
- return -EINVAL;
- }
-
- config_regs++;
- }
-
- return 0;
-}
-
-static int vega12_program_gc_didt_config_registers(struct pp_hwmgr *hwmgr, const struct vega12_didt_config_reg *config_regs)
-{
- uint32_t data;
-
- while (config_regs->offset != 0xFFFFFFFF) {
- data = cgs_read_register(hwmgr->device, config_regs->offset);
- data &= ~config_regs->mask;
- data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
- cgs_write_register(hwmgr->device, config_regs->offset, data);
- config_regs++;
- }
-
- return 0;
-}
-
-static void vega12_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
-{
- uint32_t data;
- int result;
- uint32_t en = (enable ? 1 : 0);
- uint32_t didt_block_info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
-
- if (PP_CAP(PHM_PlatformCaps_SQRamping)) {
- CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
- DIDT_SQ_CTRL0, DIDT_CTRL_EN, en);
- didt_block_info &= ~SQ_Enable_MASK;
- didt_block_info |= en << SQ_Enable_SHIFT;
- }
-
- if (PP_CAP(PHM_PlatformCaps_DBRamping)) {
- CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
- DIDT_DB_CTRL0, DIDT_CTRL_EN, en);
- didt_block_info &= ~DB_Enable_MASK;
- didt_block_info |= en << DB_Enable_SHIFT;
- }
-
- if (PP_CAP(PHM_PlatformCaps_TDRamping)) {
- CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
- DIDT_TD_CTRL0, DIDT_CTRL_EN, en);
- didt_block_info &= ~TD_Enable_MASK;
- didt_block_info |= en << TD_Enable_SHIFT;
- }
-
- if (PP_CAP(PHM_PlatformCaps_TCPRamping)) {
- CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
- DIDT_TCP_CTRL0, DIDT_CTRL_EN, en);
- didt_block_info &= ~TCP_Enable_MASK;
- didt_block_info |= en << TCP_Enable_SHIFT;
- }
-
-#if 0
- if (PP_CAP(PHM_PlatformCaps_DBRRamping)) {
- CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
- DIDT_DBR_CTRL0, DIDT_CTRL_EN, en);
- }
-#endif
-
- if (PP_CAP(PHM_PlatformCaps_DiDtEDCEnable)) {
- if (PP_CAP(PHM_PlatformCaps_SQRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL);
- data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en);
- data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data);
- }
-
- if (PP_CAP(PHM_PlatformCaps_DBRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL);
- data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_EN, en);
- data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data);
- }
-
- if (PP_CAP(PHM_PlatformCaps_TDRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL);
- data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en);
- data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data);
- }
-
- if (PP_CAP(PHM_PlatformCaps_TCPRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL);
- data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en);
- data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data);
- }
-
-#if 0
- if (PP_CAP(PHM_PlatformCaps_DBRRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL);
- data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_EN, en);
- data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data);
- }
-#endif
- }
-
- if (enable) {
- /* For Vega12, SMC does not support any mask yet. */
- result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
- PP_ASSERT((0 == result), "[EnableDiDtConfig] SMC Configure Gfx Didt Failed!");
- }
-}
-
-static int vega12_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
-{
- int result;
- uint32_t num_se = 0, count, data;
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t reg;
-
- num_se = adev->gfx.config.max_shader_engines;
-
- cgs_enter_safe_mode(hwmgr->device, true);
-
- cgs_lock_grbm_idx(hwmgr->device, true);
- reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
- for (count = 0; count < num_se; count++) {
- data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
- cgs_write_register(hwmgr->device, reg, data);
-
- result = vega12_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl1Config_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl2Config_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl3Config_vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEDiDtTuningCtrlConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SELCacConfig_Vega12, VEGA12_CONFIGREG_SECAC);
- result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl0Config_Vega12, VEGA12_CONFIGREG_DIDT);
-
- if (0 != result)
- break;
- }
- cgs_write_register(hwmgr->device, reg, 0xE0000000);
- cgs_lock_grbm_idx(hwmgr->device, false);
-
- vega12_didt_set_mask(hwmgr, true);
-
- cgs_enter_safe_mode(hwmgr->device, false);
-
- return 0;
-}
-
-static int vega12_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
-{
- cgs_enter_safe_mode(hwmgr->device, true);
-
- vega12_didt_set_mask(hwmgr, false);
-
- cgs_enter_safe_mode(hwmgr->device, false);
-
- return 0;
-}
-
-static int vega12_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
-{
- int result;
- uint32_t num_se = 0, count, data;
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t reg;
-
- num_se = adev->gfx.config.max_shader_engines;
-
- cgs_enter_safe_mode(hwmgr->device, true);
-
- cgs_lock_grbm_idx(hwmgr->device, true);
- reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
- for (count = 0; count < num_se; count++) {
- data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
- cgs_write_register(hwmgr->device, reg, data);
-
- result = vega12_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl3Config_vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEDiDtCtrl0Config_Vega12, VEGA12_CONFIGREG_DIDT);
- if (0 != result)
- break;
- }
- cgs_write_register(hwmgr->device, reg, 0xE0000000);
- cgs_lock_grbm_idx(hwmgr->device, false);
-
- vega12_didt_set_mask(hwmgr, true);
-
- cgs_enter_safe_mode(hwmgr->device, false);
-
- vega12_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega12);
- if (PP_CAP(PHM_PlatformCaps_GCEDC))
- vega12_program_gc_didt_config_registers(hwmgr, GCDiDtCtrl0Config_vega12);
-
- if (PP_CAP(PHM_PlatformCaps_PSM))
- vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega12);
-
- return 0;
-}
-
-static int vega12_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
-{
- uint32_t data;
-
- cgs_enter_safe_mode(hwmgr->device, true);
-
- vega12_didt_set_mask(hwmgr, false);
-
- cgs_enter_safe_mode(hwmgr->device, false);
-
- if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
- data = 0x00000000;
- cgs_write_register(hwmgr->device, mmGC_DIDT_CTRL0, data);
- }
-
- if (PP_CAP(PHM_PlatformCaps_PSM))
- vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega12);
-
- return 0;
-}
-
-static int vega12_enable_se_edc_config(struct pp_hwmgr *hwmgr)
-{
- int result;
- uint32_t num_se = 0, count, data;
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t reg;
-
- num_se = adev->gfx.config.max_shader_engines;
-
- cgs_enter_safe_mode(hwmgr->device, true);
-
- cgs_lock_grbm_idx(hwmgr->device, true);
- reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
- for (count = 0; count < num_se; count++) {
- data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
- cgs_write_register(hwmgr->device, reg, data);
- result = vega12_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEEDCStallPatternConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEEDCStallDelayConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEEDCThresholdConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEEDCCtrlResetConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEEDCCtrlConfig_Vega12, VEGA12_CONFIGREG_DIDT);
-
- if (0 != result)
- break;
- }
- cgs_write_register(hwmgr->device, reg, 0xE0000000);
- cgs_lock_grbm_idx(hwmgr->device, false);
-
- vega12_didt_set_mask(hwmgr, true);
-
- cgs_enter_safe_mode(hwmgr->device, false);
-
- return 0;
-}
-
-static int vega12_disable_se_edc_config(struct pp_hwmgr *hwmgr)
-{
- cgs_enter_safe_mode(hwmgr->device, true);
-
- vega12_didt_set_mask(hwmgr, false);
-
- cgs_enter_safe_mode(hwmgr->device, false);
-
- return 0;
-}
-
-static int vega12_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
-{
- int result;
- uint32_t num_se = 0;
- uint32_t count, data;
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t reg;
-
- num_se = adev->gfx.config.max_shader_engines;
-
- cgs_enter_safe_mode(hwmgr->device, true);
-
- vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega12);
-
- cgs_lock_grbm_idx(hwmgr->device, true);
- reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
- for (count = 0; count < num_se; count++) {
- data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
- cgs_write_register(hwmgr->device, reg, data);
- result |= vega12_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, PSMSEEDCCtrlConfig_Vega12, VEGA12_CONFIGREG_DIDT);
-
- if (0 != result)
- break;
- }
- cgs_write_register(hwmgr->device, reg, 0xE0000000);
- cgs_lock_grbm_idx(hwmgr->device, false);
-
- vega12_didt_set_mask(hwmgr, true);
-
- cgs_enter_safe_mode(hwmgr->device, false);
-
- vega12_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega12);
-
- if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
- vega12_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlResetConfig_vega12);
- vega12_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlConfig_vega12);
- }
-
- if (PP_CAP(PHM_PlatformCaps_PSM))
- vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega12);
-
- return 0;
-}
-
-static int vega12_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
-{
- uint32_t data;
-
- cgs_enter_safe_mode(hwmgr->device, true);
-
- vega12_didt_set_mask(hwmgr, false);
-
- cgs_enter_safe_mode(hwmgr->device, false);
-
- if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
- data = 0x00000000;
- cgs_write_register(hwmgr->device, mmGC_EDC_CTRL, data);
- }
-
- if (PP_CAP(PHM_PlatformCaps_PSM))
- vega12_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega12);
-
- return 0;
-}
-
-static int vega12_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
-{
- uint32_t reg;
- int result;
-
- cgs_enter_safe_mode(hwmgr->device, true);
-
- cgs_lock_grbm_idx(hwmgr->device, true);
- reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
- cgs_write_register(hwmgr->device, reg, 0xE0000000);
- cgs_lock_grbm_idx(hwmgr->device, false);
-
- result = vega12_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- result |= vega12_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega12, VEGA12_CONFIGREG_DIDT);
- if (0 != result)
- return result;
-
- vega12_didt_set_mask(hwmgr, false);
-
- cgs_enter_safe_mode(hwmgr->device, false);
-
- return 0;
-}
-
-static int vega12_disable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- result = vega12_disable_se_edc_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDtConfig] Pre DIDT disable clock gating failed!", return result);
-
- return 0;
-}
-
-int vega12_enable_didt_config(struct pp_hwmgr *hwmgr)
-{
- int result = 0;
- struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
-
- if (data->smu_features[GNLD_DIDT].supported) {
- if (data->smu_features[GNLD_DIDT].enabled)
- PP_DBG_LOG("[EnableDiDtConfig] Feature DiDt Already enabled!\n");
-
- switch (data->registry_data.didt_mode) {
- case 0:
- result = vega12_enable_cac_driving_se_didt_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 0 Failed!", return result);
- break;
- case 2:
- result = vega12_enable_psm_gc_didt_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 2 Failed!", return result);
- break;
- case 3:
- result = vega12_enable_se_edc_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 3 Failed!", return result);
- break;
- case 1:
- case 4:
- case 5:
- result = vega12_enable_psm_gc_edc_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 5 Failed!", return result);
- break;
- case 6:
- result = vega12_enable_se_edc_force_stall_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 6 Failed!", return result);
- break;
- default:
- result = -EINVAL;
- break;
- }
-
-#if 0
- if (0 == result) {
- result = vega12_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap);
- PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDtConfig] Attempt to Enable DiDt feature Failed!", return result);
- data->smu_features[GNLD_DIDT].enabled = true;
- }
-#endif
- }
-
- return result;
-}
-
-int vega12_disable_didt_config(struct pp_hwmgr *hwmgr)
-{
- int result = 0;
- struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
-
- if (data->smu_features[GNLD_DIDT].supported) {
- if (!data->smu_features[GNLD_DIDT].enabled)
- PP_DBG_LOG("[DisableDiDtConfig] Feature DiDt Already Disabled!\n");
-
- switch (data->registry_data.didt_mode) {
- case 0:
- result = vega12_disable_cac_driving_se_didt_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 0 Failed!", return result);
- break;
- case 2:
- result = vega12_disable_psm_gc_didt_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 2 Failed!", return result);
- break;
- case 3:
- result = vega12_disable_se_edc_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 3 Failed!", return result);
- break;
- case 1:
- case 4:
- case 5:
- result = vega12_disable_psm_gc_edc_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 5 Failed!", return result);
- break;
- case 6:
- result = vega12_disable_se_edc_force_stall_config(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 6 Failed!", return result);
- break;
- default:
- result = -EINVAL;
- break;
- }
-
- if (0 == result) {
- result = vega12_enable_smc_features(hwmgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap);
- PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDtConfig] Attempt to Disable DiDt feature Failed!", return result);
- data->smu_features[GNLD_DIDT].enabled = false;
- }
- }
-
- return result;
-}
-
-int vega12_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
-{
- struct vega12_hwmgr *data =
- (struct vega12_hwmgr *)(hwmgr->backend);
-
- if (data->smu_features[GNLD_PPT].enabled)
- return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetPptLimit, n);
-
- return 0;
-}
-
-int vega12_enable_power_containment(struct pp_hwmgr *hwmgr)
-{
- struct vega12_hwmgr *data =
- (struct vega12_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v2_information *table_info =
- (struct phm_ppt_v2_information *)(hwmgr->pptable);
- struct phm_tdp_table *tdp_table = table_info->tdp_table;
- uint32_t default_pwr_limit =
- (uint32_t)(tdp_table->usMaximumPowerDeliveryLimit);
- int result = 0;
-
- if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
- if (data->smu_features[GNLD_PPT].supported)
- PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
- true, data->smu_features[GNLD_PPT].smu_feature_bitmap),
- "Attempt to enable PPT feature Failed!",
- data->smu_features[GNLD_PPT].supported = false);
-
- if (data->smu_features[GNLD_TDC].supported)
- PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
- true, data->smu_features[GNLD_TDC].smu_feature_bitmap),
- "Attempt to enable PPT feature Failed!",
- data->smu_features[GNLD_TDC].supported = false);
-
- result = vega12_set_power_limit(hwmgr, default_pwr_limit);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to set Default Power Limit in SMC!",
- return result);
- }
-
- return result;
-}
-
-int vega12_disable_power_containment(struct pp_hwmgr *hwmgr)
-{
- struct vega12_hwmgr *data =
- (struct vega12_hwmgr *)(hwmgr->backend);
-
- if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
- if (data->smu_features[GNLD_PPT].supported)
- PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
- false, data->smu_features[GNLD_PPT].smu_feature_bitmap),
- "Attempt to disable PPT feature Failed!",
- data->smu_features[GNLD_PPT].supported = false);
-
- if (data->smu_features[GNLD_TDC].supported)
- PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
- false, data->smu_features[GNLD_TDC].smu_feature_bitmap),
- "Attempt to disable PPT feature Failed!",
- data->smu_features[GNLD_TDC].supported = false);
- }
-
- return 0;
-}
-
-static int vega12_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
- uint32_t adjust_percent)
-{
- return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
-}
-
-int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
-{
- int adjust_percent, result = 0;
-
- if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
- adjust_percent =
- hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
- hwmgr->platform_descriptor.TDPAdjustment :
- (-1 * hwmgr->platform_descriptor.TDPAdjustment);
- result = vega12_set_overdrive_target_percentage(hwmgr,
- (uint32_t)adjust_percent);
- }
- return result;
-}
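
The file removed above is almost entirely table-driven: each vega12_didt_config_reg entry names a register offset, a field mask, a shift and a value, and vega12_program_didt_config_registers() walks the table doing a read-modify-write per entry until it reaches the 0xFFFFFFFF sentinel. A minimal, self-contained sketch of that pattern (user-space C; reg_read()/reg_write() are hypothetical stand-ins for the cgs indirect-register accessors):

/*
 * Table-driven read-modify-write: clear the masked field, then insert
 * (value << shift) & mask, stopping at the 0xFFFFFFFF end-of-list marker.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct didt_config_reg {
        uint32_t offset;
        uint32_t mask;
        uint32_t shift;
        uint32_t value;
};

static uint32_t regs[16];                        /* fake register file */
static uint32_t reg_read(uint32_t off)           { return regs[off]; }
static void reg_write(uint32_t off, uint32_t v)  { regs[off] = v; }

static void program_config_registers(const struct didt_config_reg *cfg)
{
        for (; cfg->offset != 0xFFFFFFFF; cfg++) {
                uint32_t data = reg_read(cfg->offset);

                data &= ~cfg->mask;
                data |= (cfg->value << cfg->shift) & cfg->mask;
                reg_write(cfg->offset, data);
        }
}

int main(void)
{
        static const struct didt_config_reg table[] = {
                { 0x1, 0x00000F00, 8, 0x5 },    /* set bits [11:8] to 5 */
                { 0xFFFFFFFF }                  /* end of list */
        };

        program_config_registers(table);
        printf("reg[1] = 0x%08" PRIX32 "\n", regs[1]);   /* 0x00000500 */
        return 0;
}
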
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.h
deleted file mode 100644
index 78d31a6747dd..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_powertune.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _VEGA12_POWERTUNE_H_
-#define _VEGA12_POWERTUNE_H_
-
-enum vega12_didt_config_reg_type {
- VEGA12_CONFIGREG_DIDT = 0,
- VEGA12_CONFIGREG_GCCAC,
- VEGA12_CONFIGREG_SECAC
-};
-
-/* PowerContainment Features */
-#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
-#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
-#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
-
-struct vega12_didt_config_reg {
- uint32_t offset;
- uint32_t mask;
- uint32_t shift;
- uint32_t value;
-};
-
-int vega12_enable_power_containment(struct pp_hwmgr *hwmgr);
-int vega12_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
-int vega12_power_control_set_level(struct pp_hwmgr *hwmgr);
-int vega12_disable_power_containment(struct pp_hwmgr *hwmgr);
-
-int vega12_enable_didt_config(struct pp_hwmgr *hwmgr);
-int vega12_disable_didt_config(struct pp_hwmgr *hwmgr);
-
-#endif /* _VEGA12_POWERTUNE_H_ */
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
index e7d794980b84..b34113f45904 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -208,9 +208,9 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
ppsmc_pptable->LedPin1 = smc_dpm_table.ledpin1;
ppsmc_pptable->LedPin2 = smc_dpm_table.ledpin2;
- ppsmc_pptable->GfxclkSpreadEnabled = smc_dpm_table.gfxclkspreadenabled;
- ppsmc_pptable->GfxclkSpreadPercent = smc_dpm_table.gfxclkspreadpercent;
- ppsmc_pptable->GfxclkSpreadFreq = smc_dpm_table.gfxclkspreadfreq;
+ ppsmc_pptable->PllGfxclkSpreadEnabled = smc_dpm_table.pllgfxclkspreadenabled;
+ ppsmc_pptable->PllGfxclkSpreadPercent = smc_dpm_table.pllgfxclkspreadpercent;
+ ppsmc_pptable->PllGfxclkSpreadFreq = smc_dpm_table.pllgfxclkspreadfreq;
ppsmc_pptable->UclkSpreadEnabled = 0;
ppsmc_pptable->UclkSpreadPercent = smc_dpm_table.uclkspreadpercent;
@@ -220,6 +220,11 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
ppsmc_pptable->SocclkSpreadPercent = smc_dpm_table.socclkspreadpercent;
ppsmc_pptable->SocclkSpreadFreq = smc_dpm_table.socclkspreadfreq;
+ ppsmc_pptable->AcgGfxclkSpreadEnabled = smc_dpm_table.acggfxclkspreadenabled;
+ ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent;
+ ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq;
+
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
index cd2e503a87da..fb696e3d06cf 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
@@ -127,7 +127,7 @@
#define FEATURE_GFX_EDC_MASK (1 << FEATURE_GFX_EDC_BIT )
#define FEATURE_GFXOFF_MASK (1 << FEATURE_GFXOFF_BIT )
#define FEATURE_CG_MASK (1 << FEATURE_CG_BIT )
-#define FEATURE_ACG_MASK (1 << FEATURE_ACG_BIT )
+#define FEATURE_ACG_MASK (1 << FEATURE_ACG_BIT)
#define FEATURE_SPARE_29_MASK (1 << FEATURE_SPARE_29_BIT )
#define FEATURE_SPARE_30_MASK (1 << FEATURE_SPARE_30_BIT )
#define FEATURE_SPARE_31_MASK (1 << FEATURE_SPARE_31_BIT )
@@ -481,9 +481,9 @@ typedef struct {
uint8_t padding8_4;
- uint8_t GfxclkSpreadEnabled;
- uint8_t GfxclkSpreadPercent;
- uint16_t GfxclkSpreadFreq;
+ uint8_t PllGfxclkSpreadEnabled;
+ uint8_t PllGfxclkSpreadPercent;
+ uint16_t PllGfxclkSpreadFreq;
uint8_t UclkSpreadEnabled;
uint8_t UclkSpreadPercent;
@@ -493,7 +493,11 @@ typedef struct {
uint8_t SocclkSpreadPercent;
uint16_t SocclkSpreadFreq;
- uint32_t BoardReserved[3];
+ uint8_t AcgGfxclkSpreadEnabled;
+ uint8_t AcgGfxclkSpreadPercent;
+ uint16_t AcgGfxclkSpreadFreq;
+
+ uint32_t BoardReserved[10];
uint32_t MmHubPadding[7];
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index 55cd204c1789..651a3f28734b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -30,8 +30,7 @@
#include "ppatomctrl.h"
#include "pp_debug.h"
-#include "smu_ucode_xfer_vi.h"
-#include "smu7_smumgr.h"
+
/* MP Apertures */
#define MP0_Public 0x03800000
@@ -392,8 +391,7 @@ static int vega12_smu_init(struct pp_hwmgr *hwmgr)
struct cgs_firmware_info info = {0};
int ret;
- ret = cgs_get_firmware_info(hwmgr->device,
- smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
+ ret = cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU,
&info);
if (ret || !info.kptr)
return -EINVAL;
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index fb1c27f69e3a..3d662e6805eb 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -142,7 +142,7 @@ static enum dss_clk_source dpi_get_clk_src(struct dpi_data *dpi)
}
struct dpi_clk_calc_ctx {
- struct dss_pll *pll;
+ struct dpi_data *dpi;
unsigned int clkout_idx;
/* inputs */
@@ -191,7 +191,7 @@ static bool dpi_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
ctx->pll_cinfo.mX[ctx->clkout_idx] = m_dispc;
ctx->pll_cinfo.clkout[ctx->clkout_idx] = dispc;
- return dispc_div_calc(ctx->pll->dss->dispc, dispc,
+ return dispc_div_calc(ctx->dpi->dss->dispc, dispc,
ctx->pck_min, ctx->pck_max,
dpi_calc_dispc_cb, ctx);
}
@@ -208,8 +208,8 @@ static bool dpi_calc_pll_cb(int n, int m, unsigned long fint,
ctx->pll_cinfo.fint = fint;
ctx->pll_cinfo.clkdco = clkdco;
- return dss_pll_hsdiv_calc_a(ctx->pll, clkdco,
- ctx->pck_min, dss_get_max_fck_rate(ctx->pll->dss),
+ return dss_pll_hsdiv_calc_a(ctx->dpi->pll, clkdco,
+ ctx->pck_min, dss_get_max_fck_rate(ctx->dpi->dss),
dpi_calc_hsdiv_cb, ctx);
}
@@ -219,7 +219,7 @@ static bool dpi_calc_dss_cb(unsigned long fck, void *data)
ctx->fck = fck;
- return dispc_div_calc(ctx->pll->dss->dispc, fck,
+ return dispc_div_calc(ctx->dpi->dss->dispc, fck,
ctx->pck_min, ctx->pck_max,
dpi_calc_dispc_cb, ctx);
}
@@ -230,7 +230,7 @@ static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck,
unsigned long clkin;
memset(ctx, 0, sizeof(*ctx));
- ctx->pll = dpi->pll;
+ ctx->dpi = dpi;
ctx->clkout_idx = dss_pll_get_clkout_idx_for_src(dpi->clk_src);
clkin = clk_get_rate(dpi->pll->clkin);
@@ -244,7 +244,7 @@ static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck,
pll_min = 0;
pll_max = 0;
- return dss_pll_calc_a(ctx->pll, clkin,
+ return dss_pll_calc_a(ctx->dpi->pll, clkin,
pll_min, pll_max,
dpi_calc_pll_cb, ctx);
} else { /* DSS_PLL_TYPE_B */
@@ -275,6 +275,7 @@ static bool dpi_dss_clk_calc(struct dpi_data *dpi, unsigned long pck,
bool ok;
memset(ctx, 0, sizeof(*ctx));
+ ctx->dpi = dpi;
if (pck > 1000 * i * i * i)
ctx->pck_min = max(pck - 1000 * i * i * i, 0lu);
else
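
The dpi.c hunks above swap the dss_pll pointer in dpi_clk_calc_ctx for a pointer to the owning dpi_data. The reason is visible in the dpi_dss_clk_calc() hunk: that path only memsets the context and never set ctx->pll, so dpi_calc_dss_cb()'s former ctx->pll->dss dereference could only ever hit a NULL pointer, while ctx->dpi always exists and still reaches the PLL via ctx->dpi->pll when one is in use. A stripped-down sketch of the same idea, with made-up types:

/*
 * Keep the top-level object in the callback context rather than an
 * optional sub-object, so shared state (the dss pointer here) stays
 * reachable even on the path where no PLL is used.
 */
#include <stddef.h>
#include <stdio.h>

struct dss { unsigned long max_fck; };
struct dss_pll { struct dss *dss; };
struct dpi_data { struct dss *dss; struct dss_pll *pll; /* may be NULL */ };

struct clk_calc_ctx {
        struct dpi_data *dpi;           /* was: struct dss_pll *pll */
        unsigned long fck;
};

static int calc_cb(struct clk_calc_ctx *ctx, unsigned long fck)
{
        ctx->fck = fck;
        /* safe whether or not ctx->dpi->pll is present */
        return fck <= ctx->dpi->dss->max_fck;
}

int main(void)
{
        struct dss dss = { .max_fck = 200000000UL };
        struct dpi_data dpi = { .dss = &dss, .pll = NULL }; /* fck-only path */
        struct clk_calc_ctx ctx = { .dpi = &dpi };

        printf("ok=%d fck=%lu\n", calc_cb(&ctx, 150000000UL), ctx.fck);
        return 0;
}
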
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e415d2c097a7..48d0e6bd0508 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -140,6 +140,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
* https://bugs.freedesktop.org/show_bug.cgi?id=101491
*/
{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
+ /* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+ * https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52
+ */
+ { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
{ 0, 0, 0, 0, 0 },
};
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 97a0a639dad9..90d5b41007bf 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -5912,9 +5912,9 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
{
u32 lane_width;
u32 new_lane_width =
- (radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+ ((radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
u32 current_lane_width =
- (radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+ ((radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
if (new_lane_width != current_lane_width) {
radeon_set_pcie_lanes(rdev, new_lane_width);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 692b34125866..e0d59e9ff3c6 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -966,8 +966,6 @@ static void i801_enable_host_notify(struct i2c_adapter *adapter)
if (!(priv->features & FEATURE_HOST_NOTIFY))
return;
- priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
-
if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd))
outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd,
SMBSLVCMD(priv));
@@ -1615,6 +1613,10 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
outb_p(inb_p(SMBAUXCTL(priv)) &
~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
+ /* Remember original Host Notify setting */
+ if (priv->features & FEATURE_HOST_NOTIFY)
+ priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
+
/* Default timeout in interrupt mode: 200 ms */
priv->adapter.timeout = HZ / 5;
@@ -1699,6 +1701,15 @@ static void i801_remove(struct pci_dev *dev)
*/
}
+static void i801_shutdown(struct pci_dev *dev)
+{
+ struct i801_priv *priv = pci_get_drvdata(dev);
+
+ /* Restore config registers to avoid hard hang on some systems */
+ i801_disable_host_notify(priv);
+ pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
+}
+
#ifdef CONFIG_PM
static int i801_suspend(struct device *dev)
{
@@ -1728,6 +1739,7 @@ static struct pci_driver i801_driver = {
.id_table = i801_ids,
.probe = i801_probe,
.remove = i801_remove,
+ .shutdown = i801_shutdown,
.driver = {
.pm = &i801_pm_ops,
},
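
The i2c-i801 changes above are two halves of one cleanup: the original SMBSLVCMD value is now captured once at probe time (the enable path can run more than once, e.g. after resume, and re-reading it there could save a value the driver itself had already modified), and a new .shutdown hook disables Host Notify again and writes back the saved SMBHSTCFG byte so the controller is left as it was found, avoiding the hard hang mentioned in the comment. The save-once / restore-on-shutdown shape, reduced to a runnable sketch with a fake register and hypothetical device_*() helpers:

/*
 * Save the original register value exactly once at "probe", never in the
 * (re-runnable) enable path, and restore it from the shutdown hook.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint8_t hw_slvcmd;                        /* fake hardware register */
static uint8_t device_read(void)                 { return hw_slvcmd; }
static void device_write(uint8_t v)              { hw_slvcmd = v; }

static uint8_t original_slvcmd;                  /* saved once at probe */

static void enable_host_notify(void)
{
        /* may run again on resume: must not re-save the register here */
        device_write(original_slvcmd | 0x01);    /* set interrupt-enable bit */
}

static void driver_shutdown(void)
{
        device_write(original_slvcmd);           /* leave hardware as found */
        printf("shutdown: restored 0x%02X\n", original_slvcmd);
}

int main(void)
{
        original_slvcmd = device_read();         /* probe: save once */
        atexit(driver_shutdown);                 /* like pci_driver.shutdown */

        enable_host_notify();                    /* probe */
        enable_host_notify();                    /* resume: still safe */
        printf("running with 0x%02X\n", device_read());
        return 0;
}
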
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 1adeebaa81b0..1ba40bb2b966 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -1845,6 +1845,9 @@ int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
unsigned long orig_jiffies;
int ret, try;
+ if (WARN_ON(!msgs || num < 1))
+ return -EINVAL;
+
if (adap->quirks && i2c_check_for_quirks(adap, msgs, num))
return -EOPNOTSUPP;
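
The __i2c_transfer() hunk above rejects a NULL msgs pointer or a message count below one up front, turning a malformed call into a WARN plus -EINVAL instead of a crash further down the stack. For reference, a well-formed transfer always carries at least one struct i2c_msg; the usual register-read shape (write the register address, then read back) looks like this from user space via the /dev/i2c-N I2C_RDWR ioctl, which funnels into __i2c_transfer() with num = 2 (the 0x50 slave address and register 0x00 are arbitrary examples):

/* Sketch only: open the bus, issue one write message and one read message. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
        int fd = open("/dev/i2c-1", O_RDWR);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        unsigned char reg = 0x00, val = 0;
        struct i2c_msg msgs[2] = {
                { .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
                { .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
        };
        struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

        if (ioctl(fd, I2C_RDWR, &xfer) < 0)
                perror("I2C_RDWR");
        else
                printf("reg 0x%02x = 0x%02x\n", reg, val);

        close(fd);
        return 0;
}
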
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 7659bc48f1db..5d9699fe1b55 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -23,12 +23,13 @@
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
-#include <linux/platform_data/atmel_mxt_ts.h>
#include <linux/input/mt.h>
#include <linux/interrupt.h>
#include <linux/of.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>
#include <asm/unaligned.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -268,12 +269,16 @@ static const struct v4l2_file_operations mxt_video_fops = {
.poll = vb2_fop_poll,
};
+enum mxt_suspend_mode {
+ MXT_SUSPEND_DEEP_SLEEP = 0,
+ MXT_SUSPEND_T9_CTRL = 1,
+};
+
/* Each client has this additional data */
struct mxt_data {
struct i2c_client *client;
struct input_dev *input_dev;
char phys[64]; /* device physical location */
- const struct mxt_platform_data *pdata;
struct mxt_object *object_table;
struct mxt_info info;
unsigned int irq;
@@ -324,6 +329,11 @@ struct mxt_data {
/* for config update handling */
struct completion crc_completion;
+
+ u32 *t19_keymap;
+ unsigned int t19_num_keys;
+
+ enum mxt_suspend_mode suspend_mode;
};
struct mxt_vb2_buffer {
@@ -742,15 +752,14 @@ static int mxt_write_object(struct mxt_data *data,
static void mxt_input_button(struct mxt_data *data, u8 *message)
{
struct input_dev *input = data->input_dev;
- const struct mxt_platform_data *pdata = data->pdata;
int i;
- for (i = 0; i < pdata->t19_num_keys; i++) {
- if (pdata->t19_keymap[i] == KEY_RESERVED)
+ for (i = 0; i < data->t19_num_keys; i++) {
+ if (data->t19_keymap[i] == KEY_RESERVED)
continue;
/* Active-low switch */
- input_report_key(input, pdata->t19_keymap[i],
+ input_report_key(input, data->t19_keymap[i],
!(message[1] & BIT(i)));
}
}
@@ -758,7 +767,7 @@ static void mxt_input_button(struct mxt_data *data, u8 *message)
static void mxt_input_sync(struct mxt_data *data)
{
input_mt_report_pointer_emulation(data->input_dev,
- data->pdata->t19_num_keys);
+ data->t19_num_keys);
input_sync(data->input_dev);
}
@@ -1858,7 +1867,6 @@ static void mxt_input_close(struct input_dev *dev);
static void mxt_set_up_as_touchpad(struct input_dev *input_dev,
struct mxt_data *data)
{
- const struct mxt_platform_data *pdata = data->pdata;
int i;
input_dev->name = "Atmel maXTouch Touchpad";
@@ -1872,15 +1880,14 @@ static void mxt_set_up_as_touchpad(struct input_dev *input_dev,
input_abs_set_res(input_dev, ABS_MT_POSITION_Y,
MXT_PIXELS_PER_MM);
- for (i = 0; i < pdata->t19_num_keys; i++)
- if (pdata->t19_keymap[i] != KEY_RESERVED)
+ for (i = 0; i < data->t19_num_keys; i++)
+ if (data->t19_keymap[i] != KEY_RESERVED)
input_set_capability(input_dev, EV_KEY,
- pdata->t19_keymap[i]);
+ data->t19_keymap[i]);
}
static int mxt_initialize_input_device(struct mxt_data *data)
{
- const struct mxt_platform_data *pdata = data->pdata;
struct device *dev = &data->client->dev;
struct input_dev *input_dev;
int error;
@@ -1946,7 +1953,7 @@ static int mxt_initialize_input_device(struct mxt_data *data)
}
/* If device has buttons we assume it is a touchpad */
- if (pdata->t19_num_keys) {
+ if (data->t19_num_keys) {
mxt_set_up_as_touchpad(input_dev, data);
mt_flags |= INPUT_MT_POINTER;
} else {
@@ -2868,7 +2875,7 @@ static const struct attribute_group mxt_attr_group = {
static void mxt_start(struct mxt_data *data)
{
- switch (data->pdata->suspend_mode) {
+ switch (data->suspend_mode) {
case MXT_SUSPEND_T9_CTRL:
mxt_soft_reset(data);
@@ -2886,12 +2893,11 @@ static void mxt_start(struct mxt_data *data)
mxt_t6_command(data, MXT_COMMAND_CALIBRATE, 1, false);
break;
}
-
}
static void mxt_stop(struct mxt_data *data)
{
- switch (data->pdata->suspend_mode) {
+ switch (data->suspend_mode) {
case MXT_SUSPEND_T9_CTRL:
/* Touch disable */
mxt_write_object(data,
@@ -2921,55 +2927,49 @@ static void mxt_input_close(struct input_dev *dev)
mxt_stop(data);
}
-#ifdef CONFIG_OF
-static const struct mxt_platform_data *mxt_parse_dt(struct i2c_client *client)
+static int mxt_parse_device_properties(struct mxt_data *data)
{
- struct mxt_platform_data *pdata;
- struct device_node *np = client->dev.of_node;
+ static const char keymap_property[] = "linux,gpio-keymap";
+ struct device *dev = &data->client->dev;
u32 *keymap;
- int proplen, ret;
-
- if (!np)
- return ERR_PTR(-ENOENT);
-
- pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
+ int n_keys;
+ int error;
- if (of_find_property(np, "linux,gpio-keymap", &proplen)) {
- pdata->t19_num_keys = proplen / sizeof(u32);
+ if (device_property_present(dev, keymap_property)) {
+ n_keys = device_property_read_u32_array(dev, keymap_property,
+ NULL, 0);
+ if (n_keys <= 0) {
+ error = n_keys < 0 ? n_keys : -EINVAL;
+ dev_err(dev, "invalid/malformed '%s' property: %d\n",
+ keymap_property, error);
+ return error;
+ }
- keymap = devm_kzalloc(&client->dev,
- pdata->t19_num_keys * sizeof(keymap[0]),
- GFP_KERNEL);
+ keymap = devm_kmalloc_array(dev, n_keys, sizeof(*keymap),
+ GFP_KERNEL);
if (!keymap)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- ret = of_property_read_u32_array(np, "linux,gpio-keymap",
- keymap, pdata->t19_num_keys);
- if (ret)
- dev_warn(&client->dev,
- "Couldn't read linux,gpio-keymap: %d\n", ret);
+ error = device_property_read_u32_array(dev, keymap_property,
+ keymap, n_keys);
+ if (error) {
+ dev_err(dev, "failed to parse '%s' property: %d\n",
+ keymap_property, error);
+ return error;
+ }
- pdata->t19_keymap = keymap;
+ data->t19_keymap = keymap;
+ data->t19_num_keys = n_keys;
}
- pdata->suspend_mode = MXT_SUSPEND_DEEP_SLEEP;
-
- return pdata;
-}
-#else
-static const struct mxt_platform_data *mxt_parse_dt(struct i2c_client *client)
-{
- return ERR_PTR(-ENOENT);
+ return 0;
}
-#endif
#ifdef CONFIG_ACPI
struct mxt_acpi_platform_data {
const char *hid;
- struct mxt_platform_data pdata;
+ const struct property_entry *props;
};
static unsigned int samus_touchpad_buttons[] = {
@@ -2979,14 +2979,16 @@ static unsigned int samus_touchpad_buttons[] = {
BTN_LEFT
};
+static const struct property_entry samus_touchpad_props[] = {
+ PROPERTY_ENTRY_U32_ARRAY("linux,gpio-keymap", samus_touchpad_buttons),
+ { }
+};
+
static struct mxt_acpi_platform_data samus_platform_data[] = {
{
/* Touchpad */
.hid = "ATML0000",
- .pdata = {
- .t19_num_keys = ARRAY_SIZE(samus_touchpad_buttons),
- .t19_keymap = samus_touchpad_buttons,
- },
+ .props = samus_touchpad_props,
},
{
/* Touchscreen */
@@ -3004,14 +3006,16 @@ static unsigned int chromebook_tp_buttons[] = {
BTN_LEFT
};
+static const struct property_entry chromebook_tp_props[] = {
+ PROPERTY_ENTRY_U32_ARRAY("linux,gpio-keymap", chromebook_tp_buttons),
+ { }
+};
+
static struct mxt_acpi_platform_data chromebook_platform_data[] = {
{
/* Touchpad */
.hid = "ATML0000",
- .pdata = {
- .t19_num_keys = ARRAY_SIZE(chromebook_tp_buttons),
- .t19_keymap = chromebook_tp_buttons,
- },
+ .props = chromebook_tp_props,
},
{
/* Touchscreen */
@@ -3041,83 +3045,85 @@ static const struct dmi_system_id mxt_dmi_table[] = {
{ }
};
-static const struct mxt_platform_data *mxt_parse_acpi(struct i2c_client *client)
+static int mxt_prepare_acpi_properties(struct i2c_client *client)
{
struct acpi_device *adev;
const struct dmi_system_id *system_id;
const struct mxt_acpi_platform_data *acpi_pdata;
- /*
- * Ignore ACPI devices representing bootloader mode.
- *
- * This is a bit of a hack: Google Chromebook BIOS creates ACPI
- * devices for both application and bootloader modes, but we are
- * interested in application mode only (if device is in bootloader
- * mode we'll end up switching into application anyway). So far
- * application mode addresses were all above 0x40, so we'll use it
- * as a threshold.
- */
- if (client->addr < 0x40)
- return ERR_PTR(-ENXIO);
-
adev = ACPI_COMPANION(&client->dev);
if (!adev)
- return ERR_PTR(-ENOENT);
+ return -ENOENT;
system_id = dmi_first_match(mxt_dmi_table);
if (!system_id)
- return ERR_PTR(-ENOENT);
+ return -ENOENT;
acpi_pdata = system_id->driver_data;
if (!acpi_pdata)
- return ERR_PTR(-ENOENT);
+ return -ENOENT;
while (acpi_pdata->hid) {
- if (!strcmp(acpi_device_hid(adev), acpi_pdata->hid))
- return &acpi_pdata->pdata;
+ if (!strcmp(acpi_device_hid(adev), acpi_pdata->hid)) {
+			/*
+			 * Remove previously installed properties if this
+			 * is not the very first time we are probing the
+			 * device.
+			 */
+ device_remove_properties(&client->dev);
+
+ /*
+ * Now install the platform-specific properties
+ * that are missing from ACPI.
+ */
+ device_add_properties(&client->dev, acpi_pdata->props);
+ break;
+ }
acpi_pdata++;
}
- return ERR_PTR(-ENOENT);
+ return 0;
}
#else
-static const struct mxt_platform_data *mxt_parse_acpi(struct i2c_client *client)
+static int mxt_prepare_acpi_properties(struct i2c_client *client)
{
- return ERR_PTR(-ENOENT);
+ return -ENOENT;
}
#endif
-static const struct mxt_platform_data *
-mxt_get_platform_data(struct i2c_client *client)
-{
- const struct mxt_platform_data *pdata;
-
- pdata = dev_get_platdata(&client->dev);
- if (pdata)
- return pdata;
-
- pdata = mxt_parse_dt(client);
- if (!IS_ERR(pdata) || PTR_ERR(pdata) != -ENOENT)
- return pdata;
-
- pdata = mxt_parse_acpi(client);
- if (!IS_ERR(pdata) || PTR_ERR(pdata) != -ENOENT)
- return pdata;
-
- dev_err(&client->dev, "No platform data specified\n");
- return ERR_PTR(-EINVAL);
-}
+static const struct dmi_system_id chromebook_T9_suspend_dmi[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Peppy"),
+ },
+ },
+ { }
+};
static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct mxt_data *data;
- const struct mxt_platform_data *pdata;
int error;
- pdata = mxt_get_platform_data(client);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
+ /*
+ * Ignore ACPI devices representing bootloader mode.
+ *
+ * This is a bit of a hack: Google Chromebook BIOS creates ACPI
+ * devices for both application and bootloader modes, but we are
+ * interested in application mode only (if device is in bootloader
+ * mode we'll end up switching into application anyway). So far
+ * application mode addresses were all above 0x40, so we'll use it
+ * as a threshold.
+ */
+ if (ACPI_COMPANION(&client->dev) && client->addr < 0x40)
+ return -ENXIO;
data = devm_kzalloc(&client->dev, sizeof(struct mxt_data), GFP_KERNEL);
if (!data)
@@ -3127,7 +3133,6 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id)
client->adapter->nr, client->addr);
data->client = client;
- data->pdata = pdata;
data->irq = client->irq;
i2c_set_clientdata(client, data);
@@ -3135,6 +3140,17 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id)
init_completion(&data->reset_completion);
init_completion(&data->crc_completion);
+ data->suspend_mode = dmi_check_system(chromebook_T9_suspend_dmi) ?
+ MXT_SUSPEND_T9_CTRL : MXT_SUSPEND_DEEP_SLEEP;
+
+ error = mxt_prepare_acpi_properties(client);
+ if (error && error != -ENOENT)
+ return error;
+
+ error = mxt_parse_device_properties(data);
+ if (error)
+ return error;
+
data->reset_gpio = devm_gpiod_get_optional(&client->dev,
"reset", GPIOD_OUT_LOW);
if (IS_ERR(data->reset_gpio)) {
@@ -3144,8 +3160,7 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
error = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, mxt_interrupt,
- pdata->irqflags | IRQF_ONESHOT,
+ NULL, mxt_interrupt, IRQF_ONESHOT,
client->name, data);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
@@ -3265,7 +3280,7 @@ MODULE_DEVICE_TABLE(i2c, mxt_id);
static struct i2c_driver mxt_driver = {
.driver = {
.name = "atmel_mxt_ts",
- .of_match_table = of_match_ptr(mxt_of_match),
+ .of_match_table = mxt_of_match,
.acpi_match_table = ACPI_PTR(mxt_acpi_id),
.pm = &mxt_pm_ops,
},
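
With the platform data path gone, every configuration source funnels through the generic device property API: devicetree supplies "linux,gpio-keymap" directly, while the ACPI/DMI quirks above inject the same property with device_add_properties() before the driver parses it. A hedged sketch of how any other board quirk could do the same (the keymap values and function names are illustrative):

#include <linux/input.h>
#include <linux/property.h>

/* Illustrative keymap for a made-up board: only bit 2 is a button. */
static const u32 example_tp_buttons[] = { KEY_RESERVED, KEY_RESERVED, BTN_LEFT };

static const struct property_entry example_tp_props[] = {
	PROPERTY_ENTRY_U32_ARRAY("linux,gpio-keymap", example_tp_buttons),
	{ }
};

/* Must run before the driver probes; mxt_parse_device_properties()
 * then reads the array exactly as it would from a devicetree node. */
static int example_attach_props(struct device *dev)
{
	return device_add_properties(dev, example_tp_props);
}
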
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 83819d0cbf90..2a99f0f14795 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -81,11 +81,12 @@
*/
#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
-static DEFINE_RWLOCK(amd_iommu_devtable_lock);
+static DEFINE_SPINLOCK(amd_iommu_devtable_lock);
+static DEFINE_SPINLOCK(pd_bitmap_lock);
+static DEFINE_SPINLOCK(iommu_table_lock);
/* List of all available dev_data structures */
-static LIST_HEAD(dev_data_list);
-static DEFINE_SPINLOCK(dev_data_list_lock);
+static LLIST_HEAD(dev_data_list);
LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);
@@ -204,40 +205,33 @@ static struct dma_ops_domain* to_dma_ops_domain(struct protection_domain *domain
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
- unsigned long flags;
dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
if (!dev_data)
return NULL;
dev_data->devid = devid;
-
- spin_lock_irqsave(&dev_data_list_lock, flags);
- list_add_tail(&dev_data->dev_data_list, &dev_data_list);
- spin_unlock_irqrestore(&dev_data_list_lock, flags);
-
ratelimit_default_init(&dev_data->rs);
+ llist_add(&dev_data->dev_data_list, &dev_data_list);
return dev_data;
}
static struct iommu_dev_data *search_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
- unsigned long flags;
+ struct llist_node *node;
- spin_lock_irqsave(&dev_data_list_lock, flags);
- list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
+ if (llist_empty(&dev_data_list))
+ return NULL;
+
+ node = dev_data_list.first;
+ llist_for_each_entry(dev_data, node, dev_data_list) {
if (dev_data->devid == devid)
- goto out_unlock;
+ return dev_data;
}
- dev_data = NULL;
-
-out_unlock:
- spin_unlock_irqrestore(&dev_data_list_lock, flags);
-
- return dev_data;
+ return NULL;
}
static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
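
The global dev_data list becomes a lock-free llist: in this driver entries are only ever added, never removed, so a single atomic llist_add() on insert plus an unlocked walk on lookup replaces the old list_head/spinlock pair. A condensed sketch of that pattern, with hypothetical names:

#include <linux/llist.h>
#include <linux/slab.h>

struct example_entry {
	u16 id;
	struct llist_node node;		/* add-only, never deleted */
};

static LLIST_HEAD(example_list);

static struct example_entry *example_add(u16 id)
{
	struct example_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return NULL;
	e->id = id;
	llist_add(&e->node, &example_list);	/* atomic, no lock */
	return e;
}

static struct example_entry *example_find(u16 id)
{
	struct example_entry *e;

	/* Safe without a lock only because entries are never removed. */
	llist_for_each_entry(e, example_list.first, node)
		if (e->id == id)
			return e;
	return NULL;
}
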
@@ -311,6 +305,8 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
if (dev_data == NULL) {
dev_data = alloc_dev_data(devid);
+ if (!dev_data)
+ return NULL;
if (translation_pre_enabled(iommu))
dev_data->defer_attach = true;
@@ -548,6 +544,7 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
+ struct device *dev = iommu->iommu.dev;
int type, devid, domid, flags;
volatile u32 *event = __evt;
int count = 0;
@@ -574,53 +571,53 @@ retry:
amd_iommu_report_page_fault(devid, domid, address, flags);
return;
} else {
- printk(KERN_ERR "AMD-Vi: Event logged [");
+ dev_err(dev, "AMD-Vi: Event logged [");
}
switch (type) {
case EVENT_TYPE_ILL_DEV:
- printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
- "address=0x%016llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- address, flags);
+ dev_err(dev, "ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
+ "address=0x%016llx flags=0x%04x]\n",
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ address, flags);
dump_dte_entry(devid);
break;
case EVENT_TYPE_DEV_TAB_ERR:
- printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
- "address=0x%016llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- address, flags);
+ dev_err(dev, "DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
+ "address=0x%016llx flags=0x%04x]\n",
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ address, flags);
break;
case EVENT_TYPE_PAGE_TAB_ERR:
- printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
- "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- domid, address, flags);
+ dev_err(dev, "PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
+ "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ domid, address, flags);
break;
case EVENT_TYPE_ILL_CMD:
- printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
+ dev_err(dev, "ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
dump_command(address);
break;
case EVENT_TYPE_CMD_HARD_ERR:
- printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
- "flags=0x%04x]\n", address, flags);
+ dev_err(dev, "COMMAND_HARDWARE_ERROR address=0x%016llx "
+ "flags=0x%04x]\n", address, flags);
break;
case EVENT_TYPE_IOTLB_INV_TO:
- printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
- "address=0x%016llx]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- address);
+ dev_err(dev, "IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
+ "address=0x%016llx]\n",
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ address);
break;
case EVENT_TYPE_INV_DEV_REQ:
- printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
- "address=0x%016llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- address, flags);
+ dev_err(dev, "INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
+ "address=0x%016llx flags=0x%04x]\n",
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ address, flags);
break;
default:
- printk(KERN_ERR "UNKNOWN type=0x%02x event[0]=0x%08x "
- "event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
- type, event[0], event[1], event[2], event[3]);
+		dev_err(dev, "UNKNOWN event[0]=0x%08x event[1]=0x%08x "
+ "event[2]=0x%08x event[3]=0x%08x\n",
+ event[0], event[1], event[2], event[3]);
}
memset(__evt, 0, 4 * sizeof(u32));
@@ -1057,9 +1054,9 @@ static int iommu_queue_command_sync(struct amd_iommu *iommu,
unsigned long flags;
int ret;
- spin_lock_irqsave(&iommu->lock, flags);
+ raw_spin_lock_irqsave(&iommu->lock, flags);
ret = __iommu_queue_command_sync(iommu, cmd, sync);
- spin_unlock_irqrestore(&iommu->lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->lock, flags);
return ret;
}
@@ -1085,7 +1082,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
- spin_lock_irqsave(&iommu->lock, flags);
+ raw_spin_lock_irqsave(&iommu->lock, flags);
iommu->cmd_sem = 0;
@@ -1096,7 +1093,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
ret = wait_on_sem(&iommu->cmd_sem);
out_unlock:
- spin_unlock_irqrestore(&iommu->lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->lock, flags);
return ret;
}
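
iommu->lock is switched to a raw_spinlock_t because the command buffer is driven from interrupt-remapping paths that run with interrupts disabled and must not sleep even on PREEMPT_RT, where ordinary spinlocks become sleeping locks. Only the type and lock/unlock helpers change; the usage pattern stays the same, as in this minimal sketch:

#include <linux/spinlock.h>

struct example_hw {
	raw_spinlock_t lock;	/* protects the MMIO command ring */
	u32 head;
};

static void example_hw_init(struct example_hw *hw)
{
	raw_spin_lock_init(&hw->lock);
}

static void example_hw_post(struct example_hw *hw, u32 cmd)
{
	unsigned long flags;

	/* raw_ variant: remains a spinning lock even on PREEMPT_RT */
	raw_spin_lock_irqsave(&hw->lock, flags);
	hw->head = (hw->head + 1) & 0xff;
	/* ... write cmd into the ring and ring the doorbell here ... */
	(void)cmd;
	raw_spin_unlock_irqrestore(&hw->lock, flags);
}
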
@@ -1606,29 +1603,26 @@ static void del_domain_from_list(struct protection_domain *domain)
static u16 domain_id_alloc(void)
{
- unsigned long flags;
int id;
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ spin_lock(&pd_bitmap_lock);
id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
BUG_ON(id == 0);
if (id > 0 && id < MAX_DOMAIN_ID)
__set_bit(id, amd_iommu_pd_alloc_bitmap);
else
id = 0;
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ spin_unlock(&pd_bitmap_lock);
return id;
}
static void domain_id_free(int id)
{
- unsigned long flags;
-
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ spin_lock(&pd_bitmap_lock);
if (id > 0 && id < MAX_DOMAIN_ID)
__clear_bit(id, amd_iommu_pd_alloc_bitmap);
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ spin_unlock(&pd_bitmap_lock);
}
#define DEFINE_FREE_PT_FN(LVL, FN) \
@@ -2104,9 +2098,9 @@ static int attach_device(struct device *dev,
}
skip_ats_check:
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
ret = __attach_device(dev_data, domain);
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
/*
* We might boot into a crash-kernel here. The crashed kernel
@@ -2156,9 +2150,9 @@ static void detach_device(struct device *dev)
domain = dev_data->domain;
/* lock device table */
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
__detach_device(dev_data);
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
if (!dev_is_pci(dev))
return;
@@ -2795,7 +2789,7 @@ static void cleanup_domain(struct protection_domain *domain)
struct iommu_dev_data *entry;
unsigned long flags;
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
while (!list_empty(&domain->dev_list)) {
entry = list_first_entry(&domain->dev_list,
@@ -2803,7 +2797,7 @@ static void cleanup_domain(struct protection_domain *domain)
__detach_device(entry);
}
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}
static void protection_domain_free(struct protection_domain *domain)
@@ -3025,15 +3019,12 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
size_t unmap_size;
if (domain->mode == PAGE_MODE_NONE)
- return -EINVAL;
+ return 0;
mutex_lock(&domain->api_lock);
unmap_size = iommu_unmap_page(domain, iova, page_size);
mutex_unlock(&domain->api_lock);
- domain_flush_tlb_pde(domain);
- domain_flush_complete(domain);
-
return unmap_size;
}
@@ -3151,6 +3142,19 @@ static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
return dev_data->defer_attach;
}
+static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+ struct protection_domain *dom = to_pdomain(domain);
+
+ domain_flush_tlb_pde(dom);
+ domain_flush_complete(dom);
+}
+
+static void amd_iommu_iotlb_range_add(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+}
+
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
@@ -3169,6 +3173,9 @@ const struct iommu_ops amd_iommu_ops = {
.apply_resv_region = amd_iommu_apply_resv_region,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
+ .flush_iotlb_all = amd_iommu_flush_iotlb_all,
+ .iotlb_range_add = amd_iommu_iotlb_range_add,
+ .iotlb_sync = amd_iommu_flush_iotlb_all,
};
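
amd_iommu_unmap() no longer flushes on every call; invalidation moves into the new flush_iotlb_all/iotlb_sync callbacks so the IOMMU core can batch a series of unmaps and pay for a single flush. From a caller's point of view the contract becomes "unmap fast, then sync before reusing the IOVA"; a hedged sketch of that batching pattern against the generic IOMMU API of this kernel generation (domain, iovas and n are assumed to come from the caller):

#include <linux/iommu.h>

/* Unmap a scatter of page-sized holes, then flush the IOTLB once. */
static void example_unmap_batch(struct iommu_domain *domain,
				const unsigned long *iovas, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		iommu_unmap_fast(domain, iovas[i], PAGE_SIZE);
		iommu_tlb_range_add(domain, iovas[i], PAGE_SIZE);
	}

	/* One flush for the whole batch instead of one per unmap. */
	iommu_tlb_sync(domain);
}
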
/*****************************************************************************
@@ -3570,14 +3577,62 @@ static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
amd_iommu_dev_table[devid].data[2] = dte;
}
-static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
+static struct irq_remap_table *get_irq_table(u16 devid)
+{
+ struct irq_remap_table *table;
+
+ if (WARN_ONCE(!amd_iommu_rlookup_table[devid],
+ "%s: no iommu for devid %x\n", __func__, devid))
+ return NULL;
+
+ table = irq_lookup_table[devid];
+ if (WARN_ONCE(!table, "%s: no table for devid %x\n", __func__, devid))
+ return NULL;
+
+ return table;
+}
+
+static struct irq_remap_table *__alloc_irq_table(void)
+{
+ struct irq_remap_table *table;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return NULL;
+
+ table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
+ if (!table->table) {
+ kfree(table);
+ return NULL;
+ }
+ raw_spin_lock_init(&table->lock);
+
+ if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+ memset(table->table, 0,
+ MAX_IRQS_PER_TABLE * sizeof(u32));
+ else
+ memset(table->table, 0,
+ (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
+ return table;
+}
+
+static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
+ struct irq_remap_table *table)
+{
+ irq_lookup_table[devid] = table;
+ set_dte_irq_entry(devid, table);
+ iommu_flush_dte(iommu, devid);
+}
+
+static struct irq_remap_table *alloc_irq_table(u16 devid)
{
struct irq_remap_table *table = NULL;
+ struct irq_remap_table *new_table = NULL;
struct amd_iommu *iommu;
unsigned long flags;
u16 alias;
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ spin_lock_irqsave(&iommu_table_lock, flags);
iommu = amd_iommu_rlookup_table[devid];
if (!iommu)
@@ -3590,60 +3645,45 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
alias = amd_iommu_alias_table[devid];
table = irq_lookup_table[alias];
if (table) {
- irq_lookup_table[devid] = table;
- set_dte_irq_entry(devid, table);
- iommu_flush_dte(iommu, devid);
- goto out;
+ set_remap_table_entry(iommu, devid, table);
+ goto out_wait;
}
+ spin_unlock_irqrestore(&iommu_table_lock, flags);
/* Nothing there yet, allocate new irq remapping table */
- table = kzalloc(sizeof(*table), GFP_ATOMIC);
- if (!table)
- goto out_unlock;
-
- /* Initialize table spin-lock */
- spin_lock_init(&table->lock);
+ new_table = __alloc_irq_table();
+ if (!new_table)
+ return NULL;
- if (ioapic)
- /* Keep the first 32 indexes free for IOAPIC interrupts */
- table->min_index = 32;
+ spin_lock_irqsave(&iommu_table_lock, flags);
- table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
- if (!table->table) {
- kfree(table);
- table = NULL;
+ table = irq_lookup_table[devid];
+ if (table)
goto out_unlock;
- }
- if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
- memset(table->table, 0,
- MAX_IRQS_PER_TABLE * sizeof(u32));
- else
- memset(table->table, 0,
- (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
-
- if (ioapic) {
- int i;
-
- for (i = 0; i < 32; ++i)
- iommu->irte_ops->set_allocated(table, i);
+ table = irq_lookup_table[alias];
+ if (table) {
+ set_remap_table_entry(iommu, devid, table);
+ goto out_wait;
}
- irq_lookup_table[devid] = table;
- set_dte_irq_entry(devid, table);
- iommu_flush_dte(iommu, devid);
- if (devid != alias) {
- irq_lookup_table[alias] = table;
- set_dte_irq_entry(alias, table);
- iommu_flush_dte(iommu, alias);
- }
+ table = new_table;
+ new_table = NULL;
-out:
+ set_remap_table_entry(iommu, devid, table);
+ if (devid != alias)
+ set_remap_table_entry(iommu, alias, table);
+
+out_wait:
iommu_completion_wait(iommu);
out_unlock:
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ spin_unlock_irqrestore(&iommu_table_lock, flags);
+ if (new_table) {
+ kmem_cache_free(amd_iommu_irq_cache, new_table->table);
+ kfree(new_table);
+ }
return table;
}
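
alloc_irq_table() now allocates with the lock dropped (allowing GFP_KERNEL instead of GFP_ATOMIC), retakes it, re-checks whether another CPU installed a table for the same devid in the meantime, and frees its own allocation if it lost the race. The same pattern distilled, with hypothetical names:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_table { u32 data[32]; };

static DEFINE_SPINLOCK(example_lock);
static struct example_table *example_slots[256];

static struct example_table *example_get_or_alloc(unsigned int idx)
{
	struct example_table *table, *new_table;
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	table = example_slots[idx];
	spin_unlock_irqrestore(&example_lock, flags);
	if (table)
		return table;

	/* Allocate outside the lock so GFP_KERNEL is allowed. */
	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return NULL;

	spin_lock_irqsave(&example_lock, flags);
	table = example_slots[idx];
	if (!table) {
		/* We won the race: publish our allocation. */
		example_slots[idx] = new_table;
		table = new_table;
		new_table = NULL;
	}
	spin_unlock_irqrestore(&example_lock, flags);

	/* No-op if we won; frees the duplicate if another CPU beat us. */
	kfree(new_table);
	return table;
}
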
@@ -3657,14 +3697,14 @@ static int alloc_irq_index(u16 devid, int count, bool align)
if (!iommu)
return -ENODEV;
- table = get_irq_table(devid, false);
+ table = alloc_irq_table(devid);
if (!table)
return -ENODEV;
if (align)
alignment = roundup_pow_of_two(count);
- spin_lock_irqsave(&table->lock, flags);
+ raw_spin_lock_irqsave(&table->lock, flags);
/* Scan table for free entries */
for (index = ALIGN(table->min_index, alignment), c = 0;
@@ -3691,7 +3731,7 @@ static int alloc_irq_index(u16 devid, int count, bool align)
index = -ENOSPC;
out:
- spin_unlock_irqrestore(&table->lock, flags);
+ raw_spin_unlock_irqrestore(&table->lock, flags);
return index;
}
@@ -3708,11 +3748,11 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
if (iommu == NULL)
return -EINVAL;
- table = get_irq_table(devid, false);
+ table = get_irq_table(devid);
if (!table)
return -ENOMEM;
- spin_lock_irqsave(&table->lock, flags);
+ raw_spin_lock_irqsave(&table->lock, flags);
entry = (struct irte_ga *)table->table;
entry = &entry[index];
@@ -3723,7 +3763,7 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
if (data)
data->ref = entry;
- spin_unlock_irqrestore(&table->lock, flags);
+ raw_spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
@@ -3741,13 +3781,13 @@ static int modify_irte(u16 devid, int index, union irte *irte)
if (iommu == NULL)
return -EINVAL;
- table = get_irq_table(devid, false);
+ table = get_irq_table(devid);
if (!table)
return -ENOMEM;
- spin_lock_irqsave(&table->lock, flags);
+ raw_spin_lock_irqsave(&table->lock, flags);
table->table[index] = irte->val;
- spin_unlock_irqrestore(&table->lock, flags);
+ raw_spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
@@ -3765,13 +3805,13 @@ static void free_irte(u16 devid, int index)
if (iommu == NULL)
return;
- table = get_irq_table(devid, false);
+ table = get_irq_table(devid);
if (!table)
return;
- spin_lock_irqsave(&table->lock, flags);
+ raw_spin_lock_irqsave(&table->lock, flags);
iommu->irte_ops->clear_allocated(table, index);
- spin_unlock_irqrestore(&table->lock, flags);
+ raw_spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
@@ -3852,10 +3892,8 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
u8 vector, u32 dest_apicid)
{
struct irte_ga *irte = (struct irte_ga *) entry;
- struct iommu_dev_data *dev_data = search_dev_data(devid);
- if (!dev_data || !dev_data->use_vapic ||
- !irte->lo.fields_remap.guest_mode) {
+ if (!irte->lo.fields_remap.guest_mode) {
irte->hi.fields.vector = vector;
irte->lo.fields_remap.destination = dest_apicid;
modify_irte_ga(devid, index, irte, NULL);
@@ -4061,7 +4099,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
struct amd_ir_data *data = NULL;
struct irq_cfg *cfg;
int i, ret, devid;
- int index = -1;
+ int index;
if (!info)
return -EINVAL;
@@ -4085,10 +4123,26 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
return ret;
if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
- if (get_irq_table(devid, true))
+ struct irq_remap_table *table;
+ struct amd_iommu *iommu;
+
+ table = alloc_irq_table(devid);
+ if (table) {
+ if (!table->min_index) {
+ /*
+ * Keep the first 32 indexes free for IOAPIC
+ * interrupts.
+ */
+ table->min_index = 32;
+ iommu = amd_iommu_rlookup_table[devid];
+ for (i = 0; i < 32; ++i)
+ iommu->irte_ops->set_allocated(table, i);
+ }
+ WARN_ON(table->min_index != 32);
index = info->ioapic_pin;
- else
- ret = -ENOMEM;
+ } else {
+ index = -ENOMEM;
+ }
} else {
bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
@@ -4354,7 +4408,7 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
{
unsigned long flags;
struct amd_iommu *iommu;
- struct irq_remap_table *irt;
+ struct irq_remap_table *table;
struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
int devid = ir_data->irq_2_irte.devid;
struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
@@ -4368,11 +4422,11 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
if (!iommu)
return -ENODEV;
- irt = get_irq_table(devid, false);
- if (!irt)
+ table = get_irq_table(devid);
+ if (!table)
return -ENODEV;
- spin_lock_irqsave(&irt->lock, flags);
+ raw_spin_lock_irqsave(&table->lock, flags);
if (ref->lo.fields_vapic.guest_mode) {
if (cpu >= 0)
@@ -4381,7 +4435,7 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
barrier();
}
- spin_unlock_irqrestore(&irt->lock, flags);
+ raw_spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 4e4a615bf13f..904c575d1677 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1474,7 +1474,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
int ret;
- spin_lock_init(&iommu->lock);
+ raw_spin_lock_init(&iommu->lock);
/* Add IOMMU to internal data structures */
list_add_tail(&iommu->list, &amd_iommu_list);
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 6a877ebd058b..1c9b080276c9 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -408,7 +408,7 @@ extern bool amd_iommu_iotlb_sup;
#define IRQ_TABLE_ALIGNMENT 128
struct irq_remap_table {
- spinlock_t lock;
+ raw_spinlock_t lock;
unsigned min_index;
u32 *table;
};
@@ -490,7 +490,7 @@ struct amd_iommu {
int index;
/* locks the accesses to the hardware */
- spinlock_t lock;
+ raw_spinlock_t lock;
/* Pointer to PCI device of this IOMMU */
struct pci_dev *dev;
@@ -627,7 +627,7 @@ struct devid_map {
*/
struct iommu_dev_data {
struct list_head list; /* For domain->dev_list */
- struct list_head dev_data_list; /* For global dev_data_list */
+ struct llist_node dev_data_list; /* For global dev_data_list */
struct protection_domain *domain; /* Domain the device is bound to */
u16 devid; /* PCI Device ID */
u16 alias; /* Alias Device ID */
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 3f2f1fc68b52..1d647104bccc 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -22,6 +22,8 @@
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
@@ -43,18 +45,15 @@
/* MMIO registers */
#define ARM_SMMU_IDR0 0x0
-#define IDR0_ST_LVL_SHIFT 27
-#define IDR0_ST_LVL_MASK 0x3
-#define IDR0_ST_LVL_2LVL (1 << IDR0_ST_LVL_SHIFT)
-#define IDR0_STALL_MODEL_SHIFT 24
-#define IDR0_STALL_MODEL_MASK 0x3
-#define IDR0_STALL_MODEL_STALL (0 << IDR0_STALL_MODEL_SHIFT)
-#define IDR0_STALL_MODEL_FORCE (2 << IDR0_STALL_MODEL_SHIFT)
-#define IDR0_TTENDIAN_SHIFT 21
-#define IDR0_TTENDIAN_MASK 0x3
-#define IDR0_TTENDIAN_LE (2 << IDR0_TTENDIAN_SHIFT)
-#define IDR0_TTENDIAN_BE (3 << IDR0_TTENDIAN_SHIFT)
-#define IDR0_TTENDIAN_MIXED (0 << IDR0_TTENDIAN_SHIFT)
+#define IDR0_ST_LVL GENMASK(28, 27)
+#define IDR0_ST_LVL_2LVL 1
+#define IDR0_STALL_MODEL GENMASK(25, 24)
+#define IDR0_STALL_MODEL_STALL 0
+#define IDR0_STALL_MODEL_FORCE 2
+#define IDR0_TTENDIAN GENMASK(22, 21)
+#define IDR0_TTENDIAN_MIXED 0
+#define IDR0_TTENDIAN_LE 2
+#define IDR0_TTENDIAN_BE 3
#define IDR0_CD2L (1 << 19)
#define IDR0_VMID16 (1 << 18)
#define IDR0_PRI (1 << 16)
@@ -64,10 +63,9 @@
#define IDR0_ATS (1 << 10)
#define IDR0_HYP (1 << 9)
#define IDR0_COHACC (1 << 4)
-#define IDR0_TTF_SHIFT 2
-#define IDR0_TTF_MASK 0x3
-#define IDR0_TTF_AARCH64 (2 << IDR0_TTF_SHIFT)
-#define IDR0_TTF_AARCH32_64 (3 << IDR0_TTF_SHIFT)
+#define IDR0_TTF GENMASK(3, 2)
+#define IDR0_TTF_AARCH64 2
+#define IDR0_TTF_AARCH32_64 3
#define IDR0_S1P (1 << 1)
#define IDR0_S2P (1 << 0)
@@ -75,31 +73,27 @@
#define IDR1_TABLES_PRESET (1 << 30)
#define IDR1_QUEUES_PRESET (1 << 29)
#define IDR1_REL (1 << 28)
-#define IDR1_CMDQ_SHIFT 21
-#define IDR1_CMDQ_MASK 0x1f
-#define IDR1_EVTQ_SHIFT 16
-#define IDR1_EVTQ_MASK 0x1f
-#define IDR1_PRIQ_SHIFT 11
-#define IDR1_PRIQ_MASK 0x1f
-#define IDR1_SSID_SHIFT 6
-#define IDR1_SSID_MASK 0x1f
-#define IDR1_SID_SHIFT 0
-#define IDR1_SID_MASK 0x3f
+#define IDR1_CMDQS GENMASK(25, 21)
+#define IDR1_EVTQS GENMASK(20, 16)
+#define IDR1_PRIQS GENMASK(15, 11)
+#define IDR1_SSIDSIZE GENMASK(10, 6)
+#define IDR1_SIDSIZE GENMASK(5, 0)
#define ARM_SMMU_IDR5 0x14
-#define IDR5_STALL_MAX_SHIFT 16
-#define IDR5_STALL_MAX_MASK 0xffff
+#define IDR5_STALL_MAX GENMASK(31, 16)
#define IDR5_GRAN64K (1 << 6)
#define IDR5_GRAN16K (1 << 5)
#define IDR5_GRAN4K (1 << 4)
-#define IDR5_OAS_SHIFT 0
-#define IDR5_OAS_MASK 0x7
-#define IDR5_OAS_32_BIT (0 << IDR5_OAS_SHIFT)
-#define IDR5_OAS_36_BIT (1 << IDR5_OAS_SHIFT)
-#define IDR5_OAS_40_BIT (2 << IDR5_OAS_SHIFT)
-#define IDR5_OAS_42_BIT (3 << IDR5_OAS_SHIFT)
-#define IDR5_OAS_44_BIT (4 << IDR5_OAS_SHIFT)
-#define IDR5_OAS_48_BIT (5 << IDR5_OAS_SHIFT)
+#define IDR5_OAS GENMASK(2, 0)
+#define IDR5_OAS_32_BIT 0
+#define IDR5_OAS_36_BIT 1
+#define IDR5_OAS_40_BIT 2
+#define IDR5_OAS_42_BIT 3
+#define IDR5_OAS_44_BIT 4
+#define IDR5_OAS_48_BIT 5
+#define IDR5_OAS_52_BIT 6
+#define IDR5_VAX GENMASK(11, 10)
+#define IDR5_VAX_52_BIT 1
#define ARM_SMMU_CR0 0x20
#define CR0_CMDQEN (1 << 3)
@@ -110,18 +104,16 @@
#define ARM_SMMU_CR0ACK 0x24
#define ARM_SMMU_CR1 0x28
-#define CR1_SH_NSH 0
-#define CR1_SH_OSH 2
-#define CR1_SH_ISH 3
+#define CR1_TABLE_SH GENMASK(11, 10)
+#define CR1_TABLE_OC GENMASK(9, 8)
+#define CR1_TABLE_IC GENMASK(7, 6)
+#define CR1_QUEUE_SH GENMASK(5, 4)
+#define CR1_QUEUE_OC GENMASK(3, 2)
+#define CR1_QUEUE_IC GENMASK(1, 0)
+/* CR1 cacheability fields don't quite follow the usual TCR-style encoding */
#define CR1_CACHE_NC 0
#define CR1_CACHE_WB 1
#define CR1_CACHE_WT 2
-#define CR1_TABLE_SH_SHIFT 10
-#define CR1_TABLE_OC_SHIFT 8
-#define CR1_TABLE_IC_SHIFT 6
-#define CR1_QUEUE_SH_SHIFT 4
-#define CR1_QUEUE_OC_SHIFT 2
-#define CR1_QUEUE_IC_SHIFT 0
#define ARM_SMMU_CR2 0x2c
#define CR2_PTM (1 << 2)
@@ -129,8 +121,8 @@
#define CR2_E2H (1 << 0)
#define ARM_SMMU_GBPA 0x44
-#define GBPA_ABORT (1 << 20)
#define GBPA_UPDATE (1 << 31)
+#define GBPA_ABORT (1 << 20)
#define ARM_SMMU_IRQ_CTRL 0x50
#define IRQ_CTRL_EVTQ_IRQEN (1 << 2)
@@ -158,18 +150,14 @@
#define ARM_SMMU_STRTAB_BASE 0x80
#define STRTAB_BASE_RA (1UL << 62)
-#define STRTAB_BASE_ADDR_SHIFT 6
-#define STRTAB_BASE_ADDR_MASK 0x3ffffffffffUL
+#define STRTAB_BASE_ADDR_MASK GENMASK_ULL(51, 6)
#define ARM_SMMU_STRTAB_BASE_CFG 0x88
-#define STRTAB_BASE_CFG_LOG2SIZE_SHIFT 0
-#define STRTAB_BASE_CFG_LOG2SIZE_MASK 0x3f
-#define STRTAB_BASE_CFG_SPLIT_SHIFT 6
-#define STRTAB_BASE_CFG_SPLIT_MASK 0x1f
-#define STRTAB_BASE_CFG_FMT_SHIFT 16
-#define STRTAB_BASE_CFG_FMT_MASK 0x3
-#define STRTAB_BASE_CFG_FMT_LINEAR (0 << STRTAB_BASE_CFG_FMT_SHIFT)
-#define STRTAB_BASE_CFG_FMT_2LVL (1 << STRTAB_BASE_CFG_FMT_SHIFT)
+#define STRTAB_BASE_CFG_FMT GENMASK(17, 16)
+#define STRTAB_BASE_CFG_FMT_LINEAR 0
+#define STRTAB_BASE_CFG_FMT_2LVL 1
+#define STRTAB_BASE_CFG_SPLIT GENMASK(10, 6)
+#define STRTAB_BASE_CFG_LOG2SIZE GENMASK(5, 0)
#define ARM_SMMU_CMDQ_BASE 0x90
#define ARM_SMMU_CMDQ_PROD 0x98
@@ -190,14 +178,16 @@
#define ARM_SMMU_PRIQ_IRQ_CFG2 0xdc
/* Common MSI config fields */
-#define MSI_CFG0_ADDR_SHIFT 2
-#define MSI_CFG0_ADDR_MASK 0x3fffffffffffUL
-#define MSI_CFG2_SH_SHIFT 4
-#define MSI_CFG2_SH_NSH (0UL << MSI_CFG2_SH_SHIFT)
-#define MSI_CFG2_SH_OSH (2UL << MSI_CFG2_SH_SHIFT)
-#define MSI_CFG2_SH_ISH (3UL << MSI_CFG2_SH_SHIFT)
-#define MSI_CFG2_MEMATTR_SHIFT 0
-#define MSI_CFG2_MEMATTR_DEVICE_nGnRE (0x1 << MSI_CFG2_MEMATTR_SHIFT)
+#define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2)
+#define MSI_CFG2_SH GENMASK(5, 4)
+#define MSI_CFG2_MEMATTR GENMASK(3, 0)
+
+/* Common memory attribute values */
+#define ARM_SMMU_SH_NSH 0
+#define ARM_SMMU_SH_OSH 2
+#define ARM_SMMU_SH_ISH 3
+#define ARM_SMMU_MEMATTR_DEVICE_nGnRE 0x1
+#define ARM_SMMU_MEMATTR_OIWB 0xf
#define Q_IDX(q, p) ((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p) ((p) & (1 << (q)->max_n_shift))
@@ -207,10 +197,8 @@
Q_IDX(q, p) * (q)->ent_dwords)
#define Q_BASE_RWA (1UL << 62)
-#define Q_BASE_ADDR_SHIFT 5
-#define Q_BASE_ADDR_MASK 0xfffffffffffUL
-#define Q_BASE_LOG2SIZE_SHIFT 0
-#define Q_BASE_LOG2SIZE_MASK 0x1fUL
+#define Q_BASE_ADDR_MASK GENMASK_ULL(51, 5)
+#define Q_BASE_LOG2SIZE GENMASK(4, 0)
/*
* Stream table.
@@ -223,187 +211,143 @@
#define STRTAB_SPLIT 8
#define STRTAB_L1_DESC_DWORDS 1
-#define STRTAB_L1_DESC_SPAN_SHIFT 0
-#define STRTAB_L1_DESC_SPAN_MASK 0x1fUL
-#define STRTAB_L1_DESC_L2PTR_SHIFT 6
-#define STRTAB_L1_DESC_L2PTR_MASK 0x3ffffffffffUL
+#define STRTAB_L1_DESC_SPAN GENMASK_ULL(4, 0)
+#define STRTAB_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 6)
#define STRTAB_STE_DWORDS 8
#define STRTAB_STE_0_V (1UL << 0)
-#define STRTAB_STE_0_CFG_SHIFT 1
-#define STRTAB_STE_0_CFG_MASK 0x7UL
-#define STRTAB_STE_0_CFG_ABORT (0UL << STRTAB_STE_0_CFG_SHIFT)
-#define STRTAB_STE_0_CFG_BYPASS (4UL << STRTAB_STE_0_CFG_SHIFT)
-#define STRTAB_STE_0_CFG_S1_TRANS (5UL << STRTAB_STE_0_CFG_SHIFT)
-#define STRTAB_STE_0_CFG_S2_TRANS (6UL << STRTAB_STE_0_CFG_SHIFT)
-
-#define STRTAB_STE_0_S1FMT_SHIFT 4
-#define STRTAB_STE_0_S1FMT_LINEAR (0UL << STRTAB_STE_0_S1FMT_SHIFT)
-#define STRTAB_STE_0_S1CTXPTR_SHIFT 6
-#define STRTAB_STE_0_S1CTXPTR_MASK 0x3ffffffffffUL
-#define STRTAB_STE_0_S1CDMAX_SHIFT 59
-#define STRTAB_STE_0_S1CDMAX_MASK 0x1fUL
+#define STRTAB_STE_0_CFG GENMASK_ULL(3, 1)
+#define STRTAB_STE_0_CFG_ABORT 0
+#define STRTAB_STE_0_CFG_BYPASS 4
+#define STRTAB_STE_0_CFG_S1_TRANS 5
+#define STRTAB_STE_0_CFG_S2_TRANS 6
+
+#define STRTAB_STE_0_S1FMT GENMASK_ULL(5, 4)
+#define STRTAB_STE_0_S1FMT_LINEAR 0
+#define STRTAB_STE_0_S1CTXPTR_MASK GENMASK_ULL(51, 6)
+#define STRTAB_STE_0_S1CDMAX GENMASK_ULL(63, 59)
#define STRTAB_STE_1_S1C_CACHE_NC 0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA 1UL
#define STRTAB_STE_1_S1C_CACHE_WT 2UL
#define STRTAB_STE_1_S1C_CACHE_WB 3UL
-#define STRTAB_STE_1_S1C_SH_NSH 0UL
-#define STRTAB_STE_1_S1C_SH_OSH 2UL
-#define STRTAB_STE_1_S1C_SH_ISH 3UL
-#define STRTAB_STE_1_S1CIR_SHIFT 2
-#define STRTAB_STE_1_S1COR_SHIFT 4
-#define STRTAB_STE_1_S1CSH_SHIFT 6
+#define STRTAB_STE_1_S1CIR GENMASK_ULL(3, 2)
+#define STRTAB_STE_1_S1COR GENMASK_ULL(5, 4)
+#define STRTAB_STE_1_S1CSH GENMASK_ULL(7, 6)
#define STRTAB_STE_1_S1STALLD (1UL << 27)
+#define STRTAB_STE_1_EATS GENMASK_ULL(29, 28)
#define STRTAB_STE_1_EATS_ABT 0UL
#define STRTAB_STE_1_EATS_TRANS 1UL
#define STRTAB_STE_1_EATS_S1CHK 2UL
-#define STRTAB_STE_1_EATS_SHIFT 28
+#define STRTAB_STE_1_STRW GENMASK_ULL(31, 30)
#define STRTAB_STE_1_STRW_NSEL1 0UL
#define STRTAB_STE_1_STRW_EL2 2UL
-#define STRTAB_STE_1_STRW_SHIFT 30
+#define STRTAB_STE_1_SHCFG GENMASK_ULL(45, 44)
#define STRTAB_STE_1_SHCFG_INCOMING 1UL
-#define STRTAB_STE_1_SHCFG_SHIFT 44
-#define STRTAB_STE_2_S2VMID_SHIFT 0
-#define STRTAB_STE_2_S2VMID_MASK 0xffffUL
-#define STRTAB_STE_2_VTCR_SHIFT 32
-#define STRTAB_STE_2_VTCR_MASK 0x7ffffUL
+#define STRTAB_STE_2_S2VMID GENMASK_ULL(15, 0)
+#define STRTAB_STE_2_VTCR GENMASK_ULL(50, 32)
#define STRTAB_STE_2_S2AA64 (1UL << 51)
#define STRTAB_STE_2_S2ENDI (1UL << 52)
#define STRTAB_STE_2_S2PTW (1UL << 54)
#define STRTAB_STE_2_S2R (1UL << 58)
-#define STRTAB_STE_3_S2TTB_SHIFT 4
-#define STRTAB_STE_3_S2TTB_MASK 0xfffffffffffUL
+#define STRTAB_STE_3_S2TTB_MASK GENMASK_ULL(51, 4)
/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS 8
-#define CTXDESC_CD_0_TCR_T0SZ_SHIFT 0
-#define ARM64_TCR_T0SZ_SHIFT 0
-#define ARM64_TCR_T0SZ_MASK 0x1fUL
-#define CTXDESC_CD_0_TCR_TG0_SHIFT 6
-#define ARM64_TCR_TG0_SHIFT 14
-#define ARM64_TCR_TG0_MASK 0x3UL
-#define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8
-#define ARM64_TCR_IRGN0_SHIFT 8
-#define ARM64_TCR_IRGN0_MASK 0x3UL
-#define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10
-#define ARM64_TCR_ORGN0_SHIFT 10
-#define ARM64_TCR_ORGN0_MASK 0x3UL
-#define CTXDESC_CD_0_TCR_SH0_SHIFT 12
-#define ARM64_TCR_SH0_SHIFT 12
-#define ARM64_TCR_SH0_MASK 0x3UL
-#define CTXDESC_CD_0_TCR_EPD0_SHIFT 14
-#define ARM64_TCR_EPD0_SHIFT 7
-#define ARM64_TCR_EPD0_MASK 0x1UL
-#define CTXDESC_CD_0_TCR_EPD1_SHIFT 30
-#define ARM64_TCR_EPD1_SHIFT 23
-#define ARM64_TCR_EPD1_MASK 0x1UL
+#define CTXDESC_CD_0_TCR_T0SZ GENMASK_ULL(5, 0)
+#define ARM64_TCR_T0SZ GENMASK_ULL(5, 0)
+#define CTXDESC_CD_0_TCR_TG0 GENMASK_ULL(7, 6)
+#define ARM64_TCR_TG0 GENMASK_ULL(15, 14)
+#define CTXDESC_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8)
+#define ARM64_TCR_IRGN0 GENMASK_ULL(9, 8)
+#define CTXDESC_CD_0_TCR_ORGN0 GENMASK_ULL(11, 10)
+#define ARM64_TCR_ORGN0 GENMASK_ULL(11, 10)
+#define CTXDESC_CD_0_TCR_SH0 GENMASK_ULL(13, 12)
+#define ARM64_TCR_SH0 GENMASK_ULL(13, 12)
+#define CTXDESC_CD_0_TCR_EPD0 (1ULL << 14)
+#define ARM64_TCR_EPD0 (1ULL << 7)
+#define CTXDESC_CD_0_TCR_EPD1 (1ULL << 30)
+#define ARM64_TCR_EPD1 (1ULL << 23)
#define CTXDESC_CD_0_ENDI (1UL << 15)
#define CTXDESC_CD_0_V (1UL << 31)
-#define CTXDESC_CD_0_TCR_IPS_SHIFT 32
-#define ARM64_TCR_IPS_SHIFT 32
-#define ARM64_TCR_IPS_MASK 0x7UL
-#define CTXDESC_CD_0_TCR_TBI0_SHIFT 38
-#define ARM64_TCR_TBI0_SHIFT 37
-#define ARM64_TCR_TBI0_MASK 0x1UL
+#define CTXDESC_CD_0_TCR_IPS GENMASK_ULL(34, 32)
+#define ARM64_TCR_IPS GENMASK_ULL(34, 32)
+#define CTXDESC_CD_0_TCR_TBI0 (1ULL << 38)
+#define ARM64_TCR_TBI0 (1ULL << 37)
#define CTXDESC_CD_0_AA64 (1UL << 41)
#define CTXDESC_CD_0_S (1UL << 44)
#define CTXDESC_CD_0_R (1UL << 45)
#define CTXDESC_CD_0_A (1UL << 46)
-#define CTXDESC_CD_0_ASET_SHIFT 47
-#define CTXDESC_CD_0_ASET_SHARED (0UL << CTXDESC_CD_0_ASET_SHIFT)
-#define CTXDESC_CD_0_ASET_PRIVATE (1UL << CTXDESC_CD_0_ASET_SHIFT)
-#define CTXDESC_CD_0_ASID_SHIFT 48
-#define CTXDESC_CD_0_ASID_MASK 0xffffUL
-
-#define CTXDESC_CD_1_TTB0_SHIFT 4
-#define CTXDESC_CD_1_TTB0_MASK 0xfffffffffffUL
+#define CTXDESC_CD_0_ASET (1UL << 47)
+#define CTXDESC_CD_0_ASID GENMASK_ULL(63, 48)
-#define CTXDESC_CD_3_MAIR_SHIFT 0
+#define CTXDESC_CD_1_TTB0_MASK GENMASK_ULL(51, 4)
/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
-#define ARM_SMMU_TCR2CD(tcr, fld) \
- (((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK) \
- << CTXDESC_CD_0_TCR_##fld##_SHIFT)
+#define ARM_SMMU_TCR2CD(tcr, fld) FIELD_PREP(CTXDESC_CD_0_TCR_##fld, \
+ FIELD_GET(ARM64_TCR_##fld, tcr))
/* Command queue */
#define CMDQ_ENT_DWORDS 2
#define CMDQ_MAX_SZ_SHIFT 8
-#define CMDQ_ERR_SHIFT 24
-#define CMDQ_ERR_MASK 0x7f
+#define CMDQ_CONS_ERR GENMASK(30, 24)
#define CMDQ_ERR_CERROR_NONE_IDX 0
#define CMDQ_ERR_CERROR_ILL_IDX 1
#define CMDQ_ERR_CERROR_ABT_IDX 2
-#define CMDQ_0_OP_SHIFT 0
-#define CMDQ_0_OP_MASK 0xffUL
+#define CMDQ_0_OP GENMASK_ULL(7, 0)
#define CMDQ_0_SSV (1UL << 11)
-#define CMDQ_PREFETCH_0_SID_SHIFT 32
-#define CMDQ_PREFETCH_1_SIZE_SHIFT 0
-#define CMDQ_PREFETCH_1_ADDR_MASK ~0xfffUL
+#define CMDQ_PREFETCH_0_SID GENMASK_ULL(63, 32)
+#define CMDQ_PREFETCH_1_SIZE GENMASK_ULL(4, 0)
+#define CMDQ_PREFETCH_1_ADDR_MASK GENMASK_ULL(63, 12)
-#define CMDQ_CFGI_0_SID_SHIFT 32
-#define CMDQ_CFGI_0_SID_MASK 0xffffffffUL
+#define CMDQ_CFGI_0_SID GENMASK_ULL(63, 32)
#define CMDQ_CFGI_1_LEAF (1UL << 0)
-#define CMDQ_CFGI_1_RANGE_SHIFT 0
-#define CMDQ_CFGI_1_RANGE_MASK 0x1fUL
+#define CMDQ_CFGI_1_RANGE GENMASK_ULL(4, 0)
-#define CMDQ_TLBI_0_VMID_SHIFT 32
-#define CMDQ_TLBI_0_ASID_SHIFT 48
+#define CMDQ_TLBI_0_VMID GENMASK_ULL(47, 32)
+#define CMDQ_TLBI_0_ASID GENMASK_ULL(63, 48)
#define CMDQ_TLBI_1_LEAF (1UL << 0)
-#define CMDQ_TLBI_1_VA_MASK ~0xfffUL
-#define CMDQ_TLBI_1_IPA_MASK 0xfffffffff000UL
-
-#define CMDQ_PRI_0_SSID_SHIFT 12
-#define CMDQ_PRI_0_SSID_MASK 0xfffffUL
-#define CMDQ_PRI_0_SID_SHIFT 32
-#define CMDQ_PRI_0_SID_MASK 0xffffffffUL
-#define CMDQ_PRI_1_GRPID_SHIFT 0
-#define CMDQ_PRI_1_GRPID_MASK 0x1ffUL
-#define CMDQ_PRI_1_RESP_SHIFT 12
-#define CMDQ_PRI_1_RESP_DENY (0UL << CMDQ_PRI_1_RESP_SHIFT)
-#define CMDQ_PRI_1_RESP_FAIL (1UL << CMDQ_PRI_1_RESP_SHIFT)
-#define CMDQ_PRI_1_RESP_SUCC (2UL << CMDQ_PRI_1_RESP_SHIFT)
-
-#define CMDQ_SYNC_0_CS_SHIFT 12
-#define CMDQ_SYNC_0_CS_NONE (0UL << CMDQ_SYNC_0_CS_SHIFT)
-#define CMDQ_SYNC_0_CS_IRQ (1UL << CMDQ_SYNC_0_CS_SHIFT)
-#define CMDQ_SYNC_0_CS_SEV (2UL << CMDQ_SYNC_0_CS_SHIFT)
-#define CMDQ_SYNC_0_MSH_SHIFT 22
-#define CMDQ_SYNC_0_MSH_ISH (3UL << CMDQ_SYNC_0_MSH_SHIFT)
-#define CMDQ_SYNC_0_MSIATTR_SHIFT 24
-#define CMDQ_SYNC_0_MSIATTR_OIWB (0xfUL << CMDQ_SYNC_0_MSIATTR_SHIFT)
-#define CMDQ_SYNC_0_MSIDATA_SHIFT 32
-#define CMDQ_SYNC_0_MSIDATA_MASK 0xffffffffUL
-#define CMDQ_SYNC_1_MSIADDR_SHIFT 0
-#define CMDQ_SYNC_1_MSIADDR_MASK 0xffffffffffffcUL
+#define CMDQ_TLBI_1_VA_MASK GENMASK_ULL(63, 12)
+#define CMDQ_TLBI_1_IPA_MASK GENMASK_ULL(51, 12)
+
+#define CMDQ_PRI_0_SSID GENMASK_ULL(31, 12)
+#define CMDQ_PRI_0_SID GENMASK_ULL(63, 32)
+#define CMDQ_PRI_1_GRPID GENMASK_ULL(8, 0)
+#define CMDQ_PRI_1_RESP GENMASK_ULL(13, 12)
+
+#define CMDQ_SYNC_0_CS GENMASK_ULL(13, 12)
+#define CMDQ_SYNC_0_CS_NONE 0
+#define CMDQ_SYNC_0_CS_IRQ 1
+#define CMDQ_SYNC_0_CS_SEV 2
+#define CMDQ_SYNC_0_MSH GENMASK_ULL(23, 22)
+#define CMDQ_SYNC_0_MSIATTR GENMASK_ULL(27, 24)
+#define CMDQ_SYNC_0_MSIDATA GENMASK_ULL(63, 32)
+#define CMDQ_SYNC_1_MSIADDR_MASK GENMASK_ULL(51, 2)
/* Event queue */
#define EVTQ_ENT_DWORDS 4
#define EVTQ_MAX_SZ_SHIFT 7
-#define EVTQ_0_ID_SHIFT 0
-#define EVTQ_0_ID_MASK 0xffUL
+#define EVTQ_0_ID GENMASK_ULL(7, 0)
/* PRI queue */
#define PRIQ_ENT_DWORDS 2
#define PRIQ_MAX_SZ_SHIFT 8
-#define PRIQ_0_SID_SHIFT 0
-#define PRIQ_0_SID_MASK 0xffffffffUL
-#define PRIQ_0_SSID_SHIFT 32
-#define PRIQ_0_SSID_MASK 0xfffffUL
+#define PRIQ_0_SID GENMASK_ULL(31, 0)
+#define PRIQ_0_SSID GENMASK_ULL(51, 32)
#define PRIQ_0_PERM_PRIV (1UL << 58)
#define PRIQ_0_PERM_EXEC (1UL << 59)
#define PRIQ_0_PERM_READ (1UL << 60)
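
All of the open-coded *_SHIFT/*_MASK pairs above collapse into GENMASK()/GENMASK_ULL() field definitions manipulated through FIELD_PREP() and FIELD_GET() from <linux/bitfield.h>, which also gives a build-time check when a constant value does not fit its field. A small sketch of the idiom on a made-up register layout:

#include <linux/bitfield.h>
#include <linux/bitops.h>

/* Made-up 32-bit register: bits [7:0] = size, bits [22:21] = endianness. */
#define EXAMPLE_SIZE		GENMASK(7, 0)
#define EXAMPLE_TTENDIAN	GENMASK(22, 21)
#define EXAMPLE_TTENDIAN_LE	2

static u32 example_pack(u8 size)
{
	/* FIELD_PREP() shifts the value into position within the mask. */
	return FIELD_PREP(EXAMPLE_SIZE, size) |
	       FIELD_PREP(EXAMPLE_TTENDIAN, EXAMPLE_TTENDIAN_LE);
}

static u8 example_unpack_size(u32 reg)
{
	/* FIELD_GET() masks the field out and shifts it back down. */
	return FIELD_GET(EXAMPLE_SIZE, reg);
}
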
@@ -411,10 +355,8 @@
#define PRIQ_0_PRG_LAST (1UL << 62)
#define PRIQ_0_SSID_V (1UL << 63)
-#define PRIQ_1_PRG_IDX_SHIFT 0
-#define PRIQ_1_PRG_IDX_MASK 0x1ffUL
-#define PRIQ_1_ADDR_SHIFT 12
-#define PRIQ_1_ADDR_MASK 0xfffffffffffffUL
+#define PRIQ_1_PRG_IDX GENMASK_ULL(8, 0)
+#define PRIQ_1_ADDR_MASK GENMASK_ULL(63, 12)
/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US 100
@@ -430,9 +372,9 @@ MODULE_PARM_DESC(disable_bypass,
"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
enum pri_resp {
- PRI_RESP_DENY,
- PRI_RESP_FAIL,
- PRI_RESP_SUCC,
+ PRI_RESP_DENY = 0,
+ PRI_RESP_FAIL = 1,
+ PRI_RESP_SUCC = 2,
};
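
The PRI response codes get explicit values because they are now written straight into the command word with FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp) instead of going through per-value shifted #defines, so the enum encoding must match the hardware encoding exactly. The same construction in miniature (field position and names are illustrative):

#include <linux/bitfield.h>
#include <linux/bitops.h>

#define EXAMPLE_RESP	GENMASK_ULL(13, 12)

enum example_resp {
	EXAMPLE_RESP_DENY = 0,	/* values mirror the hardware encoding */
	EXAMPLE_RESP_FAIL = 1,
	EXAMPLE_RESP_SUCC = 2,
};

static u64 example_build_resp(enum example_resp resp)
{
	/* The enum value is the field value: no translation needed. */
	return FIELD_PREP(EXAMPLE_RESP, resp);
}
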
enum arm_smmu_msi_index {
@@ -611,6 +553,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_STALLS (1 << 11)
#define ARM_SMMU_FEAT_HYP (1 << 12)
#define ARM_SMMU_FEAT_STALL_FORCE (1 << 13)
+#define ARM_SMMU_FEAT_VAX (1 << 14)
u32 features;
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
@@ -836,67 +779,64 @@ static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
- cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT;
+ cmd[0] |= FIELD_PREP(CMDQ_0_OP, ent->opcode);
switch (ent->opcode) {
case CMDQ_OP_TLBI_EL2_ALL:
case CMDQ_OP_TLBI_NSNH_ALL:
break;
case CMDQ_OP_PREFETCH_CFG:
- cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT;
- cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT;
+ cmd[0] |= FIELD_PREP(CMDQ_PREFETCH_0_SID, ent->prefetch.sid);
+ cmd[1] |= FIELD_PREP(CMDQ_PREFETCH_1_SIZE, ent->prefetch.size);
cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
break;
case CMDQ_OP_CFGI_STE:
- cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT;
- cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0;
+ cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid);
+ cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf);
break;
case CMDQ_OP_CFGI_ALL:
/* Cover the entire SID range */
- cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT;
+ cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
break;
case CMDQ_OP_TLBI_NH_VA:
- cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
- cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
+ cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
break;
case CMDQ_OP_TLBI_S2_IPA:
- cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
- cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
+ cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
break;
case CMDQ_OP_TLBI_NH_ASID:
- cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
/* Fallthrough */
case CMDQ_OP_TLBI_S12_VMALL:
- cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
break;
case CMDQ_OP_PRI_RESP:
- cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0;
- cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT;
- cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT;
- cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT;
+ cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid);
+ cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SSID, ent->pri.ssid);
+ cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SID, ent->pri.sid);
+ cmd[1] |= FIELD_PREP(CMDQ_PRI_1_GRPID, ent->pri.grpid);
switch (ent->pri.resp) {
case PRI_RESP_DENY:
- cmd[1] |= CMDQ_PRI_1_RESP_DENY;
- break;
case PRI_RESP_FAIL:
- cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
- break;
case PRI_RESP_SUCC:
- cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
break;
default:
return -EINVAL;
}
+ cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp);
break;
case CMDQ_OP_CMD_SYNC:
if (ent->sync.msiaddr)
- cmd[0] |= CMDQ_SYNC_0_CS_IRQ;
+ cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_IRQ);
else
- cmd[0] |= CMDQ_SYNC_0_CS_SEV;
- cmd[0] |= CMDQ_SYNC_0_MSH_ISH | CMDQ_SYNC_0_MSIATTR_OIWB;
- cmd[0] |= (u64)ent->sync.msidata << CMDQ_SYNC_0_MSIDATA_SHIFT;
+ cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
+ cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH);
+ cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);
+ cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA, ent->sync.msidata);
cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
break;
default:
@@ -918,7 +858,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
u64 cmd[CMDQ_ENT_DWORDS];
struct arm_smmu_queue *q = &smmu->cmdq.q;
u32 cons = readl_relaxed(q->cons_reg);
- u32 idx = cons >> CMDQ_ERR_SHIFT & CMDQ_ERR_MASK;
+ u32 idx = FIELD_GET(CMDQ_CONS_ERR, cons);
struct arm_smmu_cmdq_ent cmd_sync = {
.opcode = CMDQ_OP_CMD_SYNC,
};
@@ -1083,8 +1023,8 @@ static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
#ifdef __BIG_ENDIAN
CTXDESC_CD_0_ENDI |
#endif
- CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
- CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
+ CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET |
+ CTXDESC_CD_0_AA64 | FIELD_PREP(CTXDESC_CD_0_ASID, cfg->cd.asid) |
CTXDESC_CD_0_V;
/* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
@@ -1093,10 +1033,10 @@ static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
cfg->cdptr[0] = cpu_to_le64(val);
- val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
+ val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK;
cfg->cdptr[1] = cpu_to_le64(val);
- cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair << CTXDESC_CD_3_MAIR_SHIFT);
+ cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair);
}
/* Stream table manipulation functions */
@@ -1105,10 +1045,8 @@ arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
{
u64 val = 0;
- val |= (desc->span & STRTAB_L1_DESC_SPAN_MASK)
- << STRTAB_L1_DESC_SPAN_SHIFT;
- val |= desc->l2ptr_dma &
- STRTAB_L1_DESC_L2PTR_MASK << STRTAB_L1_DESC_L2PTR_SHIFT;
+ val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span);
+ val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK;
*dst = cpu_to_le64(val);
}
@@ -1156,10 +1094,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
};
if (val & STRTAB_STE_0_V) {
- u64 cfg;
-
- cfg = val & STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT;
- switch (cfg) {
+ switch (FIELD_GET(STRTAB_STE_0_CFG, val)) {
case STRTAB_STE_0_CFG_BYPASS:
break;
case STRTAB_STE_0_CFG_S1_TRANS:
@@ -1180,13 +1115,13 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
/* Bypass/fault */
if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) {
if (!ste->assigned && disable_bypass)
- val |= STRTAB_STE_0_CFG_ABORT;
+ val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
else
- val |= STRTAB_STE_0_CFG_BYPASS;
+ val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
dst[0] = cpu_to_le64(val);
- dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
- << STRTAB_STE_1_SHCFG_SHIFT);
+ dst[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+ STRTAB_STE_1_SHCFG_INCOMING));
dst[2] = 0; /* Nuke the VMID */
/*
* The SMMU can perform negative caching, so we must sync
@@ -1200,41 +1135,36 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
if (ste->s1_cfg) {
BUG_ON(ste_live);
dst[1] = cpu_to_le64(
- STRTAB_STE_1_S1C_CACHE_WBRA
- << STRTAB_STE_1_S1CIR_SHIFT |
- STRTAB_STE_1_S1C_CACHE_WBRA
- << STRTAB_STE_1_S1COR_SHIFT |
- STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
+ FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
+ FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
+ FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
#ifdef CONFIG_PCI_ATS
- STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
+ FIELD_PREP(STRTAB_STE_1_EATS, STRTAB_STE_1_EATS_TRANS) |
#endif
- STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);
+ FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1));
if (smmu->features & ARM_SMMU_FEAT_STALLS &&
!(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
- val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
- << STRTAB_STE_0_S1CTXPTR_SHIFT) |
- STRTAB_STE_0_CFG_S1_TRANS;
+ val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
+ FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS);
}
if (ste->s2_cfg) {
BUG_ON(ste_live);
dst[2] = cpu_to_le64(
- ste->s2_cfg->vmid << STRTAB_STE_2_S2VMID_SHIFT |
- (ste->s2_cfg->vtcr & STRTAB_STE_2_VTCR_MASK)
- << STRTAB_STE_2_VTCR_SHIFT |
+ FIELD_PREP(STRTAB_STE_2_S2VMID, ste->s2_cfg->vmid) |
+ FIELD_PREP(STRTAB_STE_2_VTCR, ste->s2_cfg->vtcr) |
#ifdef __BIG_ENDIAN
STRTAB_STE_2_S2ENDI |
#endif
STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
STRTAB_STE_2_S2R);
- dst[3] = cpu_to_le64(ste->s2_cfg->vttbr &
- STRTAB_STE_3_S2TTB_MASK << STRTAB_STE_3_S2TTB_SHIFT);
+ dst[3] = cpu_to_le64(ste->s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
- val |= STRTAB_STE_0_CFG_S2_TRANS;
+ val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
}
arm_smmu_sync_ste_for_sid(smmu, sid);
@@ -1295,7 +1225,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
do {
while (!queue_remove_raw(q, evt)) {
- u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;
+ u8 id = FIELD_GET(EVTQ_0_ID, evt[0]);
dev_info(smmu->dev, "event 0x%02x received:\n", id);
for (i = 0; i < ARRAY_SIZE(evt); ++i)
@@ -1323,11 +1253,11 @@ static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
u16 grpid;
bool ssv, last;
- sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
- ssv = evt[0] & PRIQ_0_SSID_V;
- ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
- last = evt[0] & PRIQ_0_PRG_LAST;
- grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;
+ sid = FIELD_GET(PRIQ_0_SID, evt[0]);
+ ssv = FIELD_GET(PRIQ_0_SSID_V, evt[0]);
+ ssid = ssv ? FIELD_GET(PRIQ_0_SSID, evt[0]) : 0;
+ last = FIELD_GET(PRIQ_0_PRG_LAST, evt[0]);
+ grpid = FIELD_GET(PRIQ_1_PRG_IDX, evt[1]);
dev_info(smmu->dev, "unexpected PRI request received:\n");
dev_info(smmu->dev,
@@ -1337,7 +1267,7 @@ static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
evt[0] & PRIQ_0_PERM_READ ? "R" : "",
evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
- evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);
+ evt[1] & PRIQ_1_ADDR_MASK);
if (last) {
struct arm_smmu_cmdq_ent cmd = {
@@ -1664,7 +1594,8 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
switch (smmu_domain->stage) {
case ARM_SMMU_DOMAIN_S1:
- ias = VA_BITS;
+ ias = (smmu->features & ARM_SMMU_FEAT_VAX) ? 52 : 48;
+ ias = min_t(unsigned long, ias, VA_BITS);
oas = smmu->ias;
fmt = ARM_64_LPAE_S1;
finalise_stage_fn = arm_smmu_domain_finalise_s1;
@@ -1696,7 +1627,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
return -ENOMEM;
domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
- domain->geometry.aperture_end = (1UL << ias) - 1;
+ domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
domain->geometry.force_aperture = true;
ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
@@ -2102,9 +2033,8 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
q->ent_dwords = dwords;
q->q_base = Q_BASE_RWA;
- q->q_base |= q->base_dma & Q_BASE_ADDR_MASK << Q_BASE_ADDR_SHIFT;
- q->q_base |= (q->max_n_shift & Q_BASE_LOG2SIZE_MASK)
- << Q_BASE_LOG2SIZE_SHIFT;
+ q->q_base |= q->base_dma & Q_BASE_ADDR_MASK;
+ q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->max_n_shift);
q->prod = q->cons = 0;
return 0;
@@ -2186,11 +2116,9 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
cfg->strtab = strtab;
/* Configure strtab_base_cfg for 2 levels */
- reg = STRTAB_BASE_CFG_FMT_2LVL;
- reg |= (size & STRTAB_BASE_CFG_LOG2SIZE_MASK)
- << STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
- reg |= (STRTAB_SPLIT & STRTAB_BASE_CFG_SPLIT_MASK)
- << STRTAB_BASE_CFG_SPLIT_SHIFT;
+ reg = FIELD_PREP(STRTAB_BASE_CFG_FMT, STRTAB_BASE_CFG_FMT_2LVL);
+ reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, size);
+ reg |= FIELD_PREP(STRTAB_BASE_CFG_SPLIT, STRTAB_SPLIT);
cfg->strtab_base_cfg = reg;
return arm_smmu_init_l1_strtab(smmu);
@@ -2216,9 +2144,8 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
cfg->num_l1_ents = 1 << smmu->sid_bits;
/* Configure strtab_base_cfg for a linear table covering all SIDs */
- reg = STRTAB_BASE_CFG_FMT_LINEAR;
- reg |= (smmu->sid_bits & STRTAB_BASE_CFG_LOG2SIZE_MASK)
- << STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
+ reg = FIELD_PREP(STRTAB_BASE_CFG_FMT, STRTAB_BASE_CFG_FMT_LINEAR);
+ reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
cfg->strtab_base_cfg = reg;
arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
@@ -2239,8 +2166,7 @@ static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
return ret;
/* Set the strtab base address */
- reg = smmu->strtab_cfg.strtab_dma &
- STRTAB_BASE_ADDR_MASK << STRTAB_BASE_ADDR_SHIFT;
+ reg = smmu->strtab_cfg.strtab_dma & STRTAB_BASE_ADDR_MASK;
reg |= STRTAB_BASE_RA;
smmu->strtab_cfg.strtab_base = reg;
@@ -2303,11 +2229,11 @@ static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];
doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
- doorbell &= MSI_CFG0_ADDR_MASK << MSI_CFG0_ADDR_SHIFT;
+ doorbell &= MSI_CFG0_ADDR_MASK;
writeq_relaxed(doorbell, smmu->base + cfg[0]);
writel_relaxed(msg->data, smmu->base + cfg[1]);
- writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
+ writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
}
static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
@@ -2328,10 +2254,15 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
if (!(smmu->features & ARM_SMMU_FEAT_MSI))
return;
+ if (!dev->msi_domain) {
+ dev_info(smmu->dev, "msi_domain absent - falling back to wired irqs\n");
+ return;
+ }
+
/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
if (ret) {
- dev_warn(dev, "failed to allocate MSIs\n");
+ dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n");
return;
}
@@ -2370,6 +2301,8 @@ static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
"arm-smmu-v3-evtq", smmu);
if (ret < 0)
dev_warn(smmu->dev, "failed to enable evtq irq\n");
+ } else {
+ dev_warn(smmu->dev, "no evtq irq - events will not be reported!\n");
}
irq = smmu->gerr_irq;
@@ -2378,6 +2311,8 @@ static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
0, "arm-smmu-v3-gerror", smmu);
if (ret < 0)
dev_warn(smmu->dev, "failed to enable gerror irq\n");
+ } else {
+ dev_warn(smmu->dev, "no gerr irq - errors will not be reported!\n");
}
if (smmu->features & ARM_SMMU_FEAT_PRI) {
@@ -2391,6 +2326,8 @@ static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
if (ret < 0)
dev_warn(smmu->dev,
"failed to enable priq irq\n");
+ } else {
+ dev_warn(smmu->dev, "no priq irq - PRI will be broken\n");
}
}
}
@@ -2463,12 +2400,12 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
return ret;
/* CR1 (table and queue memory attributes) */
- reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
- (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
- (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
- (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
- (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
- (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
+ reg = FIELD_PREP(CR1_TABLE_SH, ARM_SMMU_SH_ISH) |
+ FIELD_PREP(CR1_TABLE_OC, CR1_CACHE_WB) |
+ FIELD_PREP(CR1_TABLE_IC, CR1_CACHE_WB) |
+ FIELD_PREP(CR1_QUEUE_SH, ARM_SMMU_SH_ISH) |
+ FIELD_PREP(CR1_QUEUE_OC, CR1_CACHE_WB) |
+ FIELD_PREP(CR1_QUEUE_IC, CR1_CACHE_WB);
writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
/* CR2 (random crap) */
@@ -2578,7 +2515,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
/* 2-level structures */
- if ((reg & IDR0_ST_LVL_MASK << IDR0_ST_LVL_SHIFT) == IDR0_ST_LVL_2LVL)
+ if (FIELD_GET(IDR0_ST_LVL, reg) == IDR0_ST_LVL_2LVL)
smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;
if (reg & IDR0_CD2L)
@@ -2589,7 +2526,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
* We currently require the same endianness as the CPU, but this
* could be changed later by adding a new IO_PGTABLE_QUIRK.
*/
- switch (reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT) {
+ switch (FIELD_GET(IDR0_TTENDIAN, reg)) {
case IDR0_TTENDIAN_MIXED:
smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
break;
@@ -2631,7 +2568,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
coherent ? "true" : "false");
- switch (reg & IDR0_STALL_MODEL_MASK << IDR0_STALL_MODEL_SHIFT) {
+ switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
case IDR0_STALL_MODEL_FORCE:
smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
/* Fallthrough */
@@ -2651,7 +2588,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
}
/* We only support the AArch64 table format at present */
- switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
+ switch (FIELD_GET(IDR0_TTF, reg)) {
case IDR0_TTF_AARCH32_64:
smmu->ias = 40;
/* Fallthrough */
@@ -2674,22 +2611,22 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
}
/* Queue sizes, capped at 4k */
- smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT,
- reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK);
+ smmu->cmdq.q.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
+ FIELD_GET(IDR1_CMDQS, reg));
if (!smmu->cmdq.q.max_n_shift) {
/* Odd alignment restrictions on the base, so ignore for now */
dev_err(smmu->dev, "unit-length command queue not supported\n");
return -ENXIO;
}
- smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT,
- reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK);
- smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT,
- reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK);
+ smmu->evtq.q.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT,
+ FIELD_GET(IDR1_EVTQS, reg));
+ smmu->priq.q.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT,
+ FIELD_GET(IDR1_PRIQS, reg));
/* SID/SSID sizes */
- smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
- smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;
+ smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg);
+ smmu->sid_bits = FIELD_GET(IDR1_SIDSIZE, reg);
/*
* If the SMMU supports fewer bits than would fill a single L2 stream
@@ -2702,8 +2639,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
/* Maximum number of outstanding stalls */
- smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT
- & IDR5_STALL_MAX_MASK;
+ smmu->evtq.max_stalls = FIELD_GET(IDR5_STALL_MAX, reg);
/* Page sizes */
if (reg & IDR5_GRAN64K)
@@ -2713,13 +2649,12 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
if (reg & IDR5_GRAN4K)
smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
- if (arm_smmu_ops.pgsize_bitmap == -1UL)
- arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
- else
- arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
+ /* Input address size */
+ if (FIELD_GET(IDR5_VAX, reg) == IDR5_VAX_52_BIT)
+ smmu->features |= ARM_SMMU_FEAT_VAX;
/* Output address size */
- switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
+ switch (FIELD_GET(IDR5_OAS, reg)) {
case IDR5_OAS_32_BIT:
smmu->oas = 32;
break;
@@ -2735,6 +2670,10 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
case IDR5_OAS_44_BIT:
smmu->oas = 44;
break;
+ case IDR5_OAS_52_BIT:
+ smmu->oas = 52;
+ smmu->pgsize_bitmap |= 1ULL << 42; /* 4TB */
+ break;
default:
dev_info(smmu->dev,
"unknown output address size. Truncating to 48-bit\n");
@@ -2743,6 +2682,11 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
smmu->oas = 48;
}
+ if (arm_smmu_ops.pgsize_bitmap == -1UL)
+ arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
+ else
+ arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
+
/* Set the DMA mask for our table walker */
if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
dev_warn(smmu->dev,
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 25914d36c5ac..f05f3cf90756 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -19,6 +19,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
@@ -167,13 +168,18 @@ EXPORT_SYMBOL(iommu_put_dma_cookie);
*
* IOMMU drivers can use this to implement their .get_resv_regions callback
* for general non-IOMMU-specific reservations. Currently, this covers host
- * bridge windows for PCI devices.
+ * bridge windows for PCI devices and GICv3 ITS regions on ACPI-based
+ * ARM platforms that may require HW MSI reservation.
*/
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
struct pci_host_bridge *bridge;
struct resource_entry *window;
+ if (!is_of_node(dev->iommu_fwspec->iommu_fwnode) &&
+ iort_iommu_msi_get_resv_regions(dev, list) < 0)
+ return;
+
if (!dev_is_pci(dev))
return;
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 9a7ffd13c7f0..accf58388bdb 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -806,7 +806,7 @@ int __init dmar_dev_scope_init(void)
return dmar_dev_scope_status;
}
-void dmar_register_bus_notifier(void)
+void __init dmar_register_bus_notifier(void)
{
bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
}
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index c5f4f7691b57..85879cfec52f 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1239,17 +1239,6 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
return phys;
}
-static struct iommu_group *get_device_iommu_group(struct device *dev)
-{
- struct iommu_group *group;
-
- group = iommu_group_get(dev);
- if (!group)
- group = iommu_group_alloc();
-
- return group;
-}
-
static int exynos_iommu_add_device(struct device *dev)
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
@@ -1345,7 +1334,7 @@ static const struct iommu_ops exynos_iommu_ops = {
.unmap = exynos_iommu_unmap,
.map_sg = default_iommu_map_sg,
.iova_to_phys = exynos_iommu_iova_to_phys,
- .device_group = get_device_iommu_group,
+ .device_group = generic_device_group,
.add_device = exynos_iommu_add_device,
.remove_device = exynos_iommu_remove_device,
.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 24d1b1b42013..749d8f235346 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -5043,7 +5043,6 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct page *freelist = NULL;
- struct intel_iommu *iommu;
unsigned long start_pfn, last_pfn;
unsigned int npages;
int iommu_id, level = 0;
@@ -5062,12 +5061,9 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
npages = last_pfn - start_pfn + 1;
- for_each_domain_iommu(iommu_id, dmar_domain) {
- iommu = g_iommus[iommu_id];
-
+ for_each_domain_iommu(iommu_id, dmar_domain)
iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
start_pfn, npages, !freelist, 0);
- }
dma_free_pagelist(freelist);
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 99bc9bd64b9e..e8cd984cf9c8 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -396,6 +396,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
pasid_max - 1, GFP_KERNEL);
if (ret < 0) {
kfree(svm);
+ kfree(sdev);
goto out;
}
svm->pasid = ret;
@@ -422,17 +423,13 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
iommu->pasid_table[svm->pasid].val = pasid_entry_val;
wmb();
- /* In caching mode, we still have to flush with PASID 0 when
- * a PASID table entry becomes present. Not entirely clear
- * *why* that would be the case — surely we could just issue
- * a flush with the PASID value that we've changed? The PASID
- * is the index into the table, after all. It's not like domain
- * IDs in the case of the equivalent context-entry change in
- * caching mode. And for that matter it's not entirely clear why
- * a VMM would be in the business of caching the PASID table
- * anyway. Surely that can be left entirely to the guest? */
+
+ /*
+ * Flush PASID cache when a PASID table entry becomes
+ * present.
+ */
if (cap_caching_mode(iommu->cap))
- intel_flush_pasid_dev(svm, sdev, 0);
+ intel_flush_pasid_dev(svm, sdev, svm->pasid);
}
list_add_rcu(&sdev->list, &svm->devs);
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 2ca08dc9331c..10e4a3d11c02 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -357,8 +357,8 @@ static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl)
return false;
}
-static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *, unsigned long,
- size_t, int, arm_v7s_iopte *);
+static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *, unsigned long,
+ size_t, int, arm_v7s_iopte *);
static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
unsigned long iova, phys_addr_t paddr, int prot,
@@ -541,9 +541,10 @@ static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
return pte;
}
-static int arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
- unsigned long iova, size_t size,
- arm_v7s_iopte blk_pte, arm_v7s_iopte *ptep)
+static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
+ unsigned long iova, size_t size,
+ arm_v7s_iopte blk_pte,
+ arm_v7s_iopte *ptep)
{
struct io_pgtable_cfg *cfg = &data->iop.cfg;
arm_v7s_iopte pte, *tablep;
@@ -584,9 +585,9 @@ static int arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
return size;
}
-static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
- unsigned long iova, size_t size, int lvl,
- arm_v7s_iopte *ptep)
+static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
+ unsigned long iova, size_t size, int lvl,
+ arm_v7s_iopte *ptep)
{
arm_v7s_iopte pte[ARM_V7S_CONT_PAGES];
struct io_pgtable *iop = &data->iop;
@@ -656,8 +657,8 @@ static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
return __arm_v7s_unmap(data, iova, size, lvl + 1, ptep);
}
-static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
- size_t size)
+static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t size)
{
struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 51e5c43caed1..39c2a056da21 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -21,6 +21,7 @@
#define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt
#include <linux/atomic.h>
+#include <linux/bitops.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
@@ -32,7 +33,7 @@
#include "io-pgtable.h"
-#define ARM_LPAE_MAX_ADDR_BITS 48
+#define ARM_LPAE_MAX_ADDR_BITS 52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
#define ARM_LPAE_MAX_LEVELS 4
@@ -86,6 +87,8 @@
#define ARM_LPAE_PTE_TYPE_TABLE 3
#define ARM_LPAE_PTE_TYPE_PAGE 3
+#define ARM_LPAE_PTE_ADDR_MASK GENMASK_ULL(47, 12)
+
#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10)
@@ -159,6 +162,7 @@
#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL
+#define ARM_LPAE_TCR_PS_52_BIT 0x6ULL
#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK 0xff
@@ -170,9 +174,7 @@
#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
/* IOPTE accessors */
-#define iopte_deref(pte,d) \
- (__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \
- & ~(ARM_LPAE_GRANULE(d) - 1ULL)))
+#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
#define iopte_type(pte,l) \
(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
@@ -184,12 +186,6 @@
(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \
(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))
-#define iopte_to_pfn(pte,d) \
- (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)
-
-#define pfn_to_iopte(pfn,d) \
- (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
-
struct arm_lpae_io_pgtable {
struct io_pgtable iop;
@@ -203,6 +199,27 @@ struct arm_lpae_io_pgtable {
typedef u64 arm_lpae_iopte;
+static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
+ struct arm_lpae_io_pgtable *data)
+{
+ arm_lpae_iopte pte = paddr;
+
+ /* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
+ return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
+}
+
+static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
+ struct arm_lpae_io_pgtable *data)
+{
+ u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;
+
+ if (data->pg_shift < 16)
+ return paddr;
+
+ /* Rotate the packed high-order bits back to the top */
+ return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
+}
+
static bool selftest_running = false;
static dma_addr_t __arm_lpae_dma_addr(void *pages)
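
The paddr_to_iopte()/iopte_to_paddr() helpers added above fold PA[51:48] into the otherwise-RES0 PTE bits 15:12 when the 64KiB granule is in use. A standalone sketch, not taken from the patch, that walks one 52-bit address through the round trip:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ADDR_MASK	(((1ULL << 48) - 1) & ~0xfffULL)	/* PTE bits 47:12 */

static uint64_t pack(uint64_t paddr)
{
	/* Fold PA[51:48] down into PTE[15:12]; lower bits stay in place */
	return (paddr | (paddr >> (48 - 12))) & ADDR_MASK;
}

static uint64_t unpack(uint64_t pte)
{
	uint64_t paddr = pte & ADDR_MASK;

	/* Rotate the folded bits back up to PA[51:48] */
	return (paddr | (paddr << (48 - 12))) & (ADDR_MASK << 4);
}

int main(void)
{
	uint64_t pa = 0x000f123456780000ULL;	/* 52-bit, 64KiB-aligned PA */
	uint64_t pte = pack(pa);

	assert(pte == 0x000012345678f000ULL);	/* PA[51:48] now in bits 15:12 */
	assert(unpack(pte) == pa);
	printf("pa %#llx <-> pte %#llx\n",
	       (unsigned long long)pa, (unsigned long long)pte);
	return 0;
}
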
@@ -268,9 +285,9 @@ static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
__arm_lpae_sync_pte(ptep, cfg);
}
-static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
- unsigned long iova, size_t size, int lvl,
- arm_lpae_iopte *ptep);
+static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+ unsigned long iova, size_t size, int lvl,
+ arm_lpae_iopte *ptep);
static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
phys_addr_t paddr, arm_lpae_iopte prot,
@@ -287,7 +304,7 @@ static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
pte |= ARM_LPAE_PTE_TYPE_BLOCK;
pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
- pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
+ pte |= paddr_to_iopte(paddr, data);
__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
}
@@ -506,10 +523,10 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop)
kfree(data);
}
-static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
- unsigned long iova, size_t size,
- arm_lpae_iopte blk_pte, int lvl,
- arm_lpae_iopte *ptep)
+static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
+ unsigned long iova, size_t size,
+ arm_lpae_iopte blk_pte, int lvl,
+ arm_lpae_iopte *ptep)
{
struct io_pgtable_cfg *cfg = &data->iop.cfg;
arm_lpae_iopte pte, *tablep;
@@ -528,7 +545,7 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
if (size == split_sz)
unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);
- blk_paddr = iopte_to_pfn(blk_pte, data) << data->pg_shift;
+ blk_paddr = iopte_to_paddr(blk_pte, data);
pte = iopte_prot(blk_pte);
for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
@@ -560,9 +577,9 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
return size;
}
-static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
- unsigned long iova, size_t size, int lvl,
- arm_lpae_iopte *ptep)
+static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+ unsigned long iova, size_t size, int lvl,
+ arm_lpae_iopte *ptep)
{
arm_lpae_iopte pte;
struct io_pgtable *iop = &data->iop;
@@ -606,8 +623,8 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}
-static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
- size_t size)
+static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t size)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
arm_lpae_iopte *ptep = data->pgd;
@@ -652,12 +669,13 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
found_translation:
iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
- return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
+ return iopte_to_paddr(pte, data) | iova;
}
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
- unsigned long granule;
+ unsigned long granule, page_sizes;
+ unsigned int max_addr_bits = 48;
/*
* We need to restrict the supported page sizes to match the
@@ -677,17 +695,24 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
switch (granule) {
case SZ_4K:
- cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
+ page_sizes = (SZ_4K | SZ_2M | SZ_1G);
break;
case SZ_16K:
- cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
+ page_sizes = (SZ_16K | SZ_32M);
break;
case SZ_64K:
- cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
+ max_addr_bits = 52;
+ page_sizes = (SZ_64K | SZ_512M);
+ if (cfg->oas > 48)
+ page_sizes |= 1ULL << 42; /* 4TB */
break;
default:
- cfg->pgsize_bitmap = 0;
+ page_sizes = 0;
}
+
+ cfg->pgsize_bitmap &= page_sizes;
+ cfg->ias = min(cfg->ias, max_addr_bits);
+ cfg->oas = min(cfg->oas, max_addr_bits);
}
static struct arm_lpae_io_pgtable *
@@ -784,6 +809,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
case 48:
reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
break;
+ case 52:
+ reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
default:
goto out_free_data;
}
@@ -891,6 +919,9 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
case 48:
reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
break;
+ case 52:
+ reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
default:
goto out_free_data;
}
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index cd2e1eafffe6..2df79093cad9 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -119,8 +119,8 @@ struct io_pgtable_cfg {
struct io_pgtable_ops {
int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
- int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
- size_t size);
+ size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t size);
phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
unsigned long iova);
};
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 69fef991c651..d2aa23202bb9 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1573,10 +1573,10 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
if (unlikely(ops->unmap == NULL ||
domain->pgsize_bitmap == 0UL))
- return -ENODEV;
+ return 0;
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
- return -EINVAL;
+ return 0;
/* find out the minimum page size supported */
min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
@@ -1589,7 +1589,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
if (!IS_ALIGNED(iova | size, min_pagesz)) {
pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
iova, size, min_pagesz);
- return -EINVAL;
+ return 0;
}
pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index f227d73e7bf6..f2832a10fcea 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -60,7 +60,7 @@
(((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data))
#define REG_MMU_IVRP_PADDR 0x114
-#define F_MMU_IVRP_PA_SET(pa, ext) (((pa) >> 1) | ((!!(ext)) << 31))
+
#define REG_MMU_VLD_PA_RNG 0x118
#define F_MMU_VLD_PA_RNG(EA, SA) (((EA) << 8) | (SA))
@@ -539,8 +539,13 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
- writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
- data->base + REG_MMU_IVRP_PADDR);
+ if (data->m4u_plat == M4U_MT8173)
+ regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
+ else
+ regval = lower_32_bits(data->protect_base) |
+ upper_32_bits(data->protect_base);
+ writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);
+
if (data->enable_4GB && data->m4u_plat != M4U_MT8173) {
/*
 * If 4GB mode is enabled, the valid PA range is from
@@ -695,6 +700,7 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev)
reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
+ reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
clk_disable_unprepare(data->bclk);
return 0;
}
@@ -717,8 +723,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
- writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
- base + REG_MMU_IVRP_PADDR);
+ writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
if (data->m4u_dom)
writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
base + REG_MMU_PT_BASE_ADDR);
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index b4451a1c7c2f..778498b8633f 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -32,6 +32,7 @@ struct mtk_iommu_suspend_reg {
u32 ctrl_reg;
u32 int_control0;
u32 int_main_control;
+ u32 ivrp_paddr;
};
enum mtk_iommu_plat {
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 5a96fd14ac22..a7c2a973784f 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -417,20 +417,12 @@ static int mtk_iommu_create_mapping(struct device *dev,
m4udev->archdata.iommu = mtk_mapping;
}
- ret = arm_iommu_attach_device(dev, mtk_mapping);
- if (ret)
- goto err_release_mapping;
-
return 0;
-
-err_release_mapping:
- arm_iommu_release_mapping(mtk_mapping);
- m4udev->archdata.iommu = NULL;
- return ret;
}
static int mtk_iommu_add_device(struct device *dev)
{
+ struct dma_iommu_mapping *mtk_mapping;
struct of_phandle_args iommu_spec;
struct of_phandle_iterator it;
struct mtk_iommu_data *data;
@@ -451,15 +443,30 @@ static int mtk_iommu_add_device(struct device *dev)
if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return -ENODEV; /* Not an iommu client device */
- data = dev->iommu_fwspec->iommu_priv;
- iommu_device_link(&data->iommu, dev);
-
- group = iommu_group_get_for_dev(dev);
+ /*
+ * This is a short-term bodge because the ARM DMA code doesn't
+ * understand multi-device groups, but we have to call into it
+ * successfully (and not just rely on a normal IOMMU API attach
+ * here) in order to set the correct DMA API ops on @dev.
+ */
+ group = iommu_group_alloc();
if (IS_ERR(group))
return PTR_ERR(group);
+ err = iommu_group_add_device(group, dev);
iommu_group_put(group);
- return 0;
+ if (err)
+ return err;
+
+ data = dev->iommu_fwspec->iommu_priv;
+ mtk_mapping = data->dev->archdata.iommu;
+ err = arm_iommu_attach_device(dev, mtk_mapping);
+ if (err) {
+ iommu_group_remove_device(dev);
+ return err;
+ }
+
+ return iommu_device_link(&data->iommu, dev);
}
static void mtk_iommu_remove_device(struct device *dev)
@@ -476,24 +483,6 @@ static void mtk_iommu_remove_device(struct device *dev)
iommu_fwspec_free(dev);
}
-static struct iommu_group *mtk_iommu_device_group(struct device *dev)
-{
- struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
-
- if (!data)
- return ERR_PTR(-ENODEV);
-
- /* All the client devices are in the same m4u iommu-group */
- if (!data->m4u_group) {
- data->m4u_group = iommu_group_alloc();
- if (IS_ERR(data->m4u_group))
- dev_err(dev, "Failed to allocate M4U IOMMU group\n");
- } else {
- iommu_group_ref_get(data->m4u_group);
- }
- return data->m4u_group;
-}
-
static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
u32 regval;
@@ -546,7 +535,6 @@ static struct iommu_ops mtk_iommu_ops = {
.iova_to_phys = mtk_iommu_iova_to_phys,
.add_device = mtk_iommu_add_device,
.remove_device = mtk_iommu_remove_device,
- .device_group = mtk_iommu_device_group,
.pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
};
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index e135ab830ebf..c33b7b104e72 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1536,7 +1536,7 @@ static struct iommu_group *omap_iommu_device_group(struct device *dev)
struct iommu_group *group = ERR_PTR(-EINVAL);
if (arch_data->iommu_dev)
- group = arch_data->iommu_dev->group;
+ group = iommu_group_ref_get(arch_data->iommu_dev->group);
return group;
}
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 9d991c2d8767..5fc8656c60f9 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -4,6 +4,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -13,13 +14,15 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
-#include <linux/jiffies.h>
+#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -36,7 +39,10 @@
#define RK_MMU_AUTO_GATING 0x24
#define DTE_ADDR_DUMMY 0xCAFEBABE
-#define FORCE_RESET_TIMEOUT 100 /* ms */
+
+#define RK_MMU_POLL_PERIOD_US 100
+#define RK_MMU_FORCE_RESET_TIMEOUT_US 100000
+#define RK_MMU_POLL_TIMEOUT_US 1000
/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED BIT(0)
@@ -73,11 +79,8 @@
*/
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
-#define IOMMU_REG_POLL_COUNT_FAST 1000
-
struct rk_iommu_domain {
struct list_head iommus;
- struct platform_device *pdev;
u32 *dt; /* page directory table */
dma_addr_t dt_dma;
spinlock_t iommus_lock; /* lock for iommus list */
@@ -86,24 +89,37 @@ struct rk_iommu_domain {
struct iommu_domain domain;
};
+/* list of clocks required by IOMMU */
+static const char * const rk_iommu_clocks[] = {
+ "aclk", "iface",
+};
+
struct rk_iommu {
struct device *dev;
void __iomem **bases;
int num_mmu;
- int *irq;
- int num_irq;
+ struct clk_bulk_data *clocks;
+ int num_clocks;
bool reset_disabled;
struct iommu_device iommu;
struct list_head node; /* entry in rk_iommu_domain.iommus */
struct iommu_domain *domain; /* domain to which iommu is attached */
+ struct iommu_group *group;
+};
+
+struct rk_iommudata {
+ struct device_link *link; /* runtime PM link from IOMMU to master */
+ struct rk_iommu *iommu;
};
+static struct device *dma_dev;
+
static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
unsigned int count)
{
size_t size = count * sizeof(u32); /* count of u32 entry */
- dma_sync_single_for_device(&dom->pdev->dev, dma, size, DMA_TO_DEVICE);
+ dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
}
static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
@@ -111,27 +127,6 @@ static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
return container_of(dom, struct rk_iommu_domain, domain);
}
-/**
- * Inspired by _wait_for in intel_drv.h
- * This is NOT safe for use in interrupt context.
- *
- * Note that it's important that we check the condition again after having
- * timed out, since the timeout could be due to preemption or similar and
- * we've never had a chance to check the condition before the timeout.
- */
-#define rk_wait_for(COND, MS) ({ \
- unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
- int ret__ = 0; \
- while (!(COND)) { \
- if (time_after(jiffies, timeout__)) { \
- ret__ = (COND) ? 0 : -ETIMEDOUT; \
- break; \
- } \
- usleep_range(50, 100); \
- } \
- ret__; \
-})
-
/*
* The Rockchip rk3288 iommu uses a 2-level page table.
* The first level is the "Directory Table" (DT).
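
The rk_wait_for() macro removed here gives way, in the hunks that follow, to readx_poll_timeout() from <linux/iopoll.h>. A rough userspace model of that polling pattern, with a hypothetical condition callback standing in for rk_iommu_is_stall_active():

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Poll cond(arg) every sleep_us until it is true or timeout_us elapses */
static int poll_timeout(bool (*cond)(void *), void *arg,
			unsigned int sleep_us, unsigned int timeout_us)
{
	unsigned int waited_us = 0;

	for (;;) {
		if (cond(arg))
			return 0;
		if (waited_us >= timeout_us)
			/* Check once more in case we were preempted */
			return cond(arg) ? 0 : -ETIMEDOUT;
		usleep(sleep_us);
		waited_us += sleep_us;
	}
}

static bool always_ready(void *arg)	/* stand-in for a hardware status read */
{
	(void)arg;
	return true;
}

int main(void)
{
	/* Mirrors: readx_poll_timeout(getter, arg, val, val, 100, 1000) */
	int ret = poll_timeout(always_ready, NULL, 100, 1000);

	printf("poll returned %d\n", ret);
	return ret;
}
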
@@ -296,19 +291,21 @@ static void rk_iommu_base_command(void __iomem *base, u32 command)
{
writel(command, base + RK_MMU_COMMAND);
}
-static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova,
+static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
size_t size)
{
int i;
-
- dma_addr_t iova_end = iova + size;
+ dma_addr_t iova_end = iova_start + size;
/*
* TODO(djkurtz): Figure out when it is more efficient to shootdown the
* entire iotlb rather than iterate over individual iovas.
*/
- for (i = 0; i < iommu->num_mmu; i++)
- for (; iova < iova_end; iova += SPAGE_SIZE)
+ for (i = 0; i < iommu->num_mmu; i++) {
+ dma_addr_t iova;
+
+ for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
+ }
}
static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
@@ -335,9 +332,21 @@ static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
return enable;
}
+static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
+{
+ bool done = true;
+ int i;
+
+ for (i = 0; i < iommu->num_mmu; i++)
+ done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;
+
+ return done;
+}
+
static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
int ret, i;
+ bool val;
if (rk_iommu_is_stall_active(iommu))
return 0;
@@ -348,7 +357,9 @@ static int rk_iommu_enable_stall(struct rk_iommu *iommu)
rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
- ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
+ ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
+ val, RK_MMU_POLL_PERIOD_US,
+ RK_MMU_POLL_TIMEOUT_US);
if (ret)
for (i = 0; i < iommu->num_mmu; i++)
dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
@@ -360,13 +371,16 @@ static int rk_iommu_enable_stall(struct rk_iommu *iommu)
static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
int ret, i;
+ bool val;
if (!rk_iommu_is_stall_active(iommu))
return 0;
rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
- ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
+ ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
+ !val, RK_MMU_POLL_PERIOD_US,
+ RK_MMU_POLL_TIMEOUT_US);
if (ret)
for (i = 0; i < iommu->num_mmu; i++)
dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
@@ -378,13 +392,16 @@ static int rk_iommu_disable_stall(struct rk_iommu *iommu)
static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
int ret, i;
+ bool val;
if (rk_iommu_is_paging_enabled(iommu))
return 0;
rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
- ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
+ ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
+ val, RK_MMU_POLL_PERIOD_US,
+ RK_MMU_POLL_TIMEOUT_US);
if (ret)
for (i = 0; i < iommu->num_mmu; i++)
dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
@@ -396,13 +413,16 @@ static int rk_iommu_enable_paging(struct rk_iommu *iommu)
static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
int ret, i;
+ bool val;
if (!rk_iommu_is_paging_enabled(iommu))
return 0;
rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
- ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
+ ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
+ !val, RK_MMU_POLL_PERIOD_US,
+ RK_MMU_POLL_TIMEOUT_US);
if (ret)
for (i = 0; i < iommu->num_mmu; i++)
dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
@@ -415,6 +435,7 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
int ret, i;
u32 dte_addr;
+ bool val;
if (iommu->reset_disabled)
return 0;
@@ -435,13 +456,12 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
- for (i = 0; i < iommu->num_mmu; i++) {
- ret = rk_wait_for(rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0x00000000,
- FORCE_RESET_TIMEOUT);
- if (ret) {
- dev_err(iommu->dev, "FORCE_RESET command timed out\n");
- return ret;
- }
+ ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
+ val, RK_MMU_FORCE_RESET_TIMEOUT_US,
+ RK_MMU_POLL_TIMEOUT_US);
+ if (ret) {
+ dev_err(iommu->dev, "FORCE_RESET command timed out\n");
+ return ret;
}
return 0;
@@ -503,6 +523,12 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
irqreturn_t ret = IRQ_NONE;
int i;
+ if (WARN_ON(!pm_runtime_get_if_in_use(iommu->dev)))
+ return 0;
+
+ if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
+ goto out;
+
for (i = 0; i < iommu->num_mmu; i++) {
int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
if (int_status == 0)
@@ -549,6 +575,10 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
}
+ clk_bulk_disable(iommu->num_clocks, iommu->clocks);
+
+out:
+ pm_runtime_put(iommu->dev);
return ret;
}
@@ -590,8 +620,17 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
spin_lock_irqsave(&rk_domain->iommus_lock, flags);
list_for_each(pos, &rk_domain->iommus) {
struct rk_iommu *iommu;
+
iommu = list_entry(pos, struct rk_iommu, node);
- rk_iommu_zap_lines(iommu, iova, size);
+
+ /* Only zap TLBs of IOMMUs that are powered on. */
+ if (pm_runtime_get_if_in_use(iommu->dev)) {
+ WARN_ON(clk_bulk_enable(iommu->num_clocks,
+ iommu->clocks));
+ rk_iommu_zap_lines(iommu, iova, size);
+ clk_bulk_disable(iommu->num_clocks, iommu->clocks);
+ pm_runtime_put(iommu->dev);
+ }
}
spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}
@@ -608,7 +647,6 @@ static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
dma_addr_t iova)
{
- struct device *dev = &rk_domain->pdev->dev;
u32 *page_table, *dte_addr;
u32 dte_index, dte;
phys_addr_t pt_phys;
@@ -626,9 +664,9 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
if (!page_table)
return ERR_PTR(-ENOMEM);
- pt_dma = dma_map_single(dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, pt_dma)) {
- dev_err(dev, "DMA mapping error while allocating page table\n");
+ pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, pt_dma)) {
+ dev_err(dma_dev, "DMA mapping error while allocating page table\n");
free_page((unsigned long)page_table);
return ERR_PTR(-ENOMEM);
}
@@ -790,52 +828,46 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
- struct iommu_group *group;
- struct device *iommu_dev;
- struct rk_iommu *rk_iommu;
+ struct rk_iommudata *data = dev->archdata.iommu;
- group = iommu_group_get(dev);
- if (!group)
- return NULL;
- iommu_dev = iommu_group_get_iommudata(group);
- rk_iommu = dev_get_drvdata(iommu_dev);
- iommu_group_put(group);
+ return data ? data->iommu : NULL;
+}
+
+/* Must be called with iommu powered on and attached */
+static void rk_iommu_disable(struct rk_iommu *iommu)
+{
+ int i;
- return rk_iommu;
+ /* Ignore error while disabling, just keep going */
+ WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
+ rk_iommu_enable_stall(iommu);
+ rk_iommu_disable_paging(iommu);
+ for (i = 0; i < iommu->num_mmu; i++) {
+ rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
+ rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
+ }
+ rk_iommu_disable_stall(iommu);
+ clk_bulk_disable(iommu->num_clocks, iommu->clocks);
}
-static int rk_iommu_attach_device(struct iommu_domain *domain,
- struct device *dev)
+/* Must be called with iommu powered on and attached */
+static int rk_iommu_enable(struct rk_iommu *iommu)
{
- struct rk_iommu *iommu;
+ struct iommu_domain *domain = iommu->domain;
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
- unsigned long flags;
int ret, i;
- /*
- * Allow 'virtual devices' (e.g., drm) to attach to domain.
- * Such a device does not belong to an iommu group.
- */
- iommu = rk_iommu_from_dev(dev);
- if (!iommu)
- return 0;
+ ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
+ if (ret)
+ return ret;
ret = rk_iommu_enable_stall(iommu);
if (ret)
- return ret;
+ goto out_disable_clocks;
ret = rk_iommu_force_reset(iommu);
if (ret)
- return ret;
-
- iommu->domain = domain;
-
- for (i = 0; i < iommu->num_irq; i++) {
- ret = devm_request_irq(iommu->dev, iommu->irq[i], rk_iommu_irq,
- IRQF_SHARED, dev_name(dev), iommu);
- if (ret)
- return ret;
- }
+ goto out_disable_stall;
for (i = 0; i < iommu->num_mmu; i++) {
rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
@@ -845,18 +877,12 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
}
ret = rk_iommu_enable_paging(iommu);
- if (ret)
- return ret;
-
- spin_lock_irqsave(&rk_domain->iommus_lock, flags);
- list_add_tail(&iommu->node, &rk_domain->iommus);
- spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
-
- dev_dbg(dev, "Attached to iommu domain\n");
+out_disable_stall:
rk_iommu_disable_stall(iommu);
-
- return 0;
+out_disable_clocks:
+ clk_bulk_disable(iommu->num_clocks, iommu->clocks);
+ return ret;
}
static void rk_iommu_detach_device(struct iommu_domain *domain,
@@ -865,60 +891,90 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
struct rk_iommu *iommu;
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
- int i;
/* Allow 'virtual devices' (eg drm) to detach from domain */
iommu = rk_iommu_from_dev(dev);
if (!iommu)
return;
+ dev_dbg(dev, "Detaching from iommu domain\n");
+
+ /* iommu already detached */
+ if (iommu->domain != domain)
+ return;
+
+ iommu->domain = NULL;
+
spin_lock_irqsave(&rk_domain->iommus_lock, flags);
list_del_init(&iommu->node);
spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
- /* Ignore error while disabling, just keep going */
- rk_iommu_enable_stall(iommu);
- rk_iommu_disable_paging(iommu);
- for (i = 0; i < iommu->num_mmu; i++) {
- rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
- rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
+ if (pm_runtime_get_if_in_use(iommu->dev)) {
+ rk_iommu_disable(iommu);
+ pm_runtime_put(iommu->dev);
}
- rk_iommu_disable_stall(iommu);
+}
- for (i = 0; i < iommu->num_irq; i++)
- devm_free_irq(iommu->dev, iommu->irq[i], iommu);
+static int rk_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct rk_iommu *iommu;
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+ unsigned long flags;
+ int ret;
- iommu->domain = NULL;
+ /*
+ * Allow 'virtual devices' (e.g., drm) to attach to domain.
+ * Such a device does not belong to an iommu group.
+ */
+ iommu = rk_iommu_from_dev(dev);
+ if (!iommu)
+ return 0;
+
+ dev_dbg(dev, "Attaching to iommu domain\n");
+
+ /* iommu already attached */
+ if (iommu->domain == domain)
+ return 0;
- dev_dbg(dev, "Detached from iommu domain\n");
+ if (iommu->domain)
+ rk_iommu_detach_device(iommu->domain, dev);
+
+ iommu->domain = domain;
+
+ spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ list_add_tail(&iommu->node, &rk_domain->iommus);
+ spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+
+ if (!pm_runtime_get_if_in_use(iommu->dev))
+ return 0;
+
+ ret = rk_iommu_enable(iommu);
+ if (ret)
+ rk_iommu_detach_device(iommu->domain, dev);
+
+ pm_runtime_put(iommu->dev);
+
+ return ret;
}
static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
struct rk_iommu_domain *rk_domain;
- struct platform_device *pdev;
- struct device *iommu_dev;
if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
return NULL;
- /* Register a pdev per domain, so DMA API can base on this *dev
- * even some virtual master doesn't have an iommu slave
- */
- pdev = platform_device_register_simple("rk_iommu_domain",
- PLATFORM_DEVID_AUTO, NULL, 0);
- if (IS_ERR(pdev))
+ if (!dma_dev)
return NULL;
- rk_domain = devm_kzalloc(&pdev->dev, sizeof(*rk_domain), GFP_KERNEL);
+ rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
if (!rk_domain)
- goto err_unreg_pdev;
-
- rk_domain->pdev = pdev;
+ return NULL;
if (type == IOMMU_DOMAIN_DMA &&
iommu_get_dma_cookie(&rk_domain->domain))
- goto err_unreg_pdev;
+ return NULL;
/*
* rk32xx iommus use a 2 level pagetable.
@@ -929,11 +985,10 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
if (!rk_domain->dt)
goto err_put_cookie;
- iommu_dev = &pdev->dev;
- rk_domain->dt_dma = dma_map_single(iommu_dev, rk_domain->dt,
+ rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
SPAGE_SIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(iommu_dev, rk_domain->dt_dma)) {
- dev_err(iommu_dev, "DMA map error for DT\n");
+ if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
+ dev_err(dma_dev, "DMA map error for DT\n");
goto err_free_dt;
}
@@ -954,8 +1009,6 @@ err_free_dt:
err_put_cookie:
if (type == IOMMU_DOMAIN_DMA)
iommu_put_dma_cookie(&rk_domain->domain);
-err_unreg_pdev:
- platform_device_unregister(pdev);
return NULL;
}
@@ -972,126 +1025,82 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
if (rk_dte_is_pt_valid(dte)) {
phys_addr_t pt_phys = rk_dte_pt_address(dte);
u32 *page_table = phys_to_virt(pt_phys);
- dma_unmap_single(&rk_domain->pdev->dev, pt_phys,
+ dma_unmap_single(dma_dev, pt_phys,
SPAGE_SIZE, DMA_TO_DEVICE);
free_page((unsigned long)page_table);
}
}
- dma_unmap_single(&rk_domain->pdev->dev, rk_domain->dt_dma,
+ dma_unmap_single(dma_dev, rk_domain->dt_dma,
SPAGE_SIZE, DMA_TO_DEVICE);
free_page((unsigned long)rk_domain->dt);
if (domain->type == IOMMU_DOMAIN_DMA)
iommu_put_dma_cookie(&rk_domain->domain);
-
- platform_device_unregister(rk_domain->pdev);
}
-static bool rk_iommu_is_dev_iommu_master(struct device *dev)
+static int rk_iommu_add_device(struct device *dev)
{
- struct device_node *np = dev->of_node;
- int ret;
-
- /*
- * An iommu master has an iommus property containing a list of phandles
- * to iommu nodes, each with an #iommu-cells property with value 0.
- */
- ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
- return (ret > 0);
-}
+ struct iommu_group *group;
+ struct rk_iommu *iommu;
+ struct rk_iommudata *data;
-static int rk_iommu_group_set_iommudata(struct iommu_group *group,
- struct device *dev)
-{
- struct device_node *np = dev->of_node;
- struct platform_device *pd;
- int ret;
- struct of_phandle_args args;
+ data = dev->archdata.iommu;
+ if (!data)
+ return -ENODEV;
- /*
- * An iommu master has an iommus property containing a list of phandles
- * to iommu nodes, each with an #iommu-cells property with value 0.
- */
- ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
- &args);
- if (ret) {
- dev_err(dev, "of_parse_phandle_with_args(%pOF) => %d\n",
- np, ret);
- return ret;
- }
- if (args.args_count != 0) {
- dev_err(dev, "incorrect number of iommu params found for %pOF (found %d, expected 0)\n",
- args.np, args.args_count);
- return -EINVAL;
- }
+ iommu = rk_iommu_from_dev(dev);
- pd = of_find_device_by_node(args.np);
- of_node_put(args.np);
- if (!pd) {
- dev_err(dev, "iommu %pOF not found\n", args.np);
- return -EPROBE_DEFER;
- }
+ group = iommu_group_get_for_dev(dev);
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+ iommu_group_put(group);
- /* TODO(djkurtz): handle multiple slave iommus for a single master */
- iommu_group_set_iommudata(group, &pd->dev, NULL);
+ iommu_device_link(&iommu->iommu, dev);
+ data->link = device_link_add(dev, iommu->dev, DL_FLAG_PM_RUNTIME);
return 0;
}
-static int rk_iommu_add_device(struct device *dev)
+static void rk_iommu_remove_device(struct device *dev)
{
- struct iommu_group *group;
struct rk_iommu *iommu;
- int ret;
-
- if (!rk_iommu_is_dev_iommu_master(dev))
- return -ENODEV;
+ struct rk_iommudata *data = dev->archdata.iommu;
- group = iommu_group_get(dev);
- if (!group) {
- group = iommu_group_alloc();
- if (IS_ERR(group)) {
- dev_err(dev, "Failed to allocate IOMMU group\n");
- return PTR_ERR(group);
- }
- }
+ iommu = rk_iommu_from_dev(dev);
- ret = iommu_group_add_device(group, dev);
- if (ret)
- goto err_put_group;
+ device_link_del(data->link);
+ iommu_device_unlink(&iommu->iommu, dev);
+ iommu_group_remove_device(dev);
+}
- ret = rk_iommu_group_set_iommudata(group, dev);
- if (ret)
- goto err_remove_device;
+static struct iommu_group *rk_iommu_device_group(struct device *dev)
+{
+ struct rk_iommu *iommu;
iommu = rk_iommu_from_dev(dev);
- if (iommu)
- iommu_device_link(&iommu->iommu, dev);
-
- iommu_group_put(group);
-
- return 0;
-err_remove_device:
- iommu_group_remove_device(dev);
-err_put_group:
- iommu_group_put(group);
- return ret;
+ return iommu_group_ref_get(iommu->group);
}
-static void rk_iommu_remove_device(struct device *dev)
+static int rk_iommu_of_xlate(struct device *dev,
+ struct of_phandle_args *args)
{
- struct rk_iommu *iommu;
+ struct platform_device *iommu_dev;
+ struct rk_iommudata *data;
- if (!rk_iommu_is_dev_iommu_master(dev))
- return;
+ data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- iommu = rk_iommu_from_dev(dev);
- if (iommu)
- iommu_device_unlink(&iommu->iommu, dev);
+ iommu_dev = of_find_device_by_node(args->np);
- iommu_group_remove_device(dev);
+ data->iommu = platform_get_drvdata(iommu_dev);
+ dev->archdata.iommu = data;
+
+ of_dev_put(iommu_dev);
+
+ return 0;
}
static const struct iommu_ops rk_iommu_ops = {
@@ -1105,31 +1114,9 @@ static const struct iommu_ops rk_iommu_ops = {
.add_device = rk_iommu_add_device,
.remove_device = rk_iommu_remove_device,
.iova_to_phys = rk_iommu_iova_to_phys,
+ .device_group = rk_iommu_device_group,
.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
-};
-
-static int rk_iommu_domain_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
-
- dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
- if (!dev->dma_parms)
- return -ENOMEM;
-
- /* Set dma_ops for dev, otherwise it would be dummy_dma_ops */
- arch_setup_dma_ops(dev, 0, DMA_BIT_MASK(32), NULL, false);
-
- dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
- dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
-
- return 0;
-}
-
-static struct platform_driver rk_iommu_domain_driver = {
- .probe = rk_iommu_domain_probe,
- .driver = {
- .name = "rk_iommu_domain",
- },
+ .of_xlate = rk_iommu_of_xlate,
};
static int rk_iommu_probe(struct platform_device *pdev)
@@ -1138,7 +1125,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
struct rk_iommu *iommu;
struct resource *res;
int num_res = pdev->num_resources;
- int err, i;
+ int err, i, irq;
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
if (!iommu)
@@ -1165,50 +1152,108 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (iommu->num_mmu == 0)
return PTR_ERR(iommu->bases[0]);
- iommu->num_irq = platform_irq_count(pdev);
- if (iommu->num_irq < 0)
- return iommu->num_irq;
- if (iommu->num_irq == 0)
- return -ENXIO;
-
- iommu->irq = devm_kcalloc(dev, iommu->num_irq, sizeof(*iommu->irq),
- GFP_KERNEL);
- if (!iommu->irq)
- return -ENOMEM;
+ i = 0;
+ while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
+ if (irq < 0)
+ return irq;
- for (i = 0; i < iommu->num_irq; i++) {
- iommu->irq[i] = platform_get_irq(pdev, i);
- if (iommu->irq[i] < 0) {
- dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq[i]);
- return -ENXIO;
- }
+ err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
+ IRQF_SHARED, dev_name(dev), iommu);
+ if (err)
+ return err;
}
iommu->reset_disabled = device_property_read_bool(dev,
"rockchip,disable-mmu-reset");
- err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
+ iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
+ iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
+ sizeof(*iommu->clocks), GFP_KERNEL);
+ if (!iommu->clocks)
+ return -ENOMEM;
+
+ for (i = 0; i < iommu->num_clocks; ++i)
+ iommu->clocks[i].id = rk_iommu_clocks[i];
+
+ err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
+ if (err)
+ return err;
+
+ err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
if (err)
return err;
+ iommu->group = iommu_group_alloc();
+ if (IS_ERR(iommu->group)) {
+ err = PTR_ERR(iommu->group);
+ goto err_unprepare_clocks;
+ }
+
+ err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
+ if (err)
+ goto err_put_group;
+
iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
+ iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);
+
err = iommu_device_register(&iommu->iommu);
+ if (err)
+ goto err_remove_sysfs;
+
+ /*
+ * Use the first registered IOMMU device for DMA API operations on
+ * domains, since a domain might not physically correspond to a
+ * single IOMMU device.
+ */
+ if (!dma_dev)
+ dma_dev = &pdev->dev;
+
+ bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
+ pm_runtime_enable(dev);
+
+ return 0;
+err_remove_sysfs:
+ iommu_device_sysfs_remove(&iommu->iommu);
+err_put_group:
+ iommu_group_put(iommu->group);
+err_unprepare_clocks:
+ clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
return err;
}
-static int rk_iommu_remove(struct platform_device *pdev)
+static void rk_iommu_shutdown(struct platform_device *pdev)
{
- struct rk_iommu *iommu = platform_get_drvdata(pdev);
+ pm_runtime_force_suspend(&pdev->dev);
+}
- if (iommu) {
- iommu_device_sysfs_remove(&iommu->iommu);
- iommu_device_unregister(&iommu->iommu);
- }
+static int __maybe_unused rk_iommu_suspend(struct device *dev)
+{
+ struct rk_iommu *iommu = dev_get_drvdata(dev);
+ if (!iommu->domain)
+ return 0;
+
+ rk_iommu_disable(iommu);
return 0;
}
+static int __maybe_unused rk_iommu_resume(struct device *dev)
+{
+ struct rk_iommu *iommu = dev_get_drvdata(dev);
+
+ if (!iommu->domain)
+ return 0;
+
+ return rk_iommu_enable(iommu);
+}
+
+static const struct dev_pm_ops rk_iommu_pm_ops = {
+ SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
static const struct of_device_id rk_iommu_dt_ids[] = {
{ .compatible = "rockchip,iommu" },
{ /* sentinel */ }
@@ -1217,45 +1262,22 @@ MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);
static struct platform_driver rk_iommu_driver = {
.probe = rk_iommu_probe,
- .remove = rk_iommu_remove,
+ .shutdown = rk_iommu_shutdown,
.driver = {
.name = "rk_iommu",
.of_match_table = rk_iommu_dt_ids,
+ .pm = &rk_iommu_pm_ops,
+ .suppress_bind_attrs = true,
},
};
static int __init rk_iommu_init(void)
{
- struct device_node *np;
- int ret;
-
- np = of_find_matching_node(NULL, rk_iommu_dt_ids);
- if (!np)
- return 0;
-
- of_node_put(np);
-
- ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
- if (ret)
- return ret;
-
- ret = platform_driver_register(&rk_iommu_domain_driver);
- if (ret)
- return ret;
-
- ret = platform_driver_register(&rk_iommu_driver);
- if (ret)
- platform_driver_unregister(&rk_iommu_domain_driver);
- return ret;
+ return platform_driver_register(&rk_iommu_driver);
}
-static void __exit rk_iommu_exit(void)
-{
- platform_driver_unregister(&rk_iommu_driver);
- platform_driver_unregister(&rk_iommu_domain_driver);
-}
-
subsys_initcall(rk_iommu_init);
-module_exit(rk_iommu_exit);
+
+IOMMU_OF_DECLARE(rk_iommu_of, "rockchip,iommu");
MODULE_DESCRIPTION("IOMMU API for Rockchip");
MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 2982e93d2369..5416f2b2ac21 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3612,7 +3612,8 @@ static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
return -ENOMEM;
}
- err = iort_register_domain_token(its_entry->translation_id, dom_handle);
+ err = iort_register_domain_token(its_entry->translation_id, res.start,
+ dom_handle);
if (err) {
pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
&res.start, its_entry->translation_id);
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index e4fafdd96e5e..eafd06f62a3a 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -305,8 +305,8 @@ static void cros_ec_sensors_register(struct cros_ec_dev *ec)
resp = (struct ec_response_motion_sense *)msg->data;
sensor_num = resp->dump.sensor_count;
- /* Allocate 2 extra sensors in case lid angle or FIFO are needed */
- sensor_cells = kzalloc(sizeof(struct mfd_cell) * (sensor_num + 2),
+ /* Allocate 1 extra sensor in case the FIFO is needed */
+ sensor_cells = kzalloc(sizeof(struct mfd_cell) * (sensor_num + 1),
GFP_KERNEL);
if (sensor_cells == NULL)
goto error;
@@ -362,16 +362,10 @@ static void cros_ec_sensors_register(struct cros_ec_dev *ec)
sensor_type[resp->info.type]++;
id++;
}
- if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2) {
- sensor_platforms[id].sensor_num = sensor_num;
- sensor_cells[id].name = "cros-ec-angle";
- sensor_cells[id].id = 0;
- sensor_cells[id].platform_data = &sensor_platforms[id];
- sensor_cells[id].pdata_size =
- sizeof(struct cros_ec_sensor_platform);
- id++;
- }
+ if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2)
+ ec->has_kb_wake_angle = true;
+
if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
sensor_cells[id].name = "cros-ec-ring";
id++;
@@ -424,6 +418,14 @@ static int ec_device_probe(struct platform_device *pdev)
goto failed;
}
+ /* check whether this EC is a sensor hub. */
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE))
+ cros_ec_sensors_register(ec);
+
+ /* Take control of the lightbar from the EC. */
+ lb_manual_suspend_ctrl(ec, 1);
+
+ /* We can now add the sysfs class; we know which parameter to show */
retval = cdev_device_add(&ec->cdev, &ec->class_dev);
if (retval) {
dev_err(dev, "cdev_device_add failed => %d\n", retval);
@@ -433,13 +435,6 @@ static int ec_device_probe(struct platform_device *pdev)
if (cros_ec_debugfs_init(ec))
dev_warn(dev, "failed to create debugfs directory\n");
- /* check whether this EC is a sensor hub. */
- if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE))
- cros_ec_sensors_register(ec);
-
- /* Take control of the lightbar from the EC. */
- lb_manual_suspend_ctrl(ec, 1);
-
return 0;
failed:
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 24108bfad889..6193270e7b3d 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -400,10 +400,14 @@ static void skip_back_repeat_test(char *arg)
int go_back = simple_strtol(arg, NULL, 10);
repeat_test--;
- if (repeat_test <= 0)
+ if (repeat_test <= 0) {
ts.idx++;
- else
+ } else {
+ if (repeat_test % 100 == 0)
+ v1printk("kgdbts:RUN ... %d remaining\n", repeat_test);
+
ts.idx -= go_back;
+ }
fill_get_buf(ts.tst[ts.idx].get);
}
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 02485e310c81..9e923cd1d80e 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -3080,6 +3080,7 @@ static void __exit mmc_blk_exit(void)
mmc_unregister_driver(&mmc_driver);
unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
+ bus_unregister(&mmc_rpmb_bus_type);
}
module_init(mmc_blk_init);
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 712e08d9a45e..a0168e9e4fce 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -362,9 +362,9 @@ static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
host->irq_mask &= ~irq;
else
host->irq_mask |= irq;
- spin_unlock_irqrestore(&host->lock, flags);
writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK);
+ spin_unlock_irqrestore(&host->lock, flags);
}
static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
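
The jz4740_mmc hunk above closes a small race: the cached host->irq_mask and the IMASK register write are now both performed while host->lock is held, so concurrent callers can no longer interleave a mask update with a stale hardware write. Below is a minimal, compilable user-space sketch of that pattern, with a pthread mutex standing in for the spinlock and a plain variable standing in for the IMASK register; the names are illustrative, not taken from the driver.

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint16_t irq_mask;		/* cached copy of the mask */
static volatile uint16_t hw_imask;	/* stands in for the IMASK register */

static void set_irq_enabled(uint16_t irq, int enabled)
{
	pthread_mutex_lock(&lock);
	if (enabled)
		irq_mask &= ~irq;	/* enabled interrupts have their mask bit cleared */
	else
		irq_mask |= irq;
	hw_imask = irq_mask;		/* hardware write happens under the same lock */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	set_irq_enabled(0x0001, 1);
	set_irq_enabled(0x0002, 0);
	return 0;
}
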
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index e30df9ad8197..308029930304 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -913,7 +913,7 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
host->check_scc_error(host);
/* If SET_BLOCK_COUNT, continue with main command */
- if (host->mrq) {
+ if (host->mrq && !mrq->cmd->error) {
tmio_process_mrq(host, mrq);
return;
}
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index b1fc28f63882..d0b63bbf46a7 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -244,7 +244,7 @@ static int ubiblock_open(struct block_device *bdev, fmode_t mode)
* in any case.
*/
if (mode & FMODE_WRITE) {
- ret = -EPERM;
+ ret = -EROFS;
goto out_unlock;
}
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index e941395de3ae..753494e042d5 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -854,6 +854,17 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
return -EINVAL;
}
+ /*
+ * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
+ * MLC NAND is different and needs special care, otherwise UBI or UBIFS
+ * will die soon and you will lose all your data.
+ */
+ if (mtd->type == MTD_MLCNANDFLASH) {
+ pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
+ mtd->index);
+ return -EINVAL;
+ }
+
if (ubi_num == UBI_DEV_NUM_AUTO) {
/* Search for an empty slot in the @ubi_devices array */
for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 590d967011bb..98f7d6be8d1f 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -362,7 +362,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
{
int i;
- flush_work(&ubi->fm_work);
return_unused_pool_pebs(ubi, &ubi->fm_pool);
return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index c96a92118b8b..32f6d2e24d66 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -951,9 +951,11 @@ void aq_nic_shutdown(struct aq_nic_s *self)
netif_device_detach(self->ndev);
- err = aq_nic_stop(self);
- if (err < 0)
- goto err_exit;
+ if (netif_running(self->ndev)) {
+ err = aq_nic_stop(self);
+ if (err < 0)
+ goto err_exit;
+ }
aq_nic_deinit(self);
err_exit:
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 84d7f4dd4ce1..e652d86b87d4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -48,6 +48,8 @@
#define FORCE_FLASHLESS 0
static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
+static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state);
int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
{
@@ -247,6 +249,20 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self)
self->rbl_enabled = (boot_exit_code != 0);
+ /* FW 1.x may boot up in an invalid POWER state (WOL feature).
+ * We should work around this by forcing its state back to DEINIT
+ */
+ if (!hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
+ aq_hw_read_reg(self,
+ HW_ATL_MPI_FW_VERSION))) {
+ int err = 0;
+
+ hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
+ AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) &
+ HW_ATL_MPI_STATE_MSK) == MPI_DEINIT,
+ 10, 1000U);
+ }
+
if (self->rbl_enabled)
return hw_atl_utils_soft_reset_rbl(self);
else
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 1991f0c7bc0e..f83769d8047b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6090,7 +6090,7 @@ static void bnxt_free_irq(struct bnxt *bp)
free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
bp->dev->rx_cpu_rmap = NULL;
#endif
- if (!bp->irq_tbl)
+ if (!bp->irq_tbl || !bp->bnapi)
return;
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -7686,6 +7686,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx_rings <<= 1;
cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
+ if (bp->flags & BNXT_FLAG_NEW_RM)
+ cp += bnxt_get_ulp_msix_num(bp);
return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
vnics);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8d8ccd67e0e2..1f622ca2a64f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -870,17 +870,22 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct bnxt *bp = netdev_priv(dev);
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic;
int i = 0;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
- if (indir)
+ if (!bp->vnic_info)
+ return 0;
+
+ vnic = &bp->vnic_info[0];
+ if (indir && vnic->rss_table) {
for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
indir[i] = le16_to_cpu(vnic->rss_table[i]);
+ }
- if (key)
+ if (key && vnic->rss_hash_key)
memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 65c2cee35766..795f45024c20 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -377,6 +377,30 @@ static bool is_wildcard(void *mask, int len)
return true;
}
+static bool is_exactmatch(void *mask, int len)
+{
+ const u8 *p = mask;
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (p[i] != 0xff)
+ return false;
+
+ return true;
+}
+
+static bool bits_set(void *key, int len)
+{
+ const u8 *p = key;
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (p[i] != 0)
+ return true;
+
+ return false;
+}
+
static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
__le16 ref_flow_handle,
__le32 tunnel_handle, __le16 *flow_handle)
@@ -764,6 +788,41 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
return false;
}
+ /* Currently source/dest MAC cannot be partial wildcard */
+ if (bits_set(&flow->l2_key.smac, sizeof(flow->l2_key.smac)) &&
+ !is_exactmatch(flow->l2_mask.smac, sizeof(flow->l2_mask.smac))) {
+ netdev_info(bp->dev, "Wildcard match unsupported for Source MAC\n");
+ return false;
+ }
+ if (bits_set(&flow->l2_key.dmac, sizeof(flow->l2_key.dmac)) &&
+ !is_exactmatch(&flow->l2_mask.dmac, sizeof(flow->l2_mask.dmac))) {
+ netdev_info(bp->dev, "Wildcard match unsupported for Dest MAC\n");
+ return false;
+ }
+
+ /* Currently VLAN fields cannot be partial wildcard */
+ if (bits_set(&flow->l2_key.inner_vlan_tci,
+ sizeof(flow->l2_key.inner_vlan_tci)) &&
+ !is_exactmatch(&flow->l2_mask.inner_vlan_tci,
+ sizeof(flow->l2_mask.inner_vlan_tci))) {
+ netdev_info(bp->dev, "Wildcard match unsupported for VLAN TCI\n");
+ return false;
+ }
+ if (bits_set(&flow->l2_key.inner_vlan_tpid,
+ sizeof(flow->l2_key.inner_vlan_tpid)) &&
+ !is_exactmatch(&flow->l2_mask.inner_vlan_tpid,
+ sizeof(flow->l2_mask.inner_vlan_tpid))) {
+ netdev_info(bp->dev, "Wildcard match unsupported for VLAN TPID\n");
+ return false;
+ }
+
+ /* Currently Ethertype must be set */
+ if (!is_exactmatch(&flow->l2_mask.ether_type,
+ sizeof(flow->l2_mask.ether_type))) {
+ netdev_info(bp->dev, "Wildcard match unsupported for Ethertype\n");
+ return false;
+ }
+
return true;
}
@@ -992,8 +1051,10 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
/* Check if there's another flow using the same tunnel decap.
* If not, add this tunnel to the table and resolve the other
- * tunnel header fileds
+ * tunnel header fields. Ignore src_port in the tunnel_key,
+ * since it is not required for decap filters.
*/
+ decap_key->tp_src = 0;
decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
&tc_info->decap_ht_params,
decap_key);
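
The is_exactmatch()/bits_set() helpers introduced above encode the TC offload rule spelled out in the netdev_info() messages: a classifier field may be left completely unset or matched exactly, but a partial wildcard cannot be offloaded. The following stand-alone sketch applies the same check to a source MAC; the helper bodies are copied from the hunk (with const added), while the main() data is purely illustrative.

#include <stdbool.h>
#include <stdio.h>

static bool is_exactmatch(const void *mask, int len)
{
	const unsigned char *p = mask;
	int i;

	for (i = 0; i < len; i++)
		if (p[i] != 0xff)
			return false;
	return true;
}

static bool bits_set(const void *key, int len)
{
	const unsigned char *p = key;
	int i;

	for (i = 0; i < len; i++)
		if (p[i] != 0)
			return true;
	return false;
}

int main(void)
{
	unsigned char smac[6]      = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };
	unsigned char smac_mask[6] = { 0xff, 0xff, 0xff, 0xff, 0x00, 0x00 };

	/* A set field with a partial mask is rejected, as in bnxt_tc_can_offload(). */
	if (bits_set(smac, sizeof(smac)) &&
	    !is_exactmatch(smac_mask, sizeof(smac_mask)))
		printf("Wildcard match unsupported for Source MAC\n");
	return 0;
}
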
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 26290403f38f..38f635cf8408 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -64,6 +64,31 @@ static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx)
return rc;
}
+static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
+ u16 *max_mtu)
+{
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_qcfg_input req = {0};
+ u16 mtu;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
+ req.fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ mtu = le16_to_cpu(resp->max_mtu_configured);
+ if (!mtu)
+ *max_mtu = BNXT_MAX_MTU;
+ else
+ *max_mtu = mtu;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static int bnxt_vf_rep_open(struct net_device *dev)
{
struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
@@ -365,6 +390,7 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
struct net_device *dev)
{
struct net_device *pf_dev = bp->dev;
+ u16 max_mtu;
dev->netdev_ops = &bnxt_vf_rep_netdev_ops;
dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops;
@@ -380,6 +406,10 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx,
dev->perm_addr);
ether_addr_copy(dev->dev_addr, dev->perm_addr);
+ /* Set VF-Rep's max-mtu to the corresponding VF's max-mtu */
+ if (!bnxt_hwrm_vfr_qcfg(bp, vf_rep, &max_mtu))
+ dev->max_mtu = max_mtu;
+ dev->min_mtu = ETH_ZLEN;
}
static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 9c2567b0d93e..dfad93fca0a6 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -375,7 +375,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
* because generally mcdi responses are fast. After that, back off
* and poll once a jiffy (approximately)
*/
- spins = TICK_USEC;
+ spins = USER_TICK_USEC;
finish = jiffies + MCDI_RPC_TIMEOUT;
while (1) {
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index 5782733959f0..f4e93f5fc204 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -509,6 +509,10 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
if(x < 0 || x > comp->rslot_limit)
goto bad;
+ /* Check if the cstate is initialized */
+ if (!comp->rstate[x].initialized)
+ goto bad;
+
comp->flags &=~ SLF_TOSS;
comp->recv_current = x;
} else {
@@ -673,6 +677,7 @@ slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
if (cs->cs_tcp.doff > 5)
memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4);
cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2;
+ cs->initialized = true;
/* Put headers back on packet
* Neither header checksum is recalculated
*/
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a1ba262f40ad..28583aa0c17d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -743,8 +743,15 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
static void tun_detach(struct tun_file *tfile, bool clean)
{
+ struct tun_struct *tun;
+ struct net_device *dev;
+
rtnl_lock();
+ tun = rtnl_dereference(tfile->tun);
+ dev = tun ? tun->dev : NULL;
__tun_detach(tfile, clean);
+ if (dev)
+ netdev_state_change(dev);
rtnl_unlock();
}
@@ -2562,10 +2569,15 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
/* One or more queue has already been attached, no need
* to initialize the device again.
*/
+ netdev_state_change(dev);
return 0;
}
- }
- else {
+
+ tun->flags = (tun->flags & ~TUN_FEATURES) |
+ (ifr->ifr_flags & TUN_FEATURES);
+
+ netdev_state_change(dev);
+ } else {
char *name;
unsigned long flags = 0;
int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
@@ -2642,6 +2654,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
+ tun->flags = (tun->flags & ~TUN_FEATURES) |
+ (ifr->ifr_flags & TUN_FEATURES);
+
INIT_LIST_HEAD(&tun->disabled);
err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI);
if (err < 0)
@@ -2656,9 +2671,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun_debug(KERN_INFO, tun, "tun_set_iff\n");
- tun->flags = (tun->flags & ~TUN_FEATURES) |
- (ifr->ifr_flags & TUN_FEATURES);
-
/* Make sure persistent devices do not get stuck in
* xoff state.
*/
@@ -2805,6 +2817,9 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
} else
ret = -EINVAL;
+ if (ret >= 0)
+ netdev_state_change(tun->dev);
+
unlock:
rtnl_unlock();
return ret;
@@ -2845,6 +2860,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
unsigned int ifindex;
int le;
int ret;
+ bool do_notify = false;
if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
(_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
@@ -2941,10 +2957,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
if (arg && !(tun->flags & IFF_PERSIST)) {
tun->flags |= IFF_PERSIST;
__module_get(THIS_MODULE);
+ do_notify = true;
}
if (!arg && (tun->flags & IFF_PERSIST)) {
tun->flags &= ~IFF_PERSIST;
module_put(THIS_MODULE);
+ do_notify = true;
}
tun_debug(KERN_INFO, tun, "persist %s\n",
@@ -2959,6 +2977,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
}
tun->owner = owner;
+ do_notify = true;
tun_debug(KERN_INFO, tun, "owner set to %u\n",
from_kuid(&init_user_ns, tun->owner));
break;
@@ -2971,6 +2990,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
}
tun->group = group;
+ do_notify = true;
tun_debug(KERN_INFO, tun, "group set to %u\n",
from_kgid(&init_user_ns, tun->group));
break;
@@ -3130,6 +3150,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
}
+ if (do_notify)
+ netdev_state_change(tun->dev);
+
unlock:
rtnl_unlock();
if (tun)
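
The tun changes above defer user-space notification: ioctls that touch persistent state (IFF_PERSIST, owner, group) only set do_notify, and a single netdev_state_change() runs before the rtnl unlock instead of one call per field. A simplified, compilable analogue of that deferred-notification pattern is sketched below; the struct and function names are invented for the example, and only real changes trigger the event.

#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	unsigned int owner;
	unsigned int group;
	bool persist;
};

/* Stands in for netdev_state_change(): one event per settings update. */
static void notify_state_change(const struct dev_state *d)
{
	printf("state change: owner=%u group=%u persist=%d\n",
	       d->owner, d->group, d->persist);
}

static void apply_settings(struct dev_state *d, unsigned int owner,
			   unsigned int group, bool persist)
{
	bool do_notify = false;

	if (d->owner != owner) {
		d->owner = owner;
		do_notify = true;
	}
	if (d->group != group) {
		d->group = group;
		do_notify = true;
	}
	if (d->persist != persist) {
		d->persist = persist;
		do_notify = true;
	}

	if (do_notify)	/* single notification after all updates */
		notify_state_change(d);
}

int main(void)
{
	struct dev_state d = { 0 };

	apply_settings(&d, 1000, 1000, true);
	return 0;
}
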
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index fff4b13eece2..5c42cf81a08b 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -902,6 +902,12 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
+ /* Cinterion AHS3 modem by GEMALTO */
+ USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0055, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index aff105f5f58c..0867f7275852 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -928,7 +928,8 @@ static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
offset += 0x100;
else
ret = -EINVAL;
- ret = lan78xx_read_raw_otp(dev, offset, length, data);
+ if (!ret)
+ ret = lan78xx_read_raw_otp(dev, offset, length, data);
}
return ret;
@@ -2502,7 +2503,7 @@ static void lan78xx_init_stats(struct lan78xx_net *dev)
dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
- lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
+ set_bit(EVENT_STAT_UPDATE, &dev->flags);
}
static int lan78xx_open(struct net_device *net)
@@ -2514,10 +2515,6 @@ static int lan78xx_open(struct net_device *net)
if (ret < 0)
goto out;
- ret = lan78xx_reset(dev);
- if (ret < 0)
- goto done;
-
phy_start(net->phydev);
netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 197a6ba9700f..9df4f71e58ca 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -376,6 +376,15 @@ static void nvme_put_ns(struct nvme_ns *ns)
kref_put(&ns->kref, nvme_free_ns);
}
+static inline void nvme_clear_nvme_request(struct request *req)
+{
+ if (!(req->rq_flags & RQF_DONTPREP)) {
+ nvme_req(req)->retries = 0;
+ nvme_req(req)->flags = 0;
+ req->rq_flags |= RQF_DONTPREP;
+ }
+}
+
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
@@ -392,6 +401,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
return req;
req->cmd_flags |= REQ_FAILFAST_DRIVER;
+ nvme_clear_nvme_request(req);
nvme_req(req)->cmd = cmd;
return req;
@@ -608,11 +618,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
{
blk_status_t ret = BLK_STS_OK;
- if (!(req->rq_flags & RQF_DONTPREP)) {
- nvme_req(req)->retries = 0;
- nvme_req(req)->flags = 0;
- req->rq_flags |= RQF_DONTPREP;
- }
+ nvme_clear_nvme_request(req);
switch (req_op(req)) {
case REQ_OP_DRV_IN:
@@ -742,6 +748,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
return PTR_ERR(req);
req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+ nvme_req(req)->flags |= NVME_REQ_USERCMD;
if (ubuffer && bufflen) {
ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
@@ -826,7 +833,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
}
}
-void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
+static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
if (unlikely(ctrl->kato == 0))
return;
@@ -836,7 +843,6 @@ void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
-EXPORT_SYMBOL_GPL(nvme_start_keep_alive);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
@@ -1103,7 +1109,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
}
if (ctrl->effects)
- effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
+ effects = le32_to_cpu(ctrl->effects->acs[opcode]);
else
effects = nvme_known_admin_effects(opcode);
@@ -2220,7 +2226,7 @@ out_unlock:
int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
u8 log_page, void *log,
- size_t size, size_t offset)
+ size_t size, u64 offset)
{
struct nvme_command c = { };
unsigned long dwlen = size / 4 - 1;
@@ -2235,8 +2241,8 @@ int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
c.get_log_page.lid = log_page;
c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
- c.get_log_page.lpol = cpu_to_le32(offset & ((1ULL << 32) - 1));
- c.get_log_page.lpou = cpu_to_le32(offset >> 32ULL);
+ c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
+ c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}
@@ -2833,7 +2839,9 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
goto out_free_head;
head->instance = ret;
INIT_LIST_HEAD(&head->list);
- init_srcu_struct(&head->srcu);
+ ret = init_srcu_struct(&head->srcu);
+ if (ret)
+ goto out_ida_remove;
head->subsys = ctrl->subsys;
head->ns_id = nsid;
kref_init(&head->ref);
@@ -2855,6 +2863,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
return head;
out_cleanup_srcu:
cleanup_srcu_struct(&head->srcu);
+out_ida_remove:
ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
out_free_head:
kfree(head);
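
The nvme_get_log_ext() change above widens the log page offset to u64 and fills the Log Page Offset Lower/Upper command fields with lower_32_bits()/upper_32_bits(); the previous size_t parameter could not carry offsets above 4 GiB on 32-bit builds. A stand-alone illustration of the split follows; the two macros mirror the kernel definitions, and the offset value is arbitrary.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffffULL))
#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))

int main(void)
{
	uint64_t offset = 0x123456789abcdef0ULL;

	/* These two halves populate the lpol/lpou fields of Get Log Page. */
	printf("lpol=0x%08" PRIx32 " lpou=0x%08" PRIx32 "\n",
	       lower_32_bits(offset), upper_32_bits(offset));
	return 0;
}
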
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 8f0f34d06d46..124c458806df 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -536,6 +536,85 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
return NULL;
}
+blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl, struct request *rq,
+ bool queue_live, bool is_connected)
+{
+ struct nvme_command *cmd = nvme_req(rq)->cmd;
+
+ if (likely(ctrl->state == NVME_CTRL_LIVE && is_connected))
+ return BLK_STS_OK;
+
+ switch (ctrl->state) {
+ case NVME_CTRL_DELETING:
+ goto reject_io;
+
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_CONNECTING:
+ if (!is_connected)
+ /*
+ * This is the case of starting a new
+ * association but connectivity was lost
+ * before it was fully created. We need to
+ * error the commands used to initialize the
+ * controller so the reconnect can go into a
+ * retry attempt. The commands should all be
+ * marked REQ_FAILFAST_DRIVER, which will hit
+ * the reject path below. Anything else will
+ * be queued while the state settles.
+ */
+ goto reject_or_queue_io;
+
+ if ((queue_live &&
+ !(nvme_req(rq)->flags & NVME_REQ_USERCMD)) ||
+ (!queue_live && blk_rq_is_passthrough(rq) &&
+ cmd->common.opcode == nvme_fabrics_command &&
+ cmd->fabrics.fctype == nvme_fabrics_type_connect))
+ /*
+ * If queue is live, allow only commands that
+ * are internally generated pass through. These
+ * are commands on the admin queue to initialize
+ * the controller. This will reject any ioctl
+ * admin cmds received while initializing.
+ *
+ * If the queue is not live, allow only a
+ * connect command. This will reject any ioctl
+ * admin cmd as well as initialization commands
+ * if the controller reverted the queue to non-live.
+ */
+ return BLK_STS_OK;
+
+ /*
+ * fall-thru to the reject_or_queue_io clause
+ */
+ break;
+
+ /* these cases fall-thru
+ * case NVME_CTRL_LIVE:
+ * case NVME_CTRL_RESETTING:
+ */
+ default:
+ break;
+ }
+
+reject_or_queue_io:
+ /*
+ * Any other new io is something we're not in a state to send
+ * to the device. Default action is to busy it and retry it
+ * after the controller state is recovered. However, anything
+ * marked for failfast or nvme multipath is immediately failed.
+ * Note: commands used to initialize the controller will be
+ * marked for failfast.
+ * Note: nvme cli/ioctl commands are marked for failfast.
+ */
+ if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+ return BLK_STS_RESOURCE;
+
+reject_io:
+ nvme_req(rq)->status = NVME_SC_ABORT_REQ;
+ return BLK_STS_IOERR;
+}
+EXPORT_SYMBOL_GPL(nvmf_check_if_ready);
+
static const match_table_t opt_tokens = {
{ NVMF_OPT_TRANSPORT, "transport=%s" },
{ NVMF_OPT_TRADDR, "traddr=%s" },
@@ -608,8 +687,10 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->discovery_nqn =
!(strcmp(opts->subsysnqn,
NVME_DISC_SUBSYS_NAME));
- if (opts->discovery_nqn)
+ if (opts->discovery_nqn) {
+ opts->kato = 0;
opts->nr_io_queues = 0;
+ }
break;
case NVMF_OPT_TRADDR:
p = match_strdup(args);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index a3145d90c1d2..ef46c915b7b5 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -157,36 +157,7 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
void nvmf_free_options(struct nvmf_ctrl_options *opts);
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
-
-static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
- struct request *rq)
-{
- struct nvme_command *cmd = nvme_req(rq)->cmd;
-
- /*
- * We cannot accept any other command until the connect command has
- * completed, so only allow connect to pass.
- */
- if (!blk_rq_is_passthrough(rq) ||
- cmd->common.opcode != nvme_fabrics_command ||
- cmd->fabrics.fctype != nvme_fabrics_type_connect) {
- /*
- * Connecting state means transport disruption or initial
- * establishment, which can take a long time and even might
- * fail permanently, fail fast to give upper layers a chance
- * to failover.
- * Deleting state means that the ctrl will never accept commands
- * again, fail it permanently.
- */
- if (ctrl->state == NVME_CTRL_CONNECTING ||
- ctrl->state == NVME_CTRL_DELETING) {
- nvme_req(rq)->status = NVME_SC_ABORT_REQ;
- return BLK_STS_IOERR;
- }
- return BLK_STS_RESOURCE; /* try again later */
- }
-
- return BLK_STS_OK;
-}
+blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl,
+ struct request *rq, bool queue_live, bool is_connected);
#endif /* _NVME_FABRICS_H */
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index c6e719b2f3ca..6cb26bcf6ec0 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2277,14 +2277,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
return BLK_STS_OK;
}
-static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
- struct request *rq)
-{
- if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags)))
- return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
- return BLK_STS_OK;
-}
-
static blk_status_t
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
@@ -2300,7 +2292,9 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
u32 data_len;
blk_status_t ret;
- ret = nvme_fc_is_ready(queue, rq);
+ ret = nvmf_check_if_ready(&queue->ctrl->ctrl, rq,
+ test_bit(NVME_FC_Q_LIVE, &queue->flags),
+ ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE);
if (unlikely(ret))
return ret;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index cf93690b3ffc..061fecfd44f5 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -105,6 +105,7 @@ struct nvme_request {
enum {
NVME_REQ_CANCELLED = (1 << 0),
+ NVME_REQ_USERCMD = (1 << 1),
};
static inline struct nvme_request *nvme_req(struct request *req)
@@ -422,7 +423,6 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
unsigned timeout, int qid, int at_head,
blk_mq_req_flags_t flags);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
-void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
@@ -430,7 +430,7 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
- u8 log_page, void *log, size_t size, size_t offset);
+ u8 log_page, void *log, size_t size, u64 offset);
extern const struct attribute_group nvme_ns_id_attr_group;
extern const struct block_device_operations nvme_ns_head_ops;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 295fbec1e5f2..fbc71fac6f1e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -84,6 +84,7 @@ struct nvme_dev {
struct dma_pool *prp_small_pool;
unsigned online_queues;
unsigned max_qid;
+ unsigned int num_vecs;
int q_depth;
u32 db_stride;
void __iomem *bar;
@@ -414,7 +415,8 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
struct nvme_dev *dev = set->driver_data;
- return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev), 0);
+ return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev),
+ dev->num_vecs > 1 ? 1 /* admin queue */ : 0);
}
/**
@@ -1380,8 +1382,7 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
return 0;
}
-static int nvme_alloc_queue(struct nvme_dev *dev, int qid,
- int depth, int node)
+static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
{
struct nvme_queue *nvmeq = &dev->queues[qid];
@@ -1457,7 +1458,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
nvmeq->sq_cmds_io = dev->cmb + offset;
}
- nvmeq->cq_vector = qid - 1;
+ /*
+ * A queue's vector matches the queue identifier unless the controller
+ * has only one vector available.
+ */
+ nvmeq->cq_vector = dev->num_vecs == 1 ? 0 : qid;
result = adapter_alloc_cq(dev, qid, nvmeq);
if (result < 0)
goto release_vector;
@@ -1596,8 +1601,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
if (result < 0)
return result;
- result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
- dev_to_node(dev->dev));
+ result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
if (result)
return result;
@@ -1630,9 +1634,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
int ret = 0;
for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
- /* vector == qid - 1, match nvme_create_queue */
- if (nvme_alloc_queue(dev, i, dev->q_depth,
- pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
+ if (nvme_alloc_queue(dev, i, dev->q_depth)) {
ret = -ENOMEM;
break;
}
@@ -1914,6 +1916,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
int result, nr_io_queues;
unsigned long size;
+ struct irq_affinity affd = {
+ .pre_vectors = 1
+ };
+
nr_io_queues = num_possible_cpus();
result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
if (result < 0)
@@ -1949,11 +1955,12 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
* setting up the full range we need.
*/
pci_free_irq_vectors(pdev);
- nr_io_queues = pci_alloc_irq_vectors(pdev, 1, nr_io_queues,
- PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
- if (nr_io_queues <= 0)
+ result = pci_alloc_irq_vectors_affinity(pdev, 1, nr_io_queues + 1,
+ PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+ if (result <= 0)
return -EIO;
- dev->max_qid = nr_io_queues;
+ dev->num_vecs = result;
+ dev->max_qid = max(result - 1, 1);
/*
* Should investigate if there's a performance win from allocating
@@ -2201,7 +2208,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_stop_queues(&dev->ctrl);
- if (!dead) {
+ if (!dead && dev->ctrl.queue_count > 0) {
/*
* If the controller is still alive tell it to stop using the
* host memory buffer. In theory the shutdown / reset should
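
With the irq_affinity pre_vectors = 1 change above, pci_alloc_irq_vectors_affinity() reserves vector 0 for the admin queue, and I/O queue qid lands on vector qid unless the controller only obtained a single vector, in which case everything shares vector 0; this is the cq_vector assignment added to nvme_create_queue(). A tiny stand-alone helper expressing that mapping is shown below; the function name is invented, the logic is copied from the hunk.

#include <stdio.h>

/* Mirrors the cq_vector assignment in the patched nvme_create_queue(). */
static unsigned int queue_vector(unsigned int num_vecs, unsigned int qid)
{
	return num_vecs == 1 ? 0 : qid;
}

int main(void)
{
	unsigned int qid;

	/* e.g. five vectors allocated: admin queue on 0, I/O queues 1..4 on 1..4 */
	for (qid = 1; qid <= 4; qid++)
		printf("qid %u -> vector %u\n", qid, queue_vector(5, qid));
	return 0;
}
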
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 758537e9ba07..1eb4438a8763 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1601,17 +1601,6 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
return BLK_EH_HANDLED;
}
-/*
- * We cannot accept any other command until the Connect command has completed.
- */
-static inline blk_status_t
-nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
-{
- if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags)))
- return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
- return BLK_STS_OK;
-}
-
static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -1627,7 +1616,8 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
WARN_ON_ONCE(rq->tag < 0);
- ret = nvme_rdma_is_ready(queue, rq);
+ ret = nvmf_check_if_ready(&queue->ctrl->ctrl, rq,
+ test_bit(NVME_RDMA_Q_LIVE, &queue->flags), true);
if (unlikely(ret))
return ret;
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 90dcdc40ac71..5e0e9fcc0d4d 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -178,6 +178,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->vid = 0;
id->ssvid = 0;
+ memset(id->sn, ' ', sizeof(id->sn));
bin2hex(id->sn, &ctrl->subsys->serial,
min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index a72425d8bce0..231e04e0a496 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -59,7 +59,7 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
- memcpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
+ strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
}
/*
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 28bbdff4a88b..cd2344179673 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -173,8 +173,8 @@ static void nvmet_execute_write_zeroes(struct nvmet_req *req)
sector = le64_to_cpu(write_zeroes->slba) <<
(req->ns->blksize_shift - 9);
- nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length)) <<
- (req->ns->blksize_shift - 9)) + 1;
+ nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
+ (req->ns->blksize_shift - 9));
if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
GFP_KERNEL, &bio, 0))
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index a350765d2d5c..31fdfba556a8 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -149,14 +149,6 @@ nvme_loop_timeout(struct request *rq, bool reserved)
return BLK_EH_HANDLED;
}
-static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue,
- struct request *rq)
-{
- if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags)))
- return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
- return BLK_STS_OK;
-}
-
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -166,7 +158,8 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
blk_status_t ret;
- ret = nvme_loop_is_ready(queue, req);
+ ret = nvmf_check_if_ready(&queue->ctrl->ctrl, req,
+ test_bit(NVME_LOOP_Q_LIVE, &queue->flags), true);
if (unlikely(ret))
return ret;
@@ -174,15 +167,12 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ret)
return ret;
+ blk_mq_start_request(req);
iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
iod->req.port = nvmet_loop_port;
if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
- &queue->nvme_sq, &nvme_loop_ops)) {
- nvme_cleanup_cmd(req);
- blk_mq_start_request(req);
- nvme_loop_queue_response(&iod->req);
+ &queue->nvme_sq, &nvme_loop_ops))
return BLK_STS_OK;
- }
if (blk_rq_payload_bytes(req)) {
iod->sg_table.sgl = iod->first_sgl;
@@ -196,8 +186,6 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
iod->req.transfer_len = blk_rq_payload_bytes(req);
}
- blk_mq_start_request(req);
-
schedule_work(&iod->work);
return BLK_STS_OK;
}
diff --git a/drivers/of/unittest-data/Makefile b/drivers/of/unittest-data/Makefile
index 8fd0ea4b92b0..013d85e694c6 100644
--- a/drivers/of/unittest-data/Makefile
+++ b/drivers/of/unittest-data/Makefile
@@ -21,8 +21,6 @@ obj-$(CONFIG_OF_OVERLAY) += overlay.dtb.o \
overlay_bad_symbol.dtb.o \
overlay_base.dtb.o
-targets += $(foreach suffix, dtb dtb.S, $(patsubst %.dtb.o,%.$(suffix),$(obj-y)))
-
# enable creation of __symbols__ node
DTC_FLAGS_overlay += -@
DTC_FLAGS_overlay_bad_phandle += -@
@@ -32,7 +30,3 @@ DTC_FLAGS_testcases += -@
# suppress warnings about intentional errors
DTC_FLAGS_testcases += -Wno-interrupts_property
-
-.PRECIOUS: \
- $(obj)/%.dtb.S \
- $(obj)/%.dtb
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index aa86e904f93c..e597655a5643 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -5628,7 +5628,6 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
return;
}
- pci_info(dev, "Disabling memory decoding and releasing memory resources\n");
pci_read_config_word(dev, PCI_COMMAND, &command);
command &= ~PCI_COMMAND_MEMORY;
pci_write_config_word(dev, PCI_COMMAND, command);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 26141b1946ee..2990ad1e7c99 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4673,9 +4673,13 @@ static void quirk_no_ext_tags(struct pci_dev *pdev)
pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
#ifdef CONFIG_PCI_ATS
/*
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 0cd83a4bd4c8..d8ca40a97693 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -168,8 +168,6 @@ EXPORT_SYMBOL(pci_claim_resource);
void pci_disable_bridge_window(struct pci_dev *dev)
{
- pci_info(dev, "disabling bridge mem windows\n");
-
/* MMIO Base/Limit */
pci_write_config_dword(dev, PCI_MEMORY_BASE, 0x0000fff0);
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index d8599736a41a..5c47f451e43b 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -1,33 +1,20 @@
-/*
- * chromeos_laptop.c - Driver to instantiate Chromebook i2c/smbus devices.
- *
- * Author : Benson Leung <bleung@chromium.org>
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Driver to instantiate Chromebook i2c/smbus devices.
+//
+// Copyright (C) 2012 Google, Inc.
+// Author: Benson Leung <bleung@chromium.org>
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/dmi.h>
#include <linux/i2c.h>
-#include <linux/platform_data/atmel_mxt_ts.h>
#include <linux/input.h>
#include <linux/interrupt.h>
+#include <linux/ioport.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#define ATMEL_TP_I2C_ADDR 0x4b
#define ATMEL_TP_I2C_BL_ADDR 0x25
@@ -38,18 +25,11 @@
#define ISL_ALS_I2C_ADDR 0x44
#define TAOS_ALS_I2C_ADDR 0x29
-#define MAX_I2C_DEVICE_DEFERRALS 5
-
-static struct i2c_client *als;
-static struct i2c_client *tp;
-static struct i2c_client *ts;
-
static const char *i2c_adapter_names[] = {
"SMBus I801 adapter",
"i915 gmbus vga",
"i915 gmbus panel",
"Synopsys DesignWare I2C adapter",
- "Synopsys DesignWare I2C adapter",
};
/* Keep this enum consistent with i2c_adapter_names */
@@ -57,126 +37,41 @@ enum i2c_adapter_type {
I2C_ADAPTER_SMBUS = 0,
I2C_ADAPTER_VGADDC,
I2C_ADAPTER_PANEL,
- I2C_ADAPTER_DESIGNWARE_0,
- I2C_ADAPTER_DESIGNWARE_1,
-};
-
-enum i2c_peripheral_state {
- UNPROBED = 0,
- PROBED,
- TIMEDOUT,
+ I2C_ADAPTER_DESIGNWARE,
};
struct i2c_peripheral {
- int (*add)(enum i2c_adapter_type type);
- enum i2c_adapter_type type;
- enum i2c_peripheral_state state;
- int tries;
-};
-
-#define MAX_I2C_PERIPHERALS 4
+ struct i2c_board_info board_info;
+ unsigned short alt_addr;
-struct chromeos_laptop {
- struct i2c_peripheral i2c_peripherals[MAX_I2C_PERIPHERALS];
-};
-
-static struct chromeos_laptop *cros_laptop;
-
-static struct i2c_board_info cyapa_device = {
- I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
- .flags = I2C_CLIENT_WAKE,
-};
+ const char *dmi_name;
+ unsigned long irqflags;
+ struct resource irq_resource;
-static struct i2c_board_info elantech_device = {
- I2C_BOARD_INFO("elan_i2c", ELAN_TP_I2C_ADDR),
- .flags = I2C_CLIENT_WAKE,
-};
-
-static struct i2c_board_info isl_als_device = {
- I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
-};
-
-static struct i2c_board_info tsl2583_als_device = {
- I2C_BOARD_INFO("tsl2583", TAOS_ALS_I2C_ADDR),
-};
-
-static struct i2c_board_info tsl2563_als_device = {
- I2C_BOARD_INFO("tsl2563", TAOS_ALS_I2C_ADDR),
-};
-
-static int mxt_t19_keys[] = {
- KEY_RESERVED,
- KEY_RESERVED,
- KEY_RESERVED,
- KEY_RESERVED,
- KEY_RESERVED,
- BTN_LEFT
-};
-
-static struct mxt_platform_data atmel_224s_tp_platform_data = {
- .irqflags = IRQF_TRIGGER_FALLING,
- .t19_num_keys = ARRAY_SIZE(mxt_t19_keys),
- .t19_keymap = mxt_t19_keys,
- .suspend_mode = MXT_SUSPEND_T9_CTRL,
-};
+ enum i2c_adapter_type type;
+ u32 pci_devid;
-static struct i2c_board_info atmel_224s_tp_device = {
- I2C_BOARD_INFO("atmel_mxt_tp", ATMEL_TP_I2C_ADDR),
- .platform_data = &atmel_224s_tp_platform_data,
- .flags = I2C_CLIENT_WAKE,
+ struct i2c_client *client;
};
-static struct mxt_platform_data atmel_1664s_platform_data = {
- .irqflags = IRQF_TRIGGER_FALLING,
- .suspend_mode = MXT_SUSPEND_T9_CTRL,
+struct chromeos_laptop {
+ /*
+ * Note that we can't mark this pointer as const because
+ * i2c_new_probed_device() changes the passed-in I2C board info.
+ */
+ struct i2c_peripheral *i2c_peripherals;
+ unsigned int num_i2c_peripherals;
};
-static struct i2c_board_info atmel_1664s_device = {
- I2C_BOARD_INFO("atmel_mxt_ts", ATMEL_TS_I2C_ADDR),
- .platform_data = &atmel_1664s_platform_data,
- .flags = I2C_CLIENT_WAKE,
-};
+static const struct chromeos_laptop *cros_laptop;
-static struct i2c_client *__add_probed_i2c_device(
- const char *name,
- int bus,
- struct i2c_board_info *info,
- const unsigned short *alt_addr_list)
+static struct i2c_client *
+chromes_laptop_instantiate_i2c_device(struct i2c_adapter *adapter,
+ struct i2c_board_info *info,
+ unsigned short alt_addr)
{
- const struct dmi_device *dmi_dev;
- const struct dmi_dev_onboard *dev_data;
- struct i2c_adapter *adapter;
- struct i2c_client *client = NULL;
const unsigned short addr_list[] = { info->addr, I2C_CLIENT_END };
-
- if (bus < 0)
- return NULL;
- /*
- * If a name is specified, look for irq platform information stashed
- * in DMI_DEV_TYPE_DEV_ONBOARD by the Chrome OS custom system firmware.
- */
- if (name) {
- dmi_dev = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, name, NULL);
- if (!dmi_dev) {
- pr_err("%s failed to dmi find device %s.\n",
- __func__,
- name);
- return NULL;
- }
- dev_data = (struct dmi_dev_onboard *)dmi_dev->device_data;
- if (!dev_data) {
- pr_err("%s failed to get data from dmi for %s.\n",
- __func__, name);
- return NULL;
- }
- info->irq = dev_data->instance;
- }
-
- adapter = i2c_get_adapter(bus);
- if (!adapter) {
- pr_err("%s failed to get i2c adapter %d.\n", __func__, bus);
- return NULL;
- }
+ struct i2c_client *client;
/*
* Add the i2c device. If we can't detect it at the primary
@@ -184,339 +79,345 @@ static struct i2c_client *__add_probed_i2c_device(
* structure gets assigned primary address.
*/
client = i2c_new_probed_device(adapter, info, addr_list, NULL);
- if (!client && alt_addr_list) {
+ if (!client && alt_addr) {
struct i2c_board_info dummy_info = {
I2C_BOARD_INFO("dummy", info->addr),
};
+ const unsigned short alt_addr_list[] = {
+ alt_addr, I2C_CLIENT_END
+ };
struct i2c_client *dummy;
dummy = i2c_new_probed_device(adapter, &dummy_info,
alt_addr_list, NULL);
if (dummy) {
- pr_debug("%s %d-%02x is probed at %02x\n",
- __func__, bus, info->addr, dummy->addr);
+ pr_debug("%d-%02x is probed at %02x\n",
+ adapter->nr, info->addr, dummy->addr);
i2c_unregister_device(dummy);
client = i2c_new_device(adapter, info);
}
}
if (!client)
- pr_notice("%s failed to register device %d-%02x\n",
- __func__, bus, info->addr);
+ pr_debug("failed to register device %d-%02x\n",
+ adapter->nr, info->addr);
else
- pr_debug("%s added i2c device %d-%02x\n",
- __func__, bus, info->addr);
+ pr_debug("added i2c device %d-%02x\n",
+ adapter->nr, info->addr);
- i2c_put_adapter(adapter);
return client;
}
-struct i2c_lookup {
- const char *name;
- int instance;
- int n;
-};
-
-static int __find_i2c_adap(struct device *dev, void *data)
-{
- struct i2c_lookup *lookup = data;
- static const char *prefix = "i2c-";
- struct i2c_adapter *adapter;
-
- if (strncmp(dev_name(dev), prefix, strlen(prefix)) != 0)
- return 0;
- adapter = to_i2c_adapter(dev);
- if (strncmp(adapter->name, lookup->name, strlen(lookup->name)) == 0 &&
- lookup->n++ == lookup->instance)
- return 1;
- return 0;
-}
-
-static int find_i2c_adapter_num(enum i2c_adapter_type type)
-{
- struct device *dev = NULL;
- struct i2c_adapter *adapter;
- struct i2c_lookup lookup;
-
- memset(&lookup, 0, sizeof(lookup));
- lookup.name = i2c_adapter_names[type];
- lookup.instance = (type == I2C_ADAPTER_DESIGNWARE_1) ? 1 : 0;
-
- /* find the adapter by name */
- dev = bus_find_device(&i2c_bus_type, NULL, &lookup, __find_i2c_adap);
- if (!dev) {
- /* Adapters may appear later. Deferred probing will retry */
- pr_notice("%s: i2c adapter %s not found on system.\n", __func__,
- lookup.name);
- return -ENODEV;
- }
- adapter = to_i2c_adapter(dev);
- return adapter->nr;
-}
-
-/*
- * Takes a list of addresses in addrs as such :
- * { addr1, ... , addrn, I2C_CLIENT_END };
- * add_probed_i2c_device will use i2c_new_probed_device
- * and probe for devices at all of the addresses listed.
- * Returns NULL if no devices found.
- * See Documentation/i2c/instantiating-devices for more information.
- */
-static struct i2c_client *add_probed_i2c_device(
- const char *name,
- enum i2c_adapter_type type,
- struct i2c_board_info *info,
- const unsigned short *addrs)
+static bool chromeos_laptop_match_adapter_devid(struct device *dev, u32 devid)
{
- return __add_probed_i2c_device(name,
- find_i2c_adapter_num(type),
- info,
- addrs);
-}
+ struct pci_dev *pdev;
-/*
- * Probes for a device at a single address, the one provided by
- * info->addr.
- * Returns NULL if no device found.
- */
-static struct i2c_client *add_i2c_device(const char *name,
- enum i2c_adapter_type type,
- struct i2c_board_info *info)
-{
- return __add_probed_i2c_device(name,
- find_i2c_adapter_num(type),
- info,
- NULL);
-}
-
-static int setup_cyapa_tp(enum i2c_adapter_type type)
-{
- if (tp)
- return 0;
+ if (!dev_is_pci(dev))
+ return false;
- /* add cyapa touchpad */
- tp = add_i2c_device("trackpad", type, &cyapa_device);
- return (!tp) ? -EAGAIN : 0;
+ pdev = to_pci_dev(dev);
+ return devid == PCI_DEVID(pdev->bus->number, pdev->devfn);
}
-static int setup_atmel_224s_tp(enum i2c_adapter_type type)
+static void chromeos_laptop_check_adapter(struct i2c_adapter *adapter)
{
- const unsigned short addr_list[] = { ATMEL_TP_I2C_BL_ADDR,
- I2C_CLIENT_END };
- if (tp)
- return 0;
-
- /* add atmel mxt touchpad */
- tp = add_probed_i2c_device("trackpad", type,
- &atmel_224s_tp_device, addr_list);
- return (!tp) ? -EAGAIN : 0;
-}
+ struct i2c_peripheral *i2c_dev;
+ int i;
-static int setup_elantech_tp(enum i2c_adapter_type type)
-{
- if (tp)
- return 0;
+ for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
+ i2c_dev = &cros_laptop->i2c_peripherals[i];
- /* add elantech touchpad */
- tp = add_i2c_device("trackpad", type, &elantech_device);
- return (!tp) ? -EAGAIN : 0;
-}
+ /* Skip devices already created */
+ if (i2c_dev->client)
+ continue;
-static int setup_atmel_1664s_ts(enum i2c_adapter_type type)
-{
- const unsigned short addr_list[] = { ATMEL_TS_I2C_BL_ADDR,
- I2C_CLIENT_END };
- if (ts)
- return 0;
-
- /* add atmel mxt touch device */
- ts = add_probed_i2c_device("touchscreen", type,
- &atmel_1664s_device, addr_list);
- return (!ts) ? -EAGAIN : 0;
-}
+ if (strncmp(adapter->name, i2c_adapter_names[i2c_dev->type],
+ strlen(i2c_adapter_names[i2c_dev->type])))
+ continue;
-static int setup_isl29018_als(enum i2c_adapter_type type)
-{
- if (als)
- return 0;
+ if (i2c_dev->pci_devid &&
+ !chromeos_laptop_match_adapter_devid(adapter->dev.parent,
+ i2c_dev->pci_devid)) {
+ continue;
+ }
- /* add isl29018 light sensor */
- als = add_i2c_device("lightsensor", type, &isl_als_device);
- return (!als) ? -EAGAIN : 0;
+ i2c_dev->client =
+ chromes_laptop_instantiate_i2c_device(adapter,
+ &i2c_dev->board_info,
+ i2c_dev->alt_addr);
+ }
}
-static int setup_tsl2583_als(enum i2c_adapter_type type)
+static void chromeos_laptop_detach_i2c_client(struct i2c_client *client)
{
- if (als)
- return 0;
-
- /* add tsl2583 light sensor */
- als = add_i2c_device(NULL, type, &tsl2583_als_device);
- return (!als) ? -EAGAIN : 0;
-}
+ struct i2c_peripheral *i2c_dev;
+ int i;
-static int setup_tsl2563_als(enum i2c_adapter_type type)
-{
- if (als)
- return 0;
+ for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
+ i2c_dev = &cros_laptop->i2c_peripherals[i];
- /* add tsl2563 light sensor */
- als = add_i2c_device(NULL, type, &tsl2563_als_device);
- return (!als) ? -EAGAIN : 0;
+ if (i2c_dev->client == client)
+ i2c_dev->client = NULL;
+ }
}
-static int __init chromeos_laptop_dmi_matched(const struct dmi_system_id *id)
+static int chromeos_laptop_i2c_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
{
- cros_laptop = (void *)id->driver_data;
- pr_debug("DMI Matched %s.\n", id->ident);
+ struct device *dev = data;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ if (dev->type == &i2c_adapter_type)
+ chromeos_laptop_check_adapter(to_i2c_adapter(dev));
+ break;
+
+ case BUS_NOTIFY_REMOVED_DEVICE:
+ if (dev->type == &i2c_client_type)
+ chromeos_laptop_detach_i2c_client(to_i2c_client(dev));
+ break;
+ }
- /* Indicate to dmi_scan that processing is done. */
- return 1;
+ return 0;
}
-static int chromeos_laptop_probe(struct platform_device *pdev)
-{
- int i;
- int ret = 0;
-
- for (i = 0; i < MAX_I2C_PERIPHERALS; i++) {
- struct i2c_peripheral *i2c_dev;
-
- i2c_dev = &cros_laptop->i2c_peripherals[i];
-
- /* No more peripherals. */
- if (i2c_dev->add == NULL)
- break;
-
- if (i2c_dev->state == TIMEDOUT || i2c_dev->state == PROBED)
- continue;
-
- /*
- * Check that the i2c adapter is present.
- * -EPROBE_DEFER if missing as the adapter may appear much
- * later.
- */
- if (find_i2c_adapter_num(i2c_dev->type) == -ENODEV) {
- ret = -EPROBE_DEFER;
- continue;
- }
-
- /* Add the device. */
- if (i2c_dev->add(i2c_dev->type) == -EAGAIN) {
- /*
- * Set -EPROBE_DEFER a limited num of times
- * if device is not successfully added.
- */
- if (++i2c_dev->tries < MAX_I2C_DEVICE_DEFERRALS) {
- ret = -EPROBE_DEFER;
- } else {
- /* Ran out of tries. */
- pr_notice("%s: Ran out of tries for device.\n",
- __func__);
- i2c_dev->state = TIMEDOUT;
- }
- } else {
- i2c_dev->state = PROBED;
- }
- }
+static struct notifier_block chromeos_laptop_i2c_notifier = {
+ .notifier_call = chromeos_laptop_i2c_notifier_call,
+};
- return ret;
+#define DECLARE_CROS_LAPTOP(_name) \
+static const struct chromeos_laptop _name __initconst = { \
+ .i2c_peripherals = _name##_peripherals, \
+ .num_i2c_peripherals = ARRAY_SIZE(_name##_peripherals), \
}
-static struct chromeos_laptop samsung_series_5_550 = {
- .i2c_peripherals = {
- /* Touchpad. */
- { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
- /* Light Sensor. */
- { .add = setup_isl29018_als, I2C_ADAPTER_SMBUS },
+static struct i2c_peripheral samsung_series_5_550_peripherals[] __initdata = {
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_SMBUS,
+ },
+ /* Light Sensor. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
+ },
+ .dmi_name = "lightsensor",
+ .type = I2C_ADAPTER_SMBUS,
},
};
+DECLARE_CROS_LAPTOP(samsung_series_5_550);
-static struct chromeos_laptop samsung_series_5 = {
- .i2c_peripherals = {
- /* Light Sensor. */
- { .add = setup_tsl2583_als, I2C_ADAPTER_SMBUS },
+static struct i2c_peripheral samsung_series_5_peripherals[] __initdata = {
+ /* Light Sensor. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("tsl2583", TAOS_ALS_I2C_ADDR),
+ },
+ .type = I2C_ADAPTER_SMBUS,
},
};
+DECLARE_CROS_LAPTOP(samsung_series_5);
-static struct chromeos_laptop chromebook_pixel = {
- .i2c_peripherals = {
- /* Touch Screen. */
- { .add = setup_atmel_1664s_ts, I2C_ADAPTER_PANEL },
- /* Touchpad. */
- { .add = setup_atmel_224s_tp, I2C_ADAPTER_VGADDC },
- /* Light Sensor. */
- { .add = setup_isl29018_als, I2C_ADAPTER_PANEL },
- },
+static const int chromebook_pixel_tp_keys[] __initconst = {
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ BTN_LEFT
};
-static struct chromeos_laptop hp_chromebook_14 = {
- .i2c_peripherals = {
- /* Touchpad. */
- { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
+static const struct property_entry
+chromebook_pixel_trackpad_props[] __initconst = {
+ PROPERTY_ENTRY_U32_ARRAY("linux,gpio-keymap", chromebook_pixel_tp_keys),
+ { }
+};
+
+static struct i2c_peripheral chromebook_pixel_peripherals[] __initdata = {
+ /* Touch Screen. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("atmel_mxt_ts",
+ ATMEL_TS_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "touchscreen",
+ .irqflags = IRQF_TRIGGER_FALLING,
+ .type = I2C_ADAPTER_PANEL,
+ .alt_addr = ATMEL_TS_I2C_BL_ADDR,
+ },
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("atmel_mxt_tp",
+ ATMEL_TP_I2C_ADDR),
+ .properties =
+ chromebook_pixel_trackpad_props,
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .irqflags = IRQF_TRIGGER_FALLING,
+ .type = I2C_ADAPTER_VGADDC,
+ .alt_addr = ATMEL_TP_I2C_BL_ADDR,
+ },
+ /* Light Sensor. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
+ },
+ .dmi_name = "lightsensor",
+ .type = I2C_ADAPTER_PANEL,
},
};
+DECLARE_CROS_LAPTOP(chromebook_pixel);
-static struct chromeos_laptop dell_chromebook_11 = {
- .i2c_peripherals = {
- /* Touchpad. */
- { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
- /* Elan Touchpad option. */
- { .add = setup_elantech_tp, I2C_ADAPTER_DESIGNWARE_0 },
+static struct i2c_peripheral hp_chromebook_14_peripherals[] __initdata = {
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_DESIGNWARE,
},
};
+DECLARE_CROS_LAPTOP(hp_chromebook_14);
-static struct chromeos_laptop toshiba_cb35 = {
- .i2c_peripherals = {
- /* Touchpad. */
- { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
+static struct i2c_peripheral dell_chromebook_11_peripherals[] __initdata = {
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_DESIGNWARE,
+ },
+ /* Elan Touchpad option. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("elan_i2c", ELAN_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_DESIGNWARE,
},
};
+DECLARE_CROS_LAPTOP(dell_chromebook_11);
-static struct chromeos_laptop acer_c7_chromebook = {
- .i2c_peripherals = {
- /* Touchpad. */
- { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
+static struct i2c_peripheral toshiba_cb35_peripherals[] __initdata = {
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_DESIGNWARE,
},
};
+DECLARE_CROS_LAPTOP(toshiba_cb35);
-static struct chromeos_laptop acer_ac700 = {
- .i2c_peripherals = {
- /* Light Sensor. */
- { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },
+static struct i2c_peripheral acer_c7_chromebook_peripherals[] __initdata = {
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_SMBUS,
},
};
+DECLARE_CROS_LAPTOP(acer_c7_chromebook);
-static struct chromeos_laptop acer_c720 = {
- .i2c_peripherals = {
- /* Touchscreen. */
- { .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 },
- /* Touchpad. */
- { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
- /* Elan Touchpad option. */
- { .add = setup_elantech_tp, I2C_ADAPTER_DESIGNWARE_0 },
- /* Light Sensor. */
- { .add = setup_isl29018_als, I2C_ADAPTER_DESIGNWARE_1 },
+static struct i2c_peripheral acer_ac700_peripherals[] __initdata = {
+ /* Light Sensor. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("tsl2583", TAOS_ALS_I2C_ADDR),
+ },
+ .type = I2C_ADAPTER_SMBUS,
},
};
+DECLARE_CROS_LAPTOP(acer_ac700);
-static struct chromeos_laptop hp_pavilion_14_chromebook = {
- .i2c_peripherals = {
- /* Touchpad. */
- { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
+static struct i2c_peripheral acer_c720_peripherals[] __initdata = {
+ /* Touchscreen. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("atmel_mxt_ts",
+ ATMEL_TS_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "touchscreen",
+ .irqflags = IRQF_TRIGGER_FALLING,
+ .type = I2C_ADAPTER_DESIGNWARE,
+ .pci_devid = PCI_DEVID(0, PCI_DEVFN(0x15, 0x2)),
+ .alt_addr = ATMEL_TS_I2C_BL_ADDR,
+ },
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_DESIGNWARE,
+ .pci_devid = PCI_DEVID(0, PCI_DEVFN(0x15, 0x1)),
+ },
+ /* Elan Touchpad option. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("elan_i2c", ELAN_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_DESIGNWARE,
+ .pci_devid = PCI_DEVID(0, PCI_DEVFN(0x15, 0x1)),
+ },
+ /* Light Sensor. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
+ },
+ .dmi_name = "lightsensor",
+ .type = I2C_ADAPTER_DESIGNWARE,
+ .pci_devid = PCI_DEVID(0, PCI_DEVFN(0x15, 0x2)),
},
};
+DECLARE_CROS_LAPTOP(acer_c720);
-static struct chromeos_laptop cr48 = {
- .i2c_peripherals = {
- /* Light Sensor. */
- { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },
+static struct i2c_peripheral
+hp_pavilion_14_chromebook_peripherals[] __initdata = {
+ /* Touchpad. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+ },
+ .dmi_name = "trackpad",
+ .type = I2C_ADAPTER_SMBUS,
},
};
+DECLARE_CROS_LAPTOP(hp_pavilion_14_chromebook);
-#define _CBDD(board_) \
- .callback = chromeos_laptop_dmi_matched, \
- .driver_data = (void *)&board_
+static struct i2c_peripheral cr48_peripherals[] __initdata = {
+ /* Light Sensor. */
+ {
+ .board_info = {
+ I2C_BOARD_INFO("tsl2563", TAOS_ALS_I2C_ADDR),
+ },
+ .type = I2C_ADAPTER_SMBUS,
+ },
+};
+DECLARE_CROS_LAPTOP(cr48);
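Each per-board table above is wrapped into a chromeos_laptop descriptor by DECLARE_CROS_LAPTOP(); the macro itself is defined in an earlier part of this patch and is not visible in this hunk. Judging only from the fields that chromeos_laptop_prepare() consumes further down (i2c_peripherals and num_i2c_peripherals), a non-authoritative sketch of what such a macro would expand to:

/*
 * Sketch only; the real definition lives earlier in the patch. Assumed
 * to bundle a <name>_peripherals[] array and its length into the
 * chromeos_laptop descriptor that the DMI table below references.
 */
#define DECLARE_CROS_LAPTOP(_name)					\
static const struct chromeos_laptop _name __initconst = {		\
	.i2c_peripherals	= _name##_peripherals,			\
	.num_i2c_peripherals	= ARRAY_SIZE(_name##_peripherals),	\
}

The DMI table that follows then points .driver_data at these descriptors.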
static const struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
{
@@ -525,14 +426,14 @@ static const struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
},
- _CBDD(samsung_series_5_550),
+ .driver_data = (void *)&samsung_series_5_550,
},
{
.ident = "Samsung Series 5",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Alex"),
},
- _CBDD(samsung_series_5),
+ .driver_data = (void *)&samsung_series_5,
},
{
.ident = "Chromebook Pixel",
@@ -540,7 +441,7 @@ static const struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
},
- _CBDD(chromebook_pixel),
+ .driver_data = (void *)&chromebook_pixel,
},
{
.ident = "Wolf",
@@ -548,7 +449,7 @@ static const struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
DMI_MATCH(DMI_PRODUCT_NAME, "Wolf"),
},
- _CBDD(dell_chromebook_11),
+ .driver_data = (void *)&dell_chromebook_11,
},
{
.ident = "HP Chromebook 14",
@@ -556,7 +457,7 @@ static const struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
DMI_MATCH(DMI_PRODUCT_NAME, "Falco"),
},
- _CBDD(hp_chromebook_14),
+ .driver_data = (void *)&hp_chromebook_14,
},
{
.ident = "Toshiba CB35",
@@ -564,99 +465,214 @@ static const struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
DMI_MATCH(DMI_PRODUCT_NAME, "Leon"),
},
- _CBDD(toshiba_cb35),
+ .driver_data = (void *)&toshiba_cb35,
},
{
.ident = "Acer C7 Chromebook",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Parrot"),
},
- _CBDD(acer_c7_chromebook),
+ .driver_data = (void *)&acer_c7_chromebook,
},
{
.ident = "Acer AC700",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
},
- _CBDD(acer_ac700),
+ .driver_data = (void *)&acer_ac700,
},
{
.ident = "Acer C720",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Peppy"),
},
- _CBDD(acer_c720),
+ .driver_data = (void *)&acer_c720,
},
{
.ident = "HP Pavilion 14 Chromebook",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Butterfly"),
},
- _CBDD(hp_pavilion_14_chromebook),
+ .driver_data = (void *)&hp_pavilion_14_chromebook,
},
{
.ident = "Cr-48",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Mario"),
},
- _CBDD(cr48),
+ .driver_data = (void *)&cr48,
},
{ }
};
MODULE_DEVICE_TABLE(dmi, chromeos_laptop_dmi_table);
-static struct platform_device *cros_platform_device;
+static int __init chromeos_laptop_scan_adapter(struct device *dev, void *data)
+{
+ struct i2c_adapter *adapter;
-static struct platform_driver cros_platform_driver = {
- .driver = {
- .name = "chromeos_laptop",
- },
- .probe = chromeos_laptop_probe,
-};
+ adapter = i2c_verify_adapter(dev);
+ if (adapter)
+ chromeos_laptop_check_adapter(adapter);
+
+ return 0;
+}
+
+static int __init chromeos_laptop_get_irq_from_dmi(const char *dmi_name)
+{
+ const struct dmi_device *dmi_dev;
+ const struct dmi_dev_onboard *dev_data;
+
+ dmi_dev = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, dmi_name, NULL);
+ if (!dmi_dev) {
+ pr_err("failed to find DMI device '%s'\n", dmi_name);
+ return -ENOENT;
+ }
+
+ dev_data = dmi_dev->device_data;
+ if (!dev_data) {
+ pr_err("failed to get data from DMI for '%s'\n", dmi_name);
+ return -EINVAL;
+ }
+
+ return dev_data->instance;
+}
+
+static int __init chromeos_laptop_setup_irq(struct i2c_peripheral *i2c_dev)
+{
+ int irq;
+
+ if (i2c_dev->dmi_name) {
+ irq = chromeos_laptop_get_irq_from_dmi(i2c_dev->dmi_name);
+ if (irq < 0)
+ return irq;
+
+ i2c_dev->irq_resource = (struct resource)
+ DEFINE_RES_NAMED(irq, 1, NULL,
+ IORESOURCE_IRQ | i2c_dev->irqflags);
+ i2c_dev->board_info.resources = &i2c_dev->irq_resource;
+ i2c_dev->board_info.num_resources = 1;
+ }
+
+ return 0;
+}
+
+static struct chromeos_laptop * __init
+chromeos_laptop_prepare(const struct chromeos_laptop *src)
+{
+ struct chromeos_laptop *cros_laptop;
+ struct i2c_peripheral *i2c_dev;
+ struct i2c_board_info *info;
+ int error;
+ int i;
+
+ cros_laptop = kzalloc(sizeof(*cros_laptop), GFP_KERNEL);
+ if (!cros_laptop)
+ return ERR_PTR(-ENOMEM);
+
+ cros_laptop->i2c_peripherals = kmemdup(src->i2c_peripherals,
+ src->num_i2c_peripherals *
+ sizeof(*src->i2c_peripherals),
+ GFP_KERNEL);
+ if (!cros_laptop->i2c_peripherals) {
+ error = -ENOMEM;
+ goto err_free_cros_laptop;
+ }
+
+ cros_laptop->num_i2c_peripherals = src->num_i2c_peripherals;
+
+ for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
+ i2c_dev = &cros_laptop->i2c_peripherals[i];
+ info = &i2c_dev->board_info;
+
+ error = chromeos_laptop_setup_irq(i2c_dev);
+ if (error)
+ goto err_destroy_cros_peripherals;
+
+ /* We need to deep-copy properties */
+ if (info->properties) {
+ info->properties =
+ property_entries_dup(info->properties);
+ if (IS_ERR(info->properties)) {
+ error = PTR_ERR(info->properties);
+ goto err_destroy_cros_peripherals;
+ }
+ }
+ }
+
+ return cros_laptop;
+
+err_destroy_cros_peripherals:
+ while (--i >= 0) {
+ i2c_dev = &cros_laptop->i2c_peripherals[i];
+ info = &i2c_dev->board_info;
+ if (info->properties)
+ property_entries_free(info->properties);
+ }
+ kfree(cros_laptop->i2c_peripherals);
+err_free_cros_laptop:
+ kfree(cros_laptop);
+ return ERR_PTR(error);
+}
+
+static void chromeos_laptop_destroy(const struct chromeos_laptop *cros_laptop)
+{
+ struct i2c_peripheral *i2c_dev;
+ struct i2c_board_info *info;
+ int i;
+
+ for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
+ i2c_dev = &cros_laptop->i2c_peripherals[i];
+ info = &i2c_dev->board_info;
+
+ if (i2c_dev->client)
+ i2c_unregister_device(i2c_dev->client);
+
+ if (info->properties)
+ property_entries_free(info->properties);
+ }
+
+ kfree(cros_laptop->i2c_peripherals);
+ kfree(cros_laptop);
+}
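The scan helper above and the i2c bus notifier both funnel newly registered adapters into chromeos_laptop_check_adapter(), which is defined elsewhere in this patch and not shown in this hunk. As an illustrative sketch only, assuming a plain per-type match (the real code presumably also uses the adapter names and the pci_devid values recorded in the tables above) and using the i2c_new_device() API of this kernel generation:

/*
 * Hypothetical sketch, not the patch's actual implementation: walk the
 * prepared peripherals and instantiate a client on the given adapter
 * for each peripheral that matches and has no client yet.
 */
static void chromeos_laptop_check_adapter(struct i2c_adapter *adapter)
{
	struct i2c_peripheral *i2c_dev;
	int i;

	for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
		i2c_dev = &cros_laptop->i2c_peripherals[i];

		if (i2c_dev->client)
			continue;	/* already instantiated */

		/* chromeos_laptop_adapter_matches() is a hypothetical helper. */
		if (!chromeos_laptop_adapter_matches(adapter, i2c_dev->type))
			continue;

		i2c_dev->client =
			i2c_new_device(adapter, &i2c_dev->board_info);
	}
}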
static int __init chromeos_laptop_init(void)
{
- int ret;
+ const struct dmi_system_id *dmi_id;
+ int error;
- if (!dmi_check_system(chromeos_laptop_dmi_table)) {
- pr_debug("%s unsupported system.\n", __func__);
+ dmi_id = dmi_first_match(chromeos_laptop_dmi_table);
+ if (!dmi_id) {
+ pr_debug("unsupported system\n");
return -ENODEV;
}
- ret = platform_driver_register(&cros_platform_driver);
- if (ret)
- return ret;
+ pr_debug("DMI Matched %s\n", dmi_id->ident);
+
+ cros_laptop = chromeos_laptop_prepare((void *)dmi_id->driver_data);
+ if (IS_ERR(cros_laptop))
+ return PTR_ERR(cros_laptop);
- cros_platform_device = platform_device_alloc("chromeos_laptop", -1);
- if (!cros_platform_device) {
- ret = -ENOMEM;
- goto fail_platform_device1;
+ error = bus_register_notifier(&i2c_bus_type,
+ &chromeos_laptop_i2c_notifier);
+ if (error) {
+ pr_err("failed to register i2c bus notifier: %d\n", error);
+ chromeos_laptop_destroy(cros_laptop);
+ return error;
}
- ret = platform_device_add(cros_platform_device);
- if (ret)
- goto fail_platform_device2;
+ /*
+ * Scan adapters that have been registered before we installed
+ * the notifier to make sure we do not miss any devices.
+ */
+ i2c_for_each_dev(NULL, chromeos_laptop_scan_adapter);
return 0;
-
-fail_platform_device2:
- platform_device_put(cros_platform_device);
-fail_platform_device1:
- platform_driver_unregister(&cros_platform_driver);
- return ret;
}
static void __exit chromeos_laptop_exit(void)
{
- if (als)
- i2c_unregister_device(als);
- if (tp)
- i2c_unregister_device(tp);
- if (ts)
- i2c_unregister_device(ts);
-
- platform_device_unregister(cros_platform_device);
- platform_driver_unregister(&cros_platform_driver);
+ bus_unregister_notifier(&i2c_bus_type, &chromeos_laptop_i2c_notifier);
+ chromeos_laptop_destroy(cros_laptop);
}
module_init(chromeos_laptop_init);
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 0e88e18362c1..cc265ed8deb7 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -211,6 +211,58 @@ static int cros_ec_console_log_release(struct inode *inode, struct file *file)
return 0;
}
+static ssize_t cros_ec_pdinfo_read(struct file *file,
+ char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ char read_buf[EC_USB_PD_MAX_PORTS * 40], *p = read_buf;
+ struct cros_ec_debugfs *debug_info = file->private_data;
+ struct cros_ec_device *ec_dev = debug_info->ec->ec_dev;
+ struct {
+ struct cros_ec_command msg;
+ union {
+ struct ec_response_usb_pd_control_v1 resp;
+ struct ec_params_usb_pd_control params;
+ };
+ } __packed ec_buf;
+ struct cros_ec_command *msg;
+ struct ec_response_usb_pd_control_v1 *resp;
+ struct ec_params_usb_pd_control *params;
+ int i;
+
+ msg = &ec_buf.msg;
+ params = (struct ec_params_usb_pd_control *)msg->data;
+ resp = (struct ec_response_usb_pd_control_v1 *)msg->data;
+
+ msg->command = EC_CMD_USB_PD_CONTROL;
+ msg->version = 1;
+ msg->insize = sizeof(*resp);
+ msg->outsize = sizeof(*params);
+
+ /*
+ * Read status from all PD ports until failure, typically caused
+ * by attempting to read status on a port that doesn't exist.
+ */
+ for (i = 0; i < EC_USB_PD_MAX_PORTS; ++i) {
+ params->port = i;
+ params->role = 0;
+ params->mux = 0;
+ params->swap = 0;
+
+ if (cros_ec_cmd_xfer_status(ec_dev, msg) < 0)
+ break;
+
+ p += scnprintf(p, sizeof(read_buf) + read_buf - p,
+ "p%d: %s en:%.2x role:%.2x pol:%.2x\n", i,
+ resp->state, resp->enabled, resp->role,
+ resp->polarity);
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ read_buf, p - read_buf);
+}
+
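Each responding port contributes one line of the form "p<N>: <state> en:xx role:xx pol:xx", following the scnprintf() format above. A minimal userspace reader, assuming the EC's debugfs directory is named "cros_ec" (the actual name follows the EC platform device, so the path may differ):

/*
 * Sketch of a userspace dump of the new pdinfo node; the debugfs path
 * is an assumption based on the usual EC device name.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/cros_ec/pdinfo", "r");
	char line[128];

	if (!f) {
		perror("pdinfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}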
const struct file_operations cros_ec_console_log_fops = {
.owner = THIS_MODULE,
.open = cros_ec_console_log_open,
@@ -220,6 +272,13 @@ const struct file_operations cros_ec_console_log_fops = {
.release = cros_ec_console_log_release,
};
+const struct file_operations cros_ec_pdinfo_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = cros_ec_pdinfo_read,
+ .llseek = default_llseek,
+};
+
static int ec_read_version_supported(struct cros_ec_dev *ec)
{
struct ec_params_get_cmd_versions_v1 *params;
@@ -288,7 +347,7 @@ static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info)
init_waitqueue_head(&debug_info->log_wq);
if (!debugfs_create_file("console_log",
- S_IFREG | S_IRUGO,
+ S_IFREG | 0444,
debug_info->dir,
debug_info,
&cros_ec_console_log_fops))
@@ -341,7 +400,7 @@ static int cros_ec_create_panicinfo(struct cros_ec_debugfs *debug_info)
debug_info->panicinfo_blob.size = ret;
if (!debugfs_create_blob("panicinfo",
- S_IFREG | S_IRUGO,
+ S_IFREG | 0444,
debug_info->dir,
&debug_info->panicinfo_blob)) {
ret = -ENOMEM;
@@ -355,6 +414,15 @@ free:
return ret;
}
+static int cros_ec_create_pdinfo(struct cros_ec_debugfs *debug_info)
+{
+ if (!debugfs_create_file("pdinfo", 0444, debug_info->dir, debug_info,
+ &cros_ec_pdinfo_fops))
+ return -ENOMEM;
+
+ return 0;
+}
+
int cros_ec_debugfs_init(struct cros_ec_dev *ec)
{
struct cros_ec_platform *ec_platform = dev_get_platdata(ec->dev);
@@ -379,6 +447,10 @@ int cros_ec_debugfs_init(struct cros_ec_dev *ec)
if (ret)
goto remove_debugfs;
+ ret = cros_ec_create_pdinfo(debug_info);
+ if (ret)
+ goto remove_debugfs;
+
ec->debug_info = debug_info;
return 0;
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index af89e82eecd2..3682e1539251 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -31,6 +31,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
+#include <linux/suspend.h>
#define DRV_NAME "cros_ec_lpcs"
#define ACPI_DRV_NAME "GOOG0004"
@@ -235,6 +236,9 @@ static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data)
cros_ec_get_next_event(ec_dev, NULL) > 0)
blocking_notifier_call_chain(&ec_dev->event_notifier, 0,
ec_dev);
+
+ if (value == ACPI_NOTIFY_DEVICE_WAKE)
+ pm_system_wakeup();
}
static int cros_ec_lpc_probe(struct platform_device *pdev)
@@ -342,6 +346,18 @@ static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
},
},
{
+ /*
+ * If the box is running custom coreboot firmware then the
+ * DMI BIOS version string will not be matched by "Google_",
+ * but the system vendor string will still be matched by
+ * "GOOGLE".
+ */
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ },
+ },
+ {
/* x86-link, the Chromebook Pixel. */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index da0a719d32f7..5a6db3fe213a 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -34,10 +34,12 @@
#include <linux/types.h>
#include <linux/uaccess.h>
+#define to_cros_ec_dev(dev) container_of(dev, struct cros_ec_dev, class_dev)
+
/* Accessor functions */
-static ssize_t show_ec_reboot(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t reboot_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int count = 0;
@@ -48,9 +50,9 @@ static ssize_t show_ec_reboot(struct device *dev,
return count;
}
-static ssize_t store_ec_reboot(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t reboot_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
static const struct {
const char * const str;
@@ -70,8 +72,7 @@ static ssize_t store_ec_reboot(struct device *dev,
int got_cmd = 0, offset = 0;
int i;
int ret;
- struct cros_ec_dev *ec = container_of(dev,
- struct cros_ec_dev, class_dev);
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
msg = kmalloc(sizeof(*msg) + sizeof(*param), GFP_KERNEL);
if (!msg)
@@ -114,22 +115,16 @@ static ssize_t store_ec_reboot(struct device *dev,
msg->command = EC_CMD_REBOOT_EC + ec->cmd_offset;
msg->outsize = sizeof(*param);
msg->insize = 0;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
- if (ret < 0) {
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0)
count = ret;
- goto exit;
- }
- if (msg->result != EC_RES_SUCCESS) {
- dev_dbg(ec->dev, "EC result %d\n", msg->result);
- count = -EINVAL;
- }
exit:
kfree(msg);
return count;
}
-static ssize_t show_ec_version(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
static const char * const image_names[] = {"unknown", "RO", "RW"};
struct ec_response_get_version *r_ver;
@@ -138,8 +133,7 @@ static ssize_t show_ec_version(struct device *dev,
struct cros_ec_command *msg;
int ret;
int count = 0;
- struct cros_ec_dev *ec = container_of(dev,
- struct cros_ec_dev, class_dev);
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
msg = kmalloc(sizeof(*msg) + EC_HOST_PARAM_SIZE, GFP_KERNEL);
if (!msg)
@@ -150,17 +144,11 @@ static ssize_t show_ec_version(struct device *dev,
msg->command = EC_CMD_GET_VERSION + ec->cmd_offset;
msg->insize = sizeof(*r_ver);
msg->outsize = 0;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0) {
count = ret;
goto exit;
}
- if (msg->result != EC_RES_SUCCESS) {
- count = scnprintf(buf, PAGE_SIZE,
- "ERROR: EC returned %d\n", msg->result);
- goto exit;
- }
-
r_ver = (struct ec_response_get_version *)msg->data;
/* Strings should be null-terminated, but let's be sure. */
r_ver->version_string_ro[sizeof(r_ver->version_string_ro) - 1] = '\0';
@@ -237,14 +225,13 @@ exit:
return count;
}
-static ssize_t show_ec_flashinfo(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t flashinfo_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ec_response_flash_info *resp;
struct cros_ec_command *msg;
int ret;
- struct cros_ec_dev *ec = container_of(dev,
- struct cros_ec_dev, class_dev);
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
msg = kmalloc(sizeof(*msg) + sizeof(*resp), GFP_KERNEL);
if (!msg)
@@ -255,14 +242,9 @@ static ssize_t show_ec_flashinfo(struct device *dev,
msg->command = EC_CMD_FLASH_INFO + ec->cmd_offset;
msg->insize = sizeof(*resp);
msg->outsize = 0;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0)
goto exit;
- if (msg->result != EC_RES_SUCCESS) {
- ret = scnprintf(buf, PAGE_SIZE,
- "ERROR: EC returned %d\n", msg->result);
- goto exit;
- }
resp = (struct ec_response_flash_info *)msg->data;
@@ -276,21 +258,102 @@ exit:
return ret;
}
+/* Keyboard wake angle control */
+static ssize_t kb_wake_angle_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+ struct ec_response_motion_sense *resp;
+ struct ec_params_motion_sense *param;
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = kmalloc(sizeof(*msg) + EC_HOST_PARAM_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ param = (struct ec_params_motion_sense *)msg->data;
+ msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+ msg->version = 2;
+ param->cmd = MOTIONSENSE_CMD_KB_WAKE_ANGLE;
+ param->kb_wake_angle.data = EC_MOTION_SENSE_NO_VALUE;
+ msg->outsize = sizeof(*param);
+ msg->insize = sizeof(*resp);
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0)
+ goto exit;
+
+ resp = (struct ec_response_motion_sense *)msg->data;
+ ret = scnprintf(buf, PAGE_SIZE, "%d\n", resp->kb_wake_angle.ret);
+exit:
+ kfree(msg);
+ return ret;
+}
+
+static ssize_t kb_wake_angle_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+ struct ec_params_motion_sense *param;
+ struct cros_ec_command *msg;
+ u16 angle;
+ int ret;
+
+ ret = kstrtou16(buf, 0, &angle);
+ if (ret)
+ return ret;
+
+ msg = kmalloc(sizeof(*msg) + EC_HOST_PARAM_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ param = (struct ec_params_motion_sense *)msg->data;
+ msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+ msg->version = 2;
+ param->cmd = MOTIONSENSE_CMD_KB_WAKE_ANGLE;
+ param->kb_wake_angle.data = angle;
+ msg->outsize = sizeof(*param);
+ msg->insize = sizeof(struct ec_response_motion_sense);
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ kfree(msg);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
/* Module initialization */
-static DEVICE_ATTR(reboot, S_IWUSR | S_IRUGO, show_ec_reboot, store_ec_reboot);
-static DEVICE_ATTR(version, S_IRUGO, show_ec_version, NULL);
-static DEVICE_ATTR(flashinfo, S_IRUGO, show_ec_flashinfo, NULL);
+static DEVICE_ATTR_RW(reboot);
+static DEVICE_ATTR_RO(version);
+static DEVICE_ATTR_RO(flashinfo);
+static DEVICE_ATTR_RW(kb_wake_angle);
static struct attribute *__ec_attrs[] = {
+ &dev_attr_kb_wake_angle.attr,
&dev_attr_reboot.attr,
&dev_attr_version.attr,
&dev_attr_flashinfo.attr,
NULL,
};
+static umode_t cros_ec_ctrl_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+
+ if (a == &dev_attr_kb_wake_angle.attr && !ec->has_kb_wake_angle)
+ return 0;
+
+ return a->mode;
+}
+
struct attribute_group cros_ec_attr_group = {
.attrs = __ec_attrs,
+ .is_visible = cros_ec_ctrl_visible,
};
EXPORT_SYMBOL(cros_ec_attr_group);
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 38d49dbbf9b7..4635cb35008c 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -200,10 +200,10 @@ config PWM_IMX
will be called pwm-imx.
config PWM_JZ4740
- tristate "Ingenic JZ4740 PWM support"
- depends on MACH_JZ4740
+ tristate "Ingenic JZ47xx PWM support"
+ depends on MACH_INGENIC
help
- Generic PWM framework driver for Ingenic JZ4740 based
+ Generic PWM framework driver for Ingenic JZ47xx based
machines.
To compile this driver as a module, choose M here: the module
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index acd3ce8ecf3f..4fb1be246c44 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -401,7 +401,6 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
if (tcbpwm == NULL) {
err = -ENOMEM;
- dev_err(&pdev->dev, "failed to allocate memory\n");
goto err_free_tc;
}
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c
index 2ba5c3a398ff..08cbe8120588 100644
--- a/drivers/pwm/pwm-imx.c
+++ b/drivers/pwm/pwm-imx.c
@@ -35,6 +35,7 @@
#define MX3_PWMSAR 0x0C /* PWM Sample Register */
#define MX3_PWMPR 0x10 /* PWM Period Register */
#define MX3_PWMCR_PRESCALER(x) ((((x) - 1) & 0xFFF) << 4)
+#define MX3_PWMCR_STOPEN (1 << 25)
#define MX3_PWMCR_DOZEEN (1 << 24)
#define MX3_PWMCR_WAITEN (1 << 23)
#define MX3_PWMCR_DBGEN (1 << 22)
@@ -210,7 +211,7 @@ static int imx_pwm_apply_v2(struct pwm_chip *chip, struct pwm_device *pwm,
writel(period_cycles, imx->mmio_base + MX3_PWMPR);
cr = MX3_PWMCR_PRESCALER(prescale) |
- MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
+ MX3_PWMCR_STOPEN | MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
MX3_PWMCR_DBGEN | MX3_PWMCR_CLKSRC_IPG_HIGH |
MX3_PWMCR_EN;
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index a75ff3622450..a7b134af5e04 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -18,6 +18,7 @@
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
@@ -71,9 +72,15 @@ static void jz4740_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
uint32_t ctrl = jz4740_timer_get_ctrl(pwm->hwpwm);
+ /* Disable PWM output.
+ * In TCU2 mode (channel 1/2 on JZ4750+), this must be done before the
+ * counter is stopped, while in TCU1 mode the order does not matter.
+ */
ctrl &= ~JZ_TIMER_CTRL_PWM_ENABLE;
- jz4740_timer_disable(pwm->hwpwm);
jz4740_timer_set_ctrl(pwm->hwpwm, ctrl);
+
+ /* Stop counter */
+ jz4740_timer_disable(pwm->hwpwm);
}
static int jz4740_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -124,10 +131,29 @@ static int jz4740_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
+static int jz4740_pwm_set_polarity(struct pwm_chip *chip,
+ struct pwm_device *pwm, enum pwm_polarity polarity)
+{
+	uint32_t ctrl = jz4740_timer_get_ctrl(pwm->hwpwm);
+
+ switch (polarity) {
+ case PWM_POLARITY_NORMAL:
+ ctrl &= ~JZ_TIMER_CTRL_PWM_ACTIVE_LOW;
+ break;
+ case PWM_POLARITY_INVERSED:
+ ctrl |= JZ_TIMER_CTRL_PWM_ACTIVE_LOW;
+ break;
+ }
+
+ jz4740_timer_set_ctrl(pwm->hwpwm, ctrl);
+ return 0;
+}
+
static const struct pwm_ops jz4740_pwm_ops = {
.request = jz4740_pwm_request,
.free = jz4740_pwm_free,
.config = jz4740_pwm_config,
+ .set_polarity = jz4740_pwm_set_polarity,
.enable = jz4740_pwm_enable,
.disable = jz4740_pwm_disable,
.owner = THIS_MODULE,
@@ -149,6 +175,8 @@ static int jz4740_pwm_probe(struct platform_device *pdev)
jz4740->chip.ops = &jz4740_pwm_ops;
jz4740->chip.npwm = NUM_PWM;
jz4740->chip.base = -1;
+ jz4740->chip.of_xlate = of_pwm_xlate_with_flags;
+ jz4740->chip.of_pwm_n_cells = 3;
platform_set_drvdata(pdev, jz4740);
@@ -162,9 +190,20 @@ static int jz4740_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&jz4740->chip);
}
+#ifdef CONFIG_OF
+static const struct of_device_id jz4740_pwm_dt_ids[] = {
+ { .compatible = "ingenic,jz4740-pwm", },
+ { .compatible = "ingenic,jz4770-pwm", },
+ { .compatible = "ingenic,jz4780-pwm", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, jz4740_pwm_dt_ids);
+#endif
+
static struct platform_driver jz4740_pwm_driver = {
.driver = {
.name = "jz4740-pwm",
+ .of_match_table = of_match_ptr(jz4740_pwm_dt_ids),
},
.probe = jz4740_pwm_probe,
.remove = jz4740_pwm_remove,
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index f5d97e0ad52b..328c124773b2 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -29,7 +29,9 @@
#define PWMGDUR 0x0c
#define PWMWAVENUM 0x28
#define PWMDWIDTH 0x2c
+#define PWM45DWIDTH_FIXUP 0x30
#define PWMTHRES 0x30
+#define PWM45THRES_FIXUP 0x34
#define PWM_CLK_DIV_MAX 7
@@ -54,6 +56,7 @@ static const char * const mtk_pwm_clk_name[MTK_CLK_MAX] = {
struct mtk_pwm_platform_data {
unsigned int num_pwms;
+ bool pwm45_fixup;
};
/**
@@ -66,6 +69,7 @@ struct mtk_pwm_chip {
struct pwm_chip chip;
void __iomem *regs;
struct clk *clks[MTK_CLK_MAX];
+ const struct mtk_pwm_platform_data *soc;
};
static const unsigned int mtk_pwm_reg_offset[] = {
@@ -131,18 +135,25 @@ static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
{
struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip);
struct clk *clk = pc->clks[MTK_CLK_PWM1 + pwm->hwpwm];
- u32 resolution, clkdiv = 0;
+ u32 clkdiv = 0, cnt_period, cnt_duty, reg_width = PWMDWIDTH,
+ reg_thres = PWMTHRES;
+ u64 resolution;
int ret;
ret = mtk_pwm_clk_enable(chip, pwm);
if (ret < 0)
return ret;
- resolution = NSEC_PER_SEC / clk_get_rate(clk);
+	/* Use picosecond resolution for higher accuracy */
+ resolution = (u64)NSEC_PER_SEC * 1000;
+ do_div(resolution, clk_get_rate(clk));
- while (period_ns / resolution > 8191) {
+ cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000, resolution);
+ while (cnt_period > 8191) {
resolution *= 2;
clkdiv++;
+ cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000,
+ resolution);
}
if (clkdiv > PWM_CLK_DIV_MAX) {
@@ -151,9 +162,19 @@ static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
return -EINVAL;
}
+ if (pc->soc->pwm45_fixup && pwm->hwpwm > 2) {
+ /*
+ * PWM[4,5] has distinct offset for PWMDWIDTH and PWMTHRES
+		 * PWM[4,5] have distinct offsets for PWMDWIDTH and PWMTHRES
+		 * from the other PWMs on MT7623.
+ reg_width = PWM45DWIDTH_FIXUP;
+ reg_thres = PWM45THRES_FIXUP;
+ }
+
+ cnt_duty = DIV_ROUND_CLOSEST_ULL((u64)duty_ns * 1000, resolution);
mtk_pwm_writel(pc, pwm->hwpwm, PWMCON, BIT(15) | clkdiv);
- mtk_pwm_writel(pc, pwm->hwpwm, PWMDWIDTH, period_ns / resolution);
- mtk_pwm_writel(pc, pwm->hwpwm, PWMTHRES, duty_ns / resolution);
+ mtk_pwm_writel(pc, pwm->hwpwm, reg_width, cnt_period);
+ mtk_pwm_writel(pc, pwm->hwpwm, reg_thres, cnt_duty);
mtk_pwm_clk_disable(chip, pwm);
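To see why the picosecond scaling helps, take an assumed 26 MHz source clock (an illustrative value, not one from this patch): one tick is 10^12 / 26,000,000 ≈ 38461 ps, which the old NSEC_PER_SEC / clk_get_rate() integer division would truncate to 38 ns, skewing every period and duty conversion by roughly 1.2%. A standalone sketch of the same clkdiv search, with that case worked through:

/*
 * Standalone illustration of the clkdiv/cnt_period search above.
 * The 26 MHz clock and 1 ms period are assumed example inputs.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clk_rate = 26000000;	/* Hz, assumed */
	uint64_t period_ns = 1000000;	/* 1 ms requested period */
	uint64_t resolution = 1000000000000ULL / clk_rate;	/* ~38461 ps */
	uint64_t cnt_period;
	unsigned int clkdiv = 0;

	/* DIV_ROUND_CLOSEST_ULL equivalent. */
	cnt_period = (period_ns * 1000 + resolution / 2) / resolution;
	while (cnt_period > 8191) {	/* same bound as the driver loop */
		resolution *= 2;
		clkdiv++;
		cnt_period = (period_ns * 1000 + resolution / 2) / resolution;
	}

	/* Prints "clkdiv=2 cnt_period=6500" for the assumed inputs. */
	printf("clkdiv=%u cnt_period=%llu\n",
	       clkdiv, (unsigned long long)cnt_period);
	return 0;
}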
@@ -211,6 +232,7 @@ static int mtk_pwm_probe(struct platform_device *pdev)
data = of_device_get_match_data(&pdev->dev);
if (data == NULL)
return -EINVAL;
+ pc->soc = data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pc->regs = devm_ioremap_resource(&pdev->dev, res);
@@ -251,14 +273,17 @@ static int mtk_pwm_remove(struct platform_device *pdev)
static const struct mtk_pwm_platform_data mt2712_pwm_data = {
.num_pwms = 8,
+ .pwm45_fixup = false,
};
static const struct mtk_pwm_platform_data mt7622_pwm_data = {
.num_pwms = 6,
+ .pwm45_fixup = false,
};
static const struct mtk_pwm_platform_data mt7623_pwm_data = {
.num_pwms = 5,
+ .pwm45_fixup = true,
};
static const struct of_device_id mtk_pwm_of_match[] = {
@@ -280,5 +305,4 @@ static struct platform_driver mtk_pwm_driver = {
module_platform_driver(mtk_pwm_driver);
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
-MODULE_ALIAS("platform:mtk-pwm");
MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-puv3.c b/drivers/pwm/pwm-puv3.c
index ed6007b27585..754fd9a98f6b 100644
--- a/drivers/pwm/pwm-puv3.c
+++ b/drivers/pwm/pwm-puv3.c
@@ -107,10 +107,8 @@ static int pwm_probe(struct platform_device *pdev)
int ret;
puv3 = devm_kzalloc(&pdev->dev, sizeof(*puv3), GFP_KERNEL);
- if (puv3 == NULL) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ if (!puv3)
return -ENOMEM;
- }
puv3->clk = devm_clk_get(&pdev->dev, "OST_CLK");
if (IS_ERR(puv3->clk))
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 1c85ecc9e7ac..91d11f2e2fef 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -134,16 +134,12 @@ static int rcar_pwm_set_counter(struct rcar_pwm_chip *rp, int div, int duty_ns,
static int rcar_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip);
-
- return clk_prepare_enable(rp->clk);
+ return pm_runtime_get_sync(chip->dev);
}
static void rcar_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip);
-
- clk_disable_unprepare(rp->clk);
+ pm_runtime_put(chip->dev);
}
static int rcar_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -156,8 +152,12 @@ static int rcar_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (div < 0)
return div;
- /* Let the core driver set pwm->period if disabled and duty_ns == 0 */
- if (!pwm_is_enabled(pwm) && !duty_ns)
+ /*
+ * Let the core driver set pwm->period if disabled and duty_ns == 0.
+	 * However, this driver must not set a new duty_ns while the current
+	 * duty_cycle is not set.
+ */
+ if (!pwm_is_enabled(pwm) && !duty_ns && !pwm->state.duty_cycle)
return 0;
rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR);
@@ -258,11 +258,53 @@ static const struct of_device_id rcar_pwm_of_table[] = {
};
MODULE_DEVICE_TABLE(of, rcar_pwm_of_table);
+#ifdef CONFIG_PM_SLEEP
+static struct pwm_device *rcar_pwm_dev_to_pwm_dev(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rcar_pwm_chip *rcar_pwm = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = &rcar_pwm->chip;
+
+ return &chip->pwms[0];
+}
+
+static int rcar_pwm_suspend(struct device *dev)
+{
+ struct pwm_device *pwm = rcar_pwm_dev_to_pwm_dev(dev);
+
+ if (!test_bit(PWMF_REQUESTED, &pwm->flags))
+ return 0;
+
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
+static int rcar_pwm_resume(struct device *dev)
+{
+ struct pwm_device *pwm = rcar_pwm_dev_to_pwm_dev(dev);
+
+ if (!test_bit(PWMF_REQUESTED, &pwm->flags))
+ return 0;
+
+ pm_runtime_get_sync(dev);
+
+ rcar_pwm_config(pwm->chip, pwm, pwm->state.duty_cycle,
+ pwm->state.period);
+ if (pwm_is_enabled(pwm))
+ rcar_pwm_enable(pwm->chip, pwm);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+static SIMPLE_DEV_PM_OPS(rcar_pwm_pm_ops, rcar_pwm_suspend, rcar_pwm_resume);
+
static struct platform_driver rcar_pwm_driver = {
.probe = rcar_pwm_probe,
.remove = rcar_pwm_remove,
.driver = {
.name = "pwm-rcar",
+ .pm = &rcar_pwm_pm_ops,
.of_match_table = of_match_ptr(rcar_pwm_of_table),
}
};
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 1ac9e4384142..7c13e2505080 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* STM32 Low-Power Timer PWM driver
*
@@ -5,8 +6,6 @@
*
* Author: Gerald Baeza <gerald.baeza@st.com>
*
- * License terms: GNU General Public License (GPL), version 2
- *
* Inspired by Gerald Baeza's pwm-stm32 driver
*/
@@ -203,6 +202,8 @@ static int stm32_pwm_lp_probe(struct platform_device *pdev)
priv->chip.dev = &pdev->dev;
priv->chip.ops = &stm32_pwm_lp_ops;
priv->chip.npwm = 1;
+ priv->chip.of_xlate = of_pwm_xlate_with_flags;
+ priv->chip.of_pwm_n_cells = 3;
ret = pwmchip_add(&priv->chip);
if (ret < 0)
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index 6139512aab7b..2708212933f7 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics 2016
*
* Author: Gerald Baeza <gerald.baeza@st.com>
*
- * License terms: GNU General Public License (GPL), version 2
- *
* Inspired by timer-stm32.c from Maxime Coquelin
* pwm-atmel.c from Bo Shen
*/
@@ -21,7 +20,7 @@
struct stm32_pwm {
struct pwm_chip chip;
- struct device *dev;
+ struct mutex lock; /* protect pwm config/enable */
struct clk *clk;
struct regmap *regmap;
u32 max_arr;
@@ -214,9 +213,23 @@ static int stm32_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
}
+static int stm32_pwm_apply_locked(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct stm32_pwm *priv = to_stm32_pwm_dev(chip);
+ int ret;
+
+ /* protect common prescaler for all active channels */
+ mutex_lock(&priv->lock);
+ ret = stm32_pwm_apply(chip, pwm, state);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
static const struct pwm_ops stm32pwm_ops = {
.owner = THIS_MODULE,
- .apply = stm32_pwm_apply,
+ .apply = stm32_pwm_apply_locked,
};
static int stm32_pwm_set_breakinput(struct stm32_pwm *priv,
@@ -336,6 +349,7 @@ static int stm32_pwm_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ mutex_init(&priv->lock);
priv->regmap = ddata->regmap;
priv->clk = ddata->clk;
priv->max_arr = ddata->max_arr;
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 334199c58f1d..470d4f71e7eb 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -73,7 +73,6 @@ static const u32 prescaler_table[] = {
struct sun4i_pwm_data {
bool has_prescaler_bypass;
- bool has_rdy;
unsigned int npwm;
};
@@ -117,7 +116,8 @@ static void sun4i_pwm_get_state(struct pwm_chip *chip,
val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
- if ((val == PWM_PRESCAL_MASK) && sun4i_pwm->data->has_prescaler_bypass)
+ if ((PWM_REG_PRESCAL(val, pwm->hwpwm) == PWM_PRESCAL_MASK) &&
+ sun4i_pwm->data->has_prescaler_bypass)
prescaler = 1;
else
prescaler = prescaler_table[PWM_REG_PRESCAL(val, pwm->hwpwm)];
@@ -130,7 +130,8 @@ static void sun4i_pwm_get_state(struct pwm_chip *chip,
else
state->polarity = PWM_POLARITY_INVERSED;
- if (val & BIT_CH(PWM_CLK_GATING | PWM_EN, pwm->hwpwm))
+ if ((val & BIT_CH(PWM_CLK_GATING | PWM_EN, pwm->hwpwm)) ==
+ BIT_CH(PWM_CLK_GATING | PWM_EN, pwm->hwpwm))
state->enabled = true;
else
state->enabled = false;
@@ -311,52 +312,37 @@ static const struct pwm_ops sun4i_pwm_ops = {
.owner = THIS_MODULE,
};
-static const struct sun4i_pwm_data sun4i_pwm_data_a10 = {
+static const struct sun4i_pwm_data sun4i_pwm_dual_nobypass = {
.has_prescaler_bypass = false,
- .has_rdy = false,
.npwm = 2,
};
-static const struct sun4i_pwm_data sun4i_pwm_data_a10s = {
+static const struct sun4i_pwm_data sun4i_pwm_dual_bypass = {
.has_prescaler_bypass = true,
- .has_rdy = true,
.npwm = 2,
};
-static const struct sun4i_pwm_data sun4i_pwm_data_a13 = {
+static const struct sun4i_pwm_data sun4i_pwm_single_bypass = {
.has_prescaler_bypass = true,
- .has_rdy = true,
- .npwm = 1,
-};
-
-static const struct sun4i_pwm_data sun4i_pwm_data_a20 = {
- .has_prescaler_bypass = true,
- .has_rdy = true,
- .npwm = 2,
-};
-
-static const struct sun4i_pwm_data sun4i_pwm_data_h3 = {
- .has_prescaler_bypass = true,
- .has_rdy = true,
.npwm = 1,
};
static const struct of_device_id sun4i_pwm_dt_ids[] = {
{
.compatible = "allwinner,sun4i-a10-pwm",
- .data = &sun4i_pwm_data_a10,
+ .data = &sun4i_pwm_dual_nobypass,
}, {
.compatible = "allwinner,sun5i-a10s-pwm",
- .data = &sun4i_pwm_data_a10s,
+ .data = &sun4i_pwm_dual_bypass,
}, {
.compatible = "allwinner,sun5i-a13-pwm",
- .data = &sun4i_pwm_data_a13,
+ .data = &sun4i_pwm_single_bypass,
}, {
.compatible = "allwinner,sun7i-a20-pwm",
- .data = &sun4i_pwm_data_a20,
+ .data = &sun4i_pwm_dual_bypass,
}, {
.compatible = "allwinner,sun8i-h3-pwm",
- .data = &sun4i_pwm_data_h3,
+ .data = &sun4i_pwm_single_bypass,
}, {
/* sentinel */
},
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 83f2b0b15712..7c71cdb8a9d8 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -273,7 +273,8 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
ret = device_register(&export->child);
if (ret) {
clear_bit(PWMF_EXPORTED, &pwm->flags);
- kfree(export);
+ put_device(&export->child);
+ export = NULL;
return ret;
}
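The hunk above follows the documented driver-model rule for device_register(): even when it fails, the struct device has been initialized and may already be referenced, so it must be released with put_device() (which frees the containing object through its release callback) and never freed directly with kfree(). A generic sketch of the pattern, using a hypothetical struct foo rather than this driver's types:

#include <linux/device.h>
#include <linux/slab.h>

/* Hypothetical container, freed only from its release callback. */
struct foo {
	struct device dev;
};

static void foo_release(struct device *dev)
{
	kfree(container_of(dev, struct foo, dev));
}

static int foo_add(struct device *parent)
{
	struct foo *f;
	int ret;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return -ENOMEM;

	f->dev.parent = parent;
	f->dev.release = foo_release;
	dev_set_name(&f->dev, "foo0");	/* hypothetical name */

	ret = device_register(&f->dev);
	if (ret) {
		/* Not kfree(): the device may already hold references. */
		put_device(&f->dev);
		return ret;
	}

	return 0;
}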
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index bfec1485ca23..5535312602af 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -323,6 +323,9 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
struct ccw_dev_id dev_id;
int rc, i;
+ if (num_devices < 1)
+ return -EINVAL;
+
gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
GFP_KERNEL);
if (!gdev)
@@ -375,7 +378,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
goto error;
}
/* Check if the devices are bound to the required ccw driver. */
- if (gdev->count && gdrv && gdrv->ccw_driver &&
+ if (gdrv && gdrv->ccw_driver &&
gdev->cdev[0]->drv != gdrv->ccw_driver) {
rc = -EINVAL;
goto error;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 6886b3d34cf8..5130d7c67239 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -25,7 +25,6 @@
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/setup.h>
-#include <asm/reset.h>
#include <asm/ipl.h>
#include <asm/chpid.h>
#include <asm/airq.h>
@@ -767,262 +766,6 @@ void cio_register_early_subchannels(void)
}
#endif /* CONFIG_CCW_CONSOLE */
-static int
-__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
-{
- int retry, cc;
-
- cc = 0;
- for (retry=0;retry<3;retry++) {
- schib->pmcw.ena = 0;
- cc = msch(schid, schib);
- if (cc)
- return (cc==3?-ENODEV:-EBUSY);
- if (stsch(schid, schib) || !css_sch_is_valid(schib))
- return -ENODEV;
- if (!schib->pmcw.ena)
- return 0;
- }
- return -EBUSY; /* uhm... */
-}
-
-static int
-__clear_io_subchannel_easy(struct subchannel_id schid)
-{
- int retry;
-
- if (csch(schid))
- return -ENODEV;
- for (retry=0;retry<20;retry++) {
- struct tpi_info ti;
-
- if (tpi(&ti)) {
- tsch(ti.schid, this_cpu_ptr(&cio_irb));
- if (schid_equal(&ti.schid, &schid))
- return 0;
- }
- udelay_simple(100);
- }
- return -EBUSY;
-}
-
-static void __clear_chsc_subchannel_easy(void)
-{
- /* It seems we can only wait for a bit here :/ */
- udelay_simple(100);
-}
-
-static int pgm_check_occured;
-
-static void cio_reset_pgm_check_handler(void)
-{
- pgm_check_occured = 1;
-}
-
-static int stsch_reset(struct subchannel_id schid, struct schib *addr)
-{
- int rc;
-
- pgm_check_occured = 0;
- s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
- rc = stsch(schid, addr);
- s390_base_pgm_handler_fn = NULL;
-
- /* The program check handler could have changed pgm_check_occured. */
- barrier();
-
- if (pgm_check_occured)
- return -EIO;
- else
- return rc;
-}
-
-static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
-{
- struct schib schib;
-
- if (stsch_reset(schid, &schib))
- return -ENXIO;
- if (!schib.pmcw.ena)
- return 0;
- switch(__disable_subchannel_easy(schid, &schib)) {
- case 0:
- case -ENODEV:
- break;
- default: /* -EBUSY */
- switch (schib.pmcw.st) {
- case SUBCHANNEL_TYPE_IO:
- if (__clear_io_subchannel_easy(schid))
- goto out; /* give up... */
- break;
- case SUBCHANNEL_TYPE_CHSC:
- __clear_chsc_subchannel_easy();
- break;
- default:
- /* No default clear strategy */
- break;
- }
- stsch(schid, &schib);
- __disable_subchannel_easy(schid, &schib);
- }
-out:
- return 0;
-}
-
-static atomic_t chpid_reset_count;
-
-static void s390_reset_chpids_mcck_handler(void)
-{
- struct crw crw;
- union mci mci;
-
- /* Check for pending channel report word. */
- mci.val = S390_lowcore.mcck_interruption_code;
- if (!mci.cp)
- return;
- /* Process channel report words. */
- while (stcrw(&crw) == 0) {
- /* Check for responses to RCHP. */
- if (crw.slct && crw.rsc == CRW_RSC_CPATH)
- atomic_dec(&chpid_reset_count);
- }
-}
-
-#define RCHP_TIMEOUT (30 * USEC_PER_SEC)
-static void css_reset(void)
-{
- int i, ret;
- unsigned long long timeout;
- struct chp_id chpid;
-
- /* Reset subchannels. */
- for_each_subchannel(__shutdown_subchannel_easy, NULL);
- /* Reset channel paths. */
- s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
- /* Enable channel report machine checks. */
- __ctl_set_bit(14, 28);
- /* Temporarily reenable machine checks. */
- local_mcck_enable();
- chp_id_init(&chpid);
- for (i = 0; i <= __MAX_CHPID; i++) {
- chpid.id = i;
- ret = rchp(chpid);
- if ((ret == 0) || (ret == 2))
- /*
- * rchp either succeeded, or another rchp is already
- * in progress. In either case, we'll get a crw.
- */
- atomic_inc(&chpid_reset_count);
- }
- /* Wait for machine check for all channel paths. */
- timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
- while (atomic_read(&chpid_reset_count) != 0) {
- if (get_tod_clock_fast() > timeout)
- break;
- cpu_relax();
- }
- /* Disable machine checks again. */
- local_mcck_disable();
- /* Disable channel report machine checks. */
- __ctl_clear_bit(14, 28);
- s390_base_mcck_handler_fn = NULL;
-}
-
-static struct reset_call css_reset_call = {
- .fn = css_reset,
-};
-
-static int __init init_css_reset_call(void)
-{
- atomic_set(&chpid_reset_count, 0);
- register_reset_call(&css_reset_call);
- return 0;
-}
-
-arch_initcall(init_css_reset_call);
-
-struct sch_match_id {
- struct subchannel_id schid;
- struct ccw_dev_id devid;
- int rc;
-};
-
-static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
-{
- struct schib schib;
- struct sch_match_id *match_id = data;
-
- if (stsch_reset(schid, &schib))
- return -ENXIO;
- if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
- (schib.pmcw.dev == match_id->devid.devno) &&
- (schid.ssid == match_id->devid.ssid)) {
- match_id->schid = schid;
- match_id->rc = 0;
- return 1;
- }
- return 0;
-}
-
-static int reipl_find_schid(struct ccw_dev_id *devid,
- struct subchannel_id *schid)
-{
- struct sch_match_id match_id;
-
- match_id.devid = *devid;
- match_id.rc = -ENODEV;
- for_each_subchannel(__reipl_subchannel_match, &match_id);
- if (match_id.rc == 0)
- *schid = match_id.schid;
- return match_id.rc;
-}
-
-extern void do_reipl_asm(__u32 schid);
-
-/* Make sure all subchannels are quiet before we re-ipl an lpar. */
-void reipl_ccw_dev(struct ccw_dev_id *devid)
-{
- struct subchannel_id uninitialized_var(schid);
-
- s390_reset_system();
- if (reipl_find_schid(devid, &schid) != 0)
- panic("IPL Device not found\n");
- do_reipl_asm(*((__u32*)&schid));
-}
-
-int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
-{
- static struct chsc_sda_area sda_area __initdata;
- struct subchannel_id schid;
- struct schib schib;
-
- schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
- if (!schid.one)
- return -ENODEV;
-
- if (schid.ssid) {
- /*
- * Firmware should have already enabled MSS but whoever started
- * the kernel might have initiated a channel subsystem reset.
- * Ensure that MSS is enabled.
- */
- memset(&sda_area, 0, sizeof(sda_area));
- if (__chsc_enable_facility(&sda_area, CHSC_SDA_OC_MSS))
- return -ENODEV;
- }
- if (stsch(schid, &schib))
- return -ENODEV;
- if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
- return -ENODEV;
- if (!schib.pmcw.dnv)
- return -ENODEV;
-
- iplinfo->ssid = schid.ssid;
- iplinfo->devno = schib.pmcw.dev;
- iplinfo->is_qdio = schib.pmcw.qf;
- return 0;
-}
-
/**
* cio_tm_start_key - perform start function
* @sch: subchannel on which to perform the start function
diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
index 4fa9ee1d09fa..14d328338ce2 100644
--- a/drivers/s390/cio/ioasm.c
+++ b/drivers/s390/cio/ioasm.c
@@ -183,30 +183,6 @@ int chsc(void *chsc_area)
}
EXPORT_SYMBOL(chsc);
-static inline int __rchp(struct chp_id chpid)
-{
- register struct chp_id reg1 asm ("1") = chpid;
- int ccode;
-
- asm volatile(
- " lr 1,%1\n"
- " rchp\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode) : "d" (reg1) : "cc");
- return ccode;
-}
-
-int rchp(struct chp_id chpid)
-{
- int ccode;
-
- ccode = __rchp(chpid);
- trace_s390_cio_rchp(chpid, ccode);
-
- return ccode;
-}
-
static inline int __rsch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 35ad4ddd61e0..4be539cb9adc 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -20,7 +20,6 @@ int ssch(struct subchannel_id schid, union orb *addr);
int csch(struct subchannel_id schid);
int tpi(struct tpi_info *addr);
int chsc(void *chsc_area);
-int rchp(struct chp_id chpid);
int rsch(struct subchannel_id schid);
int hsch(struct subchannel_id schid);
int xsch(struct subchannel_id schid);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index a337281337a7..f4ca72dd862f 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1207,8 +1207,10 @@ no_cleanup:
qdio_shutdown_thinint(irq_ptr);
/* restore interrupt handler */
- if ((void *)cdev->handler == (void *)qdio_int_handler)
+ if ((void *)cdev->handler == (void *)qdio_int_handler) {
cdev->handler = irq_ptr->orig_handler;
+ cdev->private->intparm = 0;
+ }
spin_unlock_irq(get_ccwdev_lock(cdev));
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 98f3cfdc0d02..439991d71b14 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -507,8 +507,10 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
irq_ptr->aqueue = *ciw;
/* set new interrupt handler */
+ spin_lock_irq(get_ccwdev_lock(irq_ptr->cdev));
irq_ptr->orig_handler = init_data->cdev->handler;
init_data->cdev->handler = qdio_int_handler;
+ spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev));
return 0;
out_err:
qdio_release_memory(irq_ptr);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 48d55dc9e986..35a0c2b52f82 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -25,7 +25,6 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/suspend.h>
-#include <asm/reset.h>
#include <asm/airq.h>
#include <linux/atomic.h>
#include <asm/isc.h>
@@ -1197,26 +1196,7 @@ static void ap_config_timeout(struct timer_list *unused)
queue_work(system_long_wq, &ap_scan_work);
}
-static void ap_reset_all(void)
-{
- int i, j;
-
- for (i = 0; i < AP_DOMAINS; i++) {
- if (!ap_test_config_domain(i))
- continue;
- for (j = 0; j < AP_DEVICES; j++) {
- if (!ap_test_config_card_id(j))
- continue;
- ap_rapq(AP_MKQID(j, i));
- }
- }
-}
-
-static struct reset_call ap_reset_call = {
- .fn = ap_reset_all,
-};
-
-int __init ap_debug_init(void)
+static int __init ap_debug_init(void)
{
ap_dbf_info = debug_register("ap", 1, 1,
DBF_MAX_SPRINTF_ARGS * sizeof(long));
@@ -1226,17 +1206,12 @@ int __init ap_debug_init(void)
return 0;
}
-void ap_debug_exit(void)
-{
- debug_unregister(ap_dbf_info);
-}
-
/**
* ap_module_init(): The module initialization code.
*
* Initializes the module.
*/
-int __init ap_module_init(void)
+static int __init ap_module_init(void)
{
int max_domain_id;
int rc, i;
@@ -1274,8 +1249,6 @@ int __init ap_module_init(void)
ap_airq_flag = (rc == 0);
}
- register_reset_call(&ap_reset_call);
-
/* Create /sys/bus/ap. */
rc = bus_register(&ap_bus_type);
if (rc)
@@ -1331,7 +1304,6 @@ out_bus:
bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
bus_unregister(&ap_bus_type);
out:
- unregister_reset_call(&ap_reset_call);
if (ap_using_interrupts())
unregister_adapter_interrupt(&ap_airq);
kfree(ap_configuration);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index e0827eaa42f1..02184cf35834 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -17,7 +17,7 @@
#include <linux/types.h>
#include <asm/ap.h>
-#define AP_DEVICES 64 /* Number of AP devices. */
+#define AP_DEVICES 256 /* Number of AP devices. */
#define AP_DOMAINS 256 /* Number of AP domains. */
#define AP_RESET_TIMEOUT (HZ*0.7) /* Time in ticks for reset timeouts. */
#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */
@@ -240,7 +240,4 @@ void ap_queue_resume(struct ap_device *ap_dev);
struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
int comp_device_type, unsigned int functions);
-int ap_module_init(void);
-void ap_module_exit(void);
-
#endif /* _AP_BUS_H_ */
diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h
index 6a9d77c75ec3..dc675eb5aef6 100644
--- a/drivers/s390/crypto/ap_debug.h
+++ b/drivers/s390/crypto/ap_debug.h
@@ -23,7 +23,4 @@
extern debug_info_t *ap_dbf_info;
-int ap_debug_init(void);
-void ap_debug_exit(void);
-
#endif /* AP_DEBUG_H */
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index e7c2e4f9529a..ed80d00cdb6f 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -889,7 +889,7 @@ int pkey_findcard(const struct pkey_seckey *seckey,
u16 *pcardnr, u16 *pdomain, int verify)
{
struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
- struct zcrypt_device_matrix *device_matrix;
+ struct zcrypt_device_status_ext *device_status;
u16 card, dom;
u64 mkvp[2];
int i, rc, oi = -1;
@@ -899,18 +899,19 @@ int pkey_findcard(const struct pkey_seckey *seckey,
return -EINVAL;
/* fetch status of all crypto cards */
- device_matrix = kmalloc(sizeof(struct zcrypt_device_matrix),
+ device_status = kmalloc(MAX_ZDEV_ENTRIES_EXT
+ * sizeof(struct zcrypt_device_status_ext),
GFP_KERNEL);
- if (!device_matrix)
+ if (!device_status)
return -ENOMEM;
- zcrypt_device_status_mask(device_matrix);
+ zcrypt_device_status_mask_ext(device_status);
/* walk through all crypto cards */
- for (i = 0; i < MAX_ZDEV_ENTRIES; i++) {
- card = AP_QID_CARD(device_matrix->device[i].qid);
- dom = AP_QID_QUEUE(device_matrix->device[i].qid);
- if (device_matrix->device[i].online &&
- device_matrix->device[i].functions & 0x04) {
+ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
+ if (device_status[i].online &&
+ device_status[i].functions & 0x04) {
/* an enabled CCA Coprocessor card */
/* try cached mkvp */
if (mkvp_cache_fetch(card, dom, mkvp) == 0 &&
@@ -930,14 +931,14 @@ int pkey_findcard(const struct pkey_seckey *seckey,
mkvp_cache_scrub(card, dom);
}
}
- if (i >= MAX_ZDEV_ENTRIES) {
+ if (i >= MAX_ZDEV_ENTRIES_EXT) {
/* nothing found, so this time without cache */
- for (i = 0; i < MAX_ZDEV_ENTRIES; i++) {
- if (!(device_matrix->device[i].online &&
- device_matrix->device[i].functions & 0x04))
+ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ if (!(device_status[i].online &&
+ device_status[i].functions & 0x04))
continue;
- card = AP_QID_CARD(device_matrix->device[i].qid);
- dom = AP_QID_QUEUE(device_matrix->device[i].qid);
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
/* fresh fetch mkvp from adapter */
if (fetch_mkvp(card, dom, mkvp) == 0) {
mkvp_cache_update(card, dom, mkvp);
@@ -947,13 +948,13 @@ int pkey_findcard(const struct pkey_seckey *seckey,
oi = i;
}
}
- if (i >= MAX_ZDEV_ENTRIES && oi >= 0) {
+ if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) {
/* old mkvp matched, use this card then */
- card = AP_QID_CARD(device_matrix->device[oi].qid);
- dom = AP_QID_QUEUE(device_matrix->device[oi].qid);
+ card = AP_QID_CARD(device_status[oi].qid);
+ dom = AP_QID_QUEUE(device_status[oi].qid);
}
}
- if (i < MAX_ZDEV_ENTRIES || oi >= 0) {
+ if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) {
if (pcardnr)
*pcardnr = card;
if (pdomain)
@@ -962,7 +963,7 @@ int pkey_findcard(const struct pkey_seckey *seckey,
} else
rc = -ENODEV;
- kfree(device_matrix);
+ kfree(device_status);
return rc;
}
EXPORT_SYMBOL(pkey_findcard);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index ce15f101ee28..5efd84862ccb 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -18,8 +18,6 @@
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
@@ -607,19 +605,24 @@ out:
return rc;
}
-void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
+static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
struct zcrypt_device_status *stat;
+ int card, queue;
+
+ memset(devstatus, 0, MAX_ZDEV_ENTRIES
+ * sizeof(struct zcrypt_device_status));
- memset(matrix, 0, sizeof(*matrix));
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
- stat = matrix->device;
- stat += AP_QID_CARD(zq->queue->qid) * MAX_ZDEV_DOMAINS;
- stat += AP_QID_QUEUE(zq->queue->qid);
+ card = AP_QID_CARD(zq->queue->qid);
+ if (card >= MAX_ZDEV_CARDIDS)
+ continue;
+ queue = AP_QID_QUEUE(zq->queue->qid);
+ stat = &devstatus[card * AP_DOMAINS + queue];
stat->hwtype = zc->card->ap_dev.device_type;
stat->functions = zc->card->functions >> 26;
stat->qid = zq->queue->qid;
@@ -628,40 +631,70 @@ void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
}
spin_unlock(&zcrypt_list_lock);
}
-EXPORT_SYMBOL(zcrypt_device_status_mask);
-static void zcrypt_status_mask(char status[AP_DEVICES])
+void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
+ struct zcrypt_device_status_ext *stat;
+ int card, queue;
+
+ memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
+ * sizeof(struct zcrypt_device_status_ext));
- memset(status, 0, sizeof(char) * AP_DEVICES);
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ card = AP_QID_CARD(zq->queue->qid);
+ queue = AP_QID_QUEUE(zq->queue->qid);
+ stat = &devstatus[card * AP_DOMAINS + queue];
+ stat->hwtype = zc->card->ap_dev.device_type;
+ stat->functions = zc->card->functions >> 26;
+ stat->qid = zq->queue->qid;
+ stat->online = zq->online ? 0x01 : 0x00;
+ }
+ }
+ spin_unlock(&zcrypt_list_lock);
+}
+EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
+
+static void zcrypt_status_mask(char status[], size_t max_adapters)
+{
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+ int card;
+
+ memset(status, 0, max_adapters);
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ card = AP_QID_CARD(zq->queue->qid);
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
+ || card >= max_adapters)
continue;
- status[AP_QID_CARD(zq->queue->qid)] =
- zc->online ? zc->user_space_type : 0x0d;
+ status[card] = zc->online ? zc->user_space_type : 0x0d;
}
}
spin_unlock(&zcrypt_list_lock);
}
-static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
+static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
+ int card;
- memset(qdepth, 0, sizeof(char) * AP_DEVICES);
+ memset(qdepth, 0, max_adapters);
spin_lock(&zcrypt_list_lock);
local_bh_disable();
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ card = AP_QID_CARD(zq->queue->qid);
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
+ || card >= max_adapters)
continue;
spin_lock(&zq->queue->lock);
- qdepth[AP_QID_CARD(zq->queue->qid)] =
+ qdepth[card] =
zq->queue->pendingq_count +
zq->queue->requestq_count;
spin_unlock(&zq->queue->lock);
@@ -671,21 +704,23 @@ static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
spin_unlock(&zcrypt_list_lock);
}
-static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
+static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
+ int card;
- memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
+ memset(reqcnt, 0, sizeof(int) * max_adapters);
spin_lock(&zcrypt_list_lock);
local_bh_disable();
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+ card = AP_QID_CARD(zq->queue->qid);
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
+ || card >= max_adapters)
continue;
spin_lock(&zq->queue->lock);
- reqcnt[AP_QID_CARD(zq->queue->qid)] =
- zq->queue->total_request_count;
+ reqcnt[card] = zq->queue->total_request_count;
spin_unlock(&zq->queue->lock);
}
}
@@ -739,60 +774,10 @@ static int zcrypt_requestq_count(void)
return requestq_count;
}
-static int zcrypt_count_type(int type)
-{
- struct zcrypt_card *zc;
- struct zcrypt_queue *zq;
- int device_count;
-
- device_count = 0;
- spin_lock(&zcrypt_list_lock);
- for_each_zcrypt_card(zc) {
- if (zc->card->id != type)
- continue;
- for_each_zcrypt_queue(zq, zc) {
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
- continue;
- device_count++;
- }
- }
- spin_unlock(&zcrypt_list_lock);
- return device_count;
-}
-
-/**
- * zcrypt_ica_status(): Old, depracted combi status call.
- *
- * Old, deprecated combi status call.
- */
-static long zcrypt_ica_status(struct file *filp, unsigned long arg)
-{
- struct ica_z90_status *pstat;
- int ret;
-
- pstat = kzalloc(sizeof(*pstat), GFP_KERNEL);
- if (!pstat)
- return -ENOMEM;
- pstat->totalcount = zcrypt_device_count;
- pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA);
- pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC);
- pstat->requestqWaitCount = zcrypt_requestq_count();
- pstat->pendingqWaitCount = zcrypt_pendingq_count();
- pstat->totalOpenCount = atomic_read(&zcrypt_open_count);
- pstat->cryptoDomain = ap_domain_index;
- zcrypt_status_mask(pstat->status);
- zcrypt_qdepth_mask(pstat->qdepth);
- ret = 0;
- if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat)))
- ret = -EFAULT;
- kfree(pstat);
- return ret;
-}
-
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
- int rc;
+ int rc = 0;
switch (cmd) {
case ICARSAMODEXPO: {
@@ -871,48 +856,48 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
return rc;
}
- case ZDEVICESTATUS: {
- struct zcrypt_device_matrix *device_status;
+ case ZCRYPT_DEVICE_STATUS: {
+ struct zcrypt_device_status_ext *device_status;
+ size_t total_size = MAX_ZDEV_ENTRIES_EXT
+ * sizeof(struct zcrypt_device_status_ext);
- device_status = kzalloc(sizeof(struct zcrypt_device_matrix),
- GFP_KERNEL);
+ device_status = kzalloc(total_size, GFP_KERNEL);
if (!device_status)
return -ENOMEM;
-
- zcrypt_device_status_mask(device_status);
-
+ zcrypt_device_status_mask_ext(device_status);
if (copy_to_user((char __user *) arg, device_status,
- sizeof(struct zcrypt_device_matrix))) {
- kfree(device_status);
- return -EFAULT;
- }
-
+ total_size))
+ rc = -EFAULT;
kfree(device_status);
- return 0;
+ return rc;
}
- case Z90STAT_STATUS_MASK: {
+ case ZCRYPT_STATUS_MASK: {
char status[AP_DEVICES];
- zcrypt_status_mask(status);
- if (copy_to_user((char __user *) arg, status,
- sizeof(char) * AP_DEVICES))
+
+ zcrypt_status_mask(status, AP_DEVICES);
+ if (copy_to_user((char __user *) arg, status, sizeof(status)))
return -EFAULT;
return 0;
}
- case Z90STAT_QDEPTH_MASK: {
+ case ZCRYPT_QDEPTH_MASK: {
char qdepth[AP_DEVICES];
- zcrypt_qdepth_mask(qdepth);
- if (copy_to_user((char __user *) arg, qdepth,
- sizeof(char) * AP_DEVICES))
+
+ zcrypt_qdepth_mask(qdepth, AP_DEVICES);
+ if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
return -EFAULT;
return 0;
}
- case Z90STAT_PERDEV_REQCNT: {
- int reqcnt[AP_DEVICES];
- zcrypt_perdev_reqcnt(reqcnt);
- if (copy_to_user((int __user *) arg, reqcnt,
- sizeof(int) * AP_DEVICES))
- return -EFAULT;
- return 0;
+ case ZCRYPT_PERDEV_REQCNT: {
+ int *reqcnt;
+
+ reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
+ if (!reqcnt)
+ return -ENOMEM;
+ zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
+ if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
+ rc = -EFAULT;
+ kfree(reqcnt);
+ return rc;
}
case Z90STAT_REQUESTQ_COUNT:
return put_user(zcrypt_requestq_count(), (int __user *) arg);
@@ -924,38 +909,54 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
case Z90STAT_DOMAIN_INDEX:
return put_user(ap_domain_index, (int __user *) arg);
/*
- * Deprecated ioctls. Don't add another device count ioctl,
- * you can count them yourself in the user space with the
- * output of the Z90STAT_STATUS_MASK ioctl.
+ * Deprecated ioctls
*/
- case ICAZ90STATUS:
- return zcrypt_ica_status(filp, arg);
- case Z90STAT_TOTALCOUNT:
- return put_user(zcrypt_device_count, (int __user *) arg);
- case Z90STAT_PCICACOUNT:
- return put_user(zcrypt_count_type(ZCRYPT_PCICA),
- (int __user *) arg);
- case Z90STAT_PCICCCOUNT:
- return put_user(zcrypt_count_type(ZCRYPT_PCICC),
- (int __user *) arg);
- case Z90STAT_PCIXCCMCL2COUNT:
- return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2),
- (int __user *) arg);
- case Z90STAT_PCIXCCMCL3COUNT:
- return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
- (int __user *) arg);
- case Z90STAT_PCIXCCCOUNT:
- return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) +
- zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
- (int __user *) arg);
- case Z90STAT_CEX2CCOUNT:
- return put_user(zcrypt_count_type(ZCRYPT_CEX2C),
- (int __user *) arg);
- case Z90STAT_CEX2ACOUNT:
- return put_user(zcrypt_count_type(ZCRYPT_CEX2A),
- (int __user *) arg);
+ case ZDEVICESTATUS: {
+ /* the old ioctl supports only 64 adapters */
+ struct zcrypt_device_status *device_status;
+ size_t total_size = MAX_ZDEV_ENTRIES
+ * sizeof(struct zcrypt_device_status);
+
+ device_status = kzalloc(total_size, GFP_KERNEL);
+ if (!device_status)
+ return -ENOMEM;
+ zcrypt_device_status_mask(device_status);
+ if (copy_to_user((char __user *) arg, device_status,
+ total_size))
+ rc = -EFAULT;
+ kfree(device_status);
+ return rc;
+ }
+ case Z90STAT_STATUS_MASK: {
+ /* the old ioctl supports only 64 adapters */
+ char status[MAX_ZDEV_CARDIDS];
+
+ zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
+ if (copy_to_user((char __user *) arg, status, sizeof(status)))
+ return -EFAULT;
+ return 0;
+ }
+ case Z90STAT_QDEPTH_MASK: {
+ /* the old ioctl supports only 64 adapters */
+ char qdepth[MAX_ZDEV_CARDIDS];
+
+ zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
+ if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
+ return -EFAULT;
+ return 0;
+ }
+ case Z90STAT_PERDEV_REQCNT: {
+ /* the old ioctl supports only 64 adapters */
+ int reqcnt[MAX_ZDEV_CARDIDS];
+
+ zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
+ if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
+ return -EFAULT;
+ return 0;
+ }
+ /* unknown ioctl number */
default:
- /* unknown ioctl number */
+ ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd);
return -ENOIOCTLCMD;
}
}
@@ -1152,201 +1153,6 @@ static struct miscdevice zcrypt_misc_device = {
.fops = &zcrypt_fops,
};
-/*
- * Deprecated /proc entry support.
- */
-static struct proc_dir_entry *zcrypt_entry;
-
-static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len)
-{
- int i;
-
- for (i = 0; i < len; i++)
- seq_printf(m, "%01x", (unsigned int) addr[i]);
- seq_putc(m, ' ');
-}
-
-static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len)
-{
- int inl, c, cx;
-
- seq_printf(m, " ");
- inl = 0;
- for (c = 0; c < (len / 16); c++) {
- sprintcl(m, addr+inl, 16);
- inl += 16;
- }
- cx = len%16;
- if (cx) {
- sprintcl(m, addr+inl, cx);
- inl += cx;
- }
- seq_putc(m, '\n');
-}
-
-static void sprinthx(unsigned char *title, struct seq_file *m,
- unsigned char *addr, unsigned int len)
-{
- int inl, r, rx;
-
- seq_printf(m, "\n%s\n", title);
- inl = 0;
- for (r = 0; r < (len / 64); r++) {
- sprintrw(m, addr+inl, 64);
- inl += 64;
- }
- rx = len % 64;
- if (rx) {
- sprintrw(m, addr+inl, rx);
- inl += rx;
- }
- seq_putc(m, '\n');
-}
-
-static void sprinthx4(unsigned char *title, struct seq_file *m,
- unsigned int *array, unsigned int len)
-{
- seq_printf(m, "\n%s\n", title);
- seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, array, len, false);
- seq_putc(m, '\n');
-}
-
-static int zcrypt_proc_show(struct seq_file *m, void *v)
-{
- char workarea[sizeof(int) * AP_DEVICES];
-
- seq_printf(m, "\nzcrypt version: %d.%d.%d\n",
- ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
- seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index);
- seq_printf(m, "Total device count: %d\n", zcrypt_device_count);
- seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA));
- seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC));
- seq_printf(m, "PCIXCC MCL2 count: %d\n",
- zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
- seq_printf(m, "PCIXCC MCL3 count: %d\n",
- zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
- seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C));
- seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A));
- seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C));
- seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A));
- seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count());
- seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count());
- seq_printf(m, "Total open handles: %d\n\n",
- atomic_read(&zcrypt_open_count));
- zcrypt_status_mask(workarea);
- sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
- "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
- m, workarea, AP_DEVICES);
- zcrypt_qdepth_mask(workarea);
- sprinthx("Waiting work element counts", m, workarea, AP_DEVICES);
- zcrypt_perdev_reqcnt((int *) workarea);
- sprinthx4("Per-device successfully completed request counts",
- m, (unsigned int *) workarea, AP_DEVICES);
- return 0;
-}
-
-static int zcrypt_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, zcrypt_proc_show, NULL);
-}
-
-static void zcrypt_disable_card(int index)
-{
- struct zcrypt_card *zc;
- struct zcrypt_queue *zq;
-
- spin_lock(&zcrypt_list_lock);
- for_each_zcrypt_card(zc) {
- for_each_zcrypt_queue(zq, zc) {
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
- continue;
- zq->online = 0;
- ap_flush_queue(zq->queue);
- }
- }
- spin_unlock(&zcrypt_list_lock);
-}
-
-static void zcrypt_enable_card(int index)
-{
- struct zcrypt_card *zc;
- struct zcrypt_queue *zq;
-
- spin_lock(&zcrypt_list_lock);
- for_each_zcrypt_card(zc) {
- for_each_zcrypt_queue(zq, zc) {
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
- continue;
- zq->online = 1;
- ap_flush_queue(zq->queue);
- }
- }
- spin_unlock(&zcrypt_list_lock);
-}
-
-static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *pos)
-{
- unsigned char *lbuf, *ptr;
- size_t local_count;
- int j;
-
- if (count <= 0)
- return 0;
-
-#define LBUFSIZE 1200UL
- lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
- if (!lbuf)
- return 0;
-
- local_count = min(LBUFSIZE - 1, count);
- if (copy_from_user(lbuf, buffer, local_count) != 0) {
- kfree(lbuf);
- return -EFAULT;
- }
- lbuf[local_count] = '\0';
-
- ptr = strstr(lbuf, "Online devices");
- if (!ptr)
- goto out;
- ptr = strstr(ptr, "\n");
- if (!ptr)
- goto out;
- ptr++;
-
- if (strstr(ptr, "Waiting work element counts") == NULL)
- goto out;
-
- for (j = 0; j < 64 && *ptr; ptr++) {
- /*
- * '0' for no device, '1' for PCICA, '2' for PCICC,
- * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
- * '5' for CEX2C and '6' for CEX2A'
- * '7' for CEX3C and '8' for CEX3A
- */
- if (*ptr >= '0' && *ptr <= '8')
- j++;
- else if (*ptr == 'd' || *ptr == 'D')
- zcrypt_disable_card(j++);
- else if (*ptr == 'e' || *ptr == 'E')
- zcrypt_enable_card(j++);
- else if (*ptr != ' ' && *ptr != '\t')
- break;
- }
-out:
- kfree(lbuf);
- return count;
-}
-
-static const struct file_operations zcrypt_proc_fops = {
- .owner = THIS_MODULE,
- .open = zcrypt_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = zcrypt_proc_write,
-};
-
static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
@@ -1448,27 +1254,15 @@ int __init zcrypt_api_init(void)
if (rc)
goto out;
- atomic_set(&zcrypt_rescan_req, 0);
-
/* Register the request sprayer. */
rc = misc_register(&zcrypt_misc_device);
if (rc < 0)
goto out;
- /* Set up the proc file system */
- zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL,
- &zcrypt_proc_fops);
- if (!zcrypt_entry) {
- rc = -ENOMEM;
- goto out_misc;
- }
-
zcrypt_msgtype6_init();
zcrypt_msgtype50_init();
return 0;
-out_misc:
- misc_deregister(&zcrypt_misc_device);
out:
return rc;
}
@@ -1480,7 +1274,6 @@ out:
*/
void __exit zcrypt_api_exit(void)
{
- remove_proc_entry("driver/z90crypt", NULL);
misc_deregister(&zcrypt_misc_device);
zcrypt_msgtype6_exit();
zcrypt_msgtype50_exit();
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 9fff8912f6e3..f149a8fee60d 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -21,30 +21,6 @@
#include <asm/zcrypt.h>
#include "ap_bus.h"
-/* deprecated status calls */
-#define ICAZ90STATUS _IOR(ZCRYPT_IOCTL_MAGIC, 0x10, struct ica_z90_status)
-#define Z90STAT_PCIXCCCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x43, int)
-
-/**
- * This structure is deprecated and the corresponding ioctl() has been
- * replaced with individual ioctl()s for each piece of data!
- */
-struct ica_z90_status {
- int totalcount;
- int leedslitecount; // PCICA
- int leeds2count; // PCICC
- // int PCIXCCCount; is not in struct for backward compatibility
- int requestqWaitCount;
- int pendingqWaitCount;
- int totalOpenCount;
- int cryptoDomain;
- // status: 0=not there, 1=PCICA, 2=PCICC, 3=PCIXCC_MCL2, 4=PCIXCC_MCL3,
- // 5=CEX2C
- unsigned char status[64];
- // qdepth: # work elements waiting for each device
- unsigned char qdepth[64];
-};
-
/**
* device type for an actual device is either PCICA, PCICC, PCIXCC_MCL2,
* PCIXCC_MCL3, CEX2C, or CEX2A
@@ -179,6 +155,6 @@ struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
int zcrypt_api_init(void);
void zcrypt_api_exit(void);
long zcrypt_send_cprb(struct ica_xcRB *xcRB);
-void zcrypt_device_status_mask(struct zcrypt_device_matrix *devstatus);
+void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus);
#endif /* _ZCRYPT_API_H_ */
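
The zcrypt rework above replaces the fixed zcrypt_device_matrix with flat status arrays indexed as card * domains + queue, and the legacy ZDEVICESTATUS/Z90STAT_* paths now skip any card index beyond the old 64-adapter limit. A minimal user-space sketch of walking such a flat card-by-domain table follows; the constants and the struct layout are assumptions for illustration, not the driver's exact ABI.

#include <stdio.h>
#include <stdlib.h>

#define CARDS   64              /* assumed legacy card limit */
#define DOMAINS 256             /* assumed domains per card */

struct dev_status {             /* illustrative, not the kernel struct */
        unsigned int hwtype;
        unsigned int qid;
        unsigned int online;
};

/* Print every online queue from a flat card x domain table. */
static void dump_online(const struct dev_status *tbl)
{
        int card, queue;

        for (card = 0; card < CARDS; card++) {
                for (queue = 0; queue < DOMAINS; queue++) {
                        const struct dev_status *st =
                                &tbl[card * DOMAINS + queue];

                        if (st->online)
                                printf("card %02d queue %02x qid 0x%04x\n",
                                       card, queue, st->qid);
                }
        }
}

int main(void)
{
        struct dev_status *tbl = calloc(CARDS * DOMAINS, sizeof(*tbl));

        if (!tbl)
                return 1;
        /* In real use the table would be filled by the status ioctl. */
        dump_online(tbl);
        free(tbl);
        return 0;
}
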
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 84858d5c8257..0156c9623c35 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1502,9 +1502,10 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
host = aac->scsi_host_ptr;
scsi_block_requests(host);
aac_adapter_disable_int(aac);
- if (aac->thread->pid != current->pid) {
+ if (aac->thread && aac->thread->pid != current->pid) {
spin_unlock_irq(host->host_lock);
kthread_stop(aac->thread);
+ aac->thread = NULL;
jafo = 1;
}
@@ -1591,6 +1592,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
aac->name);
if (IS_ERR(aac->thread)) {
retval = PTR_ERR(aac->thread);
+ aac->thread = NULL;
goto out;
}
}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 2664ea0df35f..f24fb942065d 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1562,6 +1562,7 @@ static void __aac_shutdown(struct aac_dev * aac)
up(&fib->event_wait);
}
kthread_stop(aac->thread);
+ aac->thread = NULL;
}
aac_send_shutdown(aac);
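
Both aacraid hunks apply the same rule: once the worker thread has been stopped (or failed to start), the cached task pointer is cleared so later teardown paths can test it instead of stopping a stale thread. A reduced sketch of that pattern with invented names, assuming a standard kthread worker:

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

struct my_adapter {                     /* hypothetical container */
        struct task_struct *thread;
};

static int my_worker(void *data)
{
        while (!kthread_should_stop())
                msleep(100);
        return 0;
}

static int my_start_worker(struct my_adapter *a)
{
        a->thread = kthread_run(my_worker, a, "my_worker");
        if (IS_ERR(a->thread)) {
                int err = PTR_ERR(a->thread);

                a->thread = NULL;       /* never leave an ERR_PTR behind */
                return err;
        }
        return 0;
}

static void my_stop_worker(struct my_adapter *a)
{
        if (a->thread) {
                kthread_stop(a->thread);
                a->thread = NULL;       /* teardown may run more than once */
        }
}
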
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 406e94312d4e..211da1d5a869 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -2108,12 +2108,12 @@ static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
log_debug(1 << CXGBI_DBG_TOE,
"cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
- if (cxgb4i_cplhandlers[opc])
- cxgb4i_cplhandlers[opc](cdev, skb);
- else {
+ if (opc >= ARRAY_SIZE(cxgb4i_cplhandlers) || !cxgb4i_cplhandlers[opc]) {
pr_err("No handler for opcode 0x%x.\n", opc);
__kfree_skb(skb);
- }
+ } else
+ cxgb4i_cplhandlers[opc](cdev, skb);
+
return 0;
nomem:
log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
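
The cxgb4i change validates the opcode against the handler table size before indexing it, instead of only checking for a NULL slot. The same defensive-dispatch shape, reduced to a standalone sketch with made-up handlers:

#include <stdio.h>

typedef void (*cpl_handler_t)(int opc);

static void handle_ping(int opc) { printf("ping, opc=%d\n", opc); }
static void handle_data(int opc) { printf("data, opc=%d\n", opc); }

static cpl_handler_t handlers[8] = {
        [0] = handle_ping,
        [1] = handle_data,
        /* remaining slots intentionally NULL */
};

static void dispatch(int opc)
{
        /* Reject out-of-range opcodes first, then empty slots. */
        if (opc < 0 ||
            opc >= (int)(sizeof(handlers) / sizeof(handlers[0])) ||
            !handlers[opc]) {
                fprintf(stderr, "No handler for opcode 0x%x.\n", opc);
                return;
        }
        handlers[opc](opc);
}

int main(void)
{
        dispatch(1);    /* handled */
        dispatch(5);    /* in range, no handler registered */
        dispatch(42);   /* out of range - previously an out-of-bounds read */
        return 0;
}
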
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 6866975b25f3..5ceea8da7bb6 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -2051,13 +2051,16 @@ static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong ar
}
break;
}
- case I2ORESETCMD:
- if(pHba->host)
- spin_lock_irqsave(pHba->host->host_lock, flags);
+ case I2ORESETCMD: {
+ struct Scsi_Host *shost = pHba->host;
+
+ if (shost)
+ spin_lock_irqsave(shost->host_lock, flags);
adpt_hba_reset(pHba);
- if(pHba->host)
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
+ if (shost)
+ spin_unlock_irqrestore(shost->host_lock, flags);
break;
+ }
case I2ORESCANCMD:
adpt_rescan(pHba);
break;
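
The dpt_i2o hunk reads pHba->host once into a local so the lock and the matching unlock can never act on different pointers if the field changes in between. The shape, as a hedged sketch with invented types:

#include <linux/spinlock.h>

struct my_host {                        /* hypothetical */
        spinlock_t lock;
};

struct my_hba {
        struct my_host *host;           /* may be replaced while we run */
};

static void my_reset(struct my_hba *hba)
{
        struct my_host *host = hba->host;       /* snapshot once */
        unsigned long flags = 0;

        if (host)
                spin_lock_irqsave(&host->lock, flags);

        /* ... perform the reset against hba ... */

        if (host)
                spin_unlock_irqrestore(&host->lock, flags);
}
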
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 7649d63a1b8d..3771e59a9fae 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -472,7 +472,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
else
shost->dma_boundary = 0xffffffff;
- shost->use_blk_mq = scsi_use_blk_mq;
shost->use_blk_mq = scsi_use_blk_mq || shost->hostt->force_blk_mq;
device_initialize(&shost->shost_gendev);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 2288757b5c9e..9e914f9c3ffb 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -3794,6 +3794,7 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* CT_IU preamble */
@@ -3812,7 +3813,6 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla24xx_async_gffid_sp_done;
rval = qla2x00_start_sp(sp);
@@ -4230,6 +4230,8 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
sp->name = "gnnft";
sp->gen1 = vha->hw->base_qpair->chip_reset;
sp->gen2 = fc4_type;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
@@ -4246,7 +4248,6 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_gpnft_gnnft_sp_done;
rval = qla2x00_start_sp(sp);
@@ -4370,6 +4371,8 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
sp->name = "gpnft";
sp->gen1 = vha->hw->base_qpair->chip_reset;
sp->gen2 = fc4_type;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
rspsz = sizeof(struct ct_sns_gpnft_rsp) +
@@ -4385,7 +4388,6 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_gpnft_gnnft_sp_done;
rval = qla2x00_start_sp(sp);
@@ -4495,6 +4497,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* CT_IU preamble */
@@ -4516,7 +4519,6 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_gnnid_sp_done;
rval = qla2x00_start_sp(sp);
@@ -4632,6 +4634,7 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* CT_IU preamble */
@@ -4653,7 +4656,6 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_gfpnid_sp_done;
rval = qla2x00_start_sp(sp);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 8aeb0ed524a1..8f55dd44adae 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -183,10 +183,11 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
sp->name = "login";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
sp->done = qla2x00_async_login_sp_done;
lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
@@ -245,10 +246,11 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
sp->type = SRB_LOGOUT_CMD;
sp->name = "logout";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
sp->done = qla2x00_async_logout_sp_done;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
@@ -307,10 +309,11 @@ qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
sp->type = SRB_PRLO_CMD;
sp->name = "prlo";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
sp->done = qla2x00_async_prlo_sp_done;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
@@ -412,10 +415,11 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
sp->type = SRB_ADISC_CMD;
sp->name = "adisc";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
sp->done = qla2x00_async_adisc_sp_done;
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
@@ -745,6 +749,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
+ mbx = &sp->u.iocb_cmd;
+ mbx->timeout = qla2x00_async_iocb_timeout;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
mb = sp->u.iocb_cmd.u.mbx.out_mb;
@@ -757,9 +763,6 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
mb[8] = vha->gnl.size;
mb[9] = vha->vp_idx;
- mbx = &sp->u.iocb_cmd;
- mbx->timeout = qla2x00_async_iocb_timeout;
-
sp->done = qla24xx_async_gnl_sp_done;
rval = qla2x00_start_sp(sp);
@@ -887,10 +890,11 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
sp->type = SRB_PRLI_CMD;
sp->name = "prli";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
lio = &sp->u.iocb_cmd;
lio->timeout = qla2x00_async_iocb_timeout;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
sp->done = qla2x00_async_prli_sp_done;
lio->u.logio.flags = 0;
@@ -955,6 +959,9 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
sp->name = "gpdb";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
+
+ mbx = &sp->u.iocb_cmd;
+ mbx->timeout = qla2x00_async_iocb_timeout;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
@@ -974,8 +981,6 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
mb[9] = vha->vp_idx;
mb[10] = opt;
- mbx = &sp->u.iocb_cmd;
- mbx->timeout = qla2x00_async_iocb_timeout;
mbx->u.mbx.in = (void *)pd;
mbx->u.mbx.in_dma = pd_dma;
@@ -1490,13 +1495,15 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
tm_iocb = &sp->u.iocb_cmd;
sp->type = SRB_TM_CMD;
sp->name = "tmf";
+
+ tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
+ init_completion(&tm_iocb->u.tmf.comp);
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+
tm_iocb->u.tmf.flags = flags;
tm_iocb->u.tmf.lun = lun;
tm_iocb->u.tmf.data = tag;
sp->done = qla2x00_tmf_sp_done;
- tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
- init_completion(&tm_iocb->u.tmf.comp);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
@@ -1550,8 +1557,8 @@ qla24xx_abort_sp_done(void *ptr, int res)
srb_t *sp = ptr;
struct srb_iocb *abt = &sp->u.iocb_cmd;
- del_timer(&sp->u.iocb_cmd.timer);
- complete(&abt->u.abt.comp);
+ if (del_timer(&sp->u.iocb_cmd.timer))
+ complete(&abt->u.abt.comp);
}
int
@@ -1570,7 +1577,11 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp)
abt_iocb = &sp->u.iocb_cmd;
sp->type = SRB_ABT_CMD;
sp->name = "abort";
+
+ abt_iocb->timeout = qla24xx_abort_iocb_timeout;
+ init_completion(&abt_iocb->u.abt.comp);
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+
abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
if (vha->flags.qpairs_available && cmd_sp->qpair)
@@ -1580,8 +1591,6 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp)
abt_iocb->u.abt.req_que_no = cpu_to_le16(vha->req->id);
sp->done = qla24xx_abort_sp_done;
- abt_iocb->timeout = qla24xx_abort_iocb_timeout;
- init_completion(&abt_iocb->u.abt.comp);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index b7a05aebf065..37ae0f6d8ae5 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -272,13 +272,13 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
{
timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
- add_timer(&sp->u.iocb_cmd.timer);
sp->free = qla2x00_sp_free;
init_completion(&sp->comp);
if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
if (sp->type == SRB_ELS_DCMD)
init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
+ add_timer(&sp->u.iocb_cmd.timer);
}
static inline int
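
All of the qla2xxx hunks in this series reorder the same thing: the iocb timeout callback (and any completion) is filled in before qla2x00_init_timer() runs, and add_timer() becomes the last step inside that helper, so a timer that fires immediately always finds an initialized handler. A generic "publish last" sketch with a plain kernel timer and hypothetical names:

#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/jiffies.h>

struct my_cmd {                         /* hypothetical request object */
        struct timer_list timer;
        struct completion done;
        void (*timeout)(struct my_cmd *cmd);
};

static void my_timer_fn(struct timer_list *t)
{
        struct my_cmd *cmd = from_timer(cmd, t, timer);

        cmd->timeout(cmd);      /* safe: assigned before the timer was armed */
}

static void my_cmd_start(struct my_cmd *cmd,
                         void (*timeout)(struct my_cmd *),
                         unsigned long tmo_secs)
{
        /* Fill in everything the timer handler may touch ... */
        cmd->timeout = timeout;
        init_completion(&cmd->done);
        timer_setup(&cmd->timer, my_timer_fn, 0);
        cmd->timer.expires = jiffies + tmo_secs * HZ;

        /* ... and only then make the timer visible. */
        add_timer(&cmd->timer);
}
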
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index f74ff7b550b6..a91cca52b5d5 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2460,8 +2460,8 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
sp->type = SRB_ELS_DCMD;
sp->name = "ELS_DCMD";
sp->fcport = fcport;
- qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
+ qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
sp->done = qla2x00_els_dcmd_sp_done;
sp->free = qla2x00_els_dcmd_sp_free;
@@ -2658,8 +2658,11 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
sp->type = SRB_ELS_DCMD;
sp->name = "ELS_DCMD";
sp->fcport = fcport;
- qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
+
elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
+ init_completion(&elsio->u.els_plogi.comp);
+ qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
+
sp->done = qla2x00_els_dcmd2_sp_done;
sp->free = qla2x00_els_dcmd2_sp_free;
@@ -2696,7 +2699,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x0109,
(uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
- init_completion(&elsio->u.els_plogi.comp);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 7cacdc3408fa..a3dc83f9444d 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2174,7 +2174,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
0x10, 0x1);
set_driver_byte(cmd, DRIVER_SENSE);
set_host_byte(cmd, DID_ABORT);
- cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
+ cmd->result |= SAM_STAT_CHECK_CONDITION;
return 1;
}
@@ -2184,7 +2184,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
0x10, 0x3);
set_driver_byte(cmd, DRIVER_SENSE);
set_host_byte(cmd, DID_ABORT);
- cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
+ cmd->result |= SAM_STAT_CHECK_CONDITION;
return 1;
}
@@ -2194,7 +2194,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
0x10, 0x2);
set_driver_byte(cmd, DRIVER_SENSE);
set_host_byte(cmd, DID_ABORT);
- cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
+ cmd->result |= SAM_STAT_CHECK_CONDITION;
return 1;
}
@@ -2347,7 +2347,7 @@ done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
/* Always return DID_OK, bsg will send the vendor specific response
* in this case only */
- sp->done(sp, DID_OK << 6);
+ sp->done(sp, DID_OK << 16);
}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 5db0262d5c94..d8a36c13aeda 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -6023,14 +6023,14 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
sp->type = SRB_MB_IOCB;
sp->name = mb_to_str(mcp->mb[0]);
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
- memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
-
c = &sp->u.iocb_cmd;
c->timeout = qla2x00_async_iocb_timeout;
init_completion(&c->u.mbx.comp);
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
+
sp->done = qla2x00_async_mb_sp_done;
rval = qla2x00_start_sp(sp);
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index da85cd89639f..f6f0a759a7c2 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -929,8 +929,8 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
sp->type = SRB_CTRL_VP;
sp->name = "ctrl_vp";
sp->done = qla_ctrlvp_sp_done;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 7113acf42ff3..521a51370554 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -1821,9 +1821,11 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
sp->type = SRB_FXIOCB_DCMD;
sp->name = "fxdisc";
- qla2x00_init_timer(sp, FXDISC_TIMEOUT);
fdisc = &sp->u.iocb_cmd;
+ fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
+ qla2x00_init_timer(sp, FXDISC_TIMEOUT);
+
switch (fx_type) {
case FXDISC_GET_CONFIG_INFO:
fdisc->u.fxiocb.flags =
@@ -1924,7 +1926,6 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
goto done_unmap_req;
}
- fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type);
sp->done = qla2x00_fxdisc_sp_done;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2bbf0bff0da0..15eaa6dded04 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -470,9 +470,6 @@ fail_req_map:
static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
{
- if (!ha->req_q_map)
- return;
-
if (IS_QLAFX00(ha)) {
if (req && req->ring_fx00)
dma_free_coherent(&ha->pdev->dev,
@@ -483,17 +480,14 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
(req->length + 1) * sizeof(request_t),
req->ring, req->dma);
- if (req) {
+ if (req)
kfree(req->outstanding_cmds);
- kfree(req);
- }
+
+ kfree(req);
}
static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
{
- if (!ha->rsp_q_map)
- return;
-
if (IS_QLAFX00(ha)) {
if (rsp && rsp->ring_fx00)
dma_free_coherent(&ha->pdev->dev,
@@ -504,8 +498,7 @@ static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
(rsp->length + 1) * sizeof(response_t),
rsp->ring, rsp->dma);
}
- if (rsp)
- kfree(rsp);
+ kfree(rsp);
}
static void qla2x00_free_queues(struct qla_hw_data *ha)
@@ -3106,7 +3099,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto probe_failed;
/* Alloc arrays of request and response ring ptrs */
- if (qla2x00_alloc_queues(ha, req, rsp)) {
+ ret = qla2x00_alloc_queues(ha, req, rsp);
+ if (ret) {
ql_log(ql_log_fatal, base_vha, 0x003d,
"Failed to allocate memory for queue pointers..."
"aborting.\n");
@@ -3407,8 +3401,15 @@ probe_failed:
}
qla2x00_free_device(base_vha);
-
scsi_host_put(base_vha->host);
+ /*
+ * Need to NULL out local req/rsp after
+ * qla2x00_free_device => qla2x00_free_queues frees
+ * what these are pointing to. Or else we'll
+ * fall over below in qla2x00_free_req/rsp_que.
+ */
+ req = NULL;
+ rsp = NULL;
probe_hw_failed:
qla2x00_mem_free(ha);
@@ -4114,6 +4115,7 @@ fail_npiv_info:
(*rsp)->dma = 0;
fail_rsp_ring:
kfree(*rsp);
+ *rsp = NULL;
fail_rsp:
dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
sizeof(request_t), (*req)->ring, (*req)->dma);
@@ -4121,6 +4123,7 @@ fail_rsp:
(*req)->dma = 0;
fail_req_ring:
kfree(*req);
+ *req = NULL;
fail_req:
dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
ha->ct_sns, ha->ct_sns_dma);
@@ -4508,16 +4511,11 @@ qla2x00_mem_free(struct qla_hw_data *ha)
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
ha->init_cb, ha->init_cb_dma);
- if (ha->optrom_buffer)
- vfree(ha->optrom_buffer);
- if (ha->nvram)
- kfree(ha->nvram);
- if (ha->npiv_info)
- kfree(ha->npiv_info);
- if (ha->swl)
- kfree(ha->swl);
- if (ha->loop_id_map)
- kfree(ha->loop_id_map);
+ vfree(ha->optrom_buffer);
+ kfree(ha->nvram);
+ kfree(ha->npiv_info);
+ kfree(ha->swl);
+ kfree(ha->loop_id_map);
ha->srb_mempool = NULL;
ha->ctx_mempool = NULL;
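
The qla_os.c cleanup leans on kfree()/vfree() accepting NULL, and it NULLs the ring pointers after freeing them so the error paths further down cannot free the same memory twice. A reduced sketch of that unwind pattern (all names invented):

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/errno.h>

struct my_rings {                       /* hypothetical */
        void *req;
        void *rsp;
        void *scratch;
};

static int my_alloc_rings(struct my_rings *r)
{
        r->req = kzalloc(4096, GFP_KERNEL);
        if (!r->req)
                goto fail_req;
        r->rsp = kzalloc(4096, GFP_KERNEL);
        if (!r->rsp)
                goto fail_rsp;
        r->scratch = vzalloc(1 << 20);
        if (!r->scratch)
                goto fail_scratch;
        return 0;

fail_scratch:
        kfree(r->rsp);
        r->rsp = NULL;          /* callers may run the free path again */
fail_rsp:
        kfree(r->req);
        r->req = NULL;
fail_req:
        return -ENOMEM;
}

static void my_free_rings(struct my_rings *r)
{
        /* No NULL checks needed: kfree(NULL)/vfree(NULL) are no-ops. */
        vfree(r->scratch);
        kfree(r->rsp);
        kfree(r->req);
        memset(r, 0, sizeof(*r));
}
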
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 5546ac9c3d9d..025dc2d3f3de 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -664,10 +664,10 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
sp->type = type;
sp->name = "nack";
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
sp->u.iocb_cmd.u.nack.ntfy = ntfy;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_nack_sp_done;
rval = qla2x00_start_sp(sp);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index e5370d718058..dd107dc4db0e 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -202,7 +202,7 @@ static struct {
{"Medion", "Flash XL MMC/SD", "2.6D", BLIST_FORCELUN},
{"MegaRAID", "LD", NULL, BLIST_FORCELUN},
{"MICROP", "4110", NULL, BLIST_NOTQ},
- {"MSFT", "Virtual HD", NULL, BLIST_NO_RSOC},
+ {"MSFT", "Virtual HD", NULL, BLIST_MAX_1024 | BLIST_NO_RSOC},
{"MYLEX", "DACARMRB", "*", BLIST_REPORTLUN2},
{"nCipher", "Fastness Crypto", NULL, BLIST_FORCELUN},
{"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index b88b5dbbc444..188f30572aa1 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -112,6 +112,9 @@ static struct scsi_device_handler *scsi_dh_lookup(const char *name)
{
struct scsi_device_handler *dh;
+ if (!name || strlen(name) == 0)
+ return NULL;
+
dh = __scsi_dh_lookup(name);
if (!dh) {
request_module("scsi_dh_%s", name);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0dfec0dedd5e..e9b4f279d29c 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -723,18 +723,25 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
}
/**
- * __scsi_error_from_host_byte - translate SCSI error code into errno
- * @cmd: SCSI command (unused)
+ * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
+ * @cmd: SCSI command
* @result: scsi error code
*
- * Translate SCSI error code into block errors.
+ * Translate a SCSI result code into a blk_status_t value. May reset the host
+ * byte of @cmd->result.
*/
-static blk_status_t __scsi_error_from_host_byte(struct scsi_cmnd *cmd,
- int result)
+static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
{
switch (host_byte(result)) {
case DID_OK:
- return BLK_STS_OK;
+ /*
+ * Also check the other bytes than the status byte in result
+ * to handle the case when a SCSI LLD sets result to
+ * DRIVER_SENSE << 24 without setting SAM_STAT_CHECK_CONDITION.
+ */
+ if (scsi_status_is_good(result) && (result & ~0xff) == 0)
+ return BLK_STS_OK;
+ return BLK_STS_IOERR;
case DID_TRANSPORT_FAILFAST:
return BLK_STS_TRANSPORT;
case DID_TARGET_FAILURE:
@@ -812,10 +819,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
SCSI_SENSE_BUFFERSIZE);
}
if (!sense_deferred)
- error = __scsi_error_from_host_byte(cmd, result);
+ error = scsi_result_to_blk_status(cmd, result);
}
/*
- * __scsi_error_from_host_byte may have reset the host_byte
+ * scsi_result_to_blk_status may have reset the host_byte
*/
scsi_req(req)->result = cmd->result;
scsi_req(req)->resid_len = scsi_get_resid(cmd);
@@ -837,7 +844,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* good_bytes != blk_rq_bytes(req) as the signal for an error.
* This sets the error explicitly for the problem case.
*/
- error = __scsi_error_from_host_byte(cmd, result);
+ error = scsi_result_to_blk_status(cmd, result);
}
/* no bidi support for !blk_rq_is_passthrough yet */
@@ -907,7 +914,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
if (result == 0)
goto requeue;
- error = __scsi_error_from_host_byte(cmd, result);
+ error = scsi_result_to_blk_status(cmd, result);
if (host_byte(result) == DID_RESET) {
/* Third party bus reset or reset for error recovery
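
The scsi_lib.c rename also tightens the DID_OK case: a result only maps to BLK_STS_OK when the status byte is good and no other byte is set, which catches LLDs that set DRIVER_SENSE without SAM_STAT_CHECK_CONDITION. A standalone sketch of decoding such a packed result word; the byte layout shown is the conventional SCSI one and is stated here as an assumption:

#include <stdio.h>

/* Assumed packing: status | msg<<8 | host<<16 | driver<<24. */
#define STATUS_BYTE(r)  ((r) & 0xff)
#define HOST_BYTE(r)    (((r) >> 16) & 0xff)

#define SAM_STAT_GOOD           0x00
#define SAM_STAT_CONDITION_MET  0x04
#define DID_OK                  0x00

static int result_is_ok(int result)
{
        if (HOST_BYTE(result) != DID_OK)
                return 0;
        /* A good status byte alone is not enough: reject results where
         * any byte other than the status byte is set. */
        if ((result & ~0xff) != 0)
                return 0;
        return STATUS_BYTE(result) == SAM_STAT_GOOD ||
               STATUS_BYTE(result) == SAM_STAT_CONDITION_MET;
}

int main(void)
{
        printf("%d\n", result_is_ok(0x00000000));  /* 1: clean GOOD */
        printf("%d\n", result_is_ok(0x08000000));  /* 0: driver byte set */
        printf("%d\n", result_is_ok(0x00070000));  /* 0: host byte set */
        return 0;
}
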
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0cf25d789d05..3f3cb72e0c0c 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -587,18 +587,28 @@ out:
static unsigned int sr_block_check_events(struct gendisk *disk,
unsigned int clearing)
{
- struct scsi_cd *cd = scsi_cd(disk);
+ unsigned int ret = 0;
+ struct scsi_cd *cd;
- if (atomic_read(&cd->device->disk_events_disable_depth))
+ cd = scsi_cd_get(disk);
+ if (!cd)
return 0;
- return cdrom_check_events(&cd->cdi, clearing);
+ if (!atomic_read(&cd->device->disk_events_disable_depth))
+ ret = cdrom_check_events(&cd->cdi, clearing);
+
+ scsi_cd_put(cd);
+ return ret;
}
static int sr_block_revalidate_disk(struct gendisk *disk)
{
- struct scsi_cd *cd = scsi_cd(disk);
struct scsi_sense_hdr sshdr;
+ struct scsi_cd *cd;
+
+ cd = scsi_cd_get(disk);
+ if (!cd)
+ return -ENXIO;
/* if the unit is not ready, nothing more to do */
if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
@@ -607,6 +617,7 @@ static int sr_block_revalidate_disk(struct gendisk *disk)
sr_cd_check(&cd->cdi);
get_sectorsize(cd);
out:
+ scsi_cd_put(cd);
return 0;
}
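
The sr.c hunks take a reference on the scsi_cd before the block-layer callbacks touch it and drop it on every exit path, so a concurrent removal cannot free the object mid-call. The get/check/put shape, reduced to a sketch around a hypothetical kref-counted object:

#include <linux/kref.h>
#include <linux/slab.h>

struct my_cd {                          /* hypothetical */
        struct kref ref;
        bool events_disabled;
};

static void my_cd_release(struct kref *ref)
{
        kfree(container_of(ref, struct my_cd, ref));
}

/* Returns NULL if the device is already on its way out. */
static struct my_cd *my_cd_get(struct my_cd *cd)
{
        if (!cd || !kref_get_unless_zero(&cd->ref))
                return NULL;
        return cd;
}

static unsigned int my_check_events(struct my_cd *cd)
{
        unsigned int ret = 0;

        cd = my_cd_get(cd);
        if (!cd)
                return 0;               /* device disappeared under us */

        if (!cd->events_disabled)
                ret = 1;                /* stand-in for cdrom_check_events() */

        kref_put(&cd->ref, my_cd_release);
        return ret;
}
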
diff --git a/drivers/soc/samsung/pm_domains.c b/drivers/soc/samsung/pm_domains.c
index b6a436594a19..caf45cf7aa8e 100644
--- a/drivers/soc/samsung/pm_domains.c
+++ b/drivers/soc/samsung/pm_domains.c
@@ -147,6 +147,12 @@ static __init const char *exynos_get_domain_name(struct device_node *node)
return kstrdup_const(name, GFP_KERNEL);
}
+static const char *soc_force_no_clk[] = {
+ "samsung,exynos5250-clock",
+ "samsung,exynos5420-clock",
+ "samsung,exynos5800-clock",
+};
+
static __init int exynos4_pm_init_power_domain(void)
{
struct device_node *np;
@@ -183,6 +189,11 @@ static __init int exynos4_pm_init_power_domain(void)
pd->pd.power_on = exynos_pd_power_on;
pd->local_pwr_cfg = pm_domain_cfg->local_pwr_cfg;
+ for (i = 0; i < ARRAY_SIZE(soc_force_no_clk); i++)
+ if (of_find_compatible_node(NULL, NULL,
+ soc_force_no_clk[i]))
+ goto no_clk;
+
for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
char clk_name[8];
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index b6adc54b96f1..82979880f985 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -15,6 +15,13 @@ menuconfig THERMAL
if THERMAL
+config THERMAL_STATISTICS
+ bool "Thermal state transition statistics"
+ help
+ Export thermal state transition statistics information through sysfs.
+
+ If in doubt, say N.
+
config THERMAL_EMERGENCY_POWEROFF_DELAY_MS
int "Emergency poweroff delay in milli-seconds"
depends on THERMAL
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index a67781b7a0b2..ee3a215b333a 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -637,6 +637,9 @@ static int imx_thermal_probe(struct platform_device *pdev)
regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
+ data->irq_enabled = true;
+ data->mode = THERMAL_DEVICE_ENABLED;
+
ret = devm_request_threaded_irq(&pdev->dev, data->irq,
imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread,
0, "imx_thermal", data);
@@ -649,9 +652,6 @@ static int imx_thermal_probe(struct platform_device *pdev)
return ret;
}
- data->irq_enabled = true;
- data->mode = THERMAL_DEVICE_ENABLED;
-
return 0;
}
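
The imx_thermal fix moves the irq_enabled/mode initialization ahead of devm_request_threaded_irq(), because the handler can run as soon as the interrupt is requested and must not see half-initialized state. A generic sketch of "publish state, then enable the IRQ", with an invented device structure:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

struct my_sensor {                      /* hypothetical */
        bool irq_enabled;
        int mode;
        int irq;
};

static irqreturn_t my_alarm_irq(int irq, void *data)
{
        struct my_sensor *s = data;

        /* May run immediately after the request: state must be valid. */
        if (!s->irq_enabled)
                return IRQ_NONE;
        return IRQ_HANDLED;
}

static int my_probe(struct platform_device *pdev, struct my_sensor *s)
{
        /* Publish consistent state first ... */
        s->irq_enabled = true;
        s->mode = 1;

        /* ... then let interrupts in. */
        return devm_request_threaded_irq(&pdev->dev, s->irq, my_alarm_irq,
                                         NULL, 0, "my_sensor", s);
}
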
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 2b1b0ba393a4..d64325e078db 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -972,8 +972,8 @@ __thermal_cooling_device_register(struct device_node *np,
cdev->ops = ops;
cdev->updated = false;
cdev->device.class = &thermal_class;
- thermal_cooling_device_setup_sysfs(cdev);
cdev->devdata = devdata;
+ thermal_cooling_device_setup_sysfs(cdev);
dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
result = device_register(&cdev->device);
if (result) {
@@ -1106,6 +1106,7 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
ida_simple_remove(&thermal_cdev_ida, cdev->id);
device_unregister(&cdev->device);
+ thermal_cooling_device_destroy_sysfs(cdev);
}
EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister);
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 27e3b1df7360..5e4150261500 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -73,6 +73,7 @@ int thermal_build_list_of_policies(char *buf);
int thermal_zone_create_device_groups(struct thermal_zone_device *, int);
void thermal_zone_destroy_device_groups(struct thermal_zone_device *);
void thermal_cooling_device_setup_sysfs(struct thermal_cooling_device *);
+void thermal_cooling_device_destroy_sysfs(struct thermal_cooling_device *cdev);
/* used only at binding time */
ssize_t
thermal_cooling_device_trip_point_show(struct device *,
@@ -84,6 +85,15 @@ ssize_t thermal_cooling_device_weight_store(struct device *,
struct device_attribute *,
const char *, size_t);
+#ifdef CONFIG_THERMAL_STATISTICS
+void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
+ unsigned long new_state);
+#else
+static inline void
+thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
+ unsigned long new_state) {}
+#endif /* CONFIG_THERMAL_STATISTICS */
+
#ifdef CONFIG_THERMAL_GOV_STEP_WISE
int thermal_gov_step_wise_register(void);
void thermal_gov_step_wise_unregister(void);
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
index 8cdf75adcce1..eb03d7e099bb 100644
--- a/drivers/thermal/thermal_helpers.c
+++ b/drivers/thermal/thermal_helpers.c
@@ -187,7 +187,10 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev)
if (instance->target > target)
target = instance->target;
}
- cdev->ops->set_cur_state(cdev, target);
+
+ if (!cdev->ops->set_cur_state(cdev, target))
+ thermal_cooling_device_stats_update(cdev, target);
+
cdev->updated = true;
mutex_unlock(&cdev->lock);
trace_cdev_update(cdev, target);
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index ba81c9080f6e..23b5e0a709b0 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -20,6 +20,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/jiffies.h>
#include "thermal_core.h"
@@ -721,6 +722,7 @@ thermal_cooling_device_cur_state_store(struct device *dev,
result = cdev->ops->set_cur_state(cdev, state);
if (result)
return result;
+ thermal_cooling_device_stats_update(cdev, state);
return count;
}
@@ -745,14 +747,237 @@ static const struct attribute_group cooling_device_attr_group = {
static const struct attribute_group *cooling_device_attr_groups[] = {
&cooling_device_attr_group,
+ NULL, /* Space allocated for cooling_device_stats_attr_group */
NULL,
};
+#ifdef CONFIG_THERMAL_STATISTICS
+struct cooling_dev_stats {
+ spinlock_t lock;
+ unsigned int total_trans;
+ unsigned long state;
+ unsigned long max_states;
+ ktime_t last_time;
+ ktime_t *time_in_state;
+ unsigned int *trans_table;
+};
+
+static void update_time_in_state(struct cooling_dev_stats *stats)
+{
+ ktime_t now = ktime_get(), delta;
+
+ delta = ktime_sub(now, stats->last_time);
+ stats->time_in_state[stats->state] =
+ ktime_add(stats->time_in_state[stats->state], delta);
+ stats->last_time = now;
+}
+
+void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
+ unsigned long new_state)
+{
+ struct cooling_dev_stats *stats = cdev->stats;
+
+ spin_lock(&stats->lock);
+
+ if (stats->state == new_state)
+ goto unlock;
+
+ update_time_in_state(stats);
+ stats->trans_table[stats->state * stats->max_states + new_state]++;
+ stats->state = new_state;
+ stats->total_trans++;
+
+unlock:
+ spin_unlock(&stats->lock);
+}
+
+static ssize_t
+thermal_cooling_device_total_trans_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+ struct cooling_dev_stats *stats = cdev->stats;
+ int ret;
+
+ spin_lock(&stats->lock);
+ ret = sprintf(buf, "%u\n", stats->total_trans);
+ spin_unlock(&stats->lock);
+
+ return ret;
+}
+
+static ssize_t
+thermal_cooling_device_time_in_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+ struct cooling_dev_stats *stats = cdev->stats;
+ ssize_t len = 0;
+ int i;
+
+ spin_lock(&stats->lock);
+ update_time_in_state(stats);
+
+ for (i = 0; i < stats->max_states; i++) {
+ len += sprintf(buf + len, "state%u\t%llu\n", i,
+ ktime_to_ms(stats->time_in_state[i]));
+ }
+ spin_unlock(&stats->lock);
+
+ return len;
+}
+
+static ssize_t
+thermal_cooling_device_reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+ struct cooling_dev_stats *stats = cdev->stats;
+ int i, states = stats->max_states;
+
+ spin_lock(&stats->lock);
+
+ stats->total_trans = 0;
+ stats->last_time = ktime_get();
+ memset(stats->trans_table, 0,
+ states * states * sizeof(*stats->trans_table));
+
+ for (i = 0; i < stats->max_states; i++)
+ stats->time_in_state[i] = ktime_set(0, 0);
+
+ spin_unlock(&stats->lock);
+
+ return count;
+}
+
+static ssize_t
+thermal_cooling_device_trans_table_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+ struct cooling_dev_stats *stats = cdev->stats;
+ ssize_t len = 0;
+ int i, j;
+
+ len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
+ len += snprintf(buf + len, PAGE_SIZE - len, " : ");
+ for (i = 0; i < stats->max_states; i++) {
+ if (len >= PAGE_SIZE)
+ break;
+ len += snprintf(buf + len, PAGE_SIZE - len, "state%2u ", i);
+ }
+ if (len >= PAGE_SIZE)
+ return PAGE_SIZE;
+
+ len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+
+ for (i = 0; i < stats->max_states; i++) {
+ if (len >= PAGE_SIZE)
+ break;
+
+ len += snprintf(buf + len, PAGE_SIZE - len, "state%2u:", i);
+
+ for (j = 0; j < stats->max_states; j++) {
+ if (len >= PAGE_SIZE)
+ break;
+ len += snprintf(buf + len, PAGE_SIZE - len, "%8u ",
+ stats->trans_table[i * stats->max_states + j]);
+ }
+ if (len >= PAGE_SIZE)
+ break;
+ len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ }
+
+ if (len >= PAGE_SIZE) {
+ pr_warn_once("Thermal transition table exceeds PAGE_SIZE. Disabling\n");
+ return -EFBIG;
+ }
+ return len;
+}
+
+static DEVICE_ATTR(total_trans, 0444, thermal_cooling_device_total_trans_show,
+ NULL);
+static DEVICE_ATTR(time_in_state_ms, 0444,
+ thermal_cooling_device_time_in_state_show, NULL);
+static DEVICE_ATTR(reset, 0200, NULL, thermal_cooling_device_reset_store);
+static DEVICE_ATTR(trans_table, 0444,
+ thermal_cooling_device_trans_table_show, NULL);
+
+static struct attribute *cooling_device_stats_attrs[] = {
+ &dev_attr_total_trans.attr,
+ &dev_attr_time_in_state_ms.attr,
+ &dev_attr_reset.attr,
+ &dev_attr_trans_table.attr,
+ NULL
+};
+
+static const struct attribute_group cooling_device_stats_attr_group = {
+ .attrs = cooling_device_stats_attrs,
+ .name = "stats"
+};
+
+static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
+{
+ struct cooling_dev_stats *stats;
+ unsigned long states;
+ int var;
+
+ if (cdev->ops->get_max_state(cdev, &states))
+ return;
+
+ states++; /* Total number of states is highest state + 1 */
+
+ var = sizeof(*stats);
+ var += sizeof(*stats->time_in_state) * states;
+ var += sizeof(*stats->trans_table) * states * states;
+
+ stats = kzalloc(var, GFP_KERNEL);
+ if (!stats)
+ return;
+
+ stats->time_in_state = (ktime_t *)(stats + 1);
+ stats->trans_table = (unsigned int *)(stats->time_in_state + states);
+ cdev->stats = stats;
+ stats->last_time = ktime_get();
+ stats->max_states = states;
+
+ spin_lock_init(&stats->lock);
+
+ /* Fill the empty slot left in cooling_device_attr_groups */
+ var = ARRAY_SIZE(cooling_device_attr_groups) - 2;
+ cooling_device_attr_groups[var] = &cooling_device_stats_attr_group;
+}
+
+static void cooling_device_stats_destroy(struct thermal_cooling_device *cdev)
+{
+ kfree(cdev->stats);
+ cdev->stats = NULL;
+}
+
+#else
+
+static inline void
+cooling_device_stats_setup(struct thermal_cooling_device *cdev) {}
+static inline void
+cooling_device_stats_destroy(struct thermal_cooling_device *cdev) {}
+
+#endif /* CONFIG_THERMAL_STATISTICS */
+
void thermal_cooling_device_setup_sysfs(struct thermal_cooling_device *cdev)
{
+ cooling_device_stats_setup(cdev);
cdev->device.groups = cooling_device_attr_groups;
}
+void thermal_cooling_device_destroy_sysfs(struct thermal_cooling_device *cdev)
+{
+ cooling_device_stats_destroy(cdev);
+}
+
/* these helper will be used only at the time of bindig */
ssize_t
thermal_cooling_device_trip_point_show(struct device *dev,
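
The new statistics code makes one allocation that carries the struct followed by the time_in_state array and the states-by-states transition table, and every state change charges the elapsed time to the outgoing state before bumping the transition counter. A self-contained user-space sketch of the same bookkeeping, using clock_gettime() in place of ktime and illustrative names throughout:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct cdev_stats {
        unsigned int total_trans;
        unsigned long state;
        unsigned long max_states;
        struct timespec last_time;
        double *time_in_state;          /* seconds spent in each state */
        unsigned int *trans_table;      /* max_states x max_states */
};

static double elapsed(struct timespec *since)
{
        struct timespec now;
        double d;

        clock_gettime(CLOCK_MONOTONIC, &now);
        d = (now.tv_sec - since->tv_sec) +
            (now.tv_nsec - since->tv_nsec) / 1e9;
        *since = now;
        return d;
}

static struct cdev_stats *stats_alloc(unsigned long max_states)
{
        /* One allocation: the struct, then both trailing arrays. */
        size_t sz = sizeof(struct cdev_stats) +
                    max_states * sizeof(double) +
                    max_states * max_states * sizeof(unsigned int);
        struct cdev_stats *s = calloc(1, sz);

        if (!s)
                return NULL;
        s->max_states = max_states;
        s->time_in_state = (double *)(s + 1);
        s->trans_table = (unsigned int *)(s->time_in_state + max_states);
        clock_gettime(CLOCK_MONOTONIC, &s->last_time);
        return s;
}

static void stats_update(struct cdev_stats *s, unsigned long new_state)
{
        if (new_state == s->state || new_state >= s->max_states)
                return;
        s->time_in_state[s->state] += elapsed(&s->last_time);
        s->trans_table[s->state * s->max_states + new_state]++;
        s->state = new_state;
        s->total_trans++;
}

int main(void)
{
        struct cdev_stats *s = stats_alloc(3);

        if (!s)
                return 1;
        stats_update(s, 2);
        stats_update(s, 1);
        printf("transitions: %u\n", s->total_trans);
        free(s);
        return 0;
}
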
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index bec722e41f58..f3bd8e941224 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -641,14 +641,14 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
-static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
+static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
u64 a = addr / VHOST_PAGE_SIZE / 8;
/* Make sure 64 bit math will not overflow. */
if (a > ULONG_MAX - (unsigned long)log_base ||
a + (unsigned long)log_base > ULONG_MAX)
- return 0;
+ return false;
return access_ok(VERIFY_WRITE, log_base + a,
(sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
@@ -661,30 +661,30 @@ static bool vhost_overflow(u64 uaddr, u64 size)
}
/* Caller should have vq mutex and device mutex. */
-static int vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
- int log_all)
+static bool vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
+ int log_all)
{
struct vhost_umem_node *node;
if (!umem)
- return 0;
+ return false;
list_for_each_entry(node, &umem->umem_list, link) {
unsigned long a = node->userspace_addr;
if (vhost_overflow(node->userspace_addr, node->size))
- return 0;
+ return false;
if (!access_ok(VERIFY_WRITE, (void __user *)a,
node->size))
- return 0;
+ return false;
else if (log_all && !log_access_ok(log_base,
node->start,
node->size))
- return 0;
+ return false;
}
- return 1;
+ return true;
}
static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
@@ -701,13 +701,13 @@ static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
-static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
- int log_all)
+static bool memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
+ int log_all)
{
int i;
for (i = 0; i < d->nvqs; ++i) {
- int ok;
+ bool ok;
bool log;
mutex_lock(&d->vqs[i]->mutex);
@@ -717,12 +717,12 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
ok = vq_memory_access_ok(d->vqs[i]->log_base,
umem, log);
else
- ok = 1;
+ ok = true;
mutex_unlock(&d->vqs[i]->mutex);
if (!ok)
- return 0;
+ return false;
}
- return 1;
+ return true;
}
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
@@ -744,7 +744,7 @@ static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
struct iov_iter t;
void __user *uaddr = vhost_vq_meta_fetch(vq,
(u64)(uintptr_t)to, size,
- VHOST_ADDR_DESC);
+ VHOST_ADDR_USED);
if (uaddr)
return __copy_to_user(uaddr, from, size);
@@ -959,21 +959,21 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
spin_unlock(&d->iotlb_lock);
}
-static int umem_access_ok(u64 uaddr, u64 size, int access)
+static bool umem_access_ok(u64 uaddr, u64 size, int access)
{
unsigned long a = uaddr;
/* Make sure 64 bit math will not overflow. */
if (vhost_overflow(uaddr, size))
- return -EFAULT;
+ return false;
if ((access & VHOST_ACCESS_RO) &&
!access_ok(VERIFY_READ, (void __user *)a, size))
- return -EFAULT;
+ return false;
if ((access & VHOST_ACCESS_WO) &&
!access_ok(VERIFY_WRITE, (void __user *)a, size))
- return -EFAULT;
- return 0;
+ return false;
+ return true;
}
static int vhost_process_iotlb_msg(struct vhost_dev *dev,
@@ -988,7 +988,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
ret = -EFAULT;
break;
}
- if (umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
+ if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
ret = -EFAULT;
break;
}
@@ -1135,10 +1135,10 @@ static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
return 0;
}
-static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
- struct vring_desc __user *desc,
- struct vring_avail __user *avail,
- struct vring_used __user *used)
+static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
+ struct vring_desc __user *desc,
+ struct vring_avail __user *avail,
+ struct vring_used __user *used)
{
size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
@@ -1161,8 +1161,8 @@ static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
vq->meta_iotlb[type] = node;
}
-static int iotlb_access_ok(struct vhost_virtqueue *vq,
- int access, u64 addr, u64 len, int type)
+static bool iotlb_access_ok(struct vhost_virtqueue *vq,
+ int access, u64 addr, u64 len, int type)
{
const struct vhost_umem_node *node;
struct vhost_umem *umem = vq->iotlb;
@@ -1220,7 +1220,7 @@ EXPORT_SYMBOL_GPL(vq_iotlb_prefetch);
/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
-int vhost_log_access_ok(struct vhost_dev *dev)
+bool vhost_log_access_ok(struct vhost_dev *dev)
{
return memory_access_ok(dev, dev->umem, 1);
}
@@ -1228,8 +1228,8 @@ EXPORT_SYMBOL_GPL(vhost_log_access_ok);
/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
-static int vq_log_access_ok(struct vhost_virtqueue *vq,
- void __user *log_base)
+static bool vq_log_access_ok(struct vhost_virtqueue *vq,
+ void __user *log_base)
{
size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
@@ -1242,12 +1242,14 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq,
/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
-int vhost_vq_access_ok(struct vhost_virtqueue *vq)
+bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
- int ret = vq_log_access_ok(vq, vq->log_base);
+ if (!vq_log_access_ok(vq, vq->log_base))
+ return false;
- if (ret || vq->iotlb)
- return ret;
+ /* Access validation occurs at prefetch time with IOTLB */
+ if (vq->iotlb)
+ return true;
return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
}
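
The vhost series converts the access-check helpers to return bool (true means the access is allowed) instead of the earlier mix of 0/1 and 0/-EFAULT conventions, and the call site in vhost_process_iotlb_msg is flipped to test the negation at the same time. A tiny sketch, with invented names, of why helper and call sites have to change together:

#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

/* Old convention: 0 on success, -EFAULT on failure. */
static int check_old(int valid)
{
        return valid ? 0 : -EFAULT;
}

/* New convention: true when the access is allowed. */
static bool check_new(int valid)
{
        return valid != 0;
}

int main(void)
{
        int valid = 1;          /* a perfectly valid mapping */

        /* Matched pairing, old convention: nonzero meant failure. */
        if (check_old(valid))
                printf("old helper, old call site: -EFAULT\n");

        /* Matched pairing, new convention: test the negation. */
        if (!check_new(valid))
                printf("new helper, new call site: -EFAULT\n");

        /* The hazard the patch avoids: a stale old-style call site used
         * with the bool helper inverts the meaning. */
        if (check_new(valid))
                printf("stale call site would report -EFAULT here\n");

        return 0;
}
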
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index d8ee85ae8fdc..6c844b90a168 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -178,8 +178,8 @@ void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
-int vhost_vq_access_ok(struct vhost_virtqueue *vq);
-int vhost_log_access_ok(struct vhost_dev *);
+bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
+bool vhost_log_access_ok(struct vhost_dev *);
int vhost_get_vq_desc(struct vhost_virtqueue *,
struct iovec iov[], unsigned int iov_count,
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index dfe5684000be..6b237e3f4983 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -272,6 +272,12 @@ static unsigned int update_balloon_stats(struct virtio_balloon *vb)
pages_to_bytes(events[PSWPOUT]));
update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
+#ifdef CONFIG_HUGETLB_PAGE
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGALLOC,
+ events[HTLB_BUDDY_PGALLOC]);
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGFAIL,
+ events[HTLB_BUDDY_PGALLOC_FAIL]);
+#endif
#endif
update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
pages_to_bytes(i.freeram));
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 66a7f5a2f474..9af07fd92763 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -514,6 +514,17 @@ config COH901327_WATCHDOG
This watchdog is used to reset the system and thus cannot be
compiled as a module.
+config NPCM7XX_WATCHDOG
+ bool "Nuvoton NPCM750 watchdog"
+ depends on ARCH_NPCM || COMPILE_TEST
+ default y if ARCH_NPCM750
+ select WATCHDOG_CORE
+ help
+ Say Y here to include Watchdog timer support for the
+ watchdog embedded into the NPCM7xx.
+ This watchdog is used to reset the system and thus cannot be
+ compiled as a module.
+
config TWL4030_WATCHDOG
tristate "TWL4030 Watchdog"
depends on TWL4030_CORE
@@ -1102,6 +1113,7 @@ config IT87_WDT
config HP_WATCHDOG
tristate "HP ProLiant iLO2+ Hardware Watchdog Timer"
+ select WATCHDOG_CORE
depends on X86 && PCI
help
A software monitoring watchdog and NMI sourcing driver. This driver
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index e4dd91f5585a..1d3c6b094fe5 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
obj-$(CONFIG_SUNXI_WATCHDOG) += sunxi_wdt.o
obj-$(CONFIG_RN5T618_WATCHDOG) += rn5t618_wdt.o
obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o
+obj-$(CONFIG_NPCM7XX_WATCHDOG) += npcm_wdt.o
obj-$(CONFIG_STMP3XXX_RTC_WATCHDOG) += stmp3xxx_rtc_wdt.o
obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
obj-$(CONFIG_TS4800_WATCHDOG) += ts4800_wdt.o
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index 6d5ae251e309..ee1ab12ab04f 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/watchdog/ar7_wdt.c
*
@@ -8,19 +9,6 @@
* National Semiconductor SCx200 Watchdog support
* Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/watchdog/asm9260_wdt.c b/drivers/watchdog/asm9260_wdt.c
index 7dd0da644a7f..2cf56b459d84 100644
--- a/drivers/watchdog/asm9260_wdt.c
+++ b/drivers/watchdog/asm9260_wdt.c
@@ -292,14 +292,14 @@ static int asm9260_wdt_probe(struct platform_device *pdev)
if (IS_ERR(priv->iobase))
return PTR_ERR(priv->iobase);
- ret = asm9260_wdt_get_dt_clks(priv);
- if (ret)
- return ret;
-
priv->rst = devm_reset_control_get_exclusive(&pdev->dev, "wdt_rst");
if (IS_ERR(priv->rst))
return PTR_ERR(priv->rst);
+ ret = asm9260_wdt_get_dt_clks(priv);
+ if (ret)
+ return ret;
+
wdd = &priv->wdd;
wdd->info = &asm9260_wdt_ident;
wdd->ops = &asm9260_wdt_ops;
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index ca5b91e2eb92..a5b8eb21201f 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -46,6 +46,7 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
#define WDT_RELOAD_VALUE 0x04
#define WDT_RESTART 0x08
#define WDT_CTRL 0x0C
+#define WDT_CTRL_BOOT_SECONDARY BIT(7)
#define WDT_CTRL_RESET_MODE_SOC (0x00 << 5)
#define WDT_CTRL_RESET_MODE_FULL_CHIP (0x01 << 5)
#define WDT_CTRL_RESET_MODE_ARM_CPU (0x10 << 5)
@@ -158,6 +159,7 @@ static int aspeed_wdt_restart(struct watchdog_device *wdd,
{
struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
+ wdt->ctrl &= ~WDT_CTRL_BOOT_SECONDARY;
aspeed_wdt_enable(wdt, 128 * WDT_RATE_1MHZ / 1000);
mdelay(1000);
@@ -232,16 +234,21 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC | WDT_CTRL_RESET_SYSTEM;
} else {
if (!strcmp(reset_type, "cpu"))
- wdt->ctrl |= WDT_CTRL_RESET_MODE_ARM_CPU;
+ wdt->ctrl |= WDT_CTRL_RESET_MODE_ARM_CPU |
+ WDT_CTRL_RESET_SYSTEM;
else if (!strcmp(reset_type, "soc"))
- wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC;
+ wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC |
+ WDT_CTRL_RESET_SYSTEM;
else if (!strcmp(reset_type, "system"))
- wdt->ctrl |= WDT_CTRL_RESET_SYSTEM;
+ wdt->ctrl |= WDT_CTRL_RESET_MODE_FULL_CHIP |
+ WDT_CTRL_RESET_SYSTEM;
else if (strcmp(reset_type, "none"))
return -EINVAL;
}
if (of_property_read_bool(np, "aspeed,external-signal"))
wdt->ctrl |= WDT_CTRL_WDT_EXT;
+ if (of_property_read_bool(np, "aspeed,alt-boot"))
+ wdt->ctrl |= WDT_CTRL_BOOT_SECONDARY;
if (readl(wdt->base + WDT_CTRL) & WDT_CTRL_ENABLE) {
/*
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index e12a797cb820..b45fc0aee667 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for Atmel AT91RM9200 (Thunder)
*
* Copyright (C) 2003 SAN People (Pty) Ltd
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index 88c05d0448b2..f4050a229eb5 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for Atmel AT91SAM9x processors.
*
* Copyright (C) 2008 Renaud CERRATO r.cerrato@til-technologies.fr
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
/*
diff --git a/drivers/watchdog/at91sam9_wdt.h b/drivers/watchdog/at91sam9_wdt.h
index b79a83b467ce..390941c65eee 100644
--- a/drivers/watchdog/at91sam9_wdt.h
+++ b/drivers/watchdog/at91sam9_wdt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* drivers/watchdog/at91sam9_wdt.h
*
@@ -7,10 +8,6 @@
 * Watchdog Timer (WDT) - System peripherals registers.
* Based on AT91SAM9261 datasheet revision D.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef AT91_WDT_H
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index b339e0e67b4c..ed05514cc2dc 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for Broadcom BCM2835
*
@@ -7,10 +8,6 @@
*
* Copyright (C) 2013 Lubomir Rintel <lkundrak@v3.sk>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/delay.h>
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index f41b756d6dd5..05425c1dfd4c 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for Broadcom BCM47XX
*
@@ -5,10 +6,6 @@
* Copyright (C) 2009 Matthieu CASTET <castet.matthieu@free.fr>
* Copyright (C) 2012-2013 Hauke Mehrtens <hauke@hauke-m.de>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index 8555afc70f9b..d3c1113e774c 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Broadcom BCM63xx SoC watchdog driver
*
* Copyright (C) 2007, Miguel Gaio <miguel.gaio@efixo.com>
* Copyright (C) 2008, Florian Fainelli <florian@openwrt.org>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c
index f88f546e8050..ce3f646e8077 100644
--- a/drivers/watchdog/bcm7038_wdt.c
+++ b/drivers/watchdog/bcm7038_wdt.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Broadcom Corporation
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
@@ -235,6 +227,6 @@ module_platform_driver(bcm7038_wdt_driver);
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Driver for Broadcom 7038 SoCs Watchdog");
MODULE_AUTHOR("Justin Chen");
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
index a5775dfd8d5f..1462be9e6fc5 100644
--- a/drivers/watchdog/bcm_kona_wdt.c
+++ b/drivers/watchdog/bcm_kona_wdt.c
@@ -1,14 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013 Broadcom Corporation
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/debugfs.h>
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index 064cf7b6c1c5..3ec1f418837d 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Cadence WDT driver - Used by Xilinx Zynq
*
* Copyright (C) 2010 - 2014 Xilinx, Inc.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/clk.h>
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index 4410337f4f7f..e3a78f927f83 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -1,8 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* coh901327_wdt.c
*
* Copyright (C) 2008-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
* Watchdog driver for the ST-Ericsson AB COH 901 327 IP core
* Author: Linus Walleij <linus.walleij@stericsson.com>
*/
@@ -67,7 +67,9 @@
#define U300_WDOG_IFR_WILL_BARK_IRQ_FORCE_ENABLE 0x0001U
/* Default timeout in seconds = 1 minute */
-static unsigned int margin = 60;
+#define U300_WDOG_DEFAULT_TIMEOUT 60
+
+static unsigned int margin;
static int irq;
static void __iomem *virtbase;
static struct device *parent;
@@ -235,8 +237,9 @@ static struct watchdog_device coh901327_wdt = {
* timeout register is max
* 0x7FFF = 327670ms ~= 327s.
*/
- .min_timeout = 0,
+ .min_timeout = 1,
.max_timeout = 327,
+ .timeout = U300_WDOG_DEFAULT_TIMEOUT,
};
static int __exit coh901327_remove(struct platform_device *pdev)
@@ -315,16 +318,15 @@ static int __init coh901327_probe(struct platform_device *pdev)
goto out_no_irq;
}
- ret = watchdog_init_timeout(&coh901327_wdt, margin, dev);
- if (ret < 0)
- coh901327_wdt.timeout = 60;
+ watchdog_init_timeout(&coh901327_wdt, margin, dev);
coh901327_wdt.parent = dev;
ret = watchdog_register_device(&coh901327_wdt);
if (ret)
goto out_no_wdog;
- dev_info(dev, "initialized. timer margin=%d sec\n", margin);
+ dev_info(dev, "initialized. (timeout=%d sec)\n",
+ coh901327_wdt.timeout);
return 0;
out_no_wdog:
@@ -419,5 +421,5 @@ MODULE_DESCRIPTION("COH 901 327 Watchdog");
module_param(margin, uint, 0);
MODULE_PARM_DESC(margin, "Watchdog margin in seconds (default 60s)");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:coh901327-watchdog");
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
index d6d5006efa71..e263bad99574 100644
--- a/drivers/watchdog/da9052_wdt.c
+++ b/drivers/watchdog/da9052_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* System monitoring driver for DA9052 PMICs.
*
@@ -5,11 +6,6 @@
*
* Author: Anthony Olech <Anthony.Olech@diasemi.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#include <linux/module.h>
diff --git a/drivers/watchdog/da9055_wdt.c b/drivers/watchdog/da9055_wdt.c
index 50bdd1022186..26a5b2984094 100644
--- a/drivers/watchdog/da9055_wdt.c
+++ b/drivers/watchdog/da9055_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* System monitoring driver for DA9055 PMICs.
*
@@ -5,11 +6,6 @@
*
* Author: David Dajun Chen <dchen@diasemi.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#include <linux/module.h>
diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
index 814dff6045a4..a001782bbfdb 100644
--- a/drivers/watchdog/da9062_wdt.c
+++ b/drivers/watchdog/da9062_wdt.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog device driver for DA9062 and DA9061 PMICs
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
index 2a20fc163ed0..b17ac1bb1f28 100644
--- a/drivers/watchdog/da9063_wdt.c
+++ b/drivers/watchdog/da9063_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for DA9063 PMICs.
*
@@ -5,10 +6,6 @@
*
* Author: Mariusz Wojtasik <mariusz.wojtasik@diasemi.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index 3e4c592c239f..6c6594261cb7 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -236,15 +236,22 @@ static int davinci_wdt_probe(struct platform_device *pdev)
wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
davinci_wdt->base = devm_ioremap_resource(dev, wdt_mem);
- if (IS_ERR(davinci_wdt->base))
- return PTR_ERR(davinci_wdt->base);
+ if (IS_ERR(davinci_wdt->base)) {
+ ret = PTR_ERR(davinci_wdt->base);
+ goto err_clk_disable;
+ }
ret = watchdog_register_device(wdd);
- if (ret < 0) {
- clk_disable_unprepare(davinci_wdt->clk);
+ if (ret) {
dev_err(dev, "cannot register watchdog device\n");
+ goto err_clk_disable;
}
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(davinci_wdt->clk);
+
return ret;
}
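
The davinci_wdt hunk above converts the probe error handling to the usual goto-unwind idiom: the clock has already been enabled earlier in probe, so every failure after that point must disable it again, and a single err_clk_disable label keeps that cleanup in one place. A generic sketch of the idiom, with placeholder foo_* names and the surrounding declarations elided:

	static int foo_probe(struct platform_device *pdev)
	{
		int ret;

		ret = clk_prepare_enable(foo->clk);
		if (ret)
			return ret;			/* nothing to unwind yet */

		foo->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(foo->base)) {
			ret = PTR_ERR(foo->base);
			goto err_clk_disable;		/* clock is on: unwind it */
		}

		ret = watchdog_register_device(&foo->wdd);
		if (ret)
			goto err_clk_disable;

		return 0;

	err_clk_disable:
		clk_disable_unprepare(foo->clk);
		return ret;
	}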
diff --git a/drivers/watchdog/digicolor_wdt.c b/drivers/watchdog/digicolor_wdt.c
index 5e4ef93caa02..a9e11df155b8 100644
--- a/drivers/watchdog/digicolor_wdt.c
+++ b/drivers/watchdog/digicolor_wdt.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for Conexant Digicolor
*
* Copyright (C) 2015 Paradox Innovation Ltd.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/types.h>
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index c2f4ff516230..501aebb5b81f 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -34,6 +34,7 @@
#define WDOG_CONTROL_REG_OFFSET 0x00
#define WDOG_CONTROL_REG_WDT_EN_MASK 0x01
+#define WDOG_CONTROL_REG_RESP_MODE_MASK 0x02
#define WDOG_TIMEOUT_RANGE_REG_OFFSET 0x04
#define WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT 4
#define WDOG_CURRENT_COUNT_REG_OFFSET 0x08
@@ -56,6 +57,9 @@ struct dw_wdt {
unsigned long rate;
struct watchdog_device wdd;
struct reset_control *rst;
+ /* Save/restore */
+ u32 control;
+ u32 timeout;
};
#define to_dw_wdt(wdd) container_of(wdd, struct dw_wdt, wdd)
@@ -121,14 +125,23 @@ static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s)
return 0;
}
+static void dw_wdt_arm_system_reset(struct dw_wdt *dw_wdt)
+{
+ u32 val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+
+ /* Disable interrupt mode; always perform system reset. */
+ val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK;
+ /* Enable watchdog. */
+ val |= WDOG_CONTROL_REG_WDT_EN_MASK;
+ writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+}
+
static int dw_wdt_start(struct watchdog_device *wdd)
{
struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
dw_wdt_set_timeout(wdd, wdd->timeout);
-
- writel(WDOG_CONTROL_REG_WDT_EN_MASK,
- dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+ dw_wdt_arm_system_reset(dw_wdt);
return 0;
}
@@ -152,16 +165,13 @@ static int dw_wdt_restart(struct watchdog_device *wdd,
unsigned long action, void *data)
{
struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
- u32 val;
writel(0, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
- val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
- if (val & WDOG_CONTROL_REG_WDT_EN_MASK)
+ if (dw_wdt_is_enabled(dw_wdt))
writel(WDOG_COUNTER_RESTART_KICK_VALUE,
dw_wdt->regs + WDOG_COUNTER_RESTART_REG_OFFSET);
else
- writel(WDOG_CONTROL_REG_WDT_EN_MASK,
- dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+ dw_wdt_arm_system_reset(dw_wdt);
/* wait for reset to assert... */
mdelay(500);
@@ -198,6 +208,9 @@ static int dw_wdt_suspend(struct device *dev)
{
struct dw_wdt *dw_wdt = dev_get_drvdata(dev);
+ dw_wdt->control = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+ dw_wdt->timeout = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
+
clk_disable_unprepare(dw_wdt->clk);
return 0;
@@ -211,6 +224,9 @@ static int dw_wdt_resume(struct device *dev)
if (err)
return err;
+ writel(dw_wdt->timeout, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
+ writel(dw_wdt->control, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+
dw_wdt_ping(&dw_wdt->wdd);
return 0;
diff --git a/drivers/watchdog/ebc-c384_wdt.c b/drivers/watchdog/ebc-c384_wdt.c
index 2170b275ea01..4c4c8ce78021 100644
--- a/drivers/watchdog/ebc-c384_wdt.c
+++ b/drivers/watchdog/ebc-c384_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Watchdog timer driver for the WinSystems EBC-C384
* Copyright (C) 2016 William Breathitt Gray
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 3a33c5344bd5..9a1c761258ce 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -496,7 +496,7 @@ static bool watchdog_is_running(void)
is_running = (superio_inb(watchdog.sioaddr, SIO_REG_ENABLE) & BIT(0))
&& (superio_inb(watchdog.sioaddr, F71808FG_REG_WDT_CONF)
- & F71808FG_FLAG_WD_EN);
+ & BIT(F71808FG_FLAG_WD_EN));
superio_exit(watchdog.sioaddr);
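
The f71808e hunk fixes a bit-number versus bit-mask mix-up: F71808FG_FLAG_WD_EN is defined as a bit position in the driver, so ANDing the register with the raw value tested the wrong bits, and only BIT() turns it into the intended mask. A small standalone illustration (register value and bit position chosen for the example):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define BIT(n)      (1U << (n))
	#define FLAG_WD_EN  5		/* a bit *position*, like the driver macro */

	int main(void)
	{
		uint8_t reg = 0x20;	/* watchdog-enable bit set, others clear */

		/* Buggy test: 0x20 & 5 == 0, so an armed watchdog looks stopped. */
		bool wrong = reg & FLAG_WD_EN;
		/* Fixed test: 0x20 & BIT(5) != 0. */
		bool right = reg & BIT(FLAG_WD_EN);

		printf("wrong=%d right=%d\n", wrong, right);
		return 0;
	}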
diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c
index 3ade28190341..ea77cae03c9d 100644
--- a/drivers/watchdog/gpio_wdt.c
+++ b/drivers/watchdog/gpio_wdt.c
@@ -152,9 +152,9 @@ static int gpio_wdt_probe(struct platform_device *pdev)
priv->wdd.min_timeout = SOFT_TIMEOUT_MIN;
priv->wdd.max_hw_heartbeat_ms = hw_margin;
priv->wdd.parent = dev;
+ priv->wdd.timeout = SOFT_TIMEOUT_DEF;
- if (watchdog_init_timeout(&priv->wdd, 0, dev) < 0)
- priv->wdd.timeout = SOFT_TIMEOUT_DEF;
+ watchdog_init_timeout(&priv->wdd, 0, &pdev->dev);
watchdog_stop_on_reboot(&priv->wdd);
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index b0a158073abd..a43ab2cecca2 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -4,7 +4,7 @@
*
* SoftDog 0.05: A Software Watchdog Device
*
- * (c) Copyright 2015 Hewlett Packard Enterprise Development LP
+ * (c) Copyright 2018 Hewlett Packard Enterprise Development LP
* Thomas Mingarelli <thomas.mingarelli@hpe.com>
*
* This program is free software; you can redistribute it and/or
@@ -16,34 +16,27 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
-#include <linux/fs.h>
#include <linux/io.h>
-#include <linux/bitops.h>
#include <linux/kernel.h>
-#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/types.h>
-#include <linux/uaccess.h>
#include <linux/watchdog.h>
#include <asm/nmi.h>
-#define HPWDT_VERSION "1.4.0"
+#define HPWDT_VERSION "2.0.0"
#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
#define TICKS_TO_SECS(ticks) ((ticks) * 128 / 1000)
#define HPWDT_MAX_TIMER TICKS_TO_SECS(65535)
#define DEFAULT_MARGIN 30
+#define PRETIMEOUT_SEC 9
+static bool ilo5;
static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */
-static unsigned int reload; /* the computed soft_margin */
static bool nowayout = WATCHDOG_NOWAYOUT;
-#ifdef CONFIG_HPWDT_NMI_DECODING
-static unsigned int allow_kdump = 1;
-#endif
-static char expect_release;
-static unsigned long hpwdt_is_open;
+static bool pretimeout = IS_ENABLED(CONFIG_HPWDT_NMI_DECODING);
static void __iomem *pci_mem_addr; /* the PCI-memory address */
static unsigned long __iomem *hpwdt_nmistat;
@@ -61,48 +54,92 @@ MODULE_DEVICE_TABLE(pci, hpwdt_devices);
/*
* Watchdog operations
*/
-static void hpwdt_start(void)
+static int hpwdt_start(struct watchdog_device *wdd)
{
- reload = SECS_TO_TICKS(soft_margin);
+ int control = 0x81 | (pretimeout ? 0x4 : 0);
+ int reload = SECS_TO_TICKS(wdd->timeout);
+
+ dev_dbg(wdd->parent, "start watchdog 0x%08x:0x%02x\n", reload, control);
iowrite16(reload, hpwdt_timer_reg);
- iowrite8(0x85, hpwdt_timer_con);
+ iowrite8(control, hpwdt_timer_con);
+
+ return 0;
}
static void hpwdt_stop(void)
{
unsigned long data;
+ pr_debug("stop watchdog\n");
+
data = ioread8(hpwdt_timer_con);
data &= 0xFE;
iowrite8(data, hpwdt_timer_con);
}
-static void hpwdt_ping(void)
+static int hpwdt_stop_core(struct watchdog_device *wdd)
{
- iowrite16(reload, hpwdt_timer_reg);
+ hpwdt_stop();
+
+ return 0;
}
-static int hpwdt_change_timer(int new_margin)
+static int hpwdt_ping(struct watchdog_device *wdd)
{
- if (new_margin < 1 || new_margin > HPWDT_MAX_TIMER) {
- pr_warn("New value passed in is invalid: %d seconds\n",
- new_margin);
- return -EINVAL;
- }
+ int reload = SECS_TO_TICKS(wdd->timeout);
- soft_margin = new_margin;
- pr_debug("New timer passed in is %d seconds\n", new_margin);
- reload = SECS_TO_TICKS(soft_margin);
+ dev_dbg(wdd->parent, "ping watchdog 0x%08x\n", reload);
+ iowrite16(reload, hpwdt_timer_reg);
return 0;
}
-static int hpwdt_time_left(void)
+static unsigned int hpwdt_gettimeleft(struct watchdog_device *wdd)
{
return TICKS_TO_SECS(ioread16(hpwdt_timer_reg));
}
+static int hpwdt_settimeout(struct watchdog_device *wdd, unsigned int val)
+{
+ dev_dbg(wdd->parent, "set_timeout = %d\n", val);
+
+ wdd->timeout = val;
+ if (val <= wdd->pretimeout) {
+ dev_dbg(wdd->parent, "pretimeout < timeout. Setting to zero\n");
+ wdd->pretimeout = 0;
+ pretimeout = 0;
+ if (watchdog_active(wdd))
+ hpwdt_start(wdd);
+ }
+ hpwdt_ping(wdd);
+
+ return 0;
+}
+
#ifdef CONFIG_HPWDT_NMI_DECODING
+static int hpwdt_set_pretimeout(struct watchdog_device *wdd, unsigned int req)
+{
+ unsigned int val = 0;
+
+ dev_dbg(wdd->parent, "set_pretimeout = %d\n", req);
+ if (req) {
+ val = PRETIMEOUT_SEC;
+ if (val >= wdd->timeout)
+ return -EINVAL;
+ }
+
+ if (val != req)
+ dev_dbg(wdd->parent, "Rounding pretimeout to: %d\n", val);
+
+ wdd->pretimeout = val;
+ pretimeout = !!val;
+
+ if (watchdog_active(wdd))
+ hpwdt_start(wdd);
+
+ return 0;
+}
+
static int hpwdt_my_nmi(void)
{
return ioread8(hpwdt_nmistat) & 0x6;
@@ -113,178 +150,71 @@ static int hpwdt_my_nmi(void)
*/
static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
{
- if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi())
- return NMI_DONE;
-
- if (allow_kdump)
- hpwdt_stop();
-
- nmi_panic(regs, "An NMI occurred. Depending on your system the reason "
- "for the NMI is logged in any one of the following "
- "resources:\n"
+ unsigned int mynmi = hpwdt_my_nmi();
+ static char panic_msg[] =
+ "00: An NMI occurred. Depending on your system the reason "
+ "for the NMI is logged in any one of the following resources:\n"
"1. Integrated Management Log (IML)\n"
"2. OA Syslog\n"
"3. OA Forward Progress Log\n"
- "4. iLO Event Log");
-
- return NMI_HANDLED;
-}
-#endif /* CONFIG_HPWDT_NMI_DECODING */
-
-/*
- * /dev/watchdog handling
- */
-static int hpwdt_open(struct inode *inode, struct file *file)
-{
- /* /dev/watchdog can only be opened once */
- if (test_and_set_bit(0, &hpwdt_is_open))
- return -EBUSY;
+ "4. iLO Event Log";
- /* Start the watchdog */
- hpwdt_start();
- hpwdt_ping();
-
- return nonseekable_open(inode, file);
-}
+ if (ilo5 && ulReason == NMI_UNKNOWN && mynmi)
+ return NMI_DONE;
-static int hpwdt_release(struct inode *inode, struct file *file)
-{
- /* Stop the watchdog */
- if (expect_release == 42) {
- hpwdt_stop();
- } else {
- pr_crit("Unexpected close, not stopping watchdog!\n");
- hpwdt_ping();
- }
+ if (ilo5 && !pretimeout)
+ return NMI_DONE;
- expect_release = 0;
+ hpwdt_stop();
- /* /dev/watchdog is being closed, make sure it can be re-opened */
- clear_bit(0, &hpwdt_is_open);
+ hex_byte_pack(panic_msg, mynmi);
+ nmi_panic(regs, panic_msg);
- return 0;
+ return NMI_HANDLED;
}
+#endif /* CONFIG_HPWDT_NMI_DECODING */
-static ssize_t hpwdt_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- /* See if we got the magic character 'V' and reload the timer */
- if (len) {
- if (!nowayout) {
- size_t i;
-
- /* note: just in case someone wrote the magic character
- * five months ago... */
- expect_release = 0;
-
- /* scan to see whether or not we got the magic char. */
- for (i = 0; i != len; i++) {
- char c;
- if (get_user(c, data + i))
- return -EFAULT;
- if (c == 'V')
- expect_release = 42;
- }
- }
-
- /* someone wrote to us, we should reload the timer */
- hpwdt_ping();
- }
-
- return len;
-}
static const struct watchdog_info ident = {
- .options = WDIOF_SETTIMEOUT |
+ .options = WDIOF_PRETIMEOUT |
+ WDIOF_SETTIMEOUT |
WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE,
.identity = "HPE iLO2+ HW Watchdog Timer",
};
-static long hpwdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- int new_margin, options;
- int ret = -ENOTTY;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- ret = 0;
- if (copy_to_user(argp, &ident, sizeof(ident)))
- ret = -EFAULT;
- break;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- ret = put_user(0, p);
- break;
-
- case WDIOC_KEEPALIVE:
- hpwdt_ping();
- ret = 0;
- break;
-
- case WDIOC_SETOPTIONS:
- ret = get_user(options, p);
- if (ret)
- break;
-
- if (options & WDIOS_DISABLECARD)
- hpwdt_stop();
-
- if (options & WDIOS_ENABLECARD) {
- hpwdt_start();
- hpwdt_ping();
- }
- break;
-
- case WDIOC_SETTIMEOUT:
- ret = get_user(new_margin, p);
- if (ret)
- break;
-
- ret = hpwdt_change_timer(new_margin);
- if (ret)
- break;
-
- hpwdt_ping();
- /* Fall */
- case WDIOC_GETTIMEOUT:
- ret = put_user(soft_margin, p);
- break;
-
- case WDIOC_GETTIMELEFT:
- ret = put_user(hpwdt_time_left(), p);
- break;
- }
- return ret;
-}
-
/*
* Kernel interfaces
*/
-static const struct file_operations hpwdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = hpwdt_write,
- .unlocked_ioctl = hpwdt_ioctl,
- .open = hpwdt_open,
- .release = hpwdt_release,
+
+static const struct watchdog_ops hpwdt_ops = {
+ .owner = THIS_MODULE,
+ .start = hpwdt_start,
+ .stop = hpwdt_stop_core,
+ .ping = hpwdt_ping,
+ .set_timeout = hpwdt_settimeout,
+ .get_timeleft = hpwdt_gettimeleft,
+#ifdef CONFIG_HPWDT_NMI_DECODING
+ .set_pretimeout = hpwdt_set_pretimeout,
+#endif
};
-static struct miscdevice hpwdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &hpwdt_fops,
+static struct watchdog_device hpwdt_dev = {
+ .info = &ident,
+ .ops = &hpwdt_ops,
+ .min_timeout = 1,
+ .max_timeout = HPWDT_MAX_TIMER,
+ .timeout = DEFAULT_MARGIN,
+#ifdef CONFIG_HPWDT_NMI_DECODING
+ .pretimeout = PRETIMEOUT_SEC,
+#endif
};
+
/*
* Init & Exit
*/
-
static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
{
#ifdef CONFIG_HPWDT_NMI_DECODING
@@ -303,9 +233,8 @@ static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
goto error2;
dev_info(&dev->dev,
- "HPE Watchdog Timer Driver: NMI decoding initialized"
- ", allow kernel dump: %s (default = 1/ON)\n",
- (allow_kdump == 0) ? "OFF" : "ON");
+ "HPE Watchdog Timer Driver: NMI decoding initialized\n");
+
return 0;
error2:
@@ -375,29 +304,32 @@ static int hpwdt_init_one(struct pci_dev *dev,
/* Make sure that timer is disabled until /dev/watchdog is opened */
hpwdt_stop();
- /* Make sure that we have a valid soft_margin */
- if (hpwdt_change_timer(soft_margin))
- hpwdt_change_timer(DEFAULT_MARGIN);
-
/* Initialize NMI Decoding functionality */
retval = hpwdt_init_nmi_decoding(dev);
if (retval != 0)
goto error_init_nmi_decoding;
- retval = misc_register(&hpwdt_miscdev);
+ watchdog_set_nowayout(&hpwdt_dev, nowayout);
+ if (watchdog_init_timeout(&hpwdt_dev, soft_margin, NULL))
+ dev_warn(&dev->dev, "Invalid soft_margin: %d.\n", soft_margin);
+
+ hpwdt_dev.parent = &dev->dev;
+ retval = watchdog_register_device(&hpwdt_dev);
if (retval < 0) {
- dev_warn(&dev->dev,
- "Unable to register miscdev on minor=%d (err=%d).\n",
- WATCHDOG_MINOR, retval);
- goto error_misc_register;
+ dev_err(&dev->dev, "watchdog register failed: %d.\n", retval);
+ goto error_wd_register;
}
dev_info(&dev->dev, "HPE Watchdog Timer Driver: %s"
", timer margin: %d seconds (nowayout=%d).\n",
- HPWDT_VERSION, soft_margin, nowayout);
+ HPWDT_VERSION, hpwdt_dev.timeout, nowayout);
+
+ if (dev->subsystem_vendor == PCI_VENDOR_ID_HP_3PAR)
+ ilo5 = true;
+
return 0;
-error_misc_register:
+error_wd_register:
hpwdt_exit_nmi_decoding();
error_init_nmi_decoding:
pci_iounmap(dev, pci_mem_addr);
@@ -411,7 +343,7 @@ static void hpwdt_exit(struct pci_dev *dev)
if (!nowayout)
hpwdt_stop();
- misc_deregister(&hpwdt_miscdev);
+ watchdog_unregister_device(&hpwdt_dev);
hpwdt_exit_nmi_decoding();
pci_iounmap(dev, pci_mem_addr);
pci_disable_device(dev);
@@ -425,7 +357,7 @@ static struct pci_driver hpwdt_driver = {
};
MODULE_AUTHOR("Tom Mingarelli");
-MODULE_DESCRIPTION("hp watchdog driver");
+MODULE_DESCRIPTION("hpe watchdog driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(HPWDT_VERSION);
@@ -437,8 +369,8 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
#ifdef CONFIG_HPWDT_NMI_DECODING
-module_param(allow_kdump, int, 0);
-MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs");
-#endif /* CONFIG_HPWDT_NMI_DECODING */
+module_param(pretimeout, bool, 0);
+MODULE_PARM_DESC(pretimeout, "Watchdog pretimeout enabled");
+#endif
module_pci_driver(hpwdt_driver);
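
With the miscdevice code removed, hpwdt is driven entirely through the watchdog core, which provides the /dev node, magic close, and the standard ioctls, and which now also exposes the driver's new pretimeout support via WDIOF_PRETIMEOUT. A minimal userspace sketch of exercising it is below; the device node name is an assumption (it may be /dev/watchdog or another /dev/watchdogN depending on what else is registered), and error handling is omitted:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/watchdog.h>

	int main(void)
	{
		int timeout = 60, pretimeout = 9;
		/* Opening the node arms the watchdog. */
		int fd = open("/dev/watchdog0", O_WRONLY);

		if (fd < 0)
			return 1;
		ioctl(fd, WDIOC_SETTIMEOUT, &timeout);
		ioctl(fd, WDIOC_SETPRETIMEOUT, &pretimeout);	/* NMI warning window */
		ioctl(fd, WDIOC_KEEPALIVE, 0);			/* ping */
		/* 'V' requests a clean stop (magic close), as before the rewrite. */
		write(fd, "V", 1);
		close(fd);
		return 0;
	}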
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 518dfa1047cb..f07850d2c977 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -76,7 +76,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static unsigned timeout = IMX2_WDT_DEFAULT_TIME;
+static unsigned timeout;
module_param(timeout, uint, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
__MODULE_STRING(IMX2_WDT_DEFAULT_TIME) ")");
@@ -281,6 +281,7 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
wdog->info = &imx2_wdt_info;
wdog->ops = &imx2_wdt_ops;
wdog->min_timeout = 1;
+ wdog->timeout = IMX2_WDT_DEFAULT_TIME;
wdog->max_hw_heartbeat_ms = IMX2_WDT_MAX_TIME * 1000;
wdog->parent = &pdev->dev;
@@ -299,11 +300,6 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
wdev->ext_reset = of_property_read_bool(pdev->dev.of_node,
"fsl,ext-reset-output");
- wdog->timeout = clamp_t(unsigned, timeout, 1, IMX2_WDT_MAX_TIME);
- if (wdog->timeout != timeout)
- dev_warn(&pdev->dev, "Initial timeout out of range! Clamped from %u to %u\n",
- timeout, wdog->timeout);
-
platform_set_drvdata(pdev, wdog);
watchdog_set_drvdata(wdog, wdev);
watchdog_set_nowayout(wdog, nowayout);
diff --git a/drivers/watchdog/lpc18xx_wdt.c b/drivers/watchdog/lpc18xx_wdt.c
index b4221f43cd94..331cadb459ac 100644
--- a/drivers/watchdog/lpc18xx_wdt.c
+++ b/drivers/watchdog/lpc18xx_wdt.c
@@ -265,7 +265,7 @@ static int lpc18xx_wdt_probe(struct platform_device *pdev)
lpc18xx_wdt->wdt_dev.parent = dev;
watchdog_set_drvdata(&lpc18xx_wdt->wdt_dev, lpc18xx_wdt);
- ret = watchdog_init_timeout(&lpc18xx_wdt->wdt_dev, heartbeat, dev);
+ watchdog_init_timeout(&lpc18xx_wdt->wdt_dev, heartbeat, dev);
__lpc18xx_wdt_set_timeout(lpc18xx_wdt);
diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c
index b8194b02abe0..8023cf28657a 100644
--- a/drivers/watchdog/mei_wdt.c
+++ b/drivers/watchdog/mei_wdt.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel Management Engine Interface (Intel MEI) Linux driver
* Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/module.h>
@@ -687,5 +679,5 @@ static struct mei_cl_driver mei_wdt_driver = {
module_mei_cl_driver(mei_wdt_driver);
MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Device driver for Intel MEI iAMT watchdog");
diff --git a/drivers/watchdog/mena21_wdt.c b/drivers/watchdog/mena21_wdt.c
index 045201a6fdb3..25d5d2b8cfbe 100644
--- a/drivers/watchdog/mena21_wdt.c
+++ b/drivers/watchdog/mena21_wdt.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for the A21 VME CPU Boards
*
* Copyright (C) 2013 MEN Mikro Elektronik Nuernberg GmbH
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c
index 69a5a57f1446..69adeab3fde7 100644
--- a/drivers/watchdog/meson_gxbb_wdt.c
+++ b/drivers/watchdog/meson_gxbb_wdt.c
@@ -1,56 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
* Copyright (c) 2016 BayLibre, SAS.
* Author: Neil Armstrong <narmstrong@baylibre.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * BSD LICENSE
- *
- * Copyright (c) 2016 BayLibre, SAS.
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/clk.h>
#include <linux/err.h>
diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c
index 304274c67735..cd0275a6cdac 100644
--- a/drivers/watchdog/meson_wdt.c
+++ b/drivers/watchdog/meson_wdt.c
@@ -36,7 +36,7 @@
#define MESON_SEC_TO_TC(s, c) ((s) * (c))
static bool nowayout = WATCHDOG_NOWAYOUT;
-static unsigned int timeout = MESON_WDT_TIMEOUT;
+static unsigned int timeout;
struct meson_wdt_data {
unsigned int enable;
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index 7ed417a765c7..4baf64f21aa1 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Mediatek Watchdog Driver
*
@@ -5,16 +6,6 @@
*
* Matthias Brugger <matthias.bgg@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Based on sunxi_wdt.c
*/
@@ -57,7 +48,7 @@
#define DRV_VERSION "1.0"
static bool nowayout = WATCHDOG_NOWAYOUT;
-static unsigned int timeout = WDT_MAX_TIMEOUT;
+static unsigned int timeout;
struct mtk_wdt_dev {
struct watchdog_device wdt_dev;
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index ca360d204548..1fa7d2b32494 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for the MTX-1 Watchdog.
*
@@ -6,16 +7,6 @@
* http://www.4g-systems.biz
*
* (C) Copyright 2007 OpenWrt.org, Florian Fainelli <florian@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * Neither Michael Stickel nor 4G Systems admit liability nor provide
- * warranty for any of this software. This material is provided
- * "AS-IS" and at no charge.
- *
* (c) Copyright 2005 4G Systems <info@4g-systems.biz>
*
* Release 0.01.
diff --git a/drivers/watchdog/npcm_wdt.c b/drivers/watchdog/npcm_wdt.c
new file mode 100644
index 000000000000..0d4213652ecc
--- /dev/null
+++ b/drivers/watchdog/npcm_wdt.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Nuvoton Technology corporation.
+// Copyright (c) 2018 IBM Corp.
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/watchdog.h>
+
+#define NPCM_WTCR 0x1C
+
+#define NPCM_WTCLK (BIT(10) | BIT(11)) /* Clock divider */
+#define NPCM_WTE BIT(7) /* Enable */
+#define NPCM_WTIE BIT(6) /* Enable irq */
+#define NPCM_WTIS (BIT(4) | BIT(5)) /* Interval selection */
+#define NPCM_WTIF BIT(3) /* Interrupt flag*/
+#define NPCM_WTRF BIT(2) /* Reset flag */
+#define NPCM_WTRE BIT(1) /* Reset enable */
+#define NPCM_WTR BIT(0) /* Reset counter */
+
+/*
+ * Watchdog timeouts
+ *
+ * 170 msec: WTCLK=01 WTIS=00 VAL= 0x400
+ * 670 msec: WTCLK=01 WTIS=01 VAL= 0x410
+ * 1360 msec: WTCLK=10 WTIS=00 VAL= 0x800
+ * 2700 msec: WTCLK=01 WTIS=10 VAL= 0x420
+ * 5360 msec: WTCLK=10 WTIS=01 VAL= 0x810
+ * 10700 msec: WTCLK=01 WTIS=11 VAL= 0x430
+ * 21600 msec: WTCLK=10 WTIS=10 VAL= 0x820
+ * 43000 msec: WTCLK=11 WTIS=00 VAL= 0xC00
+ * 85600 msec: WTCLK=10 WTIS=11 VAL= 0x830
+ * 172000 msec: WTCLK=11 WTIS=01 VAL= 0xC10
+ * 687000 msec: WTCLK=11 WTIS=10 VAL= 0xC20
+ * 2750000 msec: WTCLK=11 WTIS=11 VAL= 0xC30
+ */
+
+struct npcm_wdt {
+ struct watchdog_device wdd;
+ void __iomem *reg;
+};
+
+static inline struct npcm_wdt *to_npcm_wdt(struct watchdog_device *wdd)
+{
+ return container_of(wdd, struct npcm_wdt, wdd);
+}
+
+static int npcm_wdt_ping(struct watchdog_device *wdd)
+{
+ struct npcm_wdt *wdt = to_npcm_wdt(wdd);
+ u32 val;
+
+ val = readl(wdt->reg);
+ writel(val | NPCM_WTR, wdt->reg);
+
+ return 0;
+}
+
+static int npcm_wdt_start(struct watchdog_device *wdd)
+{
+ struct npcm_wdt *wdt = to_npcm_wdt(wdd);
+ u32 val;
+
+ if (wdd->timeout < 2)
+ val = 0x800;
+ else if (wdd->timeout < 3)
+ val = 0x420;
+ else if (wdd->timeout < 6)
+ val = 0x810;
+ else if (wdd->timeout < 11)
+ val = 0x430;
+ else if (wdd->timeout < 22)
+ val = 0x820;
+ else if (wdd->timeout < 44)
+ val = 0xC00;
+ else if (wdd->timeout < 87)
+ val = 0x830;
+ else if (wdd->timeout < 173)
+ val = 0xC10;
+ else if (wdd->timeout < 688)
+ val = 0xC20;
+ else
+ val = 0xC30;
+
+ val |= NPCM_WTRE | NPCM_WTE | NPCM_WTR | NPCM_WTIE;
+
+ writel(val, wdt->reg);
+
+ return 0;
+}
+
+static int npcm_wdt_stop(struct watchdog_device *wdd)
+{
+ struct npcm_wdt *wdt = to_npcm_wdt(wdd);
+
+ writel(0, wdt->reg);
+
+ return 0;
+}
+
+
+static int npcm_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ if (timeout < 2)
+ wdd->timeout = 1;
+ else if (timeout < 3)
+ wdd->timeout = 2;
+ else if (timeout < 6)
+ wdd->timeout = 5;
+ else if (timeout < 11)
+ wdd->timeout = 10;
+ else if (timeout < 22)
+ wdd->timeout = 21;
+ else if (timeout < 44)
+ wdd->timeout = 43;
+ else if (timeout < 87)
+ wdd->timeout = 86;
+ else if (timeout < 173)
+ wdd->timeout = 172;
+ else if (timeout < 688)
+ wdd->timeout = 687;
+ else
+ wdd->timeout = 2750;
+
+ if (watchdog_active(wdd))
+ npcm_wdt_start(wdd);
+
+ return 0;
+}
+
+static irqreturn_t npcm_wdt_interrupt(int irq, void *data)
+{
+ struct npcm_wdt *wdt = data;
+
+ watchdog_notify_pretimeout(&wdt->wdd);
+
+ return IRQ_HANDLED;
+}
+
+static int npcm_wdt_restart(struct watchdog_device *wdd,
+ unsigned long action, void *data)
+{
+ struct npcm_wdt *wdt = to_npcm_wdt(wdd);
+
+ writel(NPCM_WTR | NPCM_WTRE | NPCM_WTE, wdt->reg);
+ udelay(1000);
+
+ return 0;
+}
+
+static bool npcm_is_running(struct watchdog_device *wdd)
+{
+ struct npcm_wdt *wdt = to_npcm_wdt(wdd);
+
+ return readl(wdt->reg) & NPCM_WTE;
+}
+
+static const struct watchdog_info npcm_wdt_info = {
+ .identity = KBUILD_MODNAME,
+ .options = WDIOF_SETTIMEOUT
+ | WDIOF_KEEPALIVEPING
+ | WDIOF_MAGICCLOSE,
+};
+
+static const struct watchdog_ops npcm_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = npcm_wdt_start,
+ .stop = npcm_wdt_stop,
+ .ping = npcm_wdt_ping,
+ .set_timeout = npcm_wdt_set_timeout,
+ .restart = npcm_wdt_restart,
+};
+
+static int npcm_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct npcm_wdt *wdt;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ wdt->reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(wdt->reg))
+ return PTR_ERR(wdt->reg);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ wdt->wdd.info = &npcm_wdt_info;
+ wdt->wdd.ops = &npcm_wdt_ops;
+ wdt->wdd.min_timeout = 1;
+ wdt->wdd.max_timeout = 2750;
+ wdt->wdd.parent = dev;
+
+ wdt->wdd.timeout = 86;
+ watchdog_init_timeout(&wdt->wdd, 0, dev);
+
+ /* Ensure timeout is able to be represented by the hardware */
+ npcm_wdt_set_timeout(&wdt->wdd, wdt->wdd.timeout);
+
+ if (npcm_is_running(&wdt->wdd)) {
+ /* Restart with the default or device-tree specified timeout */
+ npcm_wdt_start(&wdt->wdd);
+ set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
+ }
+
+ ret = devm_request_irq(dev, irq, npcm_wdt_interrupt, 0,
+ "watchdog", wdt);
+ if (ret)
+ return ret;
+
+ ret = devm_watchdog_register_device(dev, &wdt->wdd);
+ if (ret) {
+ dev_err(dev, "failed to register watchdog\n");
+ return ret;
+ }
+
+ dev_info(dev, "NPCM watchdog driver enabled\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id npcm_wdt_match[] = {
+ {.compatible = "nuvoton,npcm750-wdt"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, npcm_wdt_match);
+#endif
+
+static struct platform_driver npcm_wdt_driver = {
+ .probe = npcm_wdt_probe,
+ .driver = {
+ .name = "npcm-wdt",
+ .of_match_table = of_match_ptr(npcm_wdt_match),
+ },
+};
+module_platform_driver(npcm_wdt_driver);
+
+MODULE_AUTHOR("Joel Stanley");
+MODULE_DESCRIPTION("Watchdog driver for NPCM");
+MODULE_LICENSE("GPL v2");
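
The timeout table in the comment near the top of this new driver is coarse, so npcm_wdt_set_timeout() rounds a requested timeout up to the next interval the hardware can represent, and npcm_wdt_start() then picks the matching WTCLK/WTIS encoding. A worked example of that mapping, read off the hunks above (the "timeout-sec" property is the generic device-tree source that watchdog_init_timeout() consults):

	/* Requesting 30 s via WDIOC_SETTIMEOUT or "timeout-sec":
	 *   npcm_wdt_set_timeout(wdd, 30)  ->  wdd->timeout = 43   (next bucket up)
	 *   npcm_wdt_start(wdd)            ->  val = 0xC00          (WTCLK=11, WTIS=00,
	 *                                        ~43 s interval)
	 *                                        | NPCM_WTRE | NPCM_WTE
	 *                                        | NPCM_WTR  | NPCM_WTIE
	 */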
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index 1cf286945b7a..4acbe05e27bb 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog Device Driver for Xilinx axi/xps_timebase_wdt
*
* (C) Copyright 2013 - 2014 Xilinx, Inc.
* (C) Copyright 2011 (Alejandro Cabrera <aldaya@gmail.com>)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/clk.h>
@@ -323,4 +319,4 @@ module_platform_driver(xwdt_driver);
MODULE_AUTHOR("Alejandro Cabrera <aldaya@gmail.com>");
MODULE_DESCRIPTION("Xilinx Watchdog driver");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 1b02bfa81b29..ae77112ce97f 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -253,10 +253,10 @@ static int omap_wdt_probe(struct platform_device *pdev)
wdev->wdog.ops = &omap_wdt_ops;
wdev->wdog.min_timeout = TIMER_MARGIN_MIN;
wdev->wdog.max_timeout = TIMER_MARGIN_MAX;
+ wdev->wdog.timeout = TIMER_MARGIN_DEFAULT;
wdev->wdog.parent = &pdev->dev;
- if (watchdog_init_timeout(&wdev->wdog, timer_margin, &pdev->dev) < 0)
- wdev->wdog.timeout = TIMER_MARGIN_DEFAULT;
+ watchdog_init_timeout(&wdev->wdog, timer_margin, &pdev->dev);
watchdog_set_nowayout(&wdev->wdog, nowayout);
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 0529aed158a4..8e261799c84e 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -78,7 +78,7 @@
#define WDOG_COUNTER_RATE 13000000 /*the counter clock is 13 MHz fixed */
static bool nowayout = WATCHDOG_NOWAYOUT;
-static unsigned int heartbeat = DEFAULT_HEARTBEAT;
+static unsigned int heartbeat;
static DEFINE_SPINLOCK(io_lock);
static void __iomem *wdt_base;
diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c
index 831ef83f6de1..6b8c6ddfe30b 100644
--- a/drivers/watchdog/renesas_wdt.c
+++ b/drivers/watchdog/renesas_wdt.c
@@ -16,6 +16,8 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/smp.h>
+#include <linux/sys_soc.h>
#include <linux/watchdog.h>
#define RWTCNT 0
@@ -49,6 +51,7 @@ struct rwdt_priv {
void __iomem *base;
struct watchdog_device wdev;
unsigned long clk_rate;
+ u16 time_left;
u8 cks;
};
@@ -107,6 +110,16 @@ static unsigned int rwdt_get_timeleft(struct watchdog_device *wdev)
return DIV_BY_CLKS_PER_SEC(priv, 65536 - val);
}
+static int rwdt_restart(struct watchdog_device *wdev, unsigned long action,
+ void *data)
+{
+ struct rwdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ rwdt_start(wdev);
+ rwdt_write(priv, 0xffff, RWTCNT);
+ return 0;
+}
+
static const struct watchdog_info rwdt_ident = {
.options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
.identity = "Renesas WDT Watchdog",
@@ -118,8 +131,47 @@ static const struct watchdog_ops rwdt_ops = {
.stop = rwdt_stop,
.ping = rwdt_init_timeout,
.get_timeleft = rwdt_get_timeleft,
+ .restart = rwdt_restart,
};
+#if defined(CONFIG_ARCH_RCAR_GEN2) && defined(CONFIG_SMP)
+/*
+ * Watchdog-reset integration is broken on early revisions of R-Car Gen2 SoCs
+ */
+static const struct soc_device_attribute rwdt_quirks_match[] = {
+ {
+ .soc_id = "r8a7790",
+ .revision = "ES1.*",
+ .data = (void *)1, /* needs single CPU */
+ }, {
+ .soc_id = "r8a7791",
+ .revision = "ES[12].*",
+ .data = (void *)1, /* needs single CPU */
+ }, {
+ .soc_id = "r8a7792",
+ .revision = "*",
+ .data = (void *)0, /* needs SMP disabled */
+ },
+ { /* sentinel */ }
+};
+
+static bool rwdt_blacklisted(struct device *dev)
+{
+ const struct soc_device_attribute *attr;
+
+ attr = soc_device_match(rwdt_quirks_match);
+ if (attr && setup_max_cpus > (uintptr_t)attr->data) {
+ dev_info(dev, "Watchdog blacklisted on %s %s\n", attr->soc_id,
+ attr->revision);
+ return true;
+ }
+
+ return false;
+}
+#else /* !CONFIG_ARCH_RCAR_GEN2 || !CONFIG_SMP */
+static inline bool rwdt_blacklisted(struct device *dev) { return false; }
+#endif /* !CONFIG_ARCH_RCAR_GEN2 || !CONFIG_SMP */
+
static int rwdt_probe(struct platform_device *pdev)
{
struct rwdt_priv *priv;
@@ -128,6 +180,9 @@ static int rwdt_probe(struct platform_device *pdev)
unsigned long clks_per_sec;
int ret, i;
+ if (rwdt_blacklisted(&pdev->dev))
+ return -ENODEV;
+
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -176,6 +231,7 @@ static int rwdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
watchdog_set_drvdata(&priv->wdev, priv);
watchdog_set_nowayout(&priv->wdev, nowayout);
+ watchdog_set_restart_priority(&priv->wdev, 0);
/* This overrides the default timeout only if DT configuration was found */
ret = watchdog_init_timeout(&priv->wdev, 0, &pdev->dev);
@@ -203,12 +259,32 @@ static int rwdt_remove(struct platform_device *pdev)
return 0;
}
-/*
- * This driver does also fit for R-Car Gen2 (r8a779[0-4]) WDT. However, for SMP
- * to work there, one also needs a RESET (RST) driver which does not exist yet
- * due to HW issues. This needs to be solved before adding compatibles here.
- */
+static int __maybe_unused rwdt_suspend(struct device *dev)
+{
+ struct rwdt_priv *priv = dev_get_drvdata(dev);
+
+ if (watchdog_active(&priv->wdev)) {
+ priv->time_left = readw(priv->base + RWTCNT);
+ rwdt_stop(&priv->wdev);
+ }
+ return 0;
+}
+
+static int __maybe_unused rwdt_resume(struct device *dev)
+{
+ struct rwdt_priv *priv = dev_get_drvdata(dev);
+
+ if (watchdog_active(&priv->wdev)) {
+ rwdt_start(&priv->wdev);
+ rwdt_write(priv, priv->time_left, RWTCNT);
+ }
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rwdt_pm_ops, rwdt_suspend, rwdt_resume);
+
static const struct of_device_id rwdt_ids[] = {
+ { .compatible = "renesas,rcar-gen2-wdt", },
{ .compatible = "renesas,rcar-gen3-wdt", },
{ /* sentinel */ }
};
@@ -218,6 +294,7 @@ static struct platform_driver rwdt_driver = {
.driver = {
.name = "renesas_wdt",
.of_match_table = rwdt_ids,
+ .pm = &rwdt_pm_ops,
},
.probe = rwdt_probe,
.remove = rwdt_remove,
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
index 0ae947c3d7bc..255169916dbb 100644
--- a/drivers/watchdog/sama5d4_wdt.c
+++ b/drivers/watchdog/sama5d4_wdt.c
@@ -33,7 +33,7 @@ struct sama5d4_wdt {
unsigned long last_ping;
};
-static int wdt_timeout = WDT_DEFAULT_TIMEOUT;
+static int wdt_timeout;
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(wdt_timeout, int, 0);
@@ -212,7 +212,7 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
return -ENOMEM;
wdd = &wdt->wdd;
- wdd->timeout = wdt_timeout;
+ wdd->timeout = WDT_DEFAULT_TIMEOUT;
wdd->info = &sama5d4_wdt_info;
wdd->ops = &sama5d4_wdt_ops;
wdd->min_timeout = MIN_WDT_TIMEOUT;
@@ -273,7 +273,7 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, wdt);
dev_info(&pdev->dev, "initialized (timeout = %d sec, nowayout = %d)\n",
- wdt_timeout, nowayout);
+ wdd->timeout, nowayout);
return 0;
}
diff --git a/drivers/watchdog/sirfsoc_wdt.c b/drivers/watchdog/sirfsoc_wdt.c
index 4eea351e09b0..ac0c9d2c4aee 100644
--- a/drivers/watchdog/sirfsoc_wdt.c
+++ b/drivers/watchdog/sirfsoc_wdt.c
@@ -29,7 +29,7 @@
#define SIRFSOC_WDT_MAX_TIMEOUT (10 * 60) /* 10 mins */
#define SIRFSOC_WDT_DEFAULT_TIMEOUT 30 /* 30 secs */
-static unsigned int timeout = SIRFSOC_WDT_DEFAULT_TIMEOUT;
+static unsigned int timeout;
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(timeout, uint, 0);
diff --git a/drivers/watchdog/sprd_wdt.c b/drivers/watchdog/sprd_wdt.c
index a8b280ff33e0..b4d484a42b70 100644
--- a/drivers/watchdog/sprd_wdt.c
+++ b/drivers/watchdog/sprd_wdt.c
@@ -154,8 +154,10 @@ static int sprd_wdt_enable(struct sprd_wdt *wdt)
if (ret)
return ret;
ret = clk_prepare_enable(wdt->rtc_enable);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(wdt->enable);
return ret;
+ }
sprd_wdt_unlock(wdt->base);
val = readl_relaxed(wdt->base + SPRD_WDT_CTRL);
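
The sprd_wdt fix above follows the usual unwinding rule for clock handling: every clock that was successfully prepared and enabled must be disabled and unprepared again when a later step fails. A generic sketch of that pattern (illustrative names, not the driver's code):

#include <linux/clk.h>

/* Enable two clocks; if the second one fails, undo the first so the
 * prepare/enable counts stay balanced.
 */
static int example_enable_two_clocks(struct clk *a, struct clk *b)
{
	int ret;

	ret = clk_prepare_enable(a);
	if (ret)
		return ret;

	ret = clk_prepare_enable(b);
	if (ret) {
		clk_disable_unprepare(a);	/* undo the first clock */
		return ret;
	}

	return 0;
}
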
diff --git a/drivers/watchdog/st_lpc_wdt.c b/drivers/watchdog/st_lpc_wdt.c
index e6100e447dd8..177829b379da 100644
--- a/drivers/watchdog/st_lpc_wdt.c
+++ b/drivers/watchdog/st_lpc_wdt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ST's LPC Watchdog
*
@@ -5,11 +6,6 @@
*
* Author: David Paris <david.paris@st.com> for STMicroelectronics
* Lee Jones <lee.jones@linaro.org> for STMicroelectronics
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#include <linux/clk.h>
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index 802e31b1416d..c6c73656997e 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -39,7 +39,7 @@
#define DRV_VERSION "1.0"
static bool nowayout = WATCHDOG_NOWAYOUT;
-static unsigned int timeout = WDT_MAX_TIMEOUT;
+static unsigned int timeout;
/*
* This structure stores the register offsets for different variants
diff --git a/drivers/watchdog/tangox_wdt.c b/drivers/watchdog/tangox_wdt.c
index d5fcce062920..b1de8297fa40 100644
--- a/drivers/watchdog/tangox_wdt.c
+++ b/drivers/watchdog/tangox_wdt.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
* SMP86xx/SMP87xx Watchdog driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/bitops.h>
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
index 9403c08816e3..877dd39bd41f 100644
--- a/drivers/watchdog/tegra_wdt.c
+++ b/drivers/watchdog/tegra_wdt.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/watchdog/uniphier_wdt.c b/drivers/watchdog/uniphier_wdt.c
index 0ea2339d9702..e20a7a459d69 100644
--- a/drivers/watchdog/uniphier_wdt.c
+++ b/drivers/watchdog/uniphier_wdt.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Watchdog driver for the UniPhier watchdog timer
*
* (c) Copyright 2014 Panasonic Corporation
* (c) Copyright 2016 Socionext Inc.
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/bitops.h>
@@ -212,11 +204,10 @@ static int uniphier_wdt_probe(struct platform_device *pdev)
wdev->wdt_dev.ops = &uniphier_wdt_ops;
wdev->wdt_dev.max_timeout = WDT_PERIOD_MAX;
wdev->wdt_dev.min_timeout = WDT_PERIOD_MIN;
+ wdev->wdt_dev.timeout = WDT_DEFAULT_TIMEOUT;
wdev->wdt_dev.parent = dev;
- if (watchdog_init_timeout(&wdev->wdt_dev, timeout, dev) < 0) {
- wdev->wdt_dev.timeout = WDT_DEFAULT_TIMEOUT;
- }
+ watchdog_init_timeout(&wdev->wdt_dev, timeout, dev);
watchdog_set_nowayout(&wdev->wdt_dev, nowayout);
watchdog_stop_on_reboot(&wdev->wdt_dev);
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index 1ddc1f742cd4..116c2f47b463 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for the wm831x PMICs
*
* Copyright (C) 2009 Wolfson Microelectronics
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation
*/
#include <linux/module.h>
diff --git a/drivers/watchdog/wm8350_wdt.c b/drivers/watchdog/wm8350_wdt.c
index 4ab4b8347d45..33c62d51f00a 100644
--- a/drivers/watchdog/wm8350_wdt.c
+++ b/drivers/watchdog/wm8350_wdt.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Watchdog driver for the wm8350
*
* Copyright (C) 2007, 2008 Wolfson Microelectronics <linux@wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 23e391d3ec01..b29f4e40851f 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -53,6 +53,8 @@ static unsigned long *acpi_ids_done;
static unsigned long *acpi_id_present;
/* And if there is an _CST definition (or a PBLK) for the ACPI IDs */
static unsigned long *acpi_id_cst_present;
+/* Which ACPI P-State dependencies exist for an enumerated processor */
+static struct acpi_psd_package *acpi_psd;
static int push_cxx_to_hypervisor(struct acpi_processor *_pr)
{
@@ -362,9 +364,9 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
}
/* There are more ACPI Processor objects than in x2APIC or MADT.
 * This can happen with incorrect ACPI SSDT declarations. */
- if (acpi_id > nr_acpi_bits) {
- pr_debug("We only have %u, trying to set %u\n",
- nr_acpi_bits, acpi_id);
+ if (acpi_id >= nr_acpi_bits) {
+ pr_debug("max acpi id %u, trying to set %u\n",
+ nr_acpi_bits - 1, acpi_id);
return AE_OK;
}
/* OK, there is an ACPI Processor object */
@@ -372,6 +374,13 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
pr_debug("ACPI CPU%u w/ PBLK:0x%lx\n", acpi_id, (unsigned long)pblk);
+ /* It has P-state dependencies */
+ if (!acpi_processor_get_psd(handle, &acpi_psd[acpi_id])) {
+ pr_debug("ACPI CPU%u w/ PST:coord_type = %llu domain = %llu\n",
+ acpi_id, acpi_psd[acpi_id].coord_type,
+ acpi_psd[acpi_id].domain);
+ }
+
status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
if (ACPI_FAILURE(status)) {
if (!pblk)
@@ -405,6 +414,14 @@ static int check_acpi_ids(struct acpi_processor *pr_backup)
return -ENOMEM;
}
+ acpi_psd = kcalloc(nr_acpi_bits, sizeof(struct acpi_psd_package),
+ GFP_KERNEL);
+ if (!acpi_psd) {
+ kfree(acpi_id_present);
+ kfree(acpi_id_cst_present);
+ return -ENOMEM;
+ }
+
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
read_acpi_id, NULL, NULL, NULL);
@@ -417,6 +434,12 @@ upload:
pr_backup->acpi_id = i;
/* Mask out C-states if there are no _CST or PBLK */
pr_backup->flags.power = test_bit(i, acpi_id_cst_present);
+ /* num_entries is non-zero if we evaluated _PSD */
+ if (acpi_psd[i].num_entries) {
+ memcpy(&pr_backup->performance->domain_info,
+ &acpi_psd[i],
+ sizeof(struct acpi_psd_package));
+ }
(void)upload_pm_data(pr_backup);
}
}
@@ -566,6 +589,7 @@ static void __exit xen_acpi_processor_exit(void)
kfree(acpi_ids_done);
kfree(acpi_id_present);
kfree(acpi_id_cst_present);
+ kfree(acpi_psd);
for_each_possible_cpu(i)
acpi_processor_unregister_performance(i);
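
The bounds check change above is an off-by-one fix that becomes important once acpi_id is used to index the new acpi_psd[] array. A small illustration of the corrected test (example code, not part of the driver):

#include <linux/types.h>

/* An array with nr_acpi_bits elements has valid indices
 * 0 .. nr_acpi_bits - 1, so acpi_id == nr_acpi_bits must be rejected;
 * the old "acpi_id > nr_acpi_bits" test would have let that value
 * through and indexed one element past the end of acpi_psd[].
 */
static bool example_acpi_id_in_range(u32 acpi_id, u32 nr_acpi_bits)
{
	return acpi_id < nr_acpi_bits;
}
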
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index a493e99bed21..0d6d9264d6a9 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -365,7 +365,7 @@ void xenbus_dev_queue_reply(struct xb_req_data *req)
if (WARN_ON(rc))
goto out;
}
- } else if (req->msg.type == XS_TRANSACTION_END) {
+ } else if (req->type == XS_TRANSACTION_END) {
trans = xenbus_get_transaction(u, req->msg.tx_id);
if (WARN_ON(!trans))
goto out;
@@ -429,6 +429,10 @@ static int xenbus_write_transaction(unsigned msg_type,
{
int rc;
struct xenbus_transaction_holder *trans = NULL;
+ struct {
+ struct xsd_sockmsg hdr;
+ char body[];
+ } *msg = (void *)u->u.buffer;
if (msg_type == XS_TRANSACTION_START) {
trans = kzalloc(sizeof(*trans), GFP_KERNEL);
@@ -437,11 +441,15 @@ static int xenbus_write_transaction(unsigned msg_type,
goto out;
}
list_add(&trans->list, &u->transactions);
- } else if (u->u.msg.tx_id != 0 &&
- !xenbus_get_transaction(u, u->u.msg.tx_id))
+ } else if (msg->hdr.tx_id != 0 &&
+ !xenbus_get_transaction(u, msg->hdr.tx_id))
return xenbus_command_reply(u, XS_ERROR, "ENOENT");
+ else if (msg_type == XS_TRANSACTION_END &&
+ !(msg->hdr.len == 2 &&
+ (!strcmp(msg->body, "T") || !strcmp(msg->body, "F"))))
+ return xenbus_command_reply(u, XS_ERROR, "EINVAL");
- rc = xenbus_dev_request_and_reply(&u->u.msg, u);
+ rc = xenbus_dev_request_and_reply(&msg->hdr, u);
if (rc && trans) {
list_del(&trans->list);
kfree(trans);
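
The new validation above requires an XS_TRANSACTION_END request to carry exactly the two-byte body "T" or "F" (commit or abort, with the terminating NUL counted in hdr.len). A condensed sketch of that check (illustrative helper, not the driver's code):

#include <linux/string.h>
#include <linux/types.h>
#include <xen/interface/io/xs_wire.h>

/* A well-formed XS_TRANSACTION_END payload is "T\0" or "F\0", so the
 * header length must be 2 and the body must compare equal to one of the
 * two single-character strings.
 */
static bool example_valid_transaction_end(const struct xsd_sockmsg *hdr,
					  const char *body)
{
	return hdr->len == 2 &&
	       (strcmp(body, "T") == 0 || strcmp(body, "F") == 0);
}
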
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 3f3b29398ab8..49a3874ae6bb 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -140,7 +140,9 @@ void xs_request_exit(struct xb_req_data *req)
spin_lock(&xs_state_lock);
xs_state_users--;
if ((req->type == XS_TRANSACTION_START && req->msg.type == XS_ERROR) ||
- req->type == XS_TRANSACTION_END)
+ (req->type == XS_TRANSACTION_END &&
+ !WARN_ON_ONCE(req->msg.type == XS_ERROR &&
+ !strcmp(req->body, "ENOENT"))))
xs_state_users--;
spin_unlock(&xs_state_lock);
diff --git a/fs/afs/Makefile b/fs/afs/Makefile
index 45b7fc405fa6..532acae25453 100644
--- a/fs/afs/Makefile
+++ b/fs/afs/Makefile
@@ -12,6 +12,8 @@ kafs-objs := \
cell.o \
cmservice.o \
dir.o \
+ dir_edit.o \
+ dynroot.o \
file.o \
flock.o \
fsclient.o \
diff --git a/fs/afs/addr_list.c b/fs/afs/addr_list.c
index fd9f28b8a933..3bedfed608a2 100644
--- a/fs/afs/addr_list.c
+++ b/fs/afs/addr_list.c
@@ -243,9 +243,9 @@ void afs_merge_fs_addr4(struct afs_addr_list *alist, __be32 xdr, u16 port)
xport == a->sin6_port)
return;
if (xdr == a->sin6_addr.s6_addr32[3] &&
- xport < a->sin6_port)
+ (u16 __force)xport < (u16 __force)a->sin6_port)
break;
- if (xdr < a->sin6_addr.s6_addr32[3])
+ if ((u32 __force)xdr < (u32 __force)a->sin6_addr.s6_addr32[3])
break;
}
@@ -280,7 +280,7 @@ void afs_merge_fs_addr6(struct afs_addr_list *alist, __be32 *xdr, u16 port)
xport == a->sin6_port)
return;
if (diff == 0 &&
- xport < a->sin6_port)
+ (u16 __force)xport < (u16 __force)a->sin6_port)
break;
if (diff < 0)
break;
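
The __force casts above keep sparse quiet: the address list only needs a consistent ordering for merging, not a numeric host-order comparison, so the raw big-endian values are compared directly and the casts mark that as deliberate rather than a missing ntohl()/ntohs() conversion. A minimal sketch of the idiom (illustrative helper, not part of the patch):

#include <linux/types.h>

/* Order two big-endian 32-bit values by their stored representation;
 * the __force cast tells sparse the endianness "mismatch" is intentional.
 */
static inline bool example_be32_before(__be32 a, __be32 b)
{
	return (u32 __force)a < (u32 __force)b;
}
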
diff --git a/fs/afs/afs.h b/fs/afs/afs.h
index b94d0edc2b78..b4ff1f7ae4ab 100644
--- a/fs/afs/afs.h
+++ b/fs/afs/afs.h
@@ -67,10 +67,14 @@ typedef enum {
} afs_callback_type_t;
struct afs_callback {
- struct afs_fid fid; /* file identifier */
- unsigned version; /* callback version */
- unsigned expiry; /* time at which expires */
- afs_callback_type_t type; /* type of callback */
+ unsigned version; /* Callback version */
+ unsigned expiry; /* Time at which expires */
+ afs_callback_type_t type; /* Type of callback */
+};
+
+struct afs_callback_break {
+ struct afs_fid fid; /* File identifier */
+ struct afs_callback cb; /* Callback details */
};
#define AFSCBMAX 50 /* maximum callbacks transferred per bulk op */
@@ -123,21 +127,20 @@ typedef u32 afs_access_t;
* AFS file status information
*/
struct afs_file_status {
- unsigned if_version; /* interface version */
-#define AFS_FSTATUS_VERSION 1
+ u64 size; /* file size */
+ afs_dataversion_t data_version; /* current data version */
+ time_t mtime_client; /* last time client changed data */
+ time_t mtime_server; /* last time server changed data */
+ unsigned abort_code; /* Abort if bulk-fetching this failed */
afs_file_type_t type; /* file type */
unsigned nlink; /* link count */
- u64 size; /* file size */
- afs_dataversion_t data_version; /* current data version */
u32 author; /* author ID */
- kuid_t owner; /* owner ID */
- kgid_t group; /* group ID */
+ u32 owner; /* owner ID */
+ u32 group; /* group ID */
afs_access_t caller_access; /* access rights for authenticated caller */
afs_access_t anon_access; /* access rights for unauthenticated caller */
umode_t mode; /* UNIX mode */
- time_t mtime_client; /* last time client changed data */
- time_t mtime_server; /* last time server changed data */
s32 lock_count; /* file lock count (0=UNLK -1=WRLCK +ve=#RDLCK */
};
diff --git a/fs/afs/afs_fs.h b/fs/afs/afs_fs.h
index d47b6d01e4c0..ddfa88a7a9c0 100644
--- a/fs/afs/afs_fs.h
+++ b/fs/afs/afs_fs.h
@@ -31,10 +31,12 @@ enum AFS_FS_Operations {
FSGETVOLUMEINFO = 148, /* AFS Get information about a volume */
FSGETVOLUMESTATUS = 149, /* AFS Get volume status information */
FSGETROOTVOLUME = 151, /* AFS Get root volume name */
+ FSBULKSTATUS = 155, /* AFS Fetch multiple file statuses */
FSSETLOCK = 156, /* AFS Request a file lock */
FSEXTENDLOCK = 157, /* AFS Extend a file lock */
FSRELEASELOCK = 158, /* AFS Release a file lock */
FSLOOKUP = 161, /* AFS lookup file in directory */
+ FSINLINEBULKSTATUS = 65536, /* AFS Fetch multiple file statuses with inline errors */
FSFETCHDATA64 = 65537, /* AFS Fetch file data */
FSSTOREDATA64 = 65538, /* AFS Store file data */
FSGIVEUPALLCALLBACKS = 65539, /* AFS Give up all outstanding callbacks on a server */
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index f4291b576054..abd9a84f4e88 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -97,26 +97,6 @@ again:
}
/*
- * Set a vnode's interest on a server.
- */
-void afs_set_cb_interest(struct afs_vnode *vnode, struct afs_cb_interest *cbi)
-{
- struct afs_cb_interest *old_cbi = NULL;
-
- if (vnode->cb_interest == cbi)
- return;
-
- write_seqlock(&vnode->cb_lock);
- if (vnode->cb_interest != cbi) {
- afs_get_cb_interest(cbi);
- old_cbi = vnode->cb_interest;
- vnode->cb_interest = cbi;
- }
- write_sequnlock(&vnode->cb_lock);
- afs_put_cb_interest(afs_v2net(vnode), cbi);
-}
-
-/*
* Remove an interest on a server.
*/
void afs_put_cb_interest(struct afs_net *net, struct afs_cb_interest *cbi)
@@ -150,6 +130,7 @@ void afs_break_callback(struct afs_vnode *vnode)
write_seqlock(&vnode->cb_lock);
+ clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
vnode->cb_break++;
afs_clear_permits(vnode);
@@ -207,7 +188,7 @@ static void afs_break_one_callback(struct afs_server *server,
* allow the fileserver to break callback promises
*/
void afs_break_callbacks(struct afs_server *server, size_t count,
- struct afs_callback callbacks[])
+ struct afs_callback_break *callbacks)
{
_enter("%p,%zu,", server, count);
@@ -219,9 +200,9 @@ void afs_break_callbacks(struct afs_server *server, size_t count,
callbacks->fid.vid,
callbacks->fid.vnode,
callbacks->fid.unique,
- callbacks->version,
- callbacks->expiry,
- callbacks->type
+ callbacks->cb.version,
+ callbacks->cb.expiry,
+ callbacks->cb.type
);
afs_break_one_callback(server, &callbacks->fid);
}
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 4235a05afc76..fdf4c36cff79 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -18,7 +18,7 @@
#include <keys/rxrpc-type.h>
#include "internal.h"
-unsigned __read_mostly afs_cell_gc_delay = 10;
+static unsigned __read_mostly afs_cell_gc_delay = 10;
static void afs_manage_cell(struct work_struct *);
@@ -75,7 +75,7 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
cell = rcu_dereference_raw(net->ws_cell);
if (cell) {
afs_get_cell(cell);
- continue;
+ break;
}
ret = -EDESTADDRREQ;
continue;
@@ -130,6 +130,8 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
_leave(" = -ENAMETOOLONG");
return ERR_PTR(-ENAMETOOLONG);
}
+ if (namelen == 5 && memcmp(name, "@cell", 5) == 0)
+ return ERR_PTR(-EINVAL);
_enter("%*.*s,%s", namelen, namelen, name, vllist);
@@ -334,8 +336,8 @@ int afs_cell_init(struct afs_net *net, const char *rootcell)
return PTR_ERR(new_root);
}
- set_bit(AFS_CELL_FL_NO_GC, &new_root->flags);
- afs_get_cell(new_root);
+ if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
+ afs_get_cell(new_root);
/* install the new cell */
write_seqlock(&net->cells_lock);
@@ -411,7 +413,7 @@ static void afs_cell_destroy(struct rcu_head *rcu)
ASSERTCMP(atomic_read(&cell->usage), ==, 0);
- afs_put_addrlist(cell->vl_addrs);
+ afs_put_addrlist(rcu_access_pointer(cell->vl_addrs));
key_put(cell->anonymous_key);
kfree(cell);
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 41e277f57b20..357de908df3a 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -178,8 +178,8 @@ static void SRXAFSCB_CallBack(struct work_struct *work)
*/
static int afs_deliver_cb_callback(struct afs_call *call)
{
+ struct afs_callback_break *cb;
struct sockaddr_rxrpc srx;
- struct afs_callback *cb;
struct afs_server *server;
__be32 *bp;
int ret, loop;
@@ -201,7 +201,7 @@ static int afs_deliver_cb_callback(struct afs_call *call)
call->count = ntohl(call->tmp);
_debug("FID count: %u", call->count);
if (call->count > AFSCBMAX)
- return -EBADMSG;
+ return afs_protocol_error(call, -EBADMSG);
call->buffer = kmalloc(call->count * 3 * 4, GFP_KERNEL);
if (!call->buffer)
@@ -218,7 +218,7 @@ static int afs_deliver_cb_callback(struct afs_call *call)
_debug("unmarshall FID array");
call->request = kcalloc(call->count,
- sizeof(struct afs_callback),
+ sizeof(struct afs_callback_break),
GFP_KERNEL);
if (!call->request)
return -ENOMEM;
@@ -229,7 +229,7 @@ static int afs_deliver_cb_callback(struct afs_call *call)
cb->fid.vid = ntohl(*bp++);
cb->fid.vnode = ntohl(*bp++);
cb->fid.unique = ntohl(*bp++);
- cb->type = AFSCM_CB_UNTYPED;
+ cb->cb.type = AFSCM_CB_UNTYPED;
}
call->offset = 0;
@@ -245,7 +245,7 @@ static int afs_deliver_cb_callback(struct afs_call *call)
call->count2 = ntohl(call->tmp);
_debug("CB count: %u", call->count2);
if (call->count2 != call->count && call->count2 != 0)
- return -EBADMSG;
+ return afs_protocol_error(call, -EBADMSG);
call->offset = 0;
call->unmarshall++;
@@ -260,9 +260,9 @@ static int afs_deliver_cb_callback(struct afs_call *call)
cb = call->request;
bp = call->buffer;
for (loop = call->count2; loop > 0; loop--, cb++) {
- cb->version = ntohl(*bp++);
- cb->expiry = ntohl(*bp++);
- cb->type = ntohl(*bp++);
+ cb->cb.version = ntohl(*bp++);
+ cb->cb.expiry = ntohl(*bp++);
+ cb->cb.type = ntohl(*bp++);
}
call->offset = 0;
@@ -500,9 +500,9 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
b = call->buffer;
r = call->request;
- r->time_low = ntohl(b[0]);
- r->time_mid = ntohl(b[1]);
- r->time_hi_and_version = ntohl(b[2]);
+ r->time_low = b[0];
+ r->time_mid = htons(ntohl(b[1]));
+ r->time_hi_and_version = htons(ntohl(b[2]));
r->clock_seq_hi_and_reserved = ntohl(b[3]);
r->clock_seq_low = ntohl(b[4]);
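
The SRXAFSCB_ProbeUuid change above corrects how the UUID fields are rebuilt from the wire: each field arrives as a 32-bit big-endian XDR word, but time_mid and time_hi_and_version appear to be stored as 16-bit big-endian values, so the word is brought to host order and then re-encoded at the narrower width, while time_low is kept as a 32-bit big-endian value and copied as-is. A one-line sketch of the 16-bit conversion (field widths inferred from this hunk, not a quoted definition):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Take a 32-bit big-endian XDR word holding a 16-bit quantity and
 * re-encode it as a 16-bit big-endian field.
 */
static inline __be16 example_xdr_to_be16(__be32 xdr_word)
{
	return htons((u16)ntohl(xdr_word));
}
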
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index ba2b458b36d1..5889f70d4d27 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -1,6 +1,6 @@
/* dir.c: AFS filesystem directory handling
*
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2002, 2018 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -10,27 +10,26 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
+#include <linux/swap.h>
#include <linux/ctype.h>
#include <linux/sched.h>
-#include <linux/dns_resolver.h>
+#include <linux/task_io_accounting_ops.h>
#include "internal.h"
+#include "xdr_fs.h"
static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags);
-static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentry,
- unsigned int flags);
static int afs_dir_open(struct inode *inode, struct file *file);
static int afs_readdir(struct file *file, struct dir_context *ctx);
static int afs_d_revalidate(struct dentry *dentry, unsigned int flags);
static int afs_d_delete(const struct dentry *dentry);
-static void afs_d_release(struct dentry *dentry);
-static int afs_lookup_filldir(struct dir_context *ctx, const char *name, int nlen,
+static int afs_lookup_one_filldir(struct dir_context *ctx, const char *name, int nlen,
loff_t fpos, u64 ino, unsigned dtype);
+static int afs_lookup_filldir(struct dir_context *ctx, const char *name, int nlen,
+ loff_t fpos, u64 ino, unsigned dtype);
static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool excl);
static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
@@ -43,6 +42,14 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags);
+static int afs_dir_releasepage(struct page *page, gfp_t gfp_flags);
+static void afs_dir_invalidatepage(struct page *page, unsigned int offset,
+ unsigned int length);
+
+static int afs_dir_set_page_dirty(struct page *page)
+{
+ BUG(); /* This should never happen. */
+}
const struct file_operations afs_dir_file_operations = {
.open = afs_dir_open,
@@ -67,15 +74,10 @@ const struct inode_operations afs_dir_inode_operations = {
.listxattr = afs_listxattr,
};
-const struct file_operations afs_dynroot_file_operations = {
- .open = dcache_dir_open,
- .release = dcache_dir_close,
- .iterate_shared = dcache_readdir,
- .llseek = dcache_dir_lseek,
-};
-
-const struct inode_operations afs_dynroot_inode_operations = {
- .lookup = afs_dynroot_lookup,
+const struct address_space_operations afs_dir_aops = {
+ .set_page_dirty = afs_dir_set_page_dirty,
+ .releasepage = afs_dir_releasepage,
+ .invalidatepage = afs_dir_invalidatepage,
};
const struct dentry_operations afs_fs_dentry_operations = {
@@ -85,91 +87,38 @@ const struct dentry_operations afs_fs_dentry_operations = {
.d_automount = afs_d_automount,
};
-#define AFS_DIR_HASHTBL_SIZE 128
-#define AFS_DIR_DIRENT_SIZE 32
-#define AFS_DIRENT_PER_BLOCK 64
-
-union afs_dirent {
- struct {
- uint8_t valid;
- uint8_t unused[1];
- __be16 hash_next;
- __be32 vnode;
- __be32 unique;
- uint8_t name[16];
- uint8_t overflow[4]; /* if any char of the name (inc
- * NUL) reaches here, consume
- * the next dirent too */
- } u;
- uint8_t extended_name[32];
-};
-
-/* AFS directory page header (one at the beginning of every 2048-byte chunk) */
-struct afs_dir_pagehdr {
- __be16 npages;
- __be16 magic;
-#define AFS_DIR_MAGIC htons(1234)
- uint8_t nentries;
- uint8_t bitmap[8];
- uint8_t pad[19];
-};
-
-/* directory block layout */
-union afs_dir_block {
-
- struct afs_dir_pagehdr pagehdr;
-
- struct {
- struct afs_dir_pagehdr pagehdr;
- uint8_t alloc_ctrs[128];
- /* dir hash table */
- uint16_t hashtable[AFS_DIR_HASHTBL_SIZE];
- } hdr;
-
- union afs_dirent dirents[AFS_DIRENT_PER_BLOCK];
-};
-
-/* layout on a linux VM page */
-struct afs_dir_page {
- union afs_dir_block blocks[PAGE_SIZE / sizeof(union afs_dir_block)];
+struct afs_lookup_one_cookie {
+ struct dir_context ctx;
+ struct qstr name;
+ bool found;
+ struct afs_fid fid;
};
struct afs_lookup_cookie {
- struct dir_context ctx;
- struct afs_fid fid;
- struct qstr name;
- int found;
+ struct dir_context ctx;
+ struct qstr name;
+ bool found;
+ bool one_only;
+ unsigned short nr_fids;
+ struct afs_file_status *statuses;
+ struct afs_callback *callbacks;
+ struct afs_fid fids[50];
};
/*
* check that a directory page is valid
*/
-bool afs_dir_check_page(struct inode *dir, struct page *page)
+static bool afs_dir_check_page(struct afs_vnode *dvnode, struct page *page,
+ loff_t i_size)
{
- struct afs_dir_page *dbuf;
- struct afs_vnode *vnode = AFS_FS_I(dir);
- loff_t latter, i_size, off;
+ struct afs_xdr_dir_page *dbuf;
+ loff_t latter, off;
int tmp, qty;
-#if 0
- /* check the page count */
- qty = desc.size / sizeof(dbuf->blocks[0]);
- if (qty == 0)
- goto error;
-
- if (page->index == 0 && qty != ntohs(dbuf->blocks[0].pagehdr.npages)) {
- printk("kAFS: %s(%lu): wrong number of dir blocks %d!=%hu\n",
- __func__, dir->i_ino, qty,
- ntohs(dbuf->blocks[0].pagehdr.npages));
- goto error;
- }
-#endif
-
/* Determine how many magic numbers there should be in this page, but
* we must take care because the directory may change size under us.
*/
off = page_offset(page);
- i_size = i_size_read(dir);
if (i_size <= off)
goto checked;
@@ -178,112 +127,225 @@ bool afs_dir_check_page(struct inode *dir, struct page *page)
qty = PAGE_SIZE;
else
qty = latter;
- qty /= sizeof(union afs_dir_block);
+ qty /= sizeof(union afs_xdr_dir_block);
/* check them */
- dbuf = page_address(page);
+ dbuf = kmap(page);
for (tmp = 0; tmp < qty; tmp++) {
- if (dbuf->blocks[tmp].pagehdr.magic != AFS_DIR_MAGIC) {
+ if (dbuf->blocks[tmp].hdr.magic != AFS_DIR_MAGIC) {
printk("kAFS: %s(%lx): bad magic %d/%d is %04hx\n",
- __func__, dir->i_ino, tmp, qty,
- ntohs(dbuf->blocks[tmp].pagehdr.magic));
- trace_afs_dir_check_failed(vnode, off, i_size);
+ __func__, dvnode->vfs_inode.i_ino, tmp, qty,
+ ntohs(dbuf->blocks[tmp].hdr.magic));
+ trace_afs_dir_check_failed(dvnode, off, i_size);
+ kunmap(page);
goto error;
}
+
+ /* Make sure each block is NUL terminated so we can reasonably
+ * use string functions on it. The filenames in the page
+ * *should* be NUL-terminated anyway.
+ */
+ ((u8 *)&dbuf->blocks[tmp])[AFS_DIR_BLOCK_SIZE - 1] = 0;
}
+ kunmap(page);
+
checked:
- SetPageChecked(page);
+ afs_stat_v(dvnode, n_read_dir);
return true;
error:
- SetPageError(page);
return false;
}
/*
- * discard a page cached in the pagecache
+ * open an AFS directory file
*/
-static inline void afs_dir_put_page(struct page *page)
+static int afs_dir_open(struct inode *inode, struct file *file)
{
- kunmap(page);
- unlock_page(page);
- put_page(page);
+ _enter("{%lu}", inode->i_ino);
+
+ BUILD_BUG_ON(sizeof(union afs_xdr_dir_block) != 2048);
+ BUILD_BUG_ON(sizeof(union afs_xdr_dirent) != 32);
+
+ if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(inode)->flags))
+ return -ENOENT;
+
+ return afs_open(inode, file);
}
/*
- * get a page into the pagecache
+ * Read the directory into the pagecache in one go, scrubbing the previous
+ * contents. The list of pages is returned, pinning them so that they don't
+ * get reclaimed during the iteration.
*/
-static struct page *afs_dir_get_page(struct inode *dir, unsigned long index,
- struct key *key)
+static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
{
- struct page *page;
- _enter("{%lu},%lu", dir->i_ino, index);
-
- page = read_cache_page(dir->i_mapping, index, afs_page_filler, key);
- if (!IS_ERR(page)) {
- lock_page(page);
- kmap(page);
- if (unlikely(!PageChecked(page))) {
- if (PageError(page))
- goto fail;
- }
+ struct afs_read *req;
+ loff_t i_size;
+ int nr_pages, nr_inline, i, n;
+ int ret = -ENOMEM;
+
+retry:
+ i_size = i_size_read(&dvnode->vfs_inode);
+ if (i_size < 2048)
+ return ERR_PTR(-EIO);
+ if (i_size > 2048 * 1024)
+ return ERR_PTR(-EFBIG);
+
+ _enter("%llu", i_size);
+
+ /* Get a request record to hold the page list. We want to hold it
+ * inline if we can, but we don't want to make an order 1 allocation.
+ */
+ nr_pages = (i_size + PAGE_SIZE - 1) / PAGE_SIZE;
+ nr_inline = nr_pages;
+ if (nr_inline > (PAGE_SIZE - sizeof(*req)) / sizeof(struct page *))
+ nr_inline = 0;
+
+ req = kzalloc(sizeof(*req) + sizeof(struct page *) * nr_inline,
+ GFP_KERNEL);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ refcount_set(&req->usage, 1);
+ req->nr_pages = nr_pages;
+ req->actual_len = i_size; /* May change */
+ req->len = nr_pages * PAGE_SIZE; /* We can ask for more than there is */
+ req->data_version = dvnode->status.data_version; /* May change */
+ if (nr_inline > 0) {
+ req->pages = req->array;
+ } else {
+ req->pages = kcalloc(nr_pages, sizeof(struct page *),
+ GFP_KERNEL);
+ if (!req->pages)
+ goto error;
}
- return page;
-fail:
- afs_dir_put_page(page);
- _leave(" = -EIO");
- return ERR_PTR(-EIO);
-}
+ /* Get a list of all the pages that hold or will hold the directory
+ * content. We need to fill in any gaps that we might find where the
+ * memory reclaimer has been at work. If there are any gaps, we will
+ * need to reread the entire directory contents.
+ */
+ i = 0;
+ do {
+ n = find_get_pages_contig(dvnode->vfs_inode.i_mapping, i,
+ req->nr_pages - i,
+ req->pages + i);
+ _debug("find %u at %u/%u", n, i, req->nr_pages);
+ if (n == 0) {
+ gfp_t gfp = dvnode->vfs_inode.i_mapping->gfp_mask;
+
+ if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+ afs_stat_v(dvnode, n_inval);
+
+ ret = -ENOMEM;
+ req->pages[i] = __page_cache_alloc(gfp);
+ if (!req->pages[i])
+ goto error;
+ ret = add_to_page_cache_lru(req->pages[i],
+ dvnode->vfs_inode.i_mapping,
+ i, gfp);
+ if (ret < 0)
+ goto error;
+
+ set_page_private(req->pages[i], 1);
+ SetPagePrivate(req->pages[i]);
+ unlock_page(req->pages[i]);
+ i++;
+ } else {
+ i += n;
+ }
+ } while (i < req->nr_pages);
-/*
- * open an AFS directory file
- */
-static int afs_dir_open(struct inode *inode, struct file *file)
-{
- _enter("{%lu}", inode->i_ino);
+ /* If we're going to reload, we need to lock all the pages to prevent
+ * races.
+ */
+ if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
+ ret = -ERESTARTSYS;
+ for (i = 0; i < req->nr_pages; i++)
+ if (lock_page_killable(req->pages[i]) < 0)
+ goto error_unlock;
- BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048);
- BUILD_BUG_ON(sizeof(union afs_dirent) != 32);
+ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+ goto success;
- if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(inode)->flags))
- return -ENOENT;
+ ret = afs_fetch_data(dvnode, key, req);
+ if (ret < 0)
+ goto error_unlock_all;
- return afs_open(inode, file);
+ task_io_account_read(PAGE_SIZE * req->nr_pages);
+
+ if (req->len < req->file_size)
+ goto content_has_grown;
+
+ /* Validate the data we just read. */
+ ret = -EIO;
+ for (i = 0; i < req->nr_pages; i++)
+ if (!afs_dir_check_page(dvnode, req->pages[i],
+ req->actual_len))
+ goto error_unlock_all;
+
+ // TODO: Trim excess pages
+
+ set_bit(AFS_VNODE_DIR_VALID, &dvnode->flags);
+ }
+
+success:
+ i = req->nr_pages;
+ while (i > 0)
+ unlock_page(req->pages[--i]);
+ return req;
+
+error_unlock_all:
+ i = req->nr_pages;
+error_unlock:
+ while (i > 0)
+ unlock_page(req->pages[--i]);
+error:
+ afs_put_read(req);
+ _leave(" = %d", ret);
+ return ERR_PTR(ret);
+
+content_has_grown:
+ i = req->nr_pages;
+ while (i > 0)
+ unlock_page(req->pages[--i]);
+ afs_put_read(req);
+ goto retry;
}
/*
* deal with one block in an AFS directory
*/
static int afs_dir_iterate_block(struct dir_context *ctx,
- union afs_dir_block *block,
+ union afs_xdr_dir_block *block,
unsigned blkoff)
{
- union afs_dirent *dire;
+ union afs_xdr_dirent *dire;
unsigned offset, next, curr;
size_t nlen;
int tmp;
_enter("%u,%x,%p,,",(unsigned)ctx->pos,blkoff,block);
- curr = (ctx->pos - blkoff) / sizeof(union afs_dirent);
+ curr = (ctx->pos - blkoff) / sizeof(union afs_xdr_dirent);
/* walk through the block, an entry at a time */
- for (offset = AFS_DIRENT_PER_BLOCK - block->pagehdr.nentries;
- offset < AFS_DIRENT_PER_BLOCK;
+ for (offset = (blkoff == 0 ? AFS_DIR_RESV_BLOCKS0 : AFS_DIR_RESV_BLOCKS);
+ offset < AFS_DIR_SLOTS_PER_BLOCK;
offset = next
) {
next = offset + 1;
/* skip entries marked unused in the bitmap */
- if (!(block->pagehdr.bitmap[offset / 8] &
+ if (!(block->hdr.bitmap[offset / 8] &
(1 << (offset % 8)))) {
_debug("ENT[%zu.%u]: unused",
- blkoff / sizeof(union afs_dir_block), offset);
+ blkoff / sizeof(union afs_xdr_dir_block), offset);
if (offset >= curr)
ctx->pos = blkoff +
- next * sizeof(union afs_dirent);
+ next * sizeof(union afs_xdr_dirent);
continue;
}
@@ -291,34 +353,34 @@ static int afs_dir_iterate_block(struct dir_context *ctx,
dire = &block->dirents[offset];
nlen = strnlen(dire->u.name,
sizeof(*block) -
- offset * sizeof(union afs_dirent));
+ offset * sizeof(union afs_xdr_dirent));
_debug("ENT[%zu.%u]: %s %zu \"%s\"",
- blkoff / sizeof(union afs_dir_block), offset,
+ blkoff / sizeof(union afs_xdr_dir_block), offset,
(offset < curr ? "skip" : "fill"),
nlen, dire->u.name);
/* work out where the next possible entry is */
- for (tmp = nlen; tmp > 15; tmp -= sizeof(union afs_dirent)) {
- if (next >= AFS_DIRENT_PER_BLOCK) {
+ for (tmp = nlen; tmp > 15; tmp -= sizeof(union afs_xdr_dirent)) {
+ if (next >= AFS_DIR_SLOTS_PER_BLOCK) {
_debug("ENT[%zu.%u]:"
" %u travelled beyond end dir block"
" (len %u/%zu)",
- blkoff / sizeof(union afs_dir_block),
+ blkoff / sizeof(union afs_xdr_dir_block),
offset, next, tmp, nlen);
return -EIO;
}
- if (!(block->pagehdr.bitmap[next / 8] &
+ if (!(block->hdr.bitmap[next / 8] &
(1 << (next % 8)))) {
_debug("ENT[%zu.%u]:"
" %u unmarked extension (len %u/%zu)",
- blkoff / sizeof(union afs_dir_block),
+ blkoff / sizeof(union afs_xdr_dir_block),
offset, next, tmp, nlen);
return -EIO;
}
_debug("ENT[%zu.%u]: ext %u/%zu",
- blkoff / sizeof(union afs_dir_block),
+ blkoff / sizeof(union afs_xdr_dir_block),
next, tmp, nlen);
next++;
}
@@ -330,13 +392,14 @@ static int afs_dir_iterate_block(struct dir_context *ctx,
/* found the next entry */
if (!dir_emit(ctx, dire->u.name, nlen,
ntohl(dire->u.vnode),
- ctx->actor == afs_lookup_filldir ?
+ (ctx->actor == afs_lookup_filldir ||
+ ctx->actor == afs_lookup_one_filldir)?
ntohl(dire->u.unique) : DT_UNKNOWN)) {
_leave(" = 0 [full]");
return 0;
}
- ctx->pos = blkoff + next * sizeof(union afs_dirent);
+ ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
}
_leave(" = 1 [more]");
@@ -349,8 +412,10 @@ static int afs_dir_iterate_block(struct dir_context *ctx,
static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
struct key *key)
{
- union afs_dir_block *dblock;
- struct afs_dir_page *dbuf;
+ struct afs_vnode *dvnode = AFS_FS_I(dir);
+ struct afs_xdr_dir_page *dbuf;
+ union afs_xdr_dir_block *dblock;
+ struct afs_read *req;
struct page *page;
unsigned blkoff, limit;
int ret;
@@ -362,45 +427,53 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
return -ESTALE;
}
+ req = afs_read_dir(dvnode, key);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
/* round the file position up to the next entry boundary */
- ctx->pos += sizeof(union afs_dirent) - 1;
- ctx->pos &= ~(sizeof(union afs_dirent) - 1);
+ ctx->pos += sizeof(union afs_xdr_dirent) - 1;
+ ctx->pos &= ~(sizeof(union afs_xdr_dirent) - 1);
/* walk through the blocks in sequence */
ret = 0;
- while (ctx->pos < dir->i_size) {
- blkoff = ctx->pos & ~(sizeof(union afs_dir_block) - 1);
+ while (ctx->pos < req->actual_len) {
+ blkoff = ctx->pos & ~(sizeof(union afs_xdr_dir_block) - 1);
- /* fetch the appropriate page from the directory */
- page = afs_dir_get_page(dir, blkoff / PAGE_SIZE, key);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
+ /* Fetch the appropriate page from the directory and re-add it
+ * to the LRU.
+ */
+ page = req->pages[blkoff / PAGE_SIZE];
+ if (!page) {
+ ret = -EIO;
break;
}
+ mark_page_accessed(page);
limit = blkoff & ~(PAGE_SIZE - 1);
- dbuf = page_address(page);
+ dbuf = kmap(page);
/* deal with the individual blocks stashed on this page */
do {
dblock = &dbuf->blocks[(blkoff % PAGE_SIZE) /
- sizeof(union afs_dir_block)];
+ sizeof(union afs_xdr_dir_block)];
ret = afs_dir_iterate_block(ctx, dblock, blkoff);
if (ret != 1) {
- afs_dir_put_page(page);
+ kunmap(page);
goto out;
}
- blkoff += sizeof(union afs_dir_block);
+ blkoff += sizeof(union afs_xdr_dir_block);
} while (ctx->pos < dir->i_size && blkoff < limit);
- afs_dir_put_page(page);
+ kunmap(page);
ret = 0;
}
out:
+ afs_put_read(req);
_leave(" = %d", ret);
return ret;
}
@@ -414,23 +487,23 @@ static int afs_readdir(struct file *file, struct dir_context *ctx)
}
/*
- * search the directory for a name
+ * Search the directory for a single name
* - if afs_dir_iterate_block() spots this function, it'll pass the FID
* uniquifier through dtype
*/
-static int afs_lookup_filldir(struct dir_context *ctx, const char *name,
- int nlen, loff_t fpos, u64 ino, unsigned dtype)
+static int afs_lookup_one_filldir(struct dir_context *ctx, const char *name,
+ int nlen, loff_t fpos, u64 ino, unsigned dtype)
{
- struct afs_lookup_cookie *cookie =
- container_of(ctx, struct afs_lookup_cookie, ctx);
+ struct afs_lookup_one_cookie *cookie =
+ container_of(ctx, struct afs_lookup_one_cookie, ctx);
_enter("{%s,%u},%s,%u,,%llu,%u",
cookie->name.name, cookie->name.len, name, nlen,
(unsigned long long) ino, dtype);
/* insanity checks first */
- BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048);
- BUILD_BUG_ON(sizeof(union afs_dirent) != 32);
+ BUILD_BUG_ON(sizeof(union afs_xdr_dir_block) != 2048);
+ BUILD_BUG_ON(sizeof(union afs_xdr_dirent) != 32);
if (cookie->name.len != nlen ||
memcmp(cookie->name.name, name, nlen) != 0) {
@@ -447,15 +520,15 @@ static int afs_lookup_filldir(struct dir_context *ctx, const char *name,
}
/*
- * do a lookup in a directory
+ * Do a lookup of a single name in a directory
* - just returns the FID the dentry name maps to if found
*/
-static int afs_do_lookup(struct inode *dir, struct dentry *dentry,
- struct afs_fid *fid, struct key *key)
+static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
+ struct afs_fid *fid, struct key *key)
{
struct afs_super_info *as = dir->i_sb->s_fs_info;
- struct afs_lookup_cookie cookie = {
- .ctx.actor = afs_lookup_filldir,
+ struct afs_lookup_one_cookie cookie = {
+ .ctx.actor = afs_lookup_one_filldir,
.name = dentry->d_name,
.fid.vid = as->volume->vid
};
@@ -482,70 +555,265 @@ static int afs_do_lookup(struct inode *dir, struct dentry *dentry,
}
/*
- * Probe to see if a cell may exist. This prevents positive dentries from
- * being created unnecessarily.
+ * search the directory for a name
+ * - if afs_dir_iterate_block() spots this function, it'll pass the FID
+ * uniquifier through dtype
*/
-static int afs_probe_cell_name(struct dentry *dentry)
+static int afs_lookup_filldir(struct dir_context *ctx, const char *name,
+ int nlen, loff_t fpos, u64 ino, unsigned dtype)
{
- struct afs_cell *cell;
- const char *name = dentry->d_name.name;
- size_t len = dentry->d_name.len;
+ struct afs_lookup_cookie *cookie =
+ container_of(ctx, struct afs_lookup_cookie, ctx);
int ret;
- /* Names prefixed with a dot are R/W mounts. */
- if (name[0] == '.') {
- if (len == 1)
- return -EINVAL;
- name++;
- len--;
- }
+ _enter("{%s,%u},%s,%u,,%llu,%u",
+ cookie->name.name, cookie->name.len, name, nlen,
+ (unsigned long long) ino, dtype);
- cell = afs_lookup_cell_rcu(afs_d2net(dentry), name, len);
- if (!IS_ERR(cell)) {
- afs_put_cell(afs_d2net(dentry), cell);
- return 0;
+ /* insanity checks first */
+ BUILD_BUG_ON(sizeof(union afs_xdr_dir_block) != 2048);
+ BUILD_BUG_ON(sizeof(union afs_xdr_dirent) != 32);
+
+ if (cookie->found) {
+ if (cookie->nr_fids < 50) {
+ cookie->fids[cookie->nr_fids].vnode = ino;
+ cookie->fids[cookie->nr_fids].unique = dtype;
+ cookie->nr_fids++;
+ }
+ } else if (cookie->name.len == nlen &&
+ memcmp(cookie->name.name, name, nlen) == 0) {
+ cookie->fids[0].vnode = ino;
+ cookie->fids[0].unique = dtype;
+ cookie->found = 1;
+ if (cookie->one_only)
+ return -1;
}
- ret = dns_query("afsdb", name, len, "ipv4", NULL, NULL);
- if (ret == -ENODATA)
- ret = -EDESTADDRREQ;
+ ret = cookie->nr_fids >= 50 ? -1 : 0;
+ _leave(" = %d", ret);
return ret;
}
/*
- * Try to auto mount the mountpoint with pseudo directory, if the autocell
- * operation is setted.
+ * Do a lookup in a directory. We make use of bulk lookup to query a slew of
+ * files in one go and create inodes for them. The inode of the file we were
+ * asked for is returned.
*/
-static struct inode *afs_try_auto_mntpt(struct dentry *dentry,
- struct inode *dir, struct afs_fid *fid)
+static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
+ struct key *key)
{
- struct afs_vnode *vnode = AFS_FS_I(dir);
- struct inode *inode;
- int ret = -ENOENT;
+ struct afs_lookup_cookie *cookie;
+ struct afs_cb_interest *cbi = NULL;
+ struct afs_super_info *as = dir->i_sb->s_fs_info;
+ struct afs_iget_data data;
+ struct afs_fs_cursor fc;
+ struct afs_vnode *dvnode = AFS_FS_I(dir);
+ struct inode *inode = NULL;
+ int ret, i;
- _enter("%p{%pd}, {%x:%u}",
- dentry, dentry, vnode->fid.vid, vnode->fid.vnode);
+ _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
+
+ cookie = kzalloc(sizeof(struct afs_lookup_cookie), GFP_KERNEL);
+ if (!cookie)
+ return ERR_PTR(-ENOMEM);
+
+ cookie->ctx.actor = afs_lookup_filldir;
+ cookie->name = dentry->d_name;
+ cookie->nr_fids = 1; /* slot 0 is saved for the fid we actually want */
+
+ read_seqlock_excl(&dvnode->cb_lock);
+ if (dvnode->cb_interest &&
+ dvnode->cb_interest->server &&
+ test_bit(AFS_SERVER_FL_NO_IBULK, &dvnode->cb_interest->server->flags))
+ cookie->one_only = true;
+ read_sequnlock_excl(&dvnode->cb_lock);
+
+ for (i = 0; i < 50; i++)
+ cookie->fids[i].vid = as->volume->vid;
+
+ /* search the directory */
+ ret = afs_dir_iterate(dir, &cookie->ctx, key);
+ if (ret < 0) {
+ inode = ERR_PTR(ret);
+ goto out;
+ }
- if (!test_bit(AFS_VNODE_AUTOCELL, &vnode->flags))
+ inode = ERR_PTR(-ENOENT);
+ if (!cookie->found)
goto out;
- ret = afs_probe_cell_name(dentry);
- if (ret < 0)
+ /* Check to see if we already have an inode for the primary fid. */
+ data.volume = dvnode->volume;
+ data.fid = cookie->fids[0];
+ inode = ilookup5(dir->i_sb, cookie->fids[0].vnode, afs_iget5_test, &data);
+ if (inode)
goto out;
- inode = afs_iget_pseudo_dir(dir->i_sb, false);
- if (IS_ERR(inode)) {
- ret = PTR_ERR(inode);
+ /* Need space for examining all the selected files */
+ inode = ERR_PTR(-ENOMEM);
+ cookie->statuses = kcalloc(cookie->nr_fids, sizeof(struct afs_file_status),
+ GFP_KERNEL);
+ if (!cookie->statuses)
goto out;
+
+ cookie->callbacks = kcalloc(cookie->nr_fids, sizeof(struct afs_callback),
+ GFP_KERNEL);
+ if (!cookie->callbacks)
+ goto out_s;
+
+ /* Try FS.InlineBulkStatus first. Abort codes for the individual
+ * lookups contained therein are stored in the reply without aborting
+ * the whole operation.
+ */
+ if (cookie->one_only)
+ goto no_inline_bulk_status;
+
+ inode = ERR_PTR(-ERESTARTSYS);
+ if (afs_begin_vnode_operation(&fc, dvnode, key)) {
+ while (afs_select_fileserver(&fc)) {
+ if (test_bit(AFS_SERVER_FL_NO_IBULK,
+ &fc.cbi->server->flags)) {
+ fc.ac.abort_code = RX_INVALID_OPERATION;
+ fc.ac.error = -ECONNABORTED;
+ break;
+ }
+ afs_fs_inline_bulk_status(&fc,
+ afs_v2net(dvnode),
+ cookie->fids,
+ cookie->statuses,
+ cookie->callbacks,
+ cookie->nr_fids, NULL);
+ }
+
+ if (fc.ac.error == 0)
+ cbi = afs_get_cb_interest(fc.cbi);
+ if (fc.ac.abort_code == RX_INVALID_OPERATION)
+ set_bit(AFS_SERVER_FL_NO_IBULK, &fc.cbi->server->flags);
+ inode = ERR_PTR(afs_end_vnode_operation(&fc));
}
- *fid = AFS_FS_I(inode)->fid;
- _leave("= %p", inode);
- return inode;
+ if (!IS_ERR(inode))
+ goto success;
+ if (fc.ac.abort_code != RX_INVALID_OPERATION)
+ goto out_c;
+
+no_inline_bulk_status:
+ /* We could try FS.BulkStatus next, but this aborts the entire op if
+ * any of the lookups fails - so, for the moment, revert to
+ * FS.FetchStatus for just the primary fid.
+ */
+ cookie->nr_fids = 1;
+ inode = ERR_PTR(-ERESTARTSYS);
+ if (afs_begin_vnode_operation(&fc, dvnode, key)) {
+ while (afs_select_fileserver(&fc)) {
+ afs_fs_fetch_status(&fc,
+ afs_v2net(dvnode),
+ cookie->fids,
+ cookie->statuses,
+ cookie->callbacks,
+ NULL);
+ }
+
+ if (fc.ac.error == 0)
+ cbi = afs_get_cb_interest(fc.cbi);
+ inode = ERR_PTR(afs_end_vnode_operation(&fc));
+ }
+ if (IS_ERR(inode))
+ goto out_c;
+
+ for (i = 0; i < cookie->nr_fids; i++)
+ cookie->statuses[i].abort_code = 0;
+
+success:
+ /* Turn all the files into inodes and save the first one - which is the
+ * one we actually want.
+ */
+ if (cookie->statuses[0].abort_code != 0)
+ inode = ERR_PTR(afs_abort_to_error(cookie->statuses[0].abort_code));
+
+ for (i = 0; i < cookie->nr_fids; i++) {
+ struct inode *ti;
+
+ if (cookie->statuses[i].abort_code != 0)
+ continue;
+
+ ti = afs_iget(dir->i_sb, key, &cookie->fids[i],
+ &cookie->statuses[i],
+ &cookie->callbacks[i],
+ cbi);
+ if (i == 0) {
+ inode = ti;
+ } else {
+ if (!IS_ERR(ti))
+ iput(ti);
+ }
+ }
+
+out_c:
+ afs_put_cb_interest(afs_v2net(dvnode), cbi);
+ kfree(cookie->callbacks);
+out_s:
+ kfree(cookie->statuses);
out:
- _leave("= %d", ret);
- return ERR_PTR(ret);
+ kfree(cookie);
+ return inode;
+}
+
+/*
+ * Look up an entry in a directory with @sys substitution.
+ */
+static struct dentry *afs_lookup_atsys(struct inode *dir, struct dentry *dentry,
+ struct key *key)
+{
+ struct afs_sysnames *subs;
+ struct afs_net *net = afs_i2net(dir);
+ struct dentry *ret;
+ char *buf, *p, *name;
+ int len, i;
+
+ _enter("");
+
+ ret = ERR_PTR(-ENOMEM);
+ p = buf = kmalloc(AFSNAMEMAX, GFP_KERNEL);
+ if (!buf)
+ goto out_p;
+ if (dentry->d_name.len > 4) {
+ memcpy(p, dentry->d_name.name, dentry->d_name.len - 4);
+ p += dentry->d_name.len - 4;
+ }
+
+ /* There is an ordered list of substitutes that we have to try. */
+ read_lock(&net->sysnames_lock);
+ subs = net->sysnames;
+ refcount_inc(&subs->usage);
+ read_unlock(&net->sysnames_lock);
+
+ for (i = 0; i < subs->nr; i++) {
+ name = subs->subs[i];
+ len = dentry->d_name.len - 4 + strlen(name);
+ if (len >= AFSNAMEMAX) {
+ ret = ERR_PTR(-ENAMETOOLONG);
+ goto out_s;
+ }
+
+ strcpy(p, name);
+ ret = lookup_one_len(buf, dentry->d_parent, len);
+ if (IS_ERR(ret) || d_is_positive(ret))
+ goto out_s;
+ dput(ret);
+ }
+
+ /* We don't want to d_add() the @sys dentry here as we don't want
+ * the cached dentry to hide changes to the sysnames list.
+ */
+ ret = NULL;
+out_s:
+ afs_put_sysnames(subs);
+ kfree(buf);
+out_p:
+ key_put(key);
+ return ret;
}
/*
@@ -554,16 +822,13 @@ out:
static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
- struct afs_vnode *vnode;
- struct afs_fid fid;
+ struct afs_vnode *dvnode = AFS_FS_I(dir);
struct inode *inode;
struct key *key;
int ret;
- vnode = AFS_FS_I(dir);
-
_enter("{%x:%u},%p{%pd},",
- vnode->fid.vid, vnode->fid.vnode, dentry, dentry);
+ dvnode->fid.vid, dvnode->fid.vnode, dentry, dentry);
ASSERTCMP(d_inode(dentry), ==, NULL);
@@ -572,28 +837,37 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
return ERR_PTR(-ENAMETOOLONG);
}
- if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
+ if (test_bit(AFS_VNODE_DELETED, &dvnode->flags)) {
_leave(" = -ESTALE");
return ERR_PTR(-ESTALE);
}
- key = afs_request_key(vnode->volume->cell);
+ key = afs_request_key(dvnode->volume->cell);
if (IS_ERR(key)) {
_leave(" = %ld [key]", PTR_ERR(key));
return ERR_CAST(key);
}
- ret = afs_validate(vnode, key);
+ ret = afs_validate(dvnode, key);
if (ret < 0) {
key_put(key);
_leave(" = %d [val]", ret);
return ERR_PTR(ret);
}
- ret = afs_do_lookup(dir, dentry, &fid, key);
- if (ret < 0) {
+ if (dentry->d_name.len >= 4 &&
+ dentry->d_name.name[dentry->d_name.len - 4] == '@' &&
+ dentry->d_name.name[dentry->d_name.len - 3] == 's' &&
+ dentry->d_name.name[dentry->d_name.len - 2] == 'y' &&
+ dentry->d_name.name[dentry->d_name.len - 1] == 's')
+ return afs_lookup_atsys(dir, dentry, key);
+
+ afs_stat_v(dvnode, n_lookup);
+ inode = afs_do_lookup(dir, dentry, key);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
if (ret == -ENOENT) {
- inode = afs_try_auto_mntpt(dentry, dir, &fid);
+ inode = afs_try_auto_mntpt(dentry, dir);
if (!IS_ERR(inode)) {
key_put(key);
goto success;
@@ -611,10 +885,9 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
_leave(" = %d [do]", ret);
return ERR_PTR(ret);
}
- dentry->d_fsdata = (void *)(unsigned long) vnode->status.data_version;
+ dentry->d_fsdata = (void *)(unsigned long)dvnode->status.data_version;
/* instantiate the dentry */
- inode = afs_iget(dir->i_sb, key, &fid, NULL, NULL, NULL);
key_put(key);
if (IS_ERR(inode)) {
_leave(" = %ld", PTR_ERR(inode));
@@ -623,9 +896,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
success:
d_add(dentry, inode);
- _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%u }",
- fid.vnode,
- fid.unique,
+ _leave(" = 0 { ino=%lu v=%u }",
d_inode(dentry)->i_ino,
d_inode(dentry)->i_generation);
@@ -633,67 +904,23 @@ success:
}
/*
- * Look up an entry in a dynroot directory.
- */
-static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentry,
- unsigned int flags)
-{
- struct afs_vnode *vnode;
- struct afs_fid fid;
- struct inode *inode;
- int ret;
-
- vnode = AFS_FS_I(dir);
-
- _enter("%pd", dentry);
-
- ASSERTCMP(d_inode(dentry), ==, NULL);
-
- if (dentry->d_name.len >= AFSNAMEMAX) {
- _leave(" = -ENAMETOOLONG");
- return ERR_PTR(-ENAMETOOLONG);
- }
-
- inode = afs_try_auto_mntpt(dentry, dir, &fid);
- if (IS_ERR(inode)) {
- ret = PTR_ERR(inode);
- if (ret == -ENOENT) {
- d_add(dentry, NULL);
- _leave(" = NULL [negative]");
- return NULL;
- }
- _leave(" = %d [do]", ret);
- return ERR_PTR(ret);
- }
-
- d_add(dentry, inode);
- _leave(" = 0 { ino=%lu v=%u }",
- d_inode(dentry)->i_ino, d_inode(dentry)->i_generation);
- return NULL;
-}
-
-/*
* check that a dentry lookup hit has found a valid entry
* - NOTE! the hit can be a negative hit too, so we can't assume we have an
* inode
*/
static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
{
- struct afs_super_info *as = dentry->d_sb->s_fs_info;
struct afs_vnode *vnode, *dir;
struct afs_fid uninitialized_var(fid);
struct dentry *parent;
struct inode *inode;
struct key *key;
- void *dir_version;
+ long dir_version, de_version;
int ret;
if (flags & LOOKUP_RCU)
return -ECHILD;
- if (as->dyn_root)
- return 1;
-
if (d_really_is_positive(dentry)) {
vnode = AFS_FS_I(d_inode(dentry));
_enter("{v={%x:%u} n=%pd fl=%lx},",
@@ -729,14 +956,25 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
goto out_bad_parent;
}
- dir_version = (void *) (unsigned long) dir->status.data_version;
- if (dentry->d_fsdata == dir_version)
- goto out_valid; /* the dir contents are unchanged */
+ /* We only need to invalidate a dentry if the server's copy changed
+ * behind our back. If we made the change, it's no problem. Note that
+ * on a 32-bit system, we only have 32 bits in the dentry to store the
+ * version.
+ */
+ dir_version = (long)dir->status.data_version;
+ de_version = (long)dentry->d_fsdata;
+ if (de_version == dir_version)
+ goto out_valid;
+
+ dir_version = (long)dir->invalid_before;
+ if (de_version - dir_version >= 0)
+ goto out_valid;
_debug("dir modified");
+ afs_stat_v(dir, n_reval);
/* search the directory for this vnode */
- ret = afs_do_lookup(&dir->vfs_inode, dentry, &fid, key);
+ ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key);
switch (ret) {
case 0:
/* the filename maps to something */
@@ -789,7 +1027,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
}
out_valid:
- dentry->d_fsdata = dir_version;
+ dentry->d_fsdata = (void *)dir_version;
dput(parent);
key_put(key);
_leave(" = 1 [valid]");
@@ -840,7 +1078,7 @@ zap:
/*
* handle dentry release
*/
-static void afs_d_release(struct dentry *dentry)
+void afs_d_release(struct dentry *dentry)
{
_enter("%pd", dentry);
}
@@ -854,6 +1092,7 @@ static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
struct afs_file_status *newstatus,
struct afs_callback *newcb)
{
+ struct afs_vnode *vnode;
struct inode *inode;
if (fc->ac.error < 0)
@@ -871,6 +1110,8 @@ static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
return;
}
+ vnode = AFS_FS_I(inode);
+ set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
d_add(new_dentry, inode);
}
@@ -885,6 +1126,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
struct afs_vnode *dvnode = AFS_FS_I(dir);
struct afs_fid newfid;
struct key *key;
+ u64 data_version = dvnode->status.data_version;
int ret;
mode |= S_IFDIR;
@@ -902,7 +1144,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
if (afs_begin_vnode_operation(&fc, dvnode, key)) {
while (afs_select_fileserver(&fc)) {
fc.cb_break = dvnode->cb_break + dvnode->cb_s_break;
- afs_fs_create(&fc, dentry->d_name.name, mode,
+ afs_fs_create(&fc, dentry->d_name.name, mode, data_version,
&newfid, &newstatus, &newcb);
}
@@ -916,6 +1158,11 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
goto error_key;
}
+ if (ret == 0 &&
+ test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+ afs_edit_dir_add(dvnode, &dentry->d_name, &newfid,
+ afs_edit_dir_for_create);
+
key_put(key);
_leave(" = 0");
return 0;
@@ -939,6 +1186,7 @@ static void afs_dir_remove_subdir(struct dentry *dentry)
clear_nlink(&vnode->vfs_inode);
set_bit(AFS_VNODE_DELETED, &vnode->flags);
clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+ clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
}
}
@@ -950,6 +1198,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
struct afs_fs_cursor fc;
struct afs_vnode *dvnode = AFS_FS_I(dir);
struct key *key;
+ u64 data_version = dvnode->status.data_version;
int ret;
_enter("{%x:%u},{%pd}",
@@ -965,13 +1214,18 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
if (afs_begin_vnode_operation(&fc, dvnode, key)) {
while (afs_select_fileserver(&fc)) {
fc.cb_break = dvnode->cb_break + dvnode->cb_s_break;
- afs_fs_remove(&fc, dentry->d_name.name, true);
+ afs_fs_remove(&fc, dentry->d_name.name, true,
+ data_version);
}
afs_vnode_commit_status(&fc, dvnode, fc.cb_break);
ret = afs_end_vnode_operation(&fc);
- if (ret == 0)
+ if (ret == 0) {
afs_dir_remove_subdir(dentry);
+ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+ afs_edit_dir_remove(dvnode, &dentry->d_name,
+ afs_edit_dir_for_rmdir);
+ }
}
key_put(key);
@@ -1036,6 +1290,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
struct key *key;
unsigned long d_version = (unsigned long)dentry->d_fsdata;
+ u64 data_version = dvnode->status.data_version;
int ret;
_enter("{%x:%u},{%pd}",
@@ -1062,7 +1317,8 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
if (afs_begin_vnode_operation(&fc, dvnode, key)) {
while (afs_select_fileserver(&fc)) {
fc.cb_break = dvnode->cb_break + dvnode->cb_s_break;
- afs_fs_remove(&fc, dentry->d_name.name, false);
+ afs_fs_remove(&fc, dentry->d_name.name, false,
+ data_version);
}
afs_vnode_commit_status(&fc, dvnode, fc.cb_break);
@@ -1071,6 +1327,10 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
ret = afs_dir_remove_link(
dentry, key, d_version,
(unsigned long)dvnode->status.data_version);
+ if (ret == 0 &&
+ test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+ afs_edit_dir_remove(dvnode, &dentry->d_name,
+ afs_edit_dir_for_unlink);
}
error_key:
@@ -1092,6 +1352,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct afs_vnode *dvnode = AFS_FS_I(dir);
struct afs_fid newfid;
struct key *key;
+ u64 data_version = dvnode->status.data_version;
int ret;
mode |= S_IFREG;
@@ -1113,7 +1374,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
if (afs_begin_vnode_operation(&fc, dvnode, key)) {
while (afs_select_fileserver(&fc)) {
fc.cb_break = dvnode->cb_break + dvnode->cb_s_break;
- afs_fs_create(&fc, dentry->d_name.name, mode,
+ afs_fs_create(&fc, dentry->d_name.name, mode, data_version,
&newfid, &newstatus, &newcb);
}
@@ -1127,6 +1388,10 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
goto error_key;
}
+ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+ afs_edit_dir_add(dvnode, &dentry->d_name, &newfid,
+ afs_edit_dir_for_create);
+
key_put(key);
_leave(" = 0");
return 0;
@@ -1148,10 +1413,12 @@ static int afs_link(struct dentry *from, struct inode *dir,
struct afs_fs_cursor fc;
struct afs_vnode *dvnode, *vnode;
struct key *key;
+ u64 data_version;
int ret;
vnode = AFS_FS_I(d_inode(from));
dvnode = AFS_FS_I(dir);
+ data_version = dvnode->status.data_version;
_enter("{%x:%u},{%x:%u},{%pd}",
vnode->fid.vid, vnode->fid.vnode,
@@ -1178,7 +1445,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
while (afs_select_fileserver(&fc)) {
fc.cb_break = dvnode->cb_break + dvnode->cb_s_break;
fc.cb_break_2 = vnode->cb_break + vnode->cb_s_break;
- afs_fs_link(&fc, vnode, dentry->d_name.name);
+ afs_fs_link(&fc, vnode, dentry->d_name.name, data_version);
}
afs_vnode_commit_status(&fc, dvnode, fc.cb_break);
@@ -1194,6 +1461,10 @@ static int afs_link(struct dentry *from, struct inode *dir,
goto error_key;
}
+ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+ afs_edit_dir_add(dvnode, &dentry->d_name, &vnode->fid,
+ afs_edit_dir_for_link);
+
key_put(key);
_leave(" = 0");
return 0;
@@ -1217,6 +1488,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
struct afs_vnode *dvnode = AFS_FS_I(dir);
struct afs_fid newfid;
struct key *key;
+ u64 data_version = dvnode->status.data_version;
int ret;
_enter("{%x:%u},{%pd},%s",
@@ -1241,7 +1513,8 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
if (afs_begin_vnode_operation(&fc, dvnode, key)) {
while (afs_select_fileserver(&fc)) {
fc.cb_break = dvnode->cb_break + dvnode->cb_s_break;
- afs_fs_symlink(&fc, dentry->d_name.name, content,
+ afs_fs_symlink(&fc, dentry->d_name.name,
+ content, data_version,
&newfid, &newstatus);
}
@@ -1255,6 +1528,10 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
goto error_key;
}
+ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+ afs_edit_dir_add(dvnode, &dentry->d_name, &newfid,
+ afs_edit_dir_for_symlink);
+
key_put(key);
_leave(" = 0");
return 0;
@@ -1277,6 +1554,8 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct afs_fs_cursor fc;
struct afs_vnode *orig_dvnode, *new_dvnode, *vnode;
struct key *key;
+ u64 orig_data_version, new_data_version;
+ bool new_negative = d_is_negative(new_dentry);
int ret;
if (flags)
@@ -1285,6 +1564,8 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
vnode = AFS_FS_I(d_inode(old_dentry));
orig_dvnode = AFS_FS_I(old_dir);
new_dvnode = AFS_FS_I(new_dir);
+ orig_data_version = orig_dvnode->status.data_version;
+ new_data_version = new_dvnode->status.data_version;
_enter("{%x:%u},{%x:%u},{%x:%u},{%pd}",
orig_dvnode->fid.vid, orig_dvnode->fid.vnode,
@@ -1310,7 +1591,8 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
fc.cb_break = orig_dvnode->cb_break + orig_dvnode->cb_s_break;
fc.cb_break_2 = new_dvnode->cb_break + new_dvnode->cb_s_break;
afs_fs_rename(&fc, old_dentry->d_name.name,
- new_dvnode, new_dentry->d_name.name);
+ new_dvnode, new_dentry->d_name.name,
+ orig_data_version, new_data_version);
}
afs_vnode_commit_status(&fc, orig_dvnode, fc.cb_break);
@@ -1322,9 +1604,68 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto error_key;
}
+ if (ret == 0) {
+ if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags))
+ afs_edit_dir_remove(orig_dvnode, &old_dentry->d_name,
+ afs_edit_dir_for_rename);
+
+ if (!new_negative &&
+ test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags))
+ afs_edit_dir_remove(new_dvnode, &new_dentry->d_name,
+ afs_edit_dir_for_rename);
+
+ if (test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags))
+ afs_edit_dir_add(new_dvnode, &new_dentry->d_name,
+ &vnode->fid, afs_edit_dir_for_rename);
+ }
+
error_key:
key_put(key);
error:
_leave(" = %d", ret);
return ret;
}
+
+/*
+ * Release a directory page and clean up its private state if it's not busy
+ * - return true if the page can now be released, false if not
+ */
+static int afs_dir_releasepage(struct page *page, gfp_t gfp_flags)
+{
+ struct afs_vnode *dvnode = AFS_FS_I(page->mapping->host);
+
+ _enter("{{%x:%u}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, page->index);
+
+ set_page_private(page, 0);
+ ClearPagePrivate(page);
+
+ /* The directory will need reloading. */
+ if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+ afs_stat_v(dvnode, n_relpg);
+ return 1;
+}
+
+/*
+ * Invalidate part or all of a directory page
+ * - clean up the page's private data only if the entire page is being
+ * invalidated (offset 0 and full page length)
+ */
+static void afs_dir_invalidatepage(struct page *page, unsigned int offset,
+ unsigned int length)
+{
+ struct afs_vnode *dvnode = AFS_FS_I(page->mapping->host);
+
+ _enter("{%lu},%u,%u", page->index, offset, length);
+
+ BUG_ON(!PageLocked(page));
+
+ /* The directory will need reloading. */
+ if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+ afs_stat_v(dvnode, n_inval);
+
+ /* we clean up only if the entire page is being invalidated */
+ if (offset == 0 && length == PAGE_SIZE) {
+ set_page_private(page, 0);
+ ClearPagePrivate(page);
+ }
+}
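
The dir.c hunks above share one pattern: each directory-modifying operation snapshots dvnode->status.data_version before issuing the RPC, passes that snapshot down to the afs_fs_*() call, and, when the operation succeeds while the cached directory image is still marked AFS_VNODE_DIR_VALID, patches the image in place with afs_edit_dir_add()/afs_edit_dir_remove() instead of refetching it. Below is a minimal, self-contained sketch of the version check behind that decision; the struct and function names are illustrative, not the kernel API.

    /* Sketch only: decide between a local directory edit and a refetch. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct dir_state {
            uint64_t data_version;   /* last version seen from the server */
            bool     contents_valid; /* analogue of AFS_VNODE_DIR_VALID */
    };

    /* Called with the version snapshotted before the RPC and the version
     * the server reported afterwards.
     */
    static void note_dir_change(struct dir_state *d, uint64_t snapshot,
                                uint64_t server_version)
    {
            if (d->contents_valid && server_version == snapshot + 1) {
                    /* Only our own change landed: patch the cached image. */
                    printf("apply local edit (v%llu -> v%llu)\n",
                           (unsigned long long)snapshot,
                           (unsigned long long)server_version);
            } else {
                    /* Someone else changed the directory too: refetch it. */
                    d->contents_valid = false;
                    printf("invalidate and refetch\n");
            }
            d->data_version = server_version;
    }

    int main(void)
    {
            struct dir_state d = { .data_version = 4, .contents_valid = true };

            note_dir_change(&d, 4, 5); /* bumped by exactly one: local edit */
            note_dir_change(&d, 5, 7); /* skipped a version: refetch */
            return 0;
    }
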
diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c
new file mode 100644
index 000000000000..8b400f5aead5
--- /dev/null
+++ b/fs/afs/dir_edit.c
@@ -0,0 +1,505 @@
+/* AFS filesystem directory editing
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/pagemap.h>
+#include <linux/iversion.h>
+#include "internal.h"
+#include "xdr_fs.h"
+
+/*
+ * Find a number of contiguous clear bits in a directory block bitmask.
+ *
+ * There are 64 slots, which means we can load the entire bitmap into a
+ * variable. The first bit doesn't count as it corresponds to the block header
+ * slot. nr_slots is between 1 and 9.
+ */
+static int afs_find_contig_bits(union afs_xdr_dir_block *block, unsigned int nr_slots)
+{
+ u64 bitmap;
+ u32 mask;
+ int bit, n;
+
+ bitmap = (u64)block->hdr.bitmap[0] << 0 * 8;
+ bitmap |= (u64)block->hdr.bitmap[1] << 1 * 8;
+ bitmap |= (u64)block->hdr.bitmap[2] << 2 * 8;
+ bitmap |= (u64)block->hdr.bitmap[3] << 3 * 8;
+ bitmap |= (u64)block->hdr.bitmap[4] << 4 * 8;
+ bitmap |= (u64)block->hdr.bitmap[5] << 5 * 8;
+ bitmap |= (u64)block->hdr.bitmap[6] << 6 * 8;
+ bitmap |= (u64)block->hdr.bitmap[7] << 7 * 8;
+ bitmap >>= 1; /* The first entry is metadata */
+ bit = 1;
+ mask = (1 << nr_slots) - 1;
+
+ do {
+ if (sizeof(unsigned long) == 8)
+ n = ffz(bitmap);
+ else
+ n = ((u32)bitmap) != 0 ?
+ ffz((u32)bitmap) :
+ ffz((u32)(bitmap >> 32)) + 32;
+ bitmap >>= n;
+ bit += n;
+
+ if ((bitmap & mask) == 0) {
+ if (bit > 64 - nr_slots)
+ return -1;
+ return bit;
+ }
+
+ n = __ffs(bitmap);
+ bitmap >>= n;
+ bit += n;
+ } while (bitmap);
+
+ return -1;
+}
+
+/*
+ * Set a number of contiguous bits in the directory block bitmap.
+ */
+static void afs_set_contig_bits(union afs_xdr_dir_block *block,
+ int bit, unsigned int nr_slots)
+{
+ u64 mask, before, after;
+
+ mask = (1 << nr_slots) - 1;
+ mask <<= bit;
+
+ before = *(u64 *)block->hdr.bitmap;
+
+ block->hdr.bitmap[0] |= (u8)(mask >> 0 * 8);
+ block->hdr.bitmap[1] |= (u8)(mask >> 1 * 8);
+ block->hdr.bitmap[2] |= (u8)(mask >> 2 * 8);
+ block->hdr.bitmap[3] |= (u8)(mask >> 3 * 8);
+ block->hdr.bitmap[4] |= (u8)(mask >> 4 * 8);
+ block->hdr.bitmap[5] |= (u8)(mask >> 5 * 8);
+ block->hdr.bitmap[6] |= (u8)(mask >> 6 * 8);
+ block->hdr.bitmap[7] |= (u8)(mask >> 7 * 8);
+
+ after = *(u64 *)block->hdr.bitmap;
+}
+
+/*
+ * Clear a number of contiguous bits in the directory block bitmap.
+ */
+static void afs_clear_contig_bits(union afs_xdr_dir_block *block,
+ int bit, unsigned int nr_slots)
+{
+ u64 mask, before, after;
+
+ mask = (1 << nr_slots) - 1;
+ mask <<= bit;
+
+ before = *(u64 *)block->hdr.bitmap;
+
+ block->hdr.bitmap[0] &= ~(u8)(mask >> 0 * 8);
+ block->hdr.bitmap[1] &= ~(u8)(mask >> 1 * 8);
+ block->hdr.bitmap[2] &= ~(u8)(mask >> 2 * 8);
+ block->hdr.bitmap[3] &= ~(u8)(mask >> 3 * 8);
+ block->hdr.bitmap[4] &= ~(u8)(mask >> 4 * 8);
+ block->hdr.bitmap[5] &= ~(u8)(mask >> 5 * 8);
+ block->hdr.bitmap[6] &= ~(u8)(mask >> 6 * 8);
+ block->hdr.bitmap[7] &= ~(u8)(mask >> 7 * 8);
+
+ after = *(u64 *)block->hdr.bitmap;
+}
+
+/*
+ * Scan a directory block looking for a dirent of the right name.
+ */
+static int afs_dir_scan_block(union afs_xdr_dir_block *block, struct qstr *name,
+ unsigned int blocknum)
+{
+ union afs_xdr_dirent *de;
+ u64 bitmap;
+ int d, len, n;
+
+ _enter("");
+
+ bitmap = (u64)block->hdr.bitmap[0] << 0 * 8;
+ bitmap |= (u64)block->hdr.bitmap[1] << 1 * 8;
+ bitmap |= (u64)block->hdr.bitmap[2] << 2 * 8;
+ bitmap |= (u64)block->hdr.bitmap[3] << 3 * 8;
+ bitmap |= (u64)block->hdr.bitmap[4] << 4 * 8;
+ bitmap |= (u64)block->hdr.bitmap[5] << 5 * 8;
+ bitmap |= (u64)block->hdr.bitmap[6] << 6 * 8;
+ bitmap |= (u64)block->hdr.bitmap[7] << 7 * 8;
+
+ for (d = (blocknum == 0 ? AFS_DIR_RESV_BLOCKS0 : AFS_DIR_RESV_BLOCKS);
+ d < AFS_DIR_SLOTS_PER_BLOCK;
+ d++) {
+ if (!((bitmap >> d) & 1))
+ continue;
+ de = &block->dirents[d];
+ if (de->u.valid != 1)
+ continue;
+
+ /* The block was NUL-terminated by afs_dir_check_page(). */
+ len = strlen(de->u.name);
+ if (len == name->len &&
+ memcmp(de->u.name, name->name, name->len) == 0)
+ return d;
+
+ n = round_up(12 + len + 1 + 4, AFS_DIR_DIRENT_SIZE);
+ n /= AFS_DIR_DIRENT_SIZE;
+ d += n - 1;
+ }
+
+ return -1;
+}
+
+/*
+ * Initialise a new directory block. Note that block 0 is special and contains
+ * some extra metadata.
+ */
+static void afs_edit_init_block(union afs_xdr_dir_block *meta,
+ union afs_xdr_dir_block *block, int block_num)
+{
+ memset(block, 0, sizeof(*block));
+ block->hdr.npages = htons(1);
+ block->hdr.magic = AFS_DIR_MAGIC;
+ block->hdr.bitmap[0] = 1;
+
+ if (block_num == 0) {
+ block->hdr.bitmap[0] = 0xff;
+ block->hdr.bitmap[1] = 0x1f;
+ memset(block->meta.alloc_ctrs,
+ AFS_DIR_SLOTS_PER_BLOCK,
+ sizeof(block->meta.alloc_ctrs));
+ meta->meta.alloc_ctrs[0] =
+ AFS_DIR_SLOTS_PER_BLOCK - AFS_DIR_RESV_BLOCKS0;
+ }
+
+ if (block_num < AFS_DIR_BLOCKS_WITH_CTR)
+ meta->meta.alloc_ctrs[block_num] =
+ AFS_DIR_SLOTS_PER_BLOCK - AFS_DIR_RESV_BLOCKS;
+}
+
+/*
+ * Edit a directory's file data to add a new directory entry. Doing this after
+ * a create, mkdir, symlink, link or rename operation, provided the data
+ * version number was incremented by exactly one, avoids the need to
+ * re-download the entire directory contents.
+ *
+ * The caller must hold the inode locked.
+ */
+void afs_edit_dir_add(struct afs_vnode *vnode,
+ struct qstr *name, struct afs_fid *new_fid,
+ enum afs_edit_dir_reason why)
+{
+ union afs_xdr_dir_block *meta, *block;
+ struct afs_xdr_dir_page *meta_page, *dir_page;
+ union afs_xdr_dirent *de;
+ struct page *page0, *page;
+ unsigned int need_slots, nr_blocks, b;
+ pgoff_t index;
+ loff_t i_size;
+ gfp_t gfp;
+ int slot;
+
+ _enter(",,{%d,%s},", name->len, name->name);
+
+ i_size = i_size_read(&vnode->vfs_inode);
+ if (i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS ||
+ (i_size & (AFS_DIR_BLOCK_SIZE - 1))) {
+ clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ return;
+ }
+
+ gfp = vnode->vfs_inode.i_mapping->gfp_mask;
+ page0 = find_or_create_page(vnode->vfs_inode.i_mapping, 0, gfp);
+ if (!page0) {
+ clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ _leave(" [fgp]");
+ return;
+ }
+
+ /* Work out how many slots we're going to need. */
+ need_slots = round_up(12 + name->len + 1 + 4, AFS_DIR_DIRENT_SIZE);
+ need_slots /= AFS_DIR_DIRENT_SIZE;
+
+ meta_page = kmap(page0);
+ meta = &meta_page->blocks[0];
+ if (i_size == 0)
+ goto new_directory;
+ nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;
+
+ /* Find a block that has sufficient slots available. Each VM page
+ * contains two or more directory blocks.
+ */
+ for (b = 0; b < nr_blocks + 1; b++) {
+ /* If the directory extended into a new page, then we need to
+ * tack a new page on the end.
+ */
+ index = b / AFS_DIR_BLOCKS_PER_PAGE;
+ if (index == 0) {
+ page = page0;
+ dir_page = meta_page;
+ } else {
+ if (nr_blocks >= AFS_DIR_MAX_BLOCKS)
+ goto error;
+ gfp = vnode->vfs_inode.i_mapping->gfp_mask;
+ page = find_or_create_page(vnode->vfs_inode.i_mapping,
+ index, gfp);
+ if (!page)
+ goto error;
+ if (!PagePrivate(page)) {
+ set_page_private(page, 1);
+ SetPagePrivate(page);
+ }
+ dir_page = kmap(page);
+ }
+
+ /* Abandon the edit if we got a callback break. */
+ if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
+ goto invalidated;
+
+ block = &dir_page->blocks[b % AFS_DIR_BLOCKS_PER_PAGE];
+
+ _debug("block %u: %2u %3u %u",
+ b,
+ (b < AFS_DIR_BLOCKS_WITH_CTR) ? meta->meta.alloc_ctrs[b] : 99,
+ ntohs(block->hdr.npages),
+ ntohs(block->hdr.magic));
+
+ /* Initialise the block if necessary. */
+ if (b == nr_blocks) {
+ _debug("init %u", b);
+ afs_edit_init_block(meta, block, b);
+ i_size_write(&vnode->vfs_inode, (b + 1) * AFS_DIR_BLOCK_SIZE);
+ }
+
+ /* Only the lower directory blocks have an allocation counter in the
+ * meta header. */
+ if (b >= AFS_DIR_BLOCKS_WITH_CTR ||
+ meta->meta.alloc_ctrs[b] >= need_slots) {
+ /* We need to try and find one or more consecutive
+ * slots to hold the entry.
+ */
+ slot = afs_find_contig_bits(block, need_slots);
+ if (slot >= 0) {
+ _debug("slot %u", slot);
+ goto found_space;
+ }
+ }
+
+ if (page != page0) {
+ unlock_page(page);
+ kunmap(page);
+ put_page(page);
+ }
+ }
+
+ /* There are no spare slots of sufficient size, yet the operation
+ * succeeded. Download the directory again.
+ */
+ trace_afs_edit_dir(vnode, why, afs_edit_dir_create_nospc, 0, 0, 0, 0, name->name);
+ clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ goto out_unmap;
+
+new_directory:
+ afs_edit_init_block(meta, meta, 0);
+ i_size = AFS_DIR_BLOCK_SIZE;
+ i_size_write(&vnode->vfs_inode, i_size);
+ slot = AFS_DIR_RESV_BLOCKS0;
+ page = page0;
+ block = meta;
+ nr_blocks = 1;
+ b = 0;
+
+found_space:
+ /* Set the dirent slot. */
+ trace_afs_edit_dir(vnode, why, afs_edit_dir_create, b, slot,
+ new_fid->vnode, new_fid->unique, name->name);
+ de = &block->dirents[slot];
+ de->u.valid = 1;
+ de->u.unused[0] = 0;
+ de->u.hash_next = 0; // TODO: Really need to maintain this
+ de->u.vnode = htonl(new_fid->vnode);
+ de->u.unique = htonl(new_fid->unique);
+ memcpy(de->u.name, name->name, name->len + 1);
+ de->u.name[name->len] = 0;
+
+ /* Adjust the bitmap. */
+ afs_set_contig_bits(block, slot, need_slots);
+ if (page != page0) {
+ unlock_page(page);
+ kunmap(page);
+ put_page(page);
+ }
+
+ /* Adjust the allocation counter. */
+ if (b < AFS_DIR_BLOCKS_WITH_CTR)
+ meta->meta.alloc_ctrs[b] -= need_slots;
+
+ inode_inc_iversion_raw(&vnode->vfs_inode);
+ afs_stat_v(vnode, n_dir_cr);
+ _debug("Insert %s in %u[%u]", name->name, b, slot);
+
+out_unmap:
+ unlock_page(page0);
+ kunmap(page0);
+ put_page(page0);
+ _leave("");
+ return;
+
+invalidated:
+ trace_afs_edit_dir(vnode, why, afs_edit_dir_create_inval, 0, 0, 0, 0, name->name);
+ clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ if (page != page0) {
+ kunmap(page);
+ put_page(page);
+ }
+ goto out_unmap;
+
+error:
+ trace_afs_edit_dir(vnode, why, afs_edit_dir_create_error, 0, 0, 0, 0, name->name);
+ clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ goto out_unmap;
+}
+
+/*
+ * Edit a directory's file data to remove a directory entry. Doing this after
+ * an unlink, rmdir or rename operation, provided the data version number was
+ * incremented by exactly one, avoids the need to re-download the entire
+ * directory contents.
+ *
+ * The caller must hold the inode locked.
+ */
+void afs_edit_dir_remove(struct afs_vnode *vnode,
+ struct qstr *name, enum afs_edit_dir_reason why)
+{
+ struct afs_xdr_dir_page *meta_page, *dir_page;
+ union afs_xdr_dir_block *meta, *block;
+ union afs_xdr_dirent *de;
+ struct page *page0, *page;
+ unsigned int need_slots, nr_blocks, b;
+ pgoff_t index;
+ loff_t i_size;
+ int slot;
+
+ _enter(",,{%d,%s},", name->len, name->name);
+
+ i_size = i_size_read(&vnode->vfs_inode);
+ if (i_size < AFS_DIR_BLOCK_SIZE ||
+ i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS ||
+ (i_size & (AFS_DIR_BLOCK_SIZE - 1))) {
+ clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ return;
+ }
+ nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;
+
+ page0 = find_lock_page(vnode->vfs_inode.i_mapping, 0);
+ if (!page0) {
+ clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ _leave(" [fgp]");
+ return;
+ }
+
+ /* Work out how many slots we're going to discard. */
+ need_slots = round_up(12 + name->len + 1 + 4, AFS_DIR_DIRENT_SIZE);
+ need_slots /= AFS_DIR_DIRENT_SIZE;
+
+ meta_page = kmap(page0);
+ meta = &meta_page->blocks[0];
+
+ /* Find the block that contains the dirent to be removed. Each VM page
+ * contains two or more directory blocks.
+ */
+ for (b = 0; b < nr_blocks; b++) {
+ index = b / AFS_DIR_BLOCKS_PER_PAGE;
+ if (index != 0) {
+ page = find_lock_page(vnode->vfs_inode.i_mapping, index);
+ if (!page)
+ goto error;
+ dir_page = kmap(page);
+ } else {
+ page = page0;
+ dir_page = meta_page;
+ }
+
+ /* Abandon the edit if we got a callback break. */
+ if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
+ goto invalidated;
+
+ block = &dir_page->blocks[b % AFS_DIR_BLOCKS_PER_PAGE];
+
+ if (b > AFS_DIR_BLOCKS_WITH_CTR ||
+ meta->meta.alloc_ctrs[b] <= AFS_DIR_SLOTS_PER_BLOCK - 1 - need_slots) {
+ slot = afs_dir_scan_block(block, name, b);
+ if (slot >= 0)
+ goto found_dirent;
+ }
+
+ if (page != page0) {
+ unlock_page(page);
+ kunmap(page);
+ put_page(page);
+ }
+ }
+
+ /* Didn't find the dirent to clobber. Download the directory again. */
+ trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_noent,
+ 0, 0, 0, 0, name->name);
+ clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ goto out_unmap;
+
+found_dirent:
+ de = &block->dirents[slot];
+
+ trace_afs_edit_dir(vnode, why, afs_edit_dir_delete, b, slot,
+ ntohl(de->u.vnode), ntohl(de->u.unique),
+ name->name);
+
+ memset(de, 0, sizeof(*de) * need_slots);
+
+ /* Adjust the bitmap. */
+ afs_clear_contig_bits(block, slot, need_slots);
+ if (page != page0) {
+ unlock_page(page);
+ kunmap(page);
+ put_page(page);
+ }
+
+ /* Adjust the allocation counter. */
+ if (b < AFS_DIR_BLOCKS_WITH_CTR)
+ meta->meta.alloc_ctrs[b] += need_slots;
+
+ inode_set_iversion_raw(&vnode->vfs_inode, vnode->status.data_version);
+ afs_stat_v(vnode, n_dir_rm);
+ _debug("Remove %s from %u[%u]", name->name, b, slot);
+
+out_unmap:
+ unlock_page(page0);
+ kunmap(page0);
+ put_page(page0);
+ _leave("");
+ return;
+
+invalidated:
+ trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_inval,
+ 0, 0, 0, 0, name->name);
+ clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ if (page != page0) {
+ unlock_page(page);
+ kunmap(page);
+ put_page(page);
+ }
+ goto out_unmap;
+
+error:
+ trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_error,
+ 0, 0, 0, 0, name->name);
+ clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ goto out_unmap;
+}
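
afs_edit_dir_add() and afs_edit_dir_remove() above work on fixed 32-byte slots: a dirent occupies round_up(12 + namelen + 1 + 4, 32) / 32 consecutive slots, and each 2048-byte block tracks its 64 slots in an 8-byte bitmap assembled byte by byte. The standalone sketch below reproduces that arithmetic with a straightforward first-fit scan; the helper names are illustrative and the scan is deliberately simpler than the ffz()-based search in the patch.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SLOT_SIZE       32   /* AFS_DIR_DIRENT_SIZE */
    #define SLOTS_PER_BLOCK 64   /* AFS_DIR_SLOTS_PER_BLOCK */

    /* Number of 32-byte slots a dirent with this name length occupies. */
    static unsigned int slots_for_name(size_t namelen)
    {
            size_t bytes = 12 + namelen + 1 + 4; /* as in the patch's round_up() */

            return (bytes + SLOT_SIZE - 1) / SLOT_SIZE;
    }

    /* Assemble the 64-slot allocation bitmap from its 8 header bytes. */
    static uint64_t bitmap_from_bytes(const uint8_t b[8])
    {
            uint64_t bitmap = 0;
            int i;

            for (i = 0; i < 8; i++)
                    bitmap |= (uint64_t)b[i] << (i * 8);
            return bitmap;
    }

    /* Find nr contiguous clear bits, skipping slot 0 (the block header). */
    static int find_free_slots(uint64_t bitmap, unsigned int nr)
    {
            uint64_t mask = (nr < 64) ? ((1ULL << nr) - 1) : ~0ULL;
            int bit;

            for (bit = 1; bit + nr <= SLOTS_PER_BLOCK; bit++)
                    if (((bitmap >> bit) & mask) == 0)
                            return bit;
            return -1;
    }

    int main(void)
    {
            const uint8_t hdr[8] = { 0xff, 0x1f, 0, 0, 0, 0, 0, 0 };
            uint64_t bitmap = bitmap_from_bytes(hdr);
            unsigned int need = slots_for_name(20);

            printf("20-char name needs %u slots\n", need);
            printf("first fit at slot %d\n", find_free_slots(bitmap, need));
            return 0;
    }
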
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
new file mode 100644
index 000000000000..983f3946ab57
--- /dev/null
+++ b/fs/afs/dynroot.c
@@ -0,0 +1,209 @@
+/* AFS dynamic root handling
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/dns_resolver.h>
+#include "internal.h"
+
+const struct file_operations afs_dynroot_file_operations = {
+ .open = dcache_dir_open,
+ .release = dcache_dir_close,
+ .iterate_shared = dcache_readdir,
+ .llseek = dcache_dir_lseek,
+};
+
+/*
+ * Probe to see if a cell may exist. This prevents positive dentries from
+ * being created unnecessarily.
+ */
+static int afs_probe_cell_name(struct dentry *dentry)
+{
+ struct afs_cell *cell;
+ const char *name = dentry->d_name.name;
+ size_t len = dentry->d_name.len;
+ int ret;
+
+ /* Names prefixed with a dot are R/W mounts. */
+ if (name[0] == '.') {
+ if (len == 1)
+ return -EINVAL;
+ name++;
+ len--;
+ }
+
+ cell = afs_lookup_cell_rcu(afs_d2net(dentry), name, len);
+ if (!IS_ERR(cell)) {
+ afs_put_cell(afs_d2net(dentry), cell);
+ return 0;
+ }
+
+ ret = dns_query("afsdb", name, len, "ipv4", NULL, NULL);
+ if (ret == -ENODATA)
+ ret = -EDESTADDRREQ;
+ return ret;
+}
+
+/*
+ * Try to automount the mountpoint with a pseudo directory if the autocell
+ * flag is set.
+ */
+struct inode *afs_try_auto_mntpt(struct dentry *dentry, struct inode *dir)
+{
+ struct afs_vnode *vnode = AFS_FS_I(dir);
+ struct inode *inode;
+ int ret = -ENOENT;
+
+ _enter("%p{%pd}, {%x:%u}",
+ dentry, dentry, vnode->fid.vid, vnode->fid.vnode);
+
+ if (!test_bit(AFS_VNODE_AUTOCELL, &vnode->flags))
+ goto out;
+
+ ret = afs_probe_cell_name(dentry);
+ if (ret < 0)
+ goto out;
+
+ inode = afs_iget_pseudo_dir(dir->i_sb, false);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ goto out;
+ }
+
+ _leave("= %p", inode);
+ return inode;
+
+out:
+ _leave("= %d", ret);
+ return ERR_PTR(ret);
+}
+
+/*
+ * Look up @cell in a dynroot directory. The name is substituted with the
+ * local cell name of the net namespace.
+ */
+static struct dentry *afs_lookup_atcell(struct dentry *dentry)
+{
+ struct afs_cell *cell;
+ struct afs_net *net = afs_d2net(dentry);
+ struct dentry *ret;
+ unsigned int seq = 0;
+ char *name;
+ int len;
+
+ if (!net->ws_cell)
+ return ERR_PTR(-ENOENT);
+
+ ret = ERR_PTR(-ENOMEM);
+ name = kmalloc(AFS_MAXCELLNAME + 1, GFP_KERNEL);
+ if (!name)
+ goto out_p;
+
+ rcu_read_lock();
+ do {
+ read_seqbegin_or_lock(&net->cells_lock, &seq);
+ cell = rcu_dereference_raw(net->ws_cell);
+ if (cell) {
+ len = cell->name_len;
+ memcpy(name, cell->name, len + 1);
+ }
+ } while (need_seqretry(&net->cells_lock, seq));
+ done_seqretry(&net->cells_lock, seq);
+ rcu_read_unlock();
+
+ ret = ERR_PTR(-ENOENT);
+ if (!cell)
+ goto out_n;
+
+ ret = lookup_one_len(name, dentry->d_parent, len);
+
+ /* We don't want to d_add() the @cell dentry here as we don't want a
+ * cached dentry to hide changes to the local cell name.
+ */
+
+out_n:
+ kfree(name);
+out_p:
+ return ret;
+}
+
+/*
+ * Look up an entry in a dynroot directory.
+ */
+static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+{
+ struct afs_vnode *vnode;
+ struct inode *inode;
+ int ret;
+
+ vnode = AFS_FS_I(dir);
+
+ _enter("%pd", dentry);
+
+ ASSERTCMP(d_inode(dentry), ==, NULL);
+
+ if (dentry->d_name.len >= AFSNAMEMAX) {
+ _leave(" = -ENAMETOOLONG");
+ return ERR_PTR(-ENAMETOOLONG);
+ }
+
+ if (dentry->d_name.len == 5 &&
+ memcmp(dentry->d_name.name, "@cell", 5) == 0)
+ return afs_lookup_atcell(dentry);
+
+ inode = afs_try_auto_mntpt(dentry, dir);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ if (ret == -ENOENT) {
+ d_add(dentry, NULL);
+ _leave(" = NULL [negative]");
+ return NULL;
+ }
+ _leave(" = %d [do]", ret);
+ return ERR_PTR(ret);
+ }
+
+ d_add(dentry, inode);
+ _leave(" = 0 { ino=%lu v=%u }",
+ d_inode(dentry)->i_ino, d_inode(dentry)->i_generation);
+ return NULL;
+}
+
+const struct inode_operations afs_dynroot_inode_operations = {
+ .lookup = afs_dynroot_lookup,
+};
+
+/*
+ * Dirs in the dynamic root don't need revalidation.
+ */
+static int afs_dynroot_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ return 1;
+}
+
+/*
+ * Allow the VFS to enquire as to whether a dentry should be unhashed (mustn't
+ * sleep)
+ * - called from dput() when d_count is going to 0.
+ * - return 1 to request dentry be unhashed, 0 otherwise
+ */
+static int afs_dynroot_d_delete(const struct dentry *dentry)
+{
+ return d_really_is_positive(dentry);
+}
+
+const struct dentry_operations afs_dynroot_dentry_operations = {
+ .d_revalidate = afs_dynroot_d_revalidate,
+ .d_delete = afs_dynroot_d_delete,
+ .d_release = afs_d_release,
+ .d_automount = afs_d_automount,
+};
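
For reference, the naming conventions the dynamic root relies on above: a lookup name with a leading '.' selects a read/write mount of a cell, and the literal name "@cell" resolves to the net namespace's local cell. A tiny, self-contained classifier illustrating those rules (the enum and function are illustrative only, not part of the patch):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    enum dynroot_name { NAME_INVALID, NAME_ATCELL, NAME_CELL_RO, NAME_CELL_RW };

    static enum dynroot_name classify_name(const char *name)
    {
            bool rw = false;

            if (strcmp(name, "@cell") == 0)
                    return NAME_ATCELL;
            if (name[0] == '.') {           /* dotted names are R/W mounts */
                    if (name[1] == '\0')
                            return NAME_INVALID;
                    name++;
                    rw = true;
            }
            if (name[0] == '\0')
                    return NAME_INVALID;
            return rw ? NAME_CELL_RW : NAME_CELL_RO;
    }

    int main(void)
    {
            printf("%d %d %d %d\n",
                   classify_name("@cell"),
                   classify_name("example.org"),
                   classify_name(".example.org"),
                   classify_name("."));
            return 0;
    }
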
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 79e665a35fea..c24c08016dd9 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -30,7 +30,6 @@ static int afs_readpages(struct file *filp, struct address_space *mapping,
const struct file_operations afs_file_operations = {
.open = afs_open,
- .flush = afs_flush,
.release = afs_release,
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
@@ -146,6 +145,9 @@ int afs_open(struct inode *inode, struct file *file)
if (ret < 0)
goto error_af;
}
+
+ if (file->f_flags & O_TRUNC)
+ set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
file->private_data = af;
_leave(" = 0");
@@ -170,6 +172,9 @@ int afs_release(struct inode *inode, struct file *file)
_enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);
+ if ((file->f_mode & FMODE_WRITE))
+ return vfs_fsync(file, 0);
+
file->private_data = NULL;
if (af->wb)
afs_put_wb_key(af->wb);
@@ -187,10 +192,12 @@ void afs_put_read(struct afs_read *req)
{
int i;
- if (atomic_dec_and_test(&req->usage)) {
+ if (refcount_dec_and_test(&req->usage)) {
for (i = 0; i < req->nr_pages; i++)
if (req->pages[i])
put_page(req->pages[i]);
+ if (req->pages != req->array)
+ kfree(req->pages);
kfree(req);
}
}
@@ -240,6 +247,12 @@ int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *de
ret = afs_end_vnode_operation(&fc);
}
+ if (ret == 0) {
+ afs_stat_v(vnode, n_fetches);
+ atomic_long_add(desc->actual_len,
+ &afs_v2net(vnode)->n_fetch_bytes);
+ }
+
_leave(" = %d", ret);
return ret;
}
@@ -297,10 +310,11 @@ int afs_page_filler(void *data, struct page *page)
* end of the file, the server will return a short read and the
* unmarshalling code will clear the unfilled space.
*/
- atomic_set(&req->usage, 1);
+ refcount_set(&req->usage, 1);
req->pos = (loff_t)page->index << PAGE_SHIFT;
req->len = PAGE_SIZE;
req->nr_pages = 1;
+ req->pages = req->array;
req->pages[0] = page;
get_page(page);
@@ -309,10 +323,6 @@ int afs_page_filler(void *data, struct page *page)
ret = afs_fetch_data(vnode, key, req);
afs_put_read(req);
- if (ret >= 0 && S_ISDIR(inode->i_mode) &&
- !afs_dir_check_page(inode, page))
- ret = -EIO;
-
if (ret < 0) {
if (ret == -ENOENT) {
_debug("got NOENT from server"
@@ -447,10 +457,11 @@ static int afs_readpages_one(struct file *file, struct address_space *mapping,
if (!req)
return -ENOMEM;
- atomic_set(&req->usage, 1);
+ refcount_set(&req->usage, 1);
req->page_done = afs_readpages_page_done;
req->pos = first->index;
req->pos <<= PAGE_SHIFT;
+ req->pages = req->array;
/* Transfer the pages to the request. We add them in until one fails
* to add to the LRU and then we stop (as that'll make a hole in the
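
The file.c changes above switch struct afs_read's usage count to a refcount_t and make req->pages point either at the request's inline array or at a separately allocated table, so the release path frees the table only when it was allocated separately. A simplified userspace analogue of that layout and its release rule (field names are simplified, not the kernel's):

    #include <stdio.h>
    #include <stdlib.h>

    struct read_req {
            int          refs;     /* stands in for the refcount_t usage */
            unsigned int nr_pages;
            void         **pages;  /* points at array[] or a separate table */
            void         *array[]; /* small inline table */
    };

    static struct read_req *alloc_req(unsigned int nr_inline)
    {
            struct read_req *req;

            req = calloc(1, sizeof(*req) + nr_inline * sizeof(void *));
            if (!req)
                    return NULL;
            req->refs = 1;
            req->pages = req->array; /* default to the inline table */
            return req;
    }

    static void put_req(struct read_req *req)
    {
            if (--req->refs == 0) {
                    if (req->pages != req->array) /* only free a separate table */
                            free(req->pages);
                    free(req);
            }
    }

    int main(void)
    {
            struct read_req *req = alloc_req(1);

            if (!req)
                    return 1;
            req->nr_pages = 1;
            printf("inline table in use: %s\n",
                   req->pages == req->array ? "yes" : "no");
            put_req(req);
            return 0;
    }
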
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index c40ba2fe3cbe..7a0e017070ec 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -613,7 +613,7 @@ static int afs_do_getlk(struct file *file, struct file_lock *fl)
posix_test_lock(file, fl);
if (fl->fl_type == F_UNLCK) {
/* no local locks; consult the server */
- ret = afs_fetch_status(vnode, key);
+ ret = afs_fetch_status(vnode, key, false);
if (ret < 0)
goto error;
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 88ec38c2d83c..efacdb7c1dee 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -16,6 +16,7 @@
#include <linux/iversion.h>
#include "internal.h"
#include "afs_fs.h"
+#include "xdr_fs.h"
static const struct afs_fid afs_zero_fid;
@@ -44,109 +45,194 @@ static void xdr_decode_AFSFid(const __be32 **_bp, struct afs_fid *fid)
}
/*
- * decode an AFSFetchStatus block
+ * Dump a bad file status record.
*/
-static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
- struct afs_file_status *status,
- struct afs_vnode *vnode,
- afs_dataversion_t *store_version)
+static void xdr_dump_bad(const __be32 *bp)
{
- afs_dataversion_t expected_version;
- const __be32 *bp = *_bp;
+ __be32 x[4];
+ int i;
+
+ pr_notice("AFS XDR: Bad status record\n");
+ for (i = 0; i < 5 * 4 * 4; i += 16) {
+ memcpy(x, bp, 16);
+ bp += 4;
+ pr_notice("%03x: %08x %08x %08x %08x\n",
+ i, ntohl(x[0]), ntohl(x[1]), ntohl(x[2]), ntohl(x[3]));
+ }
+
+ memcpy(x, bp, 4);
+ pr_notice("0x50: %08x\n", ntohl(x[0]));
+}
+
+/*
+ * Update the core inode struct from a returned status record.
+ */
+void afs_update_inode_from_status(struct afs_vnode *vnode,
+ struct afs_file_status *status,
+ const afs_dataversion_t *expected_version,
+ u8 flags)
+{
+ struct timespec t;
umode_t mode;
+
+ t.tv_sec = status->mtime_client;
+ t.tv_nsec = 0;
+ vnode->vfs_inode.i_ctime = t;
+ vnode->vfs_inode.i_mtime = t;
+ vnode->vfs_inode.i_atime = t;
+
+ if (flags & (AFS_VNODE_META_CHANGED | AFS_VNODE_NOT_YET_SET)) {
+ vnode->vfs_inode.i_uid = make_kuid(&init_user_ns, status->owner);
+ vnode->vfs_inode.i_gid = make_kgid(&init_user_ns, status->group);
+ set_nlink(&vnode->vfs_inode, status->nlink);
+
+ mode = vnode->vfs_inode.i_mode;
+ mode &= ~S_IALLUGO;
+ mode |= status->mode;
+ barrier();
+ vnode->vfs_inode.i_mode = mode;
+ }
+
+ if (!(flags & AFS_VNODE_NOT_YET_SET)) {
+ if (expected_version &&
+ *expected_version != status->data_version) {
+ _debug("vnode modified %llx on {%x:%u} [exp %llx]",
+ (unsigned long long) status->data_version,
+ vnode->fid.vid, vnode->fid.vnode,
+ (unsigned long long) *expected_version);
+ vnode->invalid_before = status->data_version;
+ if (vnode->status.type == AFS_FTYPE_DIR) {
+ if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
+ afs_stat_v(vnode, n_inval);
+ } else {
+ set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
+ }
+ } else if (vnode->status.type == AFS_FTYPE_DIR) {
+ /* Expected directory change is handled elsewhere so
+ * that we can locally edit the directory and save on a
+ * download.
+ */
+ if (test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
+ flags &= ~AFS_VNODE_DATA_CHANGED;
+ }
+ }
+
+ if (flags & (AFS_VNODE_DATA_CHANGED | AFS_VNODE_NOT_YET_SET)) {
+ inode_set_iversion_raw(&vnode->vfs_inode, status->data_version);
+ i_size_write(&vnode->vfs_inode, status->size);
+ }
+}
+
+/*
+ * decode an AFSFetchStatus block
+ */
+static int xdr_decode_AFSFetchStatus(struct afs_call *call,
+ const __be32 **_bp,
+ struct afs_file_status *status,
+ struct afs_vnode *vnode,
+ const afs_dataversion_t *expected_version,
+ struct afs_read *read_req)
+{
+ const struct afs_xdr_AFSFetchStatus *xdr = (const void *)*_bp;
u64 data_version, size;
- bool changed = false;
- kuid_t owner;
- kgid_t group;
+ u32 type, abort_code;
+ u8 flags = 0;
+ int ret;
if (vnode)
write_seqlock(&vnode->cb_lock);
-#define EXTRACT(DST) \
- do { \
- u32 x = ntohl(*bp++); \
- if (DST != x) \
- changed |= true; \
- DST = x; \
- } while (0)
-
- status->if_version = ntohl(*bp++);
- EXTRACT(status->type);
- EXTRACT(status->nlink);
- size = ntohl(*bp++);
- data_version = ntohl(*bp++);
- EXTRACT(status->author);
- owner = make_kuid(&init_user_ns, ntohl(*bp++));
- changed |= !uid_eq(owner, status->owner);
- status->owner = owner;
- EXTRACT(status->caller_access); /* call ticket dependent */
- EXTRACT(status->anon_access);
- EXTRACT(status->mode);
- bp++; /* parent.vnode */
- bp++; /* parent.unique */
- bp++; /* seg size */
- status->mtime_client = ntohl(*bp++);
- status->mtime_server = ntohl(*bp++);
- group = make_kgid(&init_user_ns, ntohl(*bp++));
- changed |= !gid_eq(group, status->group);
- status->group = group;
- bp++; /* sync counter */
- data_version |= (u64) ntohl(*bp++) << 32;
- EXTRACT(status->lock_count);
- size |= (u64) ntohl(*bp++) << 32;
- bp++; /* spare 4 */
- *_bp = bp;
+ if (xdr->if_version != htonl(AFS_FSTATUS_VERSION)) {
+ pr_warn("Unknown AFSFetchStatus version %u\n", ntohl(xdr->if_version));
+ goto bad;
+ }
- if (size != status->size) {
- status->size = size;
- changed |= true;
+ type = ntohl(xdr->type);
+ abort_code = ntohl(xdr->abort_code);
+ switch (type) {
+ case AFS_FTYPE_FILE:
+ case AFS_FTYPE_DIR:
+ case AFS_FTYPE_SYMLINK:
+ if (type != status->type &&
+ vnode &&
+ !test_bit(AFS_VNODE_UNSET, &vnode->flags)) {
+ pr_warning("Vnode %x:%x:%x changed type %u to %u\n",
+ vnode->fid.vid,
+ vnode->fid.vnode,
+ vnode->fid.unique,
+ status->type, type);
+ goto bad;
+ }
+ status->type = type;
+ break;
+ case AFS_FTYPE_INVALID:
+ if (abort_code != 0) {
+ status->abort_code = abort_code;
+ ret = 0;
+ goto out;
+ }
+ /* Fall through */
+ default:
+ goto bad;
}
- status->mode &= S_IALLUGO;
- _debug("vnode time %lx, %lx",
- status->mtime_client, status->mtime_server);
+#define EXTRACT_M(FIELD) \
+ do { \
+ u32 x = ntohl(xdr->FIELD); \
+ if (status->FIELD != x) { \
+ flags |= AFS_VNODE_META_CHANGED; \
+ status->FIELD = x; \
+ } \
+ } while (0)
- if (vnode) {
- if (changed && !test_bit(AFS_VNODE_UNSET, &vnode->flags)) {
- _debug("vnode changed");
- i_size_write(&vnode->vfs_inode, size);
- vnode->vfs_inode.i_uid = status->owner;
- vnode->vfs_inode.i_gid = status->group;
- vnode->vfs_inode.i_generation = vnode->fid.unique;
- set_nlink(&vnode->vfs_inode, status->nlink);
-
- mode = vnode->vfs_inode.i_mode;
- mode &= ~S_IALLUGO;
- mode |= status->mode;
- barrier();
- vnode->vfs_inode.i_mode = mode;
- }
+ EXTRACT_M(nlink);
+ EXTRACT_M(author);
+ EXTRACT_M(owner);
+ EXTRACT_M(caller_access); /* call ticket dependent */
+ EXTRACT_M(anon_access);
+ EXTRACT_M(mode);
+ EXTRACT_M(group);
+
+ status->mtime_client = ntohl(xdr->mtime_client);
+ status->mtime_server = ntohl(xdr->mtime_server);
+ status->lock_count = ntohl(xdr->lock_count);
+
+ size = (u64)ntohl(xdr->size_lo);
+ size |= (u64)ntohl(xdr->size_hi) << 32;
+ status->size = size;
+
+ data_version = (u64)ntohl(xdr->data_version_lo);
+ data_version |= (u64)ntohl(xdr->data_version_hi) << 32;
+ if (data_version != status->data_version) {
+ status->data_version = data_version;
+ flags |= AFS_VNODE_DATA_CHANGED;
+ }
- vnode->vfs_inode.i_ctime.tv_sec = status->mtime_client;
- vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime;
- vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime;
- inode_set_iversion_raw(&vnode->vfs_inode, data_version);
+ if (read_req) {
+ read_req->data_version = data_version;
+ read_req->file_size = size;
}
- expected_version = status->data_version;
- if (store_version)
- expected_version = *store_version;
+ *_bp = (const void *)*_bp + sizeof(*xdr);
- if (expected_version != data_version) {
- status->data_version = data_version;
- if (vnode && !test_bit(AFS_VNODE_UNSET, &vnode->flags)) {
- _debug("vnode modified %llx on {%x:%u}",
- (unsigned long long) data_version,
- vnode->fid.vid, vnode->fid.vnode);
- set_bit(AFS_VNODE_DIR_MODIFIED, &vnode->flags);
- set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
- }
- } else if (store_version) {
- status->data_version = data_version;
+ if (vnode) {
+ if (test_bit(AFS_VNODE_UNSET, &vnode->flags))
+ flags |= AFS_VNODE_NOT_YET_SET;
+ afs_update_inode_from_status(vnode, status, expected_version,
+ flags);
}
+ ret = 0;
+
+out:
if (vnode)
write_sequnlock(&vnode->cb_lock);
+ return ret;
+
+bad:
+ xdr_dump_bad(*_bp);
+ ret = afs_protocol_error(call, -EBADMSG);
+ goto out;
}
/*
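
The rewritten status decoder above pulls 64-bit file sizes and data versions off the wire as separate low and high 32-bit words in network byte order. A self-contained sketch of that reassembly, with illustrative field names rather than the kernel's struct afs_xdr_AFSFetchStatus:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    struct xdr_status_words {
            uint32_t size_lo, size_hi;                 /* big-endian on the wire */
            uint32_t data_version_lo, data_version_hi;
    };

    static uint64_t xdr_u64(uint32_t lo_be, uint32_t hi_be)
    {
            return (uint64_t)ntohl(lo_be) | ((uint64_t)ntohl(hi_be) << 32);
    }

    int main(void)
    {
            struct xdr_status_words w = {
                    .size_lo = htonl(0x00000200), .size_hi = htonl(0x1),
                    .data_version_lo = htonl(7),  .data_version_hi = htonl(0),
            };

            printf("size=%llu data_version=%llu\n",
                   (unsigned long long)xdr_u64(w.size_lo, w.size_hi),
                   (unsigned long long)xdr_u64(w.data_version_lo,
                                               w.data_version_hi));
            return 0;
    }
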
@@ -274,7 +360,7 @@ static void xdr_decode_AFSFetchVolumeStatus(const __be32 **_bp,
/*
* deliver reply data to an FS.FetchStatus
*/
-static int afs_deliver_fs_fetch_status(struct afs_call *call)
+static int afs_deliver_fs_fetch_status_vnode(struct afs_call *call)
{
struct afs_vnode *vnode = call->reply[0];
const __be32 *bp;
@@ -288,7 +374,9 @@ static int afs_deliver_fs_fetch_status(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL);
+ if (xdr_decode_AFSFetchStatus(call, &bp, &vnode->status, vnode,
+ &call->expected_version, NULL) < 0)
+ return afs_protocol_error(call, -EBADMSG);
xdr_decode_AFSCallBack(call, vnode, &bp);
if (call->reply[1])
xdr_decode_AFSVolSync(&bp, call->reply[1]);
@@ -300,17 +388,18 @@ static int afs_deliver_fs_fetch_status(struct afs_call *call)
/*
* FS.FetchStatus operation type
*/
-static const struct afs_call_type afs_RXFSFetchStatus = {
- .name = "FS.FetchStatus",
+static const struct afs_call_type afs_RXFSFetchStatus_vnode = {
+ .name = "FS.FetchStatus(vnode)",
.op = afs_FS_FetchStatus,
- .deliver = afs_deliver_fs_fetch_status,
+ .deliver = afs_deliver_fs_fetch_status_vnode,
.destructor = afs_flat_call_destructor,
};
/*
* fetch the status information for a file
*/
-int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsync)
+int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsync,
+ bool new_inode)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -320,7 +409,8 @@ int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsy
_enter(",%x,{%x:%u},,",
key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
- call = afs_alloc_flat_call(net, &afs_RXFSFetchStatus, 16, (21 + 3 + 6) * 4);
+ call = afs_alloc_flat_call(net, &afs_RXFSFetchStatus_vnode,
+ 16, (21 + 3 + 6) * 4);
if (!call) {
fc->ac.error = -ENOMEM;
return -ENOMEM;
@@ -329,6 +419,7 @@ int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsy
call->key = fc->key;
call->reply[0] = vnode;
call->reply[1] = volsync;
+ call->expected_version = new_inode ? 1 : vnode->status.data_version;
/* marshall the parameters */
bp = call->request;
@@ -464,7 +555,9 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
return ret;
bp = call->buffer;
- xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL);
+ if (xdr_decode_AFSFetchStatus(call, &bp, &vnode->status, vnode,
+ &vnode->status.data_version, req) < 0)
+ return afs_protocol_error(call, -EBADMSG);
xdr_decode_AFSCallBack(call, vnode, &bp);
if (call->reply[1])
xdr_decode_AFSVolSync(&bp, call->reply[1]);
@@ -534,6 +627,7 @@ static int afs_fs_fetch_data64(struct afs_fs_cursor *fc, struct afs_read *req)
call->reply[0] = vnode;
call->reply[1] = NULL; /* volsync */
call->reply[2] = req;
+ call->expected_version = vnode->status.data_version;
/* marshall the parameters */
bp = call->request;
@@ -546,7 +640,7 @@ static int afs_fs_fetch_data64(struct afs_fs_cursor *fc, struct afs_read *req)
bp[6] = 0;
bp[7] = htonl(lower_32_bits(req->len));
- atomic_inc(&req->usage);
+ refcount_inc(&req->usage);
call->cb_break = fc->cb_break;
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
@@ -578,6 +672,7 @@ int afs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_read *req)
call->reply[0] = vnode;
call->reply[1] = NULL; /* volsync */
call->reply[2] = req;
+ call->expected_version = vnode->status.data_version;
/* marshall the parameters */
bp = call->request;
@@ -588,7 +683,7 @@ int afs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_read *req)
bp[4] = htonl(lower_32_bits(req->pos));
bp[5] = htonl(lower_32_bits(req->len));
- atomic_inc(&req->usage);
+ refcount_inc(&req->usage);
call->cb_break = fc->cb_break;
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
@@ -613,8 +708,10 @@ static int afs_deliver_fs_create_vnode(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
xdr_decode_AFSFid(&bp, call->reply[1]);
- xdr_decode_AFSFetchStatus(&bp, call->reply[2], NULL, NULL);
- xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL);
+ if (xdr_decode_AFSFetchStatus(call, &bp, call->reply[2], NULL, NULL, NULL) < 0 ||
+ xdr_decode_AFSFetchStatus(call, &bp, &vnode->status, vnode,
+ &call->expected_version, NULL) < 0)
+ return afs_protocol_error(call, -EBADMSG);
xdr_decode_AFSCallBack_raw(&bp, call->reply[3]);
/* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
@@ -645,6 +742,7 @@ static const struct afs_call_type afs_RXFSMakeDir = {
int afs_fs_create(struct afs_fs_cursor *fc,
const char *name,
umode_t mode,
+ u64 current_data_version,
struct afs_fid *newfid,
struct afs_file_status *newstatus,
struct afs_callback *newcb)
@@ -672,6 +770,7 @@ int afs_fs_create(struct afs_fs_cursor *fc,
call->reply[1] = newfid;
call->reply[2] = newstatus;
call->reply[3] = newcb;
+ call->expected_version = current_data_version + 1;
/* marshall the parameters */
bp = call->request;
@@ -715,7 +814,9 @@ static int afs_deliver_fs_remove(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL);
+ if (xdr_decode_AFSFetchStatus(call, &bp, &vnode->status, vnode,
+ &call->expected_version, NULL) < 0)
+ return afs_protocol_error(call, -EBADMSG);
/* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
_leave(" = 0 [done]");
@@ -742,7 +843,8 @@ static const struct afs_call_type afs_RXFSRemoveDir = {
/*
* remove a file or directory
*/
-int afs_fs_remove(struct afs_fs_cursor *fc, const char *name, bool isdir)
+int afs_fs_remove(struct afs_fs_cursor *fc, const char *name, bool isdir,
+ u64 current_data_version)
{
struct afs_vnode *vnode = fc->vnode;
struct afs_call *call;
@@ -764,6 +866,7 @@ int afs_fs_remove(struct afs_fs_cursor *fc, const char *name, bool isdir)
call->key = fc->key;
call->reply[0] = vnode;
+ call->expected_version = current_data_version + 1;
/* marshall the parameters */
bp = call->request;
@@ -801,8 +904,10 @@ static int afs_deliver_fs_link(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL);
- xdr_decode_AFSFetchStatus(&bp, &dvnode->status, dvnode, NULL);
+ if (xdr_decode_AFSFetchStatus(call, &bp, &vnode->status, vnode, NULL, NULL) < 0 ||
+ xdr_decode_AFSFetchStatus(call, &bp, &dvnode->status, dvnode,
+ &call->expected_version, NULL) < 0)
+ return afs_protocol_error(call, -EBADMSG);
/* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
_leave(" = 0 [done]");
@@ -823,7 +928,7 @@ static const struct afs_call_type afs_RXFSLink = {
* make a hard link
*/
int afs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
- const char *name)
+ const char *name, u64 current_data_version)
{
struct afs_vnode *dvnode = fc->vnode;
struct afs_call *call;
@@ -844,6 +949,7 @@ int afs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
call->key = fc->key;
call->reply[0] = dvnode;
call->reply[1] = vnode;
+ call->expected_version = current_data_version + 1;
/* marshall the parameters */
bp = call->request;
@@ -885,8 +991,10 @@ static int afs_deliver_fs_symlink(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
xdr_decode_AFSFid(&bp, call->reply[1]);
- xdr_decode_AFSFetchStatus(&bp, call->reply[2], NULL, NULL);
- xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL);
+ if (xdr_decode_AFSFetchStatus(call, &bp, call->reply[2], NULL, NULL, NULL) ||
+ xdr_decode_AFSFetchStatus(call, &bp, &vnode->status, vnode,
+ &call->expected_version, NULL) < 0)
+ return afs_protocol_error(call, -EBADMSG);
/* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
_leave(" = 0 [done]");
@@ -909,6 +1017,7 @@ static const struct afs_call_type afs_RXFSSymlink = {
int afs_fs_symlink(struct afs_fs_cursor *fc,
const char *name,
const char *contents,
+ u64 current_data_version,
struct afs_fid *newfid,
struct afs_file_status *newstatus)
{
@@ -937,6 +1046,7 @@ int afs_fs_symlink(struct afs_fs_cursor *fc,
call->reply[0] = vnode;
call->reply[1] = newfid;
call->reply[2] = newstatus;
+ call->expected_version = current_data_version + 1;
/* marshall the parameters */
bp = call->request;
@@ -987,10 +1097,13 @@ static int afs_deliver_fs_rename(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- xdr_decode_AFSFetchStatus(&bp, &orig_dvnode->status, orig_dvnode, NULL);
- if (new_dvnode != orig_dvnode)
- xdr_decode_AFSFetchStatus(&bp, &new_dvnode->status, new_dvnode,
- NULL);
+ if (xdr_decode_AFSFetchStatus(call, &bp, &orig_dvnode->status, orig_dvnode,
+ &call->expected_version, NULL) < 0)
+ return afs_protocol_error(call, -EBADMSG);
+ if (new_dvnode != orig_dvnode &&
+ xdr_decode_AFSFetchStatus(call, &bp, &new_dvnode->status, new_dvnode,
+ &call->expected_version_2, NULL) < 0)
+ return afs_protocol_error(call, -EBADMSG);
/* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
_leave(" = 0 [done]");
@@ -1013,7 +1126,9 @@ static const struct afs_call_type afs_RXFSRename = {
int afs_fs_rename(struct afs_fs_cursor *fc,
const char *orig_name,
struct afs_vnode *new_dvnode,
- const char *new_name)
+ const char *new_name,
+ u64 current_orig_data_version,
+ u64 current_new_data_version)
{
struct afs_vnode *orig_dvnode = fc->vnode;
struct afs_call *call;
@@ -1041,6 +1156,8 @@ int afs_fs_rename(struct afs_fs_cursor *fc,
call->key = fc->key;
call->reply[0] = orig_dvnode;
call->reply[1] = new_dvnode;
+ call->expected_version = current_orig_data_version + 1;
+ call->expected_version_2 = current_new_data_version + 1;
/* marshall the parameters */
bp = call->request;
@@ -1089,8 +1206,9 @@ static int afs_deliver_fs_store_data(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode,
- &call->store_version);
+ if (xdr_decode_AFSFetchStatus(call, &bp, &vnode->status, vnode,
+ &call->expected_version, NULL) < 0)
+ return afs_protocol_error(call, -EBADMSG);
/* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
afs_pages_written_back(vnode, call);
@@ -1147,7 +1265,7 @@ static int afs_fs_store_data64(struct afs_fs_cursor *fc,
call->first_offset = offset;
call->last_to = to;
call->send_pages = true;
- call->store_version = vnode->status.data_version + 1;
+ call->expected_version = vnode->status.data_version + 1;
/* marshall the parameters */
bp = call->request;
@@ -1222,7 +1340,7 @@ int afs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
call->first_offset = offset;
call->last_to = to;
call->send_pages = true;
- call->store_version = vnode->status.data_version + 1;
+ call->expected_version = vnode->status.data_version + 1;
/* marshall the parameters */
bp = call->request;
@@ -1252,7 +1370,6 @@ int afs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
*/
static int afs_deliver_fs_store_status(struct afs_call *call)
{
- afs_dataversion_t *store_version;
struct afs_vnode *vnode = call->reply[0];
const __be32 *bp;
int ret;
@@ -1264,12 +1381,10 @@ static int afs_deliver_fs_store_status(struct afs_call *call)
return ret;
/* unmarshall the reply once we've received all of it */
- store_version = NULL;
- if (call->operation_ID == FSSTOREDATA)
- store_version = &call->store_version;
-
bp = call->buffer;
- xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, store_version);
+ if (xdr_decode_AFSFetchStatus(call, &bp, &vnode->status, vnode,
+ &call->expected_version, NULL) < 0)
+ return afs_protocol_error(call, -EBADMSG);
/* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
_leave(" = 0 [done]");
@@ -1324,7 +1439,7 @@ static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr)
call->key = fc->key;
call->reply[0] = vnode;
- call->store_version = vnode->status.data_version + 1;
+ call->expected_version = vnode->status.data_version + 1;
/* marshall the parameters */
bp = call->request;
@@ -1373,7 +1488,7 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
call->key = fc->key;
call->reply[0] = vnode;
- call->store_version = vnode->status.data_version + 1;
+ call->expected_version = vnode->status.data_version + 1;
/* marshall the parameters */
bp = call->request;
@@ -1418,6 +1533,7 @@ int afs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr)
call->key = fc->key;
call->reply[0] = vnode;
+ call->expected_version = vnode->status.data_version;
/* marshall the parameters */
bp = call->request;
@@ -1471,7 +1587,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
call->count = ntohl(call->tmp);
_debug("volname length: %u", call->count);
if (call->count >= AFSNAMEMAX)
- return -EBADMSG;
+ return afs_protocol_error(call, -EBADMSG);
call->offset = 0;
call->unmarshall++;
@@ -1518,7 +1634,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
call->count = ntohl(call->tmp);
_debug("offline msg length: %u", call->count);
if (call->count >= AFSNAMEMAX)
- return -EBADMSG;
+ return afs_protocol_error(call, -EBADMSG);
call->offset = 0;
call->unmarshall++;
@@ -1565,7 +1681,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
call->count = ntohl(call->tmp);
_debug("motd length: %u", call->count);
if (call->count >= AFSNAMEMAX)
- return -EBADMSG;
+ return afs_protocol_error(call, -EBADMSG);
call->offset = 0;
call->unmarshall++;
@@ -1947,3 +2063,265 @@ int afs_fs_get_capabilities(struct afs_net *net,
trace_afs_make_fs_call(call, NULL);
return afs_make_call(ac, call, GFP_NOFS, false);
}
+
+/*
+ * Deliver reply data to an FS.FetchStatus with no vnode.
+ */
+static int afs_deliver_fs_fetch_status(struct afs_call *call)
+{
+ struct afs_file_status *status = call->reply[1];
+ struct afs_callback *callback = call->reply[2];
+ struct afs_volsync *volsync = call->reply[3];
+ struct afs_vnode *vnode = call->reply[0];
+ const __be32 *bp;
+ int ret;
+
+ ret = afs_transfer_reply(call);
+ if (ret < 0)
+ return ret;
+
+ _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);
+
+ /* unmarshall the reply once we've received all of it */
+ bp = call->buffer;
+ xdr_decode_AFSFetchStatus(call, &bp, status, vnode,
+ &call->expected_version, NULL);
+ callback[call->count].version = ntohl(bp[0]);
+ callback[call->count].expiry = ntohl(bp[1]);
+ callback[call->count].type = ntohl(bp[2]);
+ if (vnode)
+ xdr_decode_AFSCallBack(call, vnode, &bp);
+ else
+ bp += 3;
+ if (volsync)
+ xdr_decode_AFSVolSync(&bp, volsync);
+
+ _leave(" = 0 [done]");
+ return 0;
+}
+
+/*
+ * FS.FetchStatus operation type
+ */
+static const struct afs_call_type afs_RXFSFetchStatus = {
+ .name = "FS.FetchStatus",
+ .op = afs_FS_FetchStatus,
+ .deliver = afs_deliver_fs_fetch_status,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * Fetch the status information for a fid without needing a vnode handle.
+ */
+int afs_fs_fetch_status(struct afs_fs_cursor *fc,
+ struct afs_net *net,
+ struct afs_fid *fid,
+ struct afs_file_status *status,
+ struct afs_callback *callback,
+ struct afs_volsync *volsync)
+{
+ struct afs_call *call;
+ __be32 *bp;
+
+ _enter(",%x,{%x:%u},,",
+ key_serial(fc->key), fid->vid, fid->vnode);
+
+ call = afs_alloc_flat_call(net, &afs_RXFSFetchStatus, 16, (21 + 3 + 6) * 4);
+ if (!call) {
+ fc->ac.error = -ENOMEM;
+ return -ENOMEM;
+ }
+
+ call->key = fc->key;
+ call->reply[0] = NULL; /* vnode for fid[0] */
+ call->reply[1] = status;
+ call->reply[2] = callback;
+ call->reply[3] = volsync;
+ call->expected_version = 1; /* vnode->status.data_version */
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp[0] = htonl(FSFETCHSTATUS);
+ bp[1] = htonl(fid->vid);
+ bp[2] = htonl(fid->vnode);
+ bp[3] = htonl(fid->unique);
+
+ call->cb_break = fc->cb_break;
+ afs_use_fs_server(call, fc->cbi);
+ trace_afs_make_fs_call(call, fid);
+ return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to an FS.InlineBulkStatus call
+ */
+static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
+{
+ struct afs_file_status *statuses;
+ struct afs_callback *callbacks;
+ struct afs_vnode *vnode = call->reply[0];
+ const __be32 *bp;
+ u32 tmp;
+ int ret;
+
+ _enter("{%u}", call->unmarshall);
+
+ switch (call->unmarshall) {
+ case 0:
+ call->offset = 0;
+ call->unmarshall++;
+
+ /* Extract the file status count and array in two steps */
+ case 1:
+ _debug("extract status count");
+ ret = afs_extract_data(call, &call->tmp, 4, true);
+ if (ret < 0)
+ return ret;
+
+ tmp = ntohl(call->tmp);
+ _debug("status count: %u/%u", tmp, call->count2);
+ if (tmp != call->count2)
+ return afs_protocol_error(call, -EBADMSG);
+
+ call->count = 0;
+ call->unmarshall++;
+ more_counts:
+ call->offset = 0;
+
+ case 2:
+ _debug("extract status array %u", call->count);
+ ret = afs_extract_data(call, call->buffer, 21 * 4, true);
+ if (ret < 0)
+ return ret;
+
+ bp = call->buffer;
+ statuses = call->reply[1];
+ if (xdr_decode_AFSFetchStatus(call, &bp, &statuses[call->count],
+ call->count == 0 ? vnode : NULL,
+ NULL, NULL) < 0)
+ return afs_protocol_error(call, -EBADMSG);
+
+ call->count++;
+ if (call->count < call->count2)
+ goto more_counts;
+
+ call->count = 0;
+ call->unmarshall++;
+ call->offset = 0;
+
+ /* Extract the callback count and array in two steps */
+ case 3:
+ _debug("extract CB count");
+ ret = afs_extract_data(call, &call->tmp, 4, true);
+ if (ret < 0)
+ return ret;
+
+ tmp = ntohl(call->tmp);
+ _debug("CB count: %u", tmp);
+ if (tmp != call->count2)
+ return afs_protocol_error(call, -EBADMSG);
+ call->count = 0;
+ call->unmarshall++;
+ more_cbs:
+ call->offset = 0;
+
+ case 4:
+ _debug("extract CB array");
+ ret = afs_extract_data(call, call->buffer, 3 * 4, true);
+ if (ret < 0)
+ return ret;
+
+ _debug("unmarshall CB array");
+ bp = call->buffer;
+ callbacks = call->reply[2];
+ callbacks[call->count].version = ntohl(bp[0]);
+ callbacks[call->count].expiry = ntohl(bp[1]);
+ callbacks[call->count].type = ntohl(bp[2]);
+ statuses = call->reply[1];
+ if (call->count == 0 && vnode && statuses[0].abort_code == 0)
+ xdr_decode_AFSCallBack(call, vnode, &bp);
+ call->count++;
+ if (call->count < call->count2)
+ goto more_cbs;
+
+ call->offset = 0;
+ call->unmarshall++;
+
+ case 5:
+ ret = afs_extract_data(call, call->buffer, 6 * 4, false);
+ if (ret < 0)
+ return ret;
+
+ bp = call->buffer;
+ if (call->reply[3])
+ xdr_decode_AFSVolSync(&bp, call->reply[3]);
+
+ call->offset = 0;
+ call->unmarshall++;
+
+ case 6:
+ break;
+ }
+
+ _leave(" = 0 [done]");
+ return 0;
+}
+
+/*
+ * FS.InlineBulkStatus operation type
+ */
+static const struct afs_call_type afs_RXFSInlineBulkStatus = {
+ .name = "FS.InlineBulkStatus",
+ .op = afs_FS_InlineBulkStatus,
+ .deliver = afs_deliver_fs_inline_bulk_status,
+ .destructor = afs_flat_call_destructor,
+};
+
+/*
+ * Fetch the status information for up to 50 files
+ */
+int afs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
+ struct afs_net *net,
+ struct afs_fid *fids,
+ struct afs_file_status *statuses,
+ struct afs_callback *callbacks,
+ unsigned int nr_fids,
+ struct afs_volsync *volsync)
+{
+ struct afs_call *call;
+ __be32 *bp;
+ int i;
+
+ _enter(",%x,{%x:%u},%u",
+ key_serial(fc->key), fids[0].vid, fids[1].vnode, nr_fids);
+
+ call = afs_alloc_flat_call(net, &afs_RXFSInlineBulkStatus,
+ (2 + nr_fids * 3) * 4,
+ 21 * 4);
+ if (!call) {
+ fc->ac.error = -ENOMEM;
+ return -ENOMEM;
+ }
+
+ call->key = fc->key;
+ call->reply[0] = NULL; /* vnode for fid[0] */
+ call->reply[1] = statuses;
+ call->reply[2] = callbacks;
+ call->reply[3] = volsync;
+ call->count2 = nr_fids;
+
+ /* marshall the parameters */
+ bp = call->request;
+ *bp++ = htonl(FSINLINEBULKSTATUS);
+ *bp++ = htonl(nr_fids);
+ for (i = 0; i < nr_fids; i++) {
+ *bp++ = htonl(fids[i].vid);
+ *bp++ = htonl(fids[i].vnode);
+ *bp++ = htonl(fids[i].unique);
+ }
+
+ call->cb_break = fc->cb_break;
+ afs_use_fs_server(call, fc->cbi);
+ trace_afs_make_fs_call(call, &fids[0]);
+ return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
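
The request marshalled by afs_fs_inline_bulk_status() above is two 32-bit words (operation code and fid count) followed by three words per fid, all in network byte order, which is where the (2 + nr_fids * 3) * 4 buffer size comes from. A standalone sketch of that layout; the opcode value and struct here are placeholders standing in for the kernel definitions:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define FSINLINEBULKSTATUS 65536 /* example opcode value for the sketch */

    struct fid { uint32_t vid, vnode, unique; };

    static uint32_t *marshal_bulk_status(const struct fid *fids,
                                         unsigned int nr, size_t *len)
    {
            uint32_t *buf, *bp;
            unsigned int i;

            *len = (2 + nr * 3) * sizeof(uint32_t);
            buf = bp = malloc(*len);
            if (!buf)
                    return NULL;

            *bp++ = htonl(FSINLINEBULKSTATUS);
            *bp++ = htonl(nr);
            for (i = 0; i < nr; i++) {
                    *bp++ = htonl(fids[i].vid);
                    *bp++ = htonl(fids[i].vnode);
                    *bp++ = htonl(fids[i].unique);
            }
            return buf;
    }

    int main(void)
    {
            struct fid fids[2] = { { 1, 2, 3 }, { 1, 4, 5 } };
            size_t len;
            uint32_t *req = marshal_bulk_status(fids, 2, &len);

            if (!req)
                    return 1;
            printf("request is %zu bytes\n", len);
            free(req);
            return 0;
    }
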
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 65c5b1edd338..06194cfe9724 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -30,12 +30,11 @@ static const struct inode_operations afs_symlink_inode_operations = {
};
/*
- * map the AFS file status to the inode member variables
+ * Initialise an inode from the vnode status.
*/
-static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
+static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key)
{
struct inode *inode = AFS_VNODE_TO_I(vnode);
- bool changed;
_debug("FS: ft=%d lk=%d sz=%llu ver=%Lu mod=%hu",
vnode->status.type,
@@ -46,16 +45,21 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
read_seqlock_excl(&vnode->cb_lock);
+ afs_update_inode_from_status(vnode, &vnode->status, NULL,
+ AFS_VNODE_NOT_YET_SET);
+
switch (vnode->status.type) {
case AFS_FTYPE_FILE:
inode->i_mode = S_IFREG | vnode->status.mode;
inode->i_op = &afs_file_inode_operations;
inode->i_fop = &afs_file_operations;
+ inode->i_mapping->a_ops = &afs_fs_aops;
break;
case AFS_FTYPE_DIR:
inode->i_mode = S_IFDIR | vnode->status.mode;
inode->i_op = &afs_dir_inode_operations;
inode->i_fop = &afs_dir_file_operations;
+ inode->i_mapping->a_ops = &afs_dir_aops;
break;
case AFS_FTYPE_SYMLINK:
/* Symlinks with a mode of 0644 are actually mountpoints. */
@@ -67,45 +71,31 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
inode->i_mode = S_IFDIR | 0555;
inode->i_op = &afs_mntpt_inode_operations;
inode->i_fop = &afs_mntpt_file_operations;
+ inode->i_mapping->a_ops = &afs_fs_aops;
} else {
inode->i_mode = S_IFLNK | vnode->status.mode;
inode->i_op = &afs_symlink_inode_operations;
+ inode->i_mapping->a_ops = &afs_fs_aops;
}
inode_nohighmem(inode);
break;
default:
printk("kAFS: AFS vnode with undefined type\n");
read_sequnlock_excl(&vnode->cb_lock);
- return -EBADMSG;
+ return afs_protocol_error(NULL, -EBADMSG);
}
- changed = (vnode->status.size != inode->i_size);
-
- set_nlink(inode, vnode->status.nlink);
- inode->i_uid = vnode->status.owner;
- inode->i_gid = vnode->status.group;
- inode->i_size = vnode->status.size;
- inode->i_ctime.tv_sec = vnode->status.mtime_client;
- inode->i_ctime.tv_nsec = 0;
- inode->i_atime = inode->i_mtime = inode->i_ctime;
inode->i_blocks = 0;
- inode->i_generation = vnode->fid.unique;
- inode_set_iversion_raw(inode, vnode->status.data_version);
- inode->i_mapping->a_ops = &afs_fs_aops;
+ vnode->invalid_before = vnode->status.data_version;
read_sequnlock_excl(&vnode->cb_lock);
-
-#ifdef CONFIG_AFS_FSCACHE
- if (changed)
- fscache_attr_changed(vnode->cache);
-#endif
return 0;
}
/*
* Fetch file status from the volume.
*/
-int afs_fetch_status(struct afs_vnode *vnode, struct key *key)
+int afs_fetch_status(struct afs_vnode *vnode, struct key *key, bool new_inode)
{
struct afs_fs_cursor fc;
int ret;
@@ -119,7 +109,7 @@ int afs_fetch_status(struct afs_vnode *vnode, struct key *key)
if (afs_begin_vnode_operation(&fc, vnode, key)) {
while (afs_select_fileserver(&fc)) {
fc.cb_break = vnode->cb_break + vnode->cb_s_break;
- afs_fs_fetch_file_status(&fc, NULL);
+ afs_fs_fetch_file_status(&fc, NULL, new_inode);
}
afs_check_for_remote_deletion(&fc, fc.vnode);
@@ -255,6 +245,11 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
} __packed key;
struct afs_vnode_cache_aux aux;
+ if (vnode->status.type == AFS_FTYPE_DIR) {
+ vnode->cache = NULL;
+ return;
+ }
+
key.vnode_id = vnode->fid.vnode;
key.unique = vnode->fid.unique;
key.vnode_id_ext[0] = 0;
@@ -307,7 +302,7 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
if (!status) {
/* it's a remotely extant inode */
- ret = afs_fetch_status(vnode, key);
+ ret = afs_fetch_status(vnode, key, true);
if (ret < 0)
goto bad_inode;
} else {
@@ -331,15 +326,12 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
vnode->cb_expires_at += ktime_get_real_seconds();
}
- /* set up caching before mapping the status, as map-status reads the
- * first page of symlinks to see if they're really mountpoints */
- inode->i_size = vnode->status.size;
- afs_get_inode_cache(vnode);
-
- ret = afs_inode_map_status(vnode, key);
+ ret = afs_inode_init_from_status(vnode, key);
if (ret < 0)
goto bad_inode;
+ afs_get_inode_cache(vnode);
+
/* success */
clear_bit(AFS_VNODE_UNSET, &vnode->flags);
inode->i_flags |= S_NOATIME;
@@ -349,10 +341,6 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
/* failure */
bad_inode:
-#ifdef CONFIG_AFS_FSCACHE
- fscache_relinquish_cookie(vnode->cache, NULL, ret == -ENOENT);
- vnode->cache = NULL;
-#endif
iget_failed(inode);
_leave(" = %d [bad]", ret);
return ERR_PTR(ret);
@@ -407,8 +395,11 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
if (vnode->cb_s_break != vnode->cb_interest->server->cb_s_break) {
vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
- } else if (!test_bit(AFS_VNODE_DIR_MODIFIED, &vnode->flags) &&
- !test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) &&
+ } else if (vnode->status.type == AFS_FTYPE_DIR &&
+ test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) &&
+ vnode->cb_expires_at - 10 > now) {
+ valid = true;
+ } else if (!test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) &&
vnode->cb_expires_at - 10 > now) {
valid = true;
}
@@ -432,7 +423,7 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
* access */
if (!test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
_debug("not promised");
- ret = afs_fetch_status(vnode, key);
+ ret = afs_fetch_status(vnode, key, false);
if (ret < 0) {
if (ret == -ENOENT) {
set_bit(AFS_VNODE_DELETED, &vnode->flags);
@@ -453,8 +444,6 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
* different */
if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
afs_zap_data(vnode);
-
- clear_bit(AFS_VNODE_DIR_MODIFIED, &vnode->flags);
mutex_unlock(&vnode->validate_lock);
valid:
_leave(" = 0");
@@ -544,7 +533,7 @@ void afs_evict_inode(struct inode *inode)
}
#endif
- afs_put_permits(vnode->permit_cache);
+ afs_put_permits(rcu_access_pointer(vnode->permit_cache));
_leave("");
}
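
One detail worth noting in afs_inode_init_from_status() is the symlink special case visible above: a symlink whose mode is exactly 0644 is presented as a mountpoint, i.e. a directory with mode 0555. The sketch below reproduces only that type/mode mapping in plain C with made-up names; it is not the kernel function.

	#include <stdio.h>
	#include <sys/stat.h>

	enum demo_ftype { DEMO_FILE, DEMO_DIR, DEMO_SYMLINK };

	/* Map an AFS-style (type, server mode) pair onto a Unix mode. */
	static mode_t demo_afs_mode(enum demo_ftype type, mode_t server_mode)
	{
		switch (type) {
		case DEMO_FILE:
			return S_IFREG | server_mode;
		case DEMO_DIR:
			return S_IFDIR | server_mode;
		case DEMO_SYMLINK:
			if ((server_mode & 0777) == 0644)	/* mountpoint rule */
				return S_IFDIR | 0555;
			return S_IFLNK | server_mode;
		}
		return 0;
	}

	int main(void)
	{
		printf("symlink 0644 -> %o\n", (unsigned int)demo_afs_mode(DEMO_SYMLINK, 0644));
		printf("symlink 0755 -> %o\n", (unsigned int)demo_afs_mode(DEMO_SYMLINK, 0755));
		return 0;
	}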
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index a6a1d75eee41..f8086ec95e24 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -122,7 +122,8 @@ struct afs_call {
u32 operation_ID; /* operation ID for an incoming call */
u32 count; /* count for use in unmarshalling */
__be32 tmp; /* place to extract temporary data */
- afs_dataversion_t store_version; /* updated version expected from store */
+ afs_dataversion_t expected_version; /* Updated version expected from store */
+ afs_dataversion_t expected_version_2; /* 2nd updated version expected from store */
};
struct afs_call_type {
@@ -173,11 +174,14 @@ struct afs_read {
loff_t len; /* How much we're asking for */
loff_t actual_len; /* How much we're actually getting */
loff_t remain; /* Amount remaining */
- atomic_t usage;
+ loff_t file_size; /* File size returned by server */
+ afs_dataversion_t data_version; /* Version number returned by server */
+ refcount_t usage;
unsigned int index; /* Which page we're reading into */
unsigned int nr_pages;
void (*page_done)(struct afs_call *, struct afs_read *);
- struct page *pages[];
+ struct page **pages;
+ struct page *array[];
};
/*
@@ -199,6 +203,18 @@ static inline struct afs_super_info *AFS_FS_S(struct super_block *sb)
extern struct file_system_type afs_fs_type;
/*
+ * Set of substitutes for @sys.
+ */
+struct afs_sysnames {
+#define AFS_NR_SYSNAME 16
+ char *subs[AFS_NR_SYSNAME];
+ refcount_t usage;
+ unsigned short nr;
+ short error;
+ char blank[1];
+};
+
+/*
* AFS network namespace record.
*/
struct afs_net {
@@ -245,9 +261,25 @@ struct afs_net {
struct mutex lock_manager_mutex;
/* Misc */
- struct proc_dir_entry *proc_afs; /* /proc/net/afs directory */
+ struct proc_dir_entry *proc_afs; /* /proc/net/afs directory */
+ struct afs_sysnames *sysnames;
+ rwlock_t sysnames_lock;
+
+ /* Statistics counters */
+ atomic_t n_lookup; /* Number of lookups done */
+ atomic_t n_reval; /* Number of dentries needing revalidation */
+ atomic_t n_inval; /* Number of invalidations by the server */
+ atomic_t n_relpg; /* Number of invalidations by releasepage */
+ atomic_t n_read_dir; /* Number of directory pages read */
+ atomic_t n_dir_cr; /* Number of directory entry creation edits */
+ atomic_t n_dir_rm; /* Number of directory entry removal edits */
+ atomic_t n_stores; /* Number of store ops */
+ atomic_long_t n_store_bytes; /* Number of bytes stored */
+ atomic_long_t n_fetch_bytes; /* Number of bytes fetched */
+ atomic_t n_fetches; /* Number of data fetch ops */
};
+extern const char afs_init_sysname[];
extern struct afs_net __afs_net;// Dummy AFS network namespace; TODO: replace with real netns
enum afs_cell_state {
@@ -363,6 +395,7 @@ struct afs_server {
#define AFS_SERVER_FL_UPDATING 4
#define AFS_SERVER_FL_PROBED 5 /* The fileserver has been probed */
#define AFS_SERVER_FL_PROBING 6 /* Fileserver is being probed */
+#define AFS_SERVER_FL_NO_IBULK 7 /* Fileserver doesn't support FS.InlineBulkStatus */
atomic_t usage;
u32 addr_version; /* Address list version */
@@ -455,10 +488,11 @@ struct afs_vnode {
struct afs_volume *volume; /* volume on which vnode resides */
struct afs_fid fid; /* the file identifier for this inode */
struct afs_file_status status; /* AFS status info for this file */
+ afs_dataversion_t invalid_before; /* Child dentries are invalid before this */
#ifdef CONFIG_AFS_FSCACHE
struct fscache_cookie *cache; /* caching cookie */
#endif
- struct afs_permits *permit_cache; /* cache of permits so far obtained */
+ struct afs_permits __rcu *permit_cache; /* cache of permits so far obtained */
struct mutex io_lock; /* Lock for serialising I/O on this vnode */
struct mutex validate_lock; /* lock for validating this vnode */
spinlock_t wb_lock; /* lock for wb_keys */
@@ -466,12 +500,13 @@ struct afs_vnode {
unsigned long flags;
#define AFS_VNODE_CB_PROMISED 0 /* Set if vnode has a callback promise */
#define AFS_VNODE_UNSET 1 /* set if vnode attributes not yet set */
-#define AFS_VNODE_DIR_MODIFIED 2 /* set if dir vnode's data modified */
+#define AFS_VNODE_DIR_VALID 2 /* Set if dir contents are valid */
#define AFS_VNODE_ZAP_DATA 3 /* set if vnode's data should be invalidated */
#define AFS_VNODE_DELETED 4 /* set if vnode deleted on server */
#define AFS_VNODE_MOUNTPOINT 5 /* set if vnode is a mountpoint symlink */
#define AFS_VNODE_AUTOCELL 6 /* set if Vnode is an auto mount point */
#define AFS_VNODE_PSEUDODIR 7 /* set if Vnode is a pseudo directory */
+#define AFS_VNODE_NEW_CONTENT 8 /* Set if file has new content (create/trunc-0) */
struct list_head wb_keys; /* List of keys available for writeback */
struct list_head pending_locks; /* locks waiting to be granted */
@@ -611,7 +646,7 @@ extern struct fscache_cookie_def afs_vnode_cache_index_def;
*/
extern void afs_init_callback_state(struct afs_server *);
extern void afs_break_callback(struct afs_vnode *);
-extern void afs_break_callbacks(struct afs_server *, size_t,struct afs_callback[]);
+extern void afs_break_callbacks(struct afs_server *, size_t, struct afs_callback_break *);
extern int afs_register_server_cb_interest(struct afs_vnode *, struct afs_server_entry *);
extern void afs_put_cb_interest(struct afs_net *, struct afs_cb_interest *);
@@ -646,11 +681,26 @@ extern bool afs_cm_incoming_call(struct afs_call *);
*/
extern const struct file_operations afs_dir_file_operations;
extern const struct inode_operations afs_dir_inode_operations;
+extern const struct address_space_operations afs_dir_aops;
+extern const struct dentry_operations afs_fs_dentry_operations;
+
+extern void afs_d_release(struct dentry *);
+
+/*
+ * dir_edit.c
+ */
+extern void afs_edit_dir_add(struct afs_vnode *, struct qstr *, struct afs_fid *,
+ enum afs_edit_dir_reason);
+extern void afs_edit_dir_remove(struct afs_vnode *, struct qstr *, enum afs_edit_dir_reason);
+
+/*
+ * dynroot.c
+ */
extern const struct file_operations afs_dynroot_file_operations;
extern const struct inode_operations afs_dynroot_inode_operations;
-extern const struct dentry_operations afs_fs_dentry_operations;
+extern const struct dentry_operations afs_dynroot_dentry_operations;
-extern bool afs_dir_check_page(struct inode *, struct page *);
+extern struct inode *afs_try_auto_mntpt(struct dentry *, struct inode *);
/*
* file.c
@@ -680,17 +730,23 @@ extern int afs_flock(struct file *, int, struct file_lock *);
/*
* fsclient.c
*/
-extern int afs_fs_fetch_file_status(struct afs_fs_cursor *, struct afs_volsync *);
+#define AFS_VNODE_NOT_YET_SET 0x01
+#define AFS_VNODE_META_CHANGED 0x02
+#define AFS_VNODE_DATA_CHANGED 0x04
+extern void afs_update_inode_from_status(struct afs_vnode *, struct afs_file_status *,
+ const afs_dataversion_t *, u8);
+
+extern int afs_fs_fetch_file_status(struct afs_fs_cursor *, struct afs_volsync *, bool);
extern int afs_fs_give_up_callbacks(struct afs_net *, struct afs_server *);
extern int afs_fs_fetch_data(struct afs_fs_cursor *, struct afs_read *);
-extern int afs_fs_create(struct afs_fs_cursor *, const char *, umode_t,
+extern int afs_fs_create(struct afs_fs_cursor *, const char *, umode_t, u64,
struct afs_fid *, struct afs_file_status *, struct afs_callback *);
-extern int afs_fs_remove(struct afs_fs_cursor *, const char *, bool);
-extern int afs_fs_link(struct afs_fs_cursor *, struct afs_vnode *, const char *);
-extern int afs_fs_symlink(struct afs_fs_cursor *, const char *, const char *,
+extern int afs_fs_remove(struct afs_fs_cursor *, const char *, bool, u64);
+extern int afs_fs_link(struct afs_fs_cursor *, struct afs_vnode *, const char *, u64);
+extern int afs_fs_symlink(struct afs_fs_cursor *, const char *, const char *, u64,
struct afs_fid *, struct afs_file_status *);
extern int afs_fs_rename(struct afs_fs_cursor *, const char *,
- struct afs_vnode *, const char *);
+ struct afs_vnode *, const char *, u64, u64);
extern int afs_fs_store_data(struct afs_fs_cursor *, struct address_space *,
pgoff_t, pgoff_t, unsigned, unsigned);
extern int afs_fs_setattr(struct afs_fs_cursor *, struct iattr *);
@@ -702,11 +758,18 @@ extern int afs_fs_give_up_all_callbacks(struct afs_net *, struct afs_server *,
struct afs_addr_cursor *, struct key *);
extern int afs_fs_get_capabilities(struct afs_net *, struct afs_server *,
struct afs_addr_cursor *, struct key *);
+extern int afs_fs_inline_bulk_status(struct afs_fs_cursor *, struct afs_net *,
+ struct afs_fid *, struct afs_file_status *,
+ struct afs_callback *, unsigned int,
+ struct afs_volsync *);
+extern int afs_fs_fetch_status(struct afs_fs_cursor *, struct afs_net *,
+ struct afs_fid *, struct afs_file_status *,
+ struct afs_callback *, struct afs_volsync *);
/*
* inode.c
*/
-extern int afs_fetch_status(struct afs_vnode *, struct key *);
+extern int afs_fetch_status(struct afs_vnode *, struct key *, bool);
extern int afs_iget5_test(struct inode *, void *);
extern struct inode *afs_iget_pseudo_dir(struct super_block *, bool);
extern struct inode *afs_iget(struct super_block *, struct key *,
@@ -754,6 +817,13 @@ static inline void afs_put_net(struct afs_net *net)
{
}
+static inline void __afs_stat(atomic_t *s)
+{
+ atomic_inc(s);
+}
+
+#define afs_stat_v(vnode, n) __afs_stat(&afs_v2net(vnode)->n)
+
/*
* misc.c
*/
@@ -781,6 +851,7 @@ extern int __net_init afs_proc_init(struct afs_net *);
extern void __net_exit afs_proc_cleanup(struct afs_net *);
extern int afs_proc_cell_setup(struct afs_net *, struct afs_cell *);
extern void afs_proc_cell_remove(struct afs_net *, struct afs_cell *);
+extern void afs_put_sysnames(struct afs_sysnames *);
/*
* rotate.c
@@ -809,6 +880,7 @@ extern void afs_flat_call_destructor(struct afs_call *);
extern void afs_send_empty_reply(struct afs_call *);
extern void afs_send_simple_reply(struct afs_call *, const void *, size_t);
extern int afs_extract_data(struct afs_call *, void *, size_t, bool);
+extern int afs_protocol_error(struct afs_call *, int);
static inline int afs_transfer_reply(struct afs_call *call)
{
@@ -955,7 +1027,6 @@ extern int afs_writepage(struct page *, struct writeback_control *);
extern int afs_writepages(struct address_space *, struct writeback_control *);
extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *);
-extern int afs_flush(struct file *, fl_owner_t);
extern int afs_fsync(struct file *, loff_t, loff_t, int);
extern int afs_page_mkwrite(struct vm_fault *);
extern void afs_prune_wb_keys(struct afs_vnode *);
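
The statistics counters added to struct afs_net above are bumped through afs_stat_v(), which names the counter by its struct member so each increment site stays a single short line. A plain C11 analogue of that pattern, using invented names:

	#include <stdatomic.h>
	#include <stdio.h>

	struct demo_net {
		atomic_uint n_lookup;
		atomic_uint n_stores;
	};

	static inline void __demo_stat(atomic_uint *s)
	{
		atomic_fetch_add(s, 1);
	}

	/* n is the counter's member name, resolved at compile time. */
	#define demo_stat(net, n) __demo_stat(&(net)->n)

	int main(void)
	{
		struct demo_net net = { 0 };

		demo_stat(&net, n_lookup);
		demo_stat(&net, n_lookup);
		demo_stat(&net, n_stores);
		printf("lookups=%u stores=%u\n",
		       atomic_load(&net.n_lookup), atomic_load(&net.n_stores));
		return 0;
	}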
diff --git a/fs/afs/main.c b/fs/afs/main.c
index 15a02a05ff40..d7560168b3bf 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -34,11 +34,42 @@ MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list");
struct workqueue_struct *afs_wq;
struct afs_net __afs_net;
+#if defined(CONFIG_ALPHA)
+const char afs_init_sysname[] = "alpha_linux26";
+#elif defined(CONFIG_X86_64)
+const char afs_init_sysname[] = "amd64_linux26";
+#elif defined(CONFIG_ARM)
+const char afs_init_sysname[] = "arm_linux26";
+#elif defined(CONFIG_ARM64)
+const char afs_init_sysname[] = "aarch64_linux26";
+#elif defined(CONFIG_X86_32)
+const char afs_init_sysname[] = "i386_linux26";
+#elif defined(CONFIG_IA64)
+const char afs_init_sysname[] = "ia64_linux26";
+#elif defined(CONFIG_PPC64)
+const char afs_init_sysname[] = "ppc64_linux26";
+#elif defined(CONFIG_PPC32)
+const char afs_init_sysname[] = "ppc_linux26";
+#elif defined(CONFIG_S390)
+#ifdef CONFIG_64BIT
+const char afs_init_sysname[] = "s390x_linux26";
+#else
+const char afs_init_sysname[] = "s390_linux26";
+#endif
+#elif defined(CONFIG_SPARC64)
+const char afs_init_sysname[] = "sparc64_linux26";
+#elif defined(CONFIG_SPARC32)
+const char afs_init_sysname[] = "sparc_linux26";
+#else
+const char afs_init_sysname[] = "unknown_linux26";
+#endif
+
/*
* Initialise an AFS network namespace record.
*/
static int __net_init afs_net_init(struct afs_net *net)
{
+ struct afs_sysnames *sysnames;
int ret;
net->live = true;
@@ -67,6 +98,16 @@ static int __net_init afs_net_init(struct afs_net *net)
INIT_WORK(&net->fs_manager, afs_manage_servers);
timer_setup(&net->fs_timer, afs_servers_timer, 0);
+ ret = -ENOMEM;
+ sysnames = kzalloc(sizeof(*sysnames), GFP_KERNEL);
+ if (!sysnames)
+ goto error_sysnames;
+ sysnames->subs[0] = (char *)&afs_init_sysname;
+ sysnames->nr = 1;
+ refcount_set(&sysnames->usage, 1);
+ net->sysnames = sysnames;
+ rwlock_init(&net->sysnames_lock);
+
/* Register the /proc stuff */
ret = afs_proc_init(net);
if (ret < 0)
@@ -92,6 +133,8 @@ error_cell_init:
net->live = false;
afs_proc_cleanup(net);
error_proc:
+ afs_put_sysnames(net->sysnames);
+error_sysnames:
net->live = false;
return ret;
}
@@ -106,6 +149,7 @@ static void __net_exit afs_net_exit(struct afs_net *net)
afs_purge_servers(net);
afs_close_socket(net);
afs_proc_cleanup(net);
+ afs_put_sysnames(net->sysnames);
}
/*
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 4508dd54f789..839a22280606 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -126,6 +126,34 @@ static const struct file_operations afs_proc_servers_fops = {
.release = seq_release,
};
+static int afs_proc_sysname_open(struct inode *inode, struct file *file);
+static int afs_proc_sysname_release(struct inode *inode, struct file *file);
+static void *afs_proc_sysname_start(struct seq_file *p, loff_t *pos);
+static void *afs_proc_sysname_next(struct seq_file *p, void *v,
+ loff_t *pos);
+static void afs_proc_sysname_stop(struct seq_file *p, void *v);
+static int afs_proc_sysname_show(struct seq_file *m, void *v);
+static ssize_t afs_proc_sysname_write(struct file *file,
+ const char __user *buf,
+ size_t size, loff_t *_pos);
+
+static const struct seq_operations afs_proc_sysname_ops = {
+ .start = afs_proc_sysname_start,
+ .next = afs_proc_sysname_next,
+ .stop = afs_proc_sysname_stop,
+ .show = afs_proc_sysname_show,
+};
+
+static const struct file_operations afs_proc_sysname_fops = {
+ .open = afs_proc_sysname_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = afs_proc_sysname_release,
+ .write = afs_proc_sysname_write,
+};
+
+static const struct file_operations afs_proc_stats_fops;
+
/*
* initialise the /proc/fs/afs/ directory
*/
@@ -139,7 +167,9 @@ int afs_proc_init(struct afs_net *net)
if (!proc_create("cells", 0644, net->proc_afs, &afs_proc_cells_fops) ||
!proc_create("rootcell", 0644, net->proc_afs, &afs_proc_rootcell_fops) ||
- !proc_create("servers", 0644, net->proc_afs, &afs_proc_servers_fops))
+ !proc_create("servers", 0644, net->proc_afs, &afs_proc_servers_fops) ||
+ !proc_create("stats", 0644, net->proc_afs, &afs_proc_stats_fops) ||
+ !proc_create("sysname", 0644, net->proc_afs, &afs_proc_sysname_fops))
goto error_tree;
_leave(" = 0");
@@ -183,6 +213,7 @@ static int afs_proc_cells_open(struct inode *inode, struct file *file)
* first item
*/
static void *afs_proc_cells_start(struct seq_file *m, loff_t *_pos)
+ __acquires(rcu)
{
struct afs_net *net = afs_seq2net(m);
@@ -204,6 +235,7 @@ static void *afs_proc_cells_next(struct seq_file *m, void *v, loff_t *pos)
* clean up after reading from the cells list
*/
static void afs_proc_cells_stop(struct seq_file *m, void *v)
+ __releases(rcu)
{
rcu_read_unlock();
}
@@ -282,7 +314,8 @@ static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf,
goto done;
}
- set_bit(AFS_CELL_FL_NO_GC, &cell->flags);
+ if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags))
+ afs_put_cell(net, cell);
printk("kAFS: Added new cell '%s'\n", name);
} else {
goto inval;
@@ -304,7 +337,40 @@ inval:
static ssize_t afs_proc_rootcell_read(struct file *file, char __user *buf,
size_t size, loff_t *_pos)
{
- return 0;
+ struct afs_cell *cell;
+ struct afs_net *net = afs_proc2net(file);
+ unsigned int seq = 0;
+ char name[AFS_MAXCELLNAME + 1];
+ int len;
+
+ if (*_pos > 0)
+ return 0;
+ if (!net->ws_cell)
+ return 0;
+
+ rcu_read_lock();
+ do {
+ read_seqbegin_or_lock(&net->cells_lock, &seq);
+ len = 0;
+ cell = rcu_dereference_raw(net->ws_cell);
+ if (cell) {
+ len = cell->name_len;
+ memcpy(name, cell->name, len);
+ }
+ } while (need_seqretry(&net->cells_lock, seq));
+ done_seqretry(&net->cells_lock, seq);
+ rcu_read_unlock();
+
+ if (!len)
+ return 0;
+
+ name[len++] = '\n';
+ if (len > size)
+ len = size;
+ if (copy_to_user(buf, name, len) != 0)
+ return -EFAULT;
+ *_pos = 1;
+ return len;
}
/*
@@ -327,6 +393,12 @@ static ssize_t afs_proc_rootcell_write(struct file *file,
if (IS_ERR(kbuf))
return PTR_ERR(kbuf);
+ ret = -EINVAL;
+ if (kbuf[0] == '.')
+ goto out;
+ if (memchr(kbuf, '/', size))
+ goto out;
+
/* trim to first NL */
s = memchr(kbuf, '\n', size);
if (s)
@@ -339,6 +411,7 @@ static ssize_t afs_proc_rootcell_write(struct file *file,
if (ret >= 0)
ret = size; /* consume everything, always */
+out:
kfree(kbuf);
_leave(" = %d", ret);
return ret;
@@ -413,6 +486,7 @@ static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file)
* first item
*/
static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos)
+ __acquires(cell->proc_lock)
{
struct afs_cell *cell = m->private;
@@ -438,6 +512,7 @@ static void *afs_proc_cell_volumes_next(struct seq_file *p, void *v,
* clean up after reading from the cells list
*/
static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v)
+ __releases(cell->proc_lock)
{
struct afs_cell *cell = p->private;
@@ -500,6 +575,7 @@ static int afs_proc_cell_vlservers_open(struct inode *inode, struct file *file)
* first item
*/
static void *afs_proc_cell_vlservers_start(struct seq_file *m, loff_t *_pos)
+ __acquires(rcu)
{
struct afs_addr_list *alist;
struct afs_cell *cell = m->private;
@@ -544,6 +620,7 @@ static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v,
* clean up after reading from the cells list
*/
static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v)
+ __releases(rcu)
{
rcu_read_unlock();
}
@@ -580,6 +657,7 @@ static int afs_proc_servers_open(struct inode *inode, struct file *file)
* first item.
*/
static void *afs_proc_servers_start(struct seq_file *m, loff_t *_pos)
+ __acquires(rcu)
{
struct afs_net *net = afs_seq2net(m);
@@ -601,6 +679,7 @@ static void *afs_proc_servers_next(struct seq_file *m, void *v, loff_t *_pos)
* clean up after reading from the cells list
*/
static void afs_proc_servers_stop(struct seq_file *p, void *v)
+ __releases(rcu)
{
rcu_read_unlock();
}
@@ -626,3 +705,244 @@ static int afs_proc_servers_show(struct seq_file *m, void *v)
&alist->addrs[alist->index].transport);
return 0;
}
+
+void afs_put_sysnames(struct afs_sysnames *sysnames)
+{
+ int i;
+
+ if (sysnames && refcount_dec_and_test(&sysnames->usage)) {
+ for (i = 0; i < sysnames->nr; i++)
+ if (sysnames->subs[i] != afs_init_sysname &&
+ sysnames->subs[i] != sysnames->blank)
+ kfree(sysnames->subs[i]);
+ kfree(sysnames); /* free the list itself, not just the substitutions */
+ }
+}
+
+/*
+ * Handle opening of /proc/fs/afs/sysname. If it is opened for writing, we
+ * assume the caller wants to change the substitution list and we allocate a
+ * buffer to hold the list.
+ */
+static int afs_proc_sysname_open(struct inode *inode, struct file *file)
+{
+ struct afs_sysnames *sysnames;
+ struct seq_file *m;
+ int ret;
+
+ ret = seq_open(file, &afs_proc_sysname_ops);
+ if (ret < 0)
+ return ret;
+
+ if (file->f_mode & FMODE_WRITE) {
+ sysnames = kzalloc(sizeof(*sysnames), GFP_KERNEL);
+ if (!sysnames) {
+ seq_release(inode, file);
+ return -ENOMEM;
+ }
+
+ refcount_set(&sysnames->usage, 1);
+ m = file->private_data;
+ m->private = sysnames;
+ }
+
+ return 0;
+}
+
+/*
+ * Handle writes to /proc/fs/afs/sysname to set the @sys substitution.
+ */
+static ssize_t afs_proc_sysname_write(struct file *file,
+ const char __user *buf,
+ size_t size, loff_t *_pos)
+{
+ struct afs_sysnames *sysnames;
+ struct seq_file *m = file->private_data;
+ char *kbuf = NULL, *s, *p, *sub;
+ int ret, len;
+
+ sysnames = m->private;
+ if (!sysnames)
+ return -EINVAL;
+ if (sysnames->error)
+ return sysnames->error;
+
+ if (size >= PAGE_SIZE - 1) {
+ sysnames->error = -EINVAL;
+ return -EINVAL;
+ }
+ if (size == 0)
+ return 0;
+
+ kbuf = memdup_user_nul(buf, size);
+ if (IS_ERR(kbuf))
+ return PTR_ERR(kbuf);
+
+ inode_lock(file_inode(file));
+
+ p = kbuf;
+ while ((s = strsep(&p, " \t\n"))) {
+ len = strlen(s);
+ if (len == 0)
+ continue;
+ ret = -ENAMETOOLONG;
+ if (len >= AFSNAMEMAX)
+ goto error;
+
+ if (len >= 4 &&
+ s[len - 4] == '@' &&
+ s[len - 3] == 's' &&
+ s[len - 2] == 'y' &&
+ s[len - 1] == 's')
+ /* Protect against recursion */
+ goto invalid;
+
+ if (s[0] == '.' &&
+ (len < 2 || (len == 2 && s[1] == '.')))
+ goto invalid;
+
+ if (memchr(s, '/', len))
+ goto invalid;
+
+ ret = -EFBIG;
+ if (sysnames->nr >= AFS_NR_SYSNAME)
+ goto out;
+
+ if (strcmp(s, afs_init_sysname) == 0) {
+ sub = (char *)afs_init_sysname;
+ } else {
+ ret = -ENOMEM;
+ sub = kmemdup(s, len + 1, GFP_KERNEL);
+ if (!sub)
+ goto out;
+ }
+
+ sysnames->subs[sysnames->nr] = sub;
+ sysnames->nr++;
+ }
+
+ ret = size; /* consume everything, always */
+out:
+ inode_unlock(file_inode(file));
+ kfree(kbuf);
+ return ret;
+
+invalid:
+ ret = -EINVAL;
+error:
+ sysnames->error = ret;
+ goto out;
+}
+
+static int afs_proc_sysname_release(struct inode *inode, struct file *file)
+{
+ struct afs_sysnames *sysnames, *kill = NULL;
+ struct seq_file *m = file->private_data;
+ struct afs_net *net = afs_seq2net(m);
+
+ sysnames = m->private;
+ if (sysnames) {
+ if (!sysnames->error) {
+ kill = sysnames;
+ if (sysnames->nr == 0) {
+ sysnames->subs[0] = sysnames->blank;
+ sysnames->nr++;
+ }
+ write_lock(&net->sysnames_lock);
+ kill = net->sysnames;
+ net->sysnames = sysnames;
+ write_unlock(&net->sysnames_lock);
+ }
+ afs_put_sysnames(kill);
+ }
+
+ return seq_release(inode, file);
+}
+
+static void *afs_proc_sysname_start(struct seq_file *m, loff_t *pos)
+ __acquires(&net->sysnames_lock)
+{
+ struct afs_net *net = afs_seq2net(m);
+ struct afs_sysnames *names = net->sysnames;
+
+ read_lock(&net->sysnames_lock);
+
+ if (*pos >= names->nr)
+ return NULL;
+ return (void *)(unsigned long)(*pos + 1);
+}
+
+static void *afs_proc_sysname_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct afs_net *net = afs_seq2net(m);
+ struct afs_sysnames *names = net->sysnames;
+
+ *pos += 1;
+ if (*pos >= names->nr)
+ return NULL;
+ return (void *)(unsigned long)(*pos + 1);
+}
+
+static void afs_proc_sysname_stop(struct seq_file *m, void *v)
+ __releases(&net->sysnames_lock)
+{
+ struct afs_net *net = afs_seq2net(m);
+
+ read_unlock(&net->sysnames_lock);
+}
+
+static int afs_proc_sysname_show(struct seq_file *m, void *v)
+{
+ struct afs_net *net = afs_seq2net(m);
+ struct afs_sysnames *sysnames = net->sysnames;
+ unsigned int i = (unsigned long)v - 1;
+
+ if (i < sysnames->nr)
+ seq_printf(m, "%s\n", sysnames->subs[i]);
+ return 0;
+}
+
+/*
+ * Display general per-net namespace statistics
+ */
+static int afs_proc_stats_show(struct seq_file *m, void *v)
+{
+ struct afs_net *net = afs_seq2net(m);
+
+ seq_puts(m, "kAFS statistics\n");
+
+ seq_printf(m, "dir-mgmt: look=%u reval=%u inval=%u relpg=%u\n",
+ atomic_read(&net->n_lookup),
+ atomic_read(&net->n_reval),
+ atomic_read(&net->n_inval),
+ atomic_read(&net->n_relpg));
+
+ seq_printf(m, "dir-data: rdpg=%u\n",
+ atomic_read(&net->n_read_dir));
+
+ seq_printf(m, "dir-edit: cr=%u rm=%u\n",
+ atomic_read(&net->n_dir_cr),
+ atomic_read(&net->n_dir_rm));
+
+ seq_printf(m, "file-rd : n=%u nb=%lu\n",
+ atomic_read(&net->n_fetches),
+ atomic_long_read(&net->n_fetch_bytes));
+ seq_printf(m, "file-wr : n=%u nb=%lu\n",
+ atomic_read(&net->n_stores),
+ atomic_long_read(&net->n_store_bytes));
+ return 0;
+}
+
+/*
+ * Open "/proc/fs/afs/stats" to allow reading of the stat counters.
+ */
+static int afs_proc_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, afs_proc_stats_show, NULL);
+}
+
+static const struct file_operations afs_proc_stats_fops = {
+ .open = afs_proc_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
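
From userspace, the new /proc/fs/afs/sysname file takes a whitespace-separated list of @sys substitutions; names containing '/', the entries "." and "..", and anything ending in "@sys" are rejected, and the assembled list only replaces the live one when the file is closed. A hedged usage sketch, assuming kAFS is loaded and the caller is permitted to write to the file:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *list = "amd64_linux26 i386_linux26\n";
		char buf[256];
		FILE *f = fopen("/proc/fs/afs/sysname", "w");

		if (!f) {
			perror("open sysname for writing");
			return 1;
		}
		fwrite(list, 1, strlen(list), f);
		fclose(f);			/* the new list is committed on release */

		f = fopen("/proc/fs/afs/sysname", "r");
		if (f) {
			while (fgets(buf, sizeof(buf), f))
				fputs(buf, stdout);	/* one substitution per line */
			fclose(f);
		}
		return 0;
	}

Reading /proc/fs/afs/stats in the same way prints the per-namespace counters emitted by afs_proc_stats_show() above.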
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index ad1328d85526..ac0feac9d746 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -21,7 +21,7 @@
/*
* Initialise a filesystem server cursor for iterating over FS servers.
*/
-void afs_init_fs_cursor(struct afs_fs_cursor *fc, struct afs_vnode *vnode)
+static void afs_init_fs_cursor(struct afs_fs_cursor *fc, struct afs_vnode *vnode)
{
memset(fc, 0, sizeof(*fc));
}
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index f7ae54b6a393..5c6263972ec9 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -926,3 +926,12 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count,
afs_set_call_complete(call, ret, remote_abort);
return ret;
}
+
+/*
+ * Log that a protocol error was produced and return the error to the caller.
+ */
+noinline int afs_protocol_error(struct afs_call *call, int error)
+{
+ trace_afs_protocol_error(call, error, __builtin_return_address(0));
+ return error;
+}
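
afs_protocol_error() gives every -EBADMSG return path a single funnel through which a tracepoint records who produced the error, while callers keep the terse "return afs_protocol_error(call, -EBADMSG);" form. A userspace sketch of the same idea, with fprintf standing in for the tracepoint:

	#include <errno.h>
	#include <stdio.h>

	/* noinline so __builtin_return_address(0) points at the failing caller */
	__attribute__((noinline))
	static int demo_protocol_error(const char *what, int error)
	{
		fprintf(stderr, "protocol error %d in %s (caller %p)\n",
			error, what, __builtin_return_address(0));
		return error;
	}

	static int demo_check_count(unsigned int count, unsigned int max)
	{
		if (count > max)
			return demo_protocol_error("endpoint count", -EBADMSG);
		return 0;
	}

	int main(void)
	{
		return demo_check_count(99, 64) ? 1 : 0;
	}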
diff --git a/fs/afs/security.c b/fs/afs/security.c
index b88b7d45fdaa..cea2fff313dc 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -178,18 +178,14 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
}
}
- if (cb_break != (vnode->cb_break + vnode->cb_interest->server->cb_s_break)) {
- rcu_read_unlock();
+ if (cb_break != (vnode->cb_break + vnode->cb_interest->server->cb_s_break))
goto someone_else_changed_it;
- }
/* We need a ref on any permits list we want to copy as we'll have to
* drop the lock to do memory allocation.
*/
- if (permits && !refcount_inc_not_zero(&permits->usage)) {
- rcu_read_unlock();
+ if (permits && !refcount_inc_not_zero(&permits->usage))
goto someone_else_changed_it;
- }
rcu_read_unlock();
@@ -278,6 +274,7 @@ someone_else_changed_it:
/* Someone else changed the cache under us - don't recheck at this
* time.
*/
+ rcu_read_unlock();
return;
}
@@ -296,8 +293,6 @@ int afs_check_permit(struct afs_vnode *vnode, struct key *key,
_enter("{%x:%u},%x",
vnode->fid.vid, vnode->fid.vnode, key_serial(key));
- permits = vnode->permit_cache;
-
/* check the permits to see if we've got one yet */
if (key == vnode->volume->cell->anonymous_key) {
_debug("anon");
@@ -327,7 +322,7 @@ int afs_check_permit(struct afs_vnode *vnode, struct key *key,
*/
_debug("no valid permit");
- ret = afs_fetch_status(vnode, key);
+ ret = afs_fetch_status(vnode, key, false);
if (ret < 0) {
*_access = 0;
_leave(" = %d", ret);
diff --git a/fs/afs/server.c b/fs/afs/server.c
index a43ef77dabae..e23be63998a8 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -59,7 +59,8 @@ struct afs_server *afs_find_server(struct afs_net *net,
alist = rcu_dereference(server->addresses);
for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) {
b = &alist->addrs[i].transport.sin6;
- diff = (u16)a->sin6_port - (u16)b->sin6_port;
+ diff = ((u16 __force)a->sin6_port -
+ (u16 __force)b->sin6_port);
if (diff == 0)
diff = memcmp(&a->sin6_addr,
&b->sin6_addr,
@@ -79,10 +80,11 @@ struct afs_server *afs_find_server(struct afs_net *net,
alist = rcu_dereference(server->addresses);
for (i = 0; i < alist->nr_ipv4; i++) {
b = &alist->addrs[i].transport.sin6;
- diff = (u16)a->sin6_port - (u16)b->sin6_port;
+ diff = ((u16 __force)a->sin6_port -
+ (u16 __force)b->sin6_port);
if (diff == 0)
- diff = ((u32)a->sin6_addr.s6_addr32[3] -
- (u32)b->sin6_addr.s6_addr32[3]);
+ diff = ((u32 __force)a->sin6_addr.s6_addr32[3] -
+ (u32 __force)b->sin6_addr.s6_addr32[3]);
if (diff == 0)
goto found;
if (diff < 0) {
@@ -381,7 +383,7 @@ static void afs_server_rcu(struct rcu_head *rcu)
{
struct afs_server *server = container_of(rcu, struct afs_server, rcu);
- afs_put_addrlist(server->addresses);
+ afs_put_addrlist(rcu_access_pointer(server->addresses));
kfree(server);
}
@@ -390,7 +392,7 @@ static void afs_server_rcu(struct rcu_head *rcu)
*/
static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
{
- struct afs_addr_list *alist = server->addresses;
+ struct afs_addr_list *alist = rcu_access_pointer(server->addresses);
struct afs_addr_cursor ac = {
.alist = alist,
.addr = &alist->addrs[0],
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 3623c952b6ff..65081ec3c36e 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -154,7 +154,7 @@ static int afs_show_devname(struct seq_file *m, struct dentry *root)
seq_puts(m, "none");
return 0;
}
-
+
switch (volume->type) {
case AFSVL_RWVOL:
break;
@@ -269,7 +269,7 @@ static int afs_parse_device_name(struct afs_mount_params *params,
int cellnamesz;
_enter(",%s", name);
-
+
if (!name) {
printk(KERN_ERR "kAFS: no volume name specified\n");
return -EINVAL;
@@ -418,7 +418,10 @@ static int afs_fill_super(struct super_block *sb,
if (!sb->s_root)
goto error;
- sb->s_d_op = &afs_fs_dentry_operations;
+ if (params->dyn_root)
+ sb->s_d_op = &afs_dynroot_dentry_operations;
+ else
+ sb->s_d_op = &afs_fs_dentry_operations;
_leave(" = 0");
return 0;
@@ -676,7 +679,7 @@ static int afs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_bfree = 0;
return 0;
}
-
+
key = afs_request_key(vnode->volume->cell);
if (IS_ERR(key))
return PTR_ERR(key);
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index 5d8562f1ad4a..1ed7e2fd2f35 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -303,7 +303,7 @@ struct afs_addr_list *afs_vl_get_addrs_u(struct afs_net *net,
r->uuid.clock_seq_hi_and_reserved = htonl(u->clock_seq_hi_and_reserved);
r->uuid.clock_seq_low = htonl(u->clock_seq_low);
for (i = 0; i < 6; i++)
- r->uuid.node[i] = ntohl(u->node[i]);
+ r->uuid.node[i] = htonl(u->node[i]);
trace_afs_make_vl_call(call);
return (struct afs_addr_list *)afs_make_call(ac, call, GFP_KERNEL, false);
@@ -450,7 +450,7 @@ again:
call->count2 = ntohl(*bp); /* Type or next count */
if (call->count > YFS_MAXENDPOINTS)
- return -EBADMSG;
+ return afs_protocol_error(call, -EBADMSG);
alist = afs_alloc_addrlist(call->count, FS_SERVICE, AFS_FS_PORT);
if (!alist)
@@ -474,7 +474,7 @@ again:
size = sizeof(__be32) * (1 + 4 + 1);
break;
default:
- return -EBADMSG;
+ return afs_protocol_error(call, -EBADMSG);
}
size += sizeof(__be32);
@@ -487,24 +487,24 @@ again:
switch (call->count2) {
case YFS_ENDPOINT_IPV4:
if (ntohl(bp[0]) != sizeof(__be32) * 2)
- return -EBADMSG;
+ return afs_protocol_error(call, -EBADMSG);
afs_merge_fs_addr4(alist, bp[1], ntohl(bp[2]));
bp += 3;
break;
case YFS_ENDPOINT_IPV6:
if (ntohl(bp[0]) != sizeof(__be32) * 5)
- return -EBADMSG;
+ return afs_protocol_error(call, -EBADMSG);
afs_merge_fs_addr6(alist, bp + 1, ntohl(bp[5]));
bp += 6;
break;
default:
- return -EBADMSG;
+ return afs_protocol_error(call, -EBADMSG);
}
/* Got either the type of the next entry or the count of
* volEndpoints if no more fsEndpoints.
*/
- call->count2 = htonl(*bp++);
+ call->count2 = ntohl(*bp++);
call->offset = 0;
call->count--;
@@ -517,7 +517,7 @@ again:
if (!call->count)
goto end;
if (call->count > YFS_MAXENDPOINTS)
- return -EBADMSG;
+ return afs_protocol_error(call, -EBADMSG);
call->unmarshall = 3;
@@ -531,7 +531,7 @@ again:
return ret;
bp = call->buffer;
- call->count2 = htonl(*bp++);
+ call->count2 = ntohl(*bp++);
call->offset = 0;
call->unmarshall = 4;
@@ -545,7 +545,7 @@ again:
size = sizeof(__be32) * (1 + 4 + 1);
break;
default:
- return -EBADMSG;
+ return afs_protocol_error(call, -EBADMSG);
}
if (call->count > 1)
@@ -558,16 +558,16 @@ again:
switch (call->count2) {
case YFS_ENDPOINT_IPV4:
if (ntohl(bp[0]) != sizeof(__be32) * 2)
- return -EBADMSG;
+ return afs_protocol_error(call, -EBADMSG);
bp += 3;
break;
case YFS_ENDPOINT_IPV6:
if (ntohl(bp[0]) != sizeof(__be32) * 5)
- return -EBADMSG;
+ return afs_protocol_error(call, -EBADMSG);
bp += 6;
break;
default:
- return -EBADMSG;
+ return afs_protocol_error(call, -EBADMSG);
}
/* Got either the type of the next entry or the count of
@@ -576,7 +576,7 @@ again:
call->offset = 0;
call->count--;
if (call->count > 0) {
- call->count2 = htonl(*bp++);
+ call->count2 = ntohl(*bp++);
goto again;
}
diff --git a/fs/afs/write.c b/fs/afs/write.c
index dbc3c0b0142d..c164698dc304 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -42,10 +42,11 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
if (!req)
return -ENOMEM;
- atomic_set(&req->usage, 1);
+ refcount_set(&req->usage, 1);
req->pos = pos;
req->len = len;
req->nr_pages = 1;
+ req->pages = req->array;
req->pages[0] = page;
get_page(page);
@@ -124,7 +125,12 @@ try_again:
page->index, priv);
goto flush_conflicting_write;
}
- if (to < f || from > t)
+ /* If the file is being filled locally, allow inter-write
+ * spaces to be merged into writes. If it's not, only write
+ * back what the user gives us.
+ */
+ if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
+ (to < f || from > t))
goto flush_conflicting_write;
if (from < f)
f = from;
@@ -355,6 +361,12 @@ found_key:
}
switch (ret) {
+ case 0:
+ afs_stat_v(vnode, n_stores);
+ atomic_long_add((last * PAGE_SIZE + to) -
+ (first * PAGE_SIZE + offset),
+ &afs_v2net(vnode)->n_store_bytes);
+ break;
case -EACCES:
case -EPERM:
case -ENOKEY:
@@ -412,7 +424,8 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
primary_page->index, priv);
- if (start >= final_page || to < PAGE_SIZE)
+ if (start >= final_page ||
+ (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
goto no_more;
start++;
@@ -433,9 +446,10 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
}
for (loop = 0; loop < n; loop++) {
- if (to != PAGE_SIZE)
- break;
page = pages[loop];
+ if (to != PAGE_SIZE &&
+ !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
+ break;
if (page->index > final_page)
break;
if (!trylock_page(page))
@@ -448,7 +462,8 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
priv = page_private(page);
f = priv & AFS_PRIV_MAX;
t = priv >> AFS_PRIV_SHIFT;
- if (f != 0) {
+ if (f != 0 &&
+ !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
unlock_page(page);
break;
}
@@ -735,20 +750,6 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
}
/*
- * Flush out all outstanding writes on a file opened for writing when it is
- * closed.
- */
-int afs_flush(struct file *file, fl_owner_t id)
-{
- _enter("");
-
- if ((file->f_mode & FMODE_WRITE) == 0)
- return 0;
-
- return vfs_fsync(file, 0);
-}
-
-/*
* notification that a previously read-only page is about to become writable
* - if it returns an error, the caller will deliver a bus error signal
*/
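
The checks added above all hinge on AFS_VNODE_NEW_CONTENT: when a file has just been created or truncated to zero, gaps between dirty ranges may be merged and written back as one region, because the intervening bytes are known to be new; when the flag is clear, only overlapping or touching ranges are combined. A small standalone sketch of that decision, not the kernel code:

	#include <stdbool.h>
	#include <stdio.h>

	static bool can_merge(unsigned int f, unsigned int t,      /* existing dirty [f, t) */
			      unsigned int from, unsigned int to,  /* new write    [from, to) */
			      bool new_content)
	{
		if (new_content)
			return true;		/* gaps will be written back anyway */
		return !(to < f || from > t);	/* otherwise must overlap or touch */
	}

	int main(void)
	{
		printf("%d\n", can_merge(0, 100, 200, 300, false));	/* 0: gap, no merge */
		printf("%d\n", can_merge(0, 100, 200, 300, true));	/* 1: new content, merge */
		printf("%d\n", can_merge(0, 100, 80, 300, false));	/* 1: overlaps, merge */
		return 0;
	}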
diff --git a/fs/afs/xdr_fs.h b/fs/afs/xdr_fs.h
new file mode 100644
index 000000000000..aa21f3068d52
--- /dev/null
+++ b/fs/afs/xdr_fs.h
@@ -0,0 +1,103 @@
+/* AFS fileserver XDR types
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef XDR_FS_H
+#define XDR_FS_H
+
+struct afs_xdr_AFSFetchStatus {
+ __be32 if_version;
+#define AFS_FSTATUS_VERSION 1
+ __be32 type;
+ __be32 nlink;
+ __be32 size_lo;
+ __be32 data_version_lo;
+ __be32 author;
+ __be32 owner;
+ __be32 caller_access;
+ __be32 anon_access;
+ __be32 mode;
+ __be32 parent_vnode;
+ __be32 parent_unique;
+ __be32 seg_size;
+ __be32 mtime_client;
+ __be32 mtime_server;
+ __be32 group;
+ __be32 sync_counter;
+ __be32 data_version_hi;
+ __be32 lock_count;
+ __be32 size_hi;
+ __be32 abort_code;
+} __packed;
+
+#define AFS_DIR_HASHTBL_SIZE 128
+#define AFS_DIR_DIRENT_SIZE 32
+#define AFS_DIR_SLOTS_PER_BLOCK 64
+#define AFS_DIR_BLOCK_SIZE 2048
+#define AFS_DIR_BLOCKS_PER_PAGE (PAGE_SIZE / AFS_DIR_BLOCK_SIZE)
+#define AFS_DIR_MAX_SLOTS 65536
+#define AFS_DIR_BLOCKS_WITH_CTR 128
+#define AFS_DIR_MAX_BLOCKS 1023
+#define AFS_DIR_RESV_BLOCKS 1
+#define AFS_DIR_RESV_BLOCKS0 13
+
+/*
+ * Directory entry structure.
+ */
+union afs_xdr_dirent {
+ struct {
+ u8 valid;
+ u8 unused[1];
+ __be16 hash_next;
+ __be32 vnode;
+ __be32 unique;
+ u8 name[16];
+ u8 overflow[4]; /* if any char of the name (inc
+ * NUL) reaches here, consume
+ * the next dirent too */
+ } u;
+ u8 extended_name[32];
+} __packed;
+
+/*
+ * Directory block header (one at the beginning of every 2048-byte block).
+ */
+struct afs_xdr_dir_hdr {
+ __be16 npages;
+ __be16 magic;
+#define AFS_DIR_MAGIC htons(1234)
+ u8 reserved;
+ u8 bitmap[8];
+ u8 pad[19];
+} __packed;
+
+/*
+ * Directory block layout
+ */
+union afs_xdr_dir_block {
+ struct afs_xdr_dir_hdr hdr;
+
+ struct {
+ struct afs_xdr_dir_hdr hdr;
+ u8 alloc_ctrs[AFS_DIR_MAX_BLOCKS];
+ __be16 hashtable[AFS_DIR_HASHTBL_SIZE];
+ } meta;
+
+ union afs_xdr_dirent dirents[AFS_DIR_SLOTS_PER_BLOCK];
+} __packed;
+
+/*
+ * Directory layout on a Linux VM page.
+ */
+struct afs_xdr_dir_page {
+ union afs_xdr_dir_block blocks[AFS_DIR_BLOCKS_PER_PAGE];
+};
+
+#endif /* XDR_FS_H */
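
The constants above pin down the directory format: 2048-byte blocks of 64 32-byte slots, with two blocks per 4KiB page. The worked example below shows the resulting arithmetic; the per-entry slot formula is an assumption derived from the dirent layout (16 name bytes including the NUL in the first slot, with longer names spilling into further slots), not kernel code.

	#include <stdio.h>

	#define DEMO_DIRENT_SIZE	32
	#define DEMO_BLOCK_SIZE		2048
	#define DEMO_PAGE_SIZE		4096

	/* Assumed slot count for an entry: 16 name bytes (including the NUL)
	 * fit in the first slot, and every further 32 bytes of name consume
	 * one more slot. */
	static unsigned int demo_slots_for_name(unsigned int name_len)
	{
		unsigned int bytes = name_len + 1;	/* include the NUL */

		return 1 + (bytes + 15) / DEMO_DIRENT_SIZE;
	}

	int main(void)
	{
		printf("slots per block : %u\n", DEMO_BLOCK_SIZE / DEMO_DIRENT_SIZE);	/* 64 */
		printf("blocks per page : %u\n", DEMO_PAGE_SIZE / DEMO_BLOCK_SIZE);	/* 2 */
		printf("6-char name     : %u slot(s)\n", demo_slots_for_name(6));	/* 1 */
		printf("20-char name    : %u slot(s)\n", demo_slots_for_name(20));	/* 2 */
		return 0;
	}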
diff --git a/fs/buffer.c b/fs/buffer.c
index f3491074b035..249b83fafe48 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -494,35 +494,12 @@ repeat:
return err;
}
-static void do_thaw_one(struct super_block *sb, void *unused)
+void emergency_thaw_bdev(struct super_block *sb)
{
while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
}
-static void do_thaw_all(struct work_struct *work)
-{
- iterate_supers(do_thaw_one, NULL);
- kfree(work);
- printk(KERN_WARNING "Emergency Thaw complete\n");
-}
-
-/**
- * emergency_thaw_all -- forcibly thaw every frozen filesystem
- *
- * Used for emergency unfreeze of all filesystems via SysRq
- */
-void emergency_thaw_all(void)
-{
- struct work_struct *work;
-
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (work) {
- INIT_WORK(work, do_thaw_all);
- schedule_work(work);
- }
-}
-
/**
* sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
* @mapping: the mapping which wants those buffers written
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 329a5d103846..645158dc33f1 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -435,6 +435,15 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
if (IS_ERR_OR_NULL(result))
return ERR_PTR(-ESTALE);
+ /*
+ * If no acceptance criteria were specified by the caller, a disconnected
+ * dentry is also acceptable. Callers may use this mode to query whether
+ * a file handle is stale or to get a reference to an inode without
+ * risking the high overhead caused by a directory reconnect.
+ */
+ if (!acceptable)
+ return result;
+
if (d_is_dir(result)) {
/*
* This request is for a directory.
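
The early return added above changes the calling convention of exportfs_decode_fh(): a NULL acceptance callback now means any result, including a disconnected dentry, is good enough, which lets callers probe a file handle cheaply instead of paying for directory reconnection. A userspace analogue of that convention, with made-up types:

	#include <stdbool.h>
	#include <stdio.h>

	struct demo_dentry { const char *name; bool connected; };

	typedef bool (*acceptable_t)(void *context, struct demo_dentry *d);

	static struct demo_dentry *demo_decode(struct demo_dentry *result,
					       acceptable_t acceptable, void *context)
	{
		if (!acceptable)		/* no criteria: disconnected is fine */
			return result;
		if (acceptable(context, result))
			return result;
		/* ...a real implementation would search for an acceptable,
		 * connected alias here... */
		return NULL;
	}

	static bool want_connected(void *context, struct demo_dentry *d)
	{
		(void)context;
		return d->connected;
	}

	int main(void)
	{
		struct demo_dentry d = { "inode#42", false };

		printf("no criteria : %s\n", demo_decode(&d, NULL, NULL) ? "accepted" : "rejected");
		printf("connected?  : %s\n", demo_decode(&d, want_connected, NULL) ? "accepted" : "rejected");
		return 0;
	}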
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 685c305cbeb6..278ed0869c3c 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1744,7 +1744,7 @@ do_grow_qunlock:
* @newsize: the size to make the file
*
* The file size can grow, shrink, or stay the same size. This
- * is called holding i_mutex and an exclusive glock on the inode
+ * is called holding i_rwsem and an exclusive glock on the inode
* in question.
*
* Returns: errno
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 82fb5583445c..097bd3c0f270 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1923,28 +1923,37 @@ void gfs2_glock_exit(void)
static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
{
- if (n == 0)
- gi->gl = rhashtable_walk_peek(&gi->hti);
- else {
- gi->gl = rhashtable_walk_next(&gi->hti);
- n--;
+ struct gfs2_glock *gl = gi->gl;
+
+ if (gl) {
+ if (n == 0)
+ return;
+ if (!lockref_put_not_zero(&gl->gl_lockref))
+ gfs2_glock_queue_put(gl);
}
for (;;) {
- if (IS_ERR_OR_NULL(gi->gl)) {
- if (!gi->gl)
- return;
- if (PTR_ERR(gi->gl) != -EAGAIN) {
- gi->gl = NULL;
- return;
+ gl = rhashtable_walk_next(&gi->hti);
+ if (IS_ERR_OR_NULL(gl)) {
+ if (gl == ERR_PTR(-EAGAIN)) {
+ n = 1;
+ continue;
}
- n = 0;
- } else if (gi->sdp == gi->gl->gl_name.ln_sbd &&
- !__lockref_is_dead(&gi->gl->gl_lockref)) {
- if (!n--)
- break;
+ gl = NULL;
+ break;
+ }
+ if (gl->gl_name.ln_sbd != gi->sdp)
+ continue;
+ if (n <= 1) {
+ if (!lockref_get_not_dead(&gl->gl_lockref))
+ continue;
+ break;
+ } else {
+ if (__lockref_is_dead(&gl->gl_lockref))
+ continue;
+ n--;
}
- gi->gl = rhashtable_walk_next(&gi->hti);
}
+ gi->gl = gl;
}
static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
@@ -1988,7 +1997,6 @@ static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
struct gfs2_glock_iter *gi = seq->private;
- gi->gl = NULL;
rhashtable_walk_stop(&gi->hti);
}
@@ -2076,7 +2084,8 @@ static int gfs2_glocks_release(struct inode *inode, struct file *file)
struct seq_file *seq = file->private_data;
struct gfs2_glock_iter *gi = seq->private;
- gi->gl = NULL;
+ if (gi->gl)
+ gfs2_glock_put(gi->gl);
rhashtable_walk_exit(&gi->hti);
return seq_release_private(inode, file);
}
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index e6a0a8a89ea7..3ba3f167641c 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -825,7 +825,7 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
goto fail_rindex;
}
/*
- * i_mutex on quota files is special. Since this inode is hidden system
+ * i_rwsem on quota files is special. Since this inode is hidden system
* file, we are safe to define locking ourselves.
*/
lockdep_set_class(&sdp->sd_quota_inode->i_rwsem,
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 123c069429a7..a813979b5be0 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -535,35 +535,10 @@ static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char
return 0;
}
-#define CB_SUPPORTED_ATTR0 (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE)
-#define CB_SUPPORTED_ATTR1 (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY)
-static __be32 encode_attr_bitmap(struct xdr_stream *xdr, const uint32_t *bitmap, __be32 **savep)
+static __be32 encode_attr_bitmap(struct xdr_stream *xdr, const uint32_t *bitmap, size_t sz)
{
- __be32 bm[2];
- __be32 *p;
-
- bm[0] = htonl(bitmap[0] & CB_SUPPORTED_ATTR0);
- bm[1] = htonl(bitmap[1] & CB_SUPPORTED_ATTR1);
- if (bm[1] != 0) {
- p = xdr_reserve_space(xdr, 16);
- if (unlikely(p == NULL))
- return htonl(NFS4ERR_RESOURCE);
- *p++ = htonl(2);
- *p++ = bm[0];
- *p++ = bm[1];
- } else if (bm[0] != 0) {
- p = xdr_reserve_space(xdr, 12);
- if (unlikely(p == NULL))
- return htonl(NFS4ERR_RESOURCE);
- *p++ = htonl(1);
- *p++ = bm[0];
- } else {
- p = xdr_reserve_space(xdr, 8);
- if (unlikely(p == NULL))
- return htonl(NFS4ERR_RESOURCE);
- *p++ = htonl(0);
- }
- *savep = p;
+ if (xdr_stream_encode_uint32_array(xdr, bitmap, sz) < 0)
+ return cpu_to_be32(NFS4ERR_RESOURCE);
return 0;
}
@@ -656,9 +631,13 @@ static __be32 encode_getattr_res(struct svc_rqst *rqstp, struct xdr_stream *xdr,
if (unlikely(status != 0))
goto out;
- status = encode_attr_bitmap(xdr, res->bitmap, &savep);
+ status = encode_attr_bitmap(xdr, res->bitmap, ARRAY_SIZE(res->bitmap));
if (unlikely(status != 0))
goto out;
+ status = cpu_to_be32(NFS4ERR_RESOURCE);
+ savep = xdr_reserve_space(xdr, sizeof(*savep));
+ if (unlikely(!savep))
+ goto out;
status = encode_attr_change(xdr, res->bitmap, res->change_attr);
if (unlikely(status != 0))
goto out;
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index d8b47624fee2..1819d0d0ba4b 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -19,6 +19,7 @@
#include <linux/nfs_xdr.h>
#include "nfs4_fs.h"
+#include "nfs4session.h"
#include "delegation.h"
#include "internal.h"
#include "nfs4trace.h"
@@ -171,11 +172,15 @@ again:
* nfs_inode_reclaim_delegation - process a delegation reclaim request
* @inode: inode to process
* @cred: credential to use for request
- * @res: new delegation state from server
+ * @type: delegation type
+ * @stateid: delegation stateid
+ * @pagemod_limit: write delegation "space_limit"
*
*/
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
- struct nfs_openres *res)
+ fmode_t type,
+ const nfs4_stateid *stateid,
+ unsigned long pagemod_limit)
{
struct nfs_delegation *delegation;
struct rpc_cred *oldcred = NULL;
@@ -185,9 +190,9 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
if (delegation != NULL) {
spin_lock(&delegation->lock);
if (delegation->inode != NULL) {
- nfs4_stateid_copy(&delegation->stateid, &res->delegation);
- delegation->type = res->delegation_type;
- delegation->pagemod_limit = res->pagemod_limit;
+ nfs4_stateid_copy(&delegation->stateid, stateid);
+ delegation->type = type;
+ delegation->pagemod_limit = pagemod_limit;
oldcred = delegation->cred;
delegation->cred = get_rpccred(cred);
clear_bit(NFS_DELEGATION_NEED_RECLAIM,
@@ -195,14 +200,14 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
spin_unlock(&delegation->lock);
rcu_read_unlock();
put_rpccred(oldcred);
- trace_nfs4_reclaim_delegation(inode, res->delegation_type);
+ trace_nfs4_reclaim_delegation(inode, type);
return;
}
/* We appear to have raced with a delegation return. */
spin_unlock(&delegation->lock);
}
rcu_read_unlock();
- nfs_inode_set_delegation(inode, cred, res);
+ nfs_inode_set_delegation(inode, cred, type, stateid, pagemod_limit);
}
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
@@ -329,11 +334,16 @@ nfs_update_inplace_delegation(struct nfs_delegation *delegation,
* nfs_inode_set_delegation - set up a delegation on an inode
* @inode: inode to which delegation applies
* @cred: cred to use for subsequent delegation processing
- * @res: new delegation state from server
+ * @type: delegation type
+ * @stateid: delegation stateid
+ * @pagemod_limit: write delegation "space_limit"
*
* Returns zero on success, or a negative errno value.
*/
-int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
+int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred,
+ fmode_t type,
+ const nfs4_stateid *stateid,
+ unsigned long pagemod_limit)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_client *clp = server->nfs_client;
@@ -345,9 +355,9 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
delegation = kmalloc(sizeof(*delegation), GFP_NOFS);
if (delegation == NULL)
return -ENOMEM;
- nfs4_stateid_copy(&delegation->stateid, &res->delegation);
- delegation->type = res->delegation_type;
- delegation->pagemod_limit = res->pagemod_limit;
+ nfs4_stateid_copy(&delegation->stateid, stateid);
+ delegation->type = type;
+ delegation->pagemod_limit = pagemod_limit;
delegation->change_attr = inode_peek_iversion_raw(inode);
delegation->cred = get_rpccred(cred);
delegation->inode = inode;
@@ -392,7 +402,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
rcu_assign_pointer(nfsi->delegation, delegation);
delegation = NULL;
- trace_nfs4_set_delegation(inode, res->delegation_type);
+ trace_nfs4_set_delegation(inode, type);
out:
spin_unlock(&clp->cl_lock);
@@ -547,6 +557,22 @@ int nfs4_inode_return_delegation(struct inode *inode)
return err;
}
+/**
+ * nfs4_inode_make_writeable
+ * @inode: pointer to inode
+ *
+ * Make the inode writeable by returning the delegation if necessary
+ *
+ * Returns zero on success, or a negative errno value.
+ */
+int nfs4_inode_make_writeable(struct inode *inode)
+{
+ if (!nfs4_has_session(NFS_SERVER(inode)->nfs_client) ||
+ !nfs4_check_delegation(inode, FMODE_WRITE))
+ return nfs4_inode_return_delegation(inode);
+ return 0;
+}
+
static void nfs_mark_return_if_closed_delegation(struct nfs_server *server,
struct nfs_delegation *delegation)
{
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 185a09f37a89..bb1ef8c37af4 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -36,8 +36,10 @@ enum {
NFS_DELEGATION_TEST_EXPIRED,
};
-int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
-void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
+int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred,
+ fmode_t type, const nfs4_stateid *stateid, unsigned long pagemod_limit);
+void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
+ fmode_t type, const nfs4_stateid *stateid, unsigned long pagemod_limit);
int nfs4_inode_return_delegation(struct inode *inode);
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
void nfs_inode_return_delegation_noreclaim(struct inode *inode);
@@ -70,6 +72,7 @@ int nfs4_check_delegation(struct inode *inode, fmode_t flags);
bool nfs4_delegation_flush_on_close(const struct inode *inode);
void nfs_inode_find_delegation_state_and_recover(struct inode *inode,
const nfs4_stateid *stateid);
+int nfs4_inode_make_writeable(struct inode *inode);
#endif
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 2f3f86726f5b..73f8b43d988c 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1272,7 +1272,9 @@ static void nfs_drop_nlink(struct inode *inode)
/* drop the inode if we're reasonably sure this is the last link */
if (inode->i_nlink == 1)
clear_nlink(inode);
- NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
+ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_CHANGE
+ | NFS_INO_INVALID_CTIME
+ | NFS_INO_INVALID_OTHER;
spin_unlock(&inode->i_lock);
}
@@ -1798,12 +1800,11 @@ static int nfs_safe_remove(struct dentry *dentry)
trace_nfs_remove_enter(dir, dentry);
if (inode != NULL) {
- NFS_PROTO(inode)->return_delegation(inode);
- error = NFS_PROTO(dir)->remove(dir, &dentry->d_name);
+ error = NFS_PROTO(dir)->remove(dir, dentry);
if (error == 0)
nfs_drop_nlink(inode);
} else
- error = NFS_PROTO(dir)->remove(dir, &dentry->d_name);
+ error = NFS_PROTO(dir)->remove(dir, dentry);
if (error == -ENOENT)
nfs_dentry_handle_enoent(dentry);
trace_nfs_remove_exit(dir, dentry, error);
@@ -1932,8 +1933,6 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
old_dentry, dentry);
trace_nfs_link_enter(inode, dir, dentry);
- NFS_PROTO(inode)->return_delegation(inode);
-
d_drop(dentry);
error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
if (error == 0) {
@@ -2023,10 +2022,6 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
}
- NFS_PROTO(old_inode)->return_delegation(old_inode);
- if (new_inode != NULL)
- NFS_PROTO(new_inode)->return_delegation(new_inode);
-
task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
if (IS_ERR(task)) {
error = PTR_ERR(task);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index d17a90c4fa37..bd15d0b57626 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -195,7 +195,10 @@ bool nfs_check_cache_invalid(struct inode *inode, unsigned long flags)
static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
{
struct nfs_inode *nfsi = NFS_I(inode);
+ bool have_delegation = nfs_have_delegated_attributes(inode);
+ if (have_delegation)
+ flags &= ~(NFS_INO_INVALID_CHANGE|NFS_INO_REVAL_PAGECACHE);
if (inode->i_mapping->nrpages == 0)
flags &= ~NFS_INO_INVALID_DATA;
nfsi->cache_validity |= flags;
@@ -447,7 +450,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
inode->i_mode = fattr->mode;
if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0
&& nfs_server_capable(inode, NFS_CAP_MODE))
- nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER);
/* Why so? Because we want revalidate for devices/FIFOs, and
* that's precisely what we have in nfs_file_inode_operations.
*/
@@ -493,37 +496,35 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
if (fattr->valid & NFS_ATTR_FATTR_ATIME)
inode->i_atime = fattr->atime;
else if (nfs_server_capable(inode, NFS_CAP_ATIME))
- nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME);
if (fattr->valid & NFS_ATTR_FATTR_MTIME)
inode->i_mtime = fattr->mtime;
else if (nfs_server_capable(inode, NFS_CAP_MTIME))
- nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_MTIME);
if (fattr->valid & NFS_ATTR_FATTR_CTIME)
inode->i_ctime = fattr->ctime;
else if (nfs_server_capable(inode, NFS_CAP_CTIME))
- nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_CTIME);
if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
inode_set_iversion_raw(inode, fattr->change_attr);
else
- nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
- | NFS_INO_REVAL_PAGECACHE);
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE);
if (fattr->valid & NFS_ATTR_FATTR_SIZE)
inode->i_size = nfs_size_to_loff_t(fattr->size);
else
- nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
- | NFS_INO_REVAL_PAGECACHE);
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_SIZE);
if (fattr->valid & NFS_ATTR_FATTR_NLINK)
set_nlink(inode, fattr->nlink);
else if (nfs_server_capable(inode, NFS_CAP_NLINK))
- nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER);
if (fattr->valid & NFS_ATTR_FATTR_OWNER)
inode->i_uid = fattr->uid;
else if (nfs_server_capable(inode, NFS_CAP_OWNER))
- nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER);
if (fattr->valid & NFS_ATTR_FATTR_GROUP)
inode->i_gid = fattr->gid;
else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP))
- nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER);
if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
inode->i_blocks = fattr->du.nfs2.blocks;
if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
@@ -608,11 +609,6 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr)
goto out;
}
- /*
- * Return any delegations if we're going to change ACLs
- */
- if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
- NFS_PROTO(inode)->return_delegation(inode);
error = NFS_PROTO(inode)->setattr(dentry, fattr, attr);
if (error == 0)
error = nfs_refresh_inode(inode, fattr);
@@ -645,6 +641,7 @@ static int nfs_vmtruncate(struct inode * inode, loff_t offset)
/* Optimisation */
if (offset == 0)
NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_DATA;
+ NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
spin_unlock(&inode->i_lock);
truncate_pagecache(inode, offset);
@@ -657,6 +654,7 @@ out:
* nfs_setattr_update_inode - Update inode metadata after a setattr call.
* @inode: pointer to struct inode
* @attr: pointer to struct iattr
+ * @fattr: pointer to struct nfs_fattr
*
* Note: we do this in the *proc.c in order to ensure that
* it works for things like exclusive creates too.
@@ -669,6 +667,8 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
spin_lock(&inode->i_lock);
NFS_I(inode)->attr_gencount = fattr->gencount;
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE
+ | NFS_INO_INVALID_CTIME);
if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) {
if ((attr->ia_valid & ATTR_MODE) != 0) {
int mode = attr->ia_mode & S_IALLUGO;
@@ -683,13 +683,12 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
| NFS_INO_INVALID_ACL);
}
if ((attr->ia_valid & ATTR_SIZE) != 0) {
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_MTIME);
nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC);
nfs_vmtruncate(inode, attr->ia_size);
}
if (fattr->valid)
nfs_update_inode(inode, fattr);
- else
- NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_setattr_update_inode);
@@ -1303,24 +1302,20 @@ static bool nfs_file_has_buffered_writers(struct nfs_inode *nfsi)
return nfs_file_has_writers(nfsi) && nfs_file_io_is_buffered(nfsi);
}
-static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
{
- unsigned long ret = 0;
-
if ((fattr->valid & NFS_ATTR_FATTR_PRECHANGE)
&& (fattr->valid & NFS_ATTR_FATTR_CHANGE)
&& inode_eq_iversion_raw(inode, fattr->pre_change_attr)) {
inode_set_iversion_raw(inode, fattr->change_attr);
if (S_ISDIR(inode->i_mode))
nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
- ret |= NFS_INO_INVALID_ATTR;
}
/* If we have atomic WCC data, we may update some attributes */
if ((fattr->valid & NFS_ATTR_FATTR_PRECTIME)
&& (fattr->valid & NFS_ATTR_FATTR_CTIME)
&& timespec_equal(&inode->i_ctime, &fattr->pre_ctime)) {
memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
- ret |= NFS_INO_INVALID_ATTR;
}
if ((fattr->valid & NFS_ATTR_FATTR_PREMTIME)
@@ -1329,17 +1324,13 @@ static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr
memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
if (S_ISDIR(inode->i_mode))
nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
- ret |= NFS_INO_INVALID_ATTR;
}
if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE)
&& (fattr->valid & NFS_ATTR_FATTR_SIZE)
&& i_size_read(inode) == nfs_size_to_loff_t(fattr->pre_size)
&& !nfs_have_writebacks(inode)) {
i_size_write(inode, nfs_size_to_loff_t(fattr->size));
- ret |= NFS_INO_INVALID_ATTR;
}
-
- return ret;
}
/**
@@ -1369,33 +1360,41 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
if (!nfs_file_has_buffered_writers(nfsi)) {
/* Verify a few of the more important attributes */
if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && !inode_eq_iversion_raw(inode, fattr->change_attr))
- invalid |= NFS_INO_INVALID_ATTR | NFS_INO_REVAL_PAGECACHE;
+ invalid |= NFS_INO_INVALID_CHANGE
+ | NFS_INO_REVAL_PAGECACHE;
if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec_equal(&inode->i_mtime, &fattr->mtime))
- invalid |= NFS_INO_INVALID_ATTR;
+ invalid |= NFS_INO_INVALID_MTIME;
if ((fattr->valid & NFS_ATTR_FATTR_CTIME) && !timespec_equal(&inode->i_ctime, &fattr->ctime))
- invalid |= NFS_INO_INVALID_ATTR;
+ invalid |= NFS_INO_INVALID_CTIME;
if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
cur_size = i_size_read(inode);
new_isize = nfs_size_to_loff_t(fattr->size);
if (cur_size != new_isize)
- invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+ invalid |= NFS_INO_INVALID_SIZE
+ | NFS_INO_REVAL_PAGECACHE;
}
}
/* Have any file permissions changed? */
if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
- invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
+ invalid |= NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_INVALID_OTHER;
if ((fattr->valid & NFS_ATTR_FATTR_OWNER) && !uid_eq(inode->i_uid, fattr->uid))
- invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
+ invalid |= NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_INVALID_OTHER;
if ((fattr->valid & NFS_ATTR_FATTR_GROUP) && !gid_eq(inode->i_gid, fattr->gid))
- invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
+ invalid |= NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_INVALID_OTHER;
/* Has the link count changed? */
if ((fattr->valid & NFS_ATTR_FATTR_NLINK) && inode->i_nlink != fattr->nlink)
- invalid |= NFS_INO_INVALID_ATTR;
+ invalid |= NFS_INO_INVALID_OTHER;
if ((fattr->valid & NFS_ATTR_FATTR_ATIME) && !timespec_equal(&inode->i_atime, &fattr->atime))
invalid |= NFS_INO_INVALID_ATIME;
@@ -1597,10 +1596,9 @@ int nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
}
EXPORT_SYMBOL_GPL(nfs_refresh_inode);
-static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr *fattr)
+static int nfs_post_op_update_inode_locked(struct inode *inode,
+ struct nfs_fattr *fattr, unsigned int invalid)
{
- unsigned long invalid = NFS_INO_INVALID_ATTR;
-
if (S_ISDIR(inode->i_mode))
invalid |= NFS_INO_INVALID_DATA;
nfs_set_cache_invalid(inode, invalid);
@@ -1629,7 +1627,9 @@ int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr)
spin_lock(&inode->i_lock);
nfs_fattr_set_barrier(fattr);
- status = nfs_post_op_update_inode_locked(inode, fattr);
+ status = nfs_post_op_update_inode_locked(inode, fattr,
+ NFS_INO_INVALID_CHANGE
+ | NFS_INO_INVALID_CTIME);
spin_unlock(&inode->i_lock);
return status;
@@ -1681,7 +1681,10 @@ int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fa
fattr->valid |= NFS_ATTR_FATTR_PRESIZE;
}
out_noforce:
- status = nfs_post_op_update_inode_locked(inode, fattr);
+ status = nfs_post_op_update_inode_locked(inode, fattr,
+ NFS_INO_INVALID_CHANGE
+ | NFS_INO_INVALID_CTIME
+ | NFS_INO_INVALID_MTIME);
return status;
}
@@ -1789,7 +1792,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
| NFS_INO_REVAL_PAGECACHE);
/* Do atomic weak cache consistency updates */
- invalid |= nfs_wcc_update_inode(inode, fattr);
+ nfs_wcc_update_inode(inode, fattr);
if (pnfs_layoutcommit_outstanding(inode)) {
nfsi->cache_validity |= save_cache_validity & NFS_INO_INVALID_ATTR;
@@ -1803,17 +1806,25 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
inode->i_sb->s_id, inode->i_ino);
/* Could it be a race with writeback? */
if (!have_writers) {
- invalid |= NFS_INO_INVALID_ATTR
+ invalid |= NFS_INO_INVALID_CHANGE
| NFS_INO_INVALID_DATA
| NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL;
+ /* Force revalidate of all attributes */
+ save_cache_validity |= NFS_INO_INVALID_CTIME
+ | NFS_INO_INVALID_MTIME
+ | NFS_INO_INVALID_SIZE
+ | NFS_INO_INVALID_OTHER;
if (S_ISDIR(inode->i_mode))
nfs_force_lookup_revalidate(inode);
}
inode_set_iversion_raw(inode, fattr->change_attr);
}
} else {
- nfsi->cache_validity |= save_cache_validity;
+ nfsi->cache_validity |= save_cache_validity &
+ (NFS_INO_INVALID_CHANGE
+ | NFS_INO_REVAL_PAGECACHE
+ | NFS_INO_REVAL_FORCED);
cache_revalidated = false;
}
@@ -1821,7 +1832,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
} else if (server->caps & NFS_CAP_MTIME) {
nfsi->cache_validity |= save_cache_validity &
- (NFS_INO_INVALID_ATTR
+ (NFS_INO_INVALID_MTIME
| NFS_INO_REVAL_FORCED);
cache_revalidated = false;
}
@@ -1830,7 +1841,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
} else if (server->caps & NFS_CAP_CTIME) {
nfsi->cache_validity |= save_cache_validity &
- (NFS_INO_INVALID_ATTR
+ (NFS_INO_INVALID_CTIME
| NFS_INO_REVAL_FORCED);
cache_revalidated = false;
}
@@ -1845,7 +1856,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
if (!nfs_have_writebacks(inode) || new_isize > cur_isize) {
i_size_write(inode, new_isize);
if (!have_writers)
- invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
+ invalid |= NFS_INO_INVALID_DATA;
}
dprintk("NFS: isize change on server for file %s/%ld "
"(%Ld to %Ld)\n",
@@ -1856,7 +1867,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
}
} else {
nfsi->cache_validity |= save_cache_validity &
- (NFS_INO_INVALID_ATTR
+ (NFS_INO_INVALID_SIZE
| NFS_INO_REVAL_PAGECACHE
| NFS_INO_REVAL_FORCED);
cache_revalidated = false;
@@ -1877,55 +1888,61 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
umode_t newmode = inode->i_mode & S_IFMT;
newmode |= fattr->mode & S_IALLUGO;
inode->i_mode = newmode;
- invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ invalid |= NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_INVALID_OTHER;
}
} else if (server->caps & NFS_CAP_MODE) {
nfsi->cache_validity |= save_cache_validity &
- (NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_ACCESS
+ (NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL
+ | NFS_INO_INVALID_OTHER
| NFS_INO_REVAL_FORCED);
cache_revalidated = false;
}
if (fattr->valid & NFS_ATTR_FATTR_OWNER) {
if (!uid_eq(inode->i_uid, fattr->uid)) {
- invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ invalid |= NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_INVALID_OTHER;
inode->i_uid = fattr->uid;
}
} else if (server->caps & NFS_CAP_OWNER) {
nfsi->cache_validity |= save_cache_validity &
- (NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_ACCESS
+ (NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL
+ | NFS_INO_INVALID_OTHER
| NFS_INO_REVAL_FORCED);
cache_revalidated = false;
}
if (fattr->valid & NFS_ATTR_FATTR_GROUP) {
if (!gid_eq(inode->i_gid, fattr->gid)) {
- invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ invalid |= NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_INVALID_OTHER;
inode->i_gid = fattr->gid;
}
} else if (server->caps & NFS_CAP_OWNER_GROUP) {
nfsi->cache_validity |= save_cache_validity &
- (NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_ACCESS
+ (NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL
+ | NFS_INO_INVALID_OTHER
| NFS_INO_REVAL_FORCED);
cache_revalidated = false;
}
if (fattr->valid & NFS_ATTR_FATTR_NLINK) {
if (inode->i_nlink != fattr->nlink) {
- invalid |= NFS_INO_INVALID_ATTR;
+ invalid |= NFS_INO_INVALID_OTHER;
if (S_ISDIR(inode->i_mode))
invalid |= NFS_INO_INVALID_DATA;
set_nlink(inode, fattr->nlink);
}
} else if (server->caps & NFS_CAP_NLINK) {
nfsi->cache_validity |= save_cache_validity &
- (NFS_INO_INVALID_ATTR
+ (NFS_INO_INVALID_OTHER
| NFS_INO_REVAL_FORCED);
cache_revalidated = false;
}
@@ -1942,6 +1959,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
/* Update attrtimeo value if we're out of the unstable period */
if (invalid & NFS_INO_INVALID_ATTR) {
+ invalid &= ~NFS_INO_INVALID_ATTR;
nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);
nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
nfsi->attrtimeo_timestamp = now;
@@ -1962,10 +1980,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfsi->attr_gencount = fattr->gencount;
}
- /* Don't declare attrcache up to date if there were no attrs! */
- if (cache_revalidated)
- invalid &= ~NFS_INO_INVALID_ATTR;
-
/* Don't invalidate the data if we were to blame */
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
|| S_ISLNK(inode->i_mode)))
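The fs/nfs/inode.c changes above replace the catch-all NFS_INO_INVALID_ATTR with per-attribute validity flags (CHANGE, CTIME, MTIME, ATIME, SIZE, OTHER). A minimal sketch of how a caller can now invalidate only what it knows is stale, using the locking pattern seen in nfs_setattr_update_inode(); nfs_mark_times_stale() is a hypothetical helper named here only for illustration.

/*
 * Illustration only (not part of the patch): mark just the timestamps
 * stale, leaving size/change/ownership caches intact.
 */
static void nfs_mark_times_stale(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CTIME
					| NFS_INO_INVALID_MTIME
					| NFS_INO_INVALID_ATIME);
	spin_unlock(&inode->i_lock);
}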
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 7327930ad970..eadf1ab31d16 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -138,8 +138,11 @@ nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
msg.rpc_cred = nfs_file_cred(sattr->ia_file);
nfs_fattr_init(fattr);
status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
- if (status == 0)
+ if (status == 0) {
+ if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
+ nfs_zap_acl_cache(inode);
nfs_setattr_update_inode(inode, sattr, fattr);
+ }
dprintk("NFS reply setattr: %d\n", status);
return status;
}
@@ -383,11 +386,11 @@ out:
}
static int
-nfs3_proc_remove(struct inode *dir, const struct qstr *name)
+nfs3_proc_remove(struct inode *dir, struct dentry *dentry)
{
struct nfs_removeargs arg = {
.fh = NFS_FH(dir),
- .name = *name,
+ .name = dentry->d_name,
};
struct nfs_removeres res;
struct rpc_message msg = {
@@ -397,7 +400,7 @@ nfs3_proc_remove(struct inode *dir, const struct qstr *name)
};
int status = -ENOMEM;
- dprintk("NFS call remove %s\n", name->name);
+ dprintk("NFS call remove %pd2\n", dentry);
res.dir_attr = nfs_alloc_fattr();
if (res.dir_attr == NULL)
goto out;
@@ -411,7 +414,7 @@ out:
}
static void
-nfs3_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
+nfs3_proc_unlink_setup(struct rpc_message *msg, struct dentry *dentry)
{
msg->rpc_proc = &nfs3_procedures[NFS3PROC_REMOVE];
}
@@ -433,7 +436,9 @@ nfs3_proc_unlink_done(struct rpc_task *task, struct inode *dir)
}
static void
-nfs3_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
+nfs3_proc_rename_setup(struct rpc_message *msg,
+ struct dentry *old_dentry,
+ struct dentry *new_dentry)
{
msg->rpc_proc = &nfs3_procedures[NFS3PROC_RENAME];
}
@@ -908,12 +913,6 @@ static int nfs3_have_delegation(struct inode *inode, fmode_t flags)
return 0;
}
-static int nfs3_return_delegation(struct inode *inode)
-{
- nfs_wb_all(inode);
- return 0;
-}
-
static const struct inode_operations nfs3_dir_inode_operations = {
.create = nfs_create,
.lookup = nfs_lookup,
@@ -990,7 +989,6 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
.clear_acl_cache = forget_all_cached_acls,
.close_context = nfs_close_context,
.have_delegation = nfs3_have_delegation,
- .return_delegation = nfs3_return_delegation,
.alloc_client = nfs_alloc_client,
.init_client = nfs_init_client,
.free_client = nfs_free_client,
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 6cd33bd5da87..09ee36dd8426 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -1997,6 +1997,7 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
struct nfs_entry old = *entry;
__be32 *p;
int error;
+ u64 new_cookie;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
@@ -2019,8 +2020,7 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
if (unlikely(error))
return error;
- entry->prev_cookie = entry->cookie;
- error = decode_cookie3(xdr, &entry->cookie);
+ error = decode_cookie3(xdr, &new_cookie);
if (unlikely(error))
return error;
@@ -2054,6 +2054,9 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
zero_nfs_fh3(entry->fh);
}
+ entry->prev_cookie = entry->cookie;
+ entry->cookie = new_cookie;
+
return 0;
out_overflow:
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 47f3c273245e..b71757e85066 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1045,7 +1045,9 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
struct nfs_inode *nfsi = NFS_I(dir);
spin_lock(&dir->i_lock);
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
+ nfsi->cache_validity |= NFS_INO_INVALID_CTIME
+ | NFS_INO_INVALID_MTIME
+ | NFS_INO_INVALID_DATA;
if (cinfo->atomic && cinfo->before == inode_peek_iversion_raw(dir)) {
nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
nfsi->attrtimeo_timestamp = jiffies;
@@ -1669,6 +1671,7 @@ static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmo
{
struct nfs_delegation *delegation;
+ fmode &= FMODE_READ|FMODE_WRITE;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
if (delegation == NULL || (delegation->type & fmode) == fmode) {
@@ -1751,12 +1754,16 @@ nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
}
if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
nfs_inode_set_delegation(state->inode,
- data->owner->so_cred,
- &data->o_res);
+ data->owner->so_cred,
+ data->o_res.delegation_type,
+ &data->o_res.delegation,
+ data->o_res.pagemod_limit);
else
nfs_inode_reclaim_delegation(state->inode,
- data->owner->so_cred,
- &data->o_res);
+ data->owner->so_cred,
+ data->o_res.delegation_type,
+ &data->o_res.delegation,
+ data->o_res.pagemod_limit);
}
/*
@@ -2743,27 +2750,40 @@ static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
* fields corresponding to attributes that were used to store the verifier.
* Make sure we clobber those fields in the later setattr call
*/
-static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
+static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
struct iattr *sattr, struct nfs4_label **label)
{
- const u32 *attrset = opendata->o_res.attrset;
+ const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask;
+ __u32 attrset[3];
+ unsigned ret;
+ unsigned i;
- if ((attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
- !(sattr->ia_valid & ATTR_ATIME_SET))
- sattr->ia_valid |= ATTR_ATIME;
+ for (i = 0; i < ARRAY_SIZE(attrset); i++) {
+ attrset[i] = opendata->o_res.attrset[i];
+ if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1)
+ attrset[i] &= ~bitmask[i];
+ }
+
+ ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ?
+ sattr->ia_valid : 0;
- if ((attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
- !(sattr->ia_valid & ATTR_MTIME_SET))
- sattr->ia_valid |= ATTR_MTIME;
+ if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) {
+ if (sattr->ia_valid & ATTR_ATIME_SET)
+ ret |= ATTR_ATIME_SET;
+ else
+ ret |= ATTR_ATIME;
+ }
- /* Except MODE, it seems harmless of setting twice. */
- if (opendata->o_arg.createmode != NFS4_CREATE_EXCLUSIVE &&
- (attrset[1] & FATTR4_WORD1_MODE ||
- attrset[2] & FATTR4_WORD2_MODE_UMASK))
- sattr->ia_valid &= ~ATTR_MODE;
+ if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) {
+ if (sattr->ia_valid & ATTR_MTIME_SET)
+ ret |= ATTR_MTIME_SET;
+ else
+ ret |= ATTR_MTIME;
+ }
- if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
+ if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL))
*label = NULL;
+ return ret;
}
static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
@@ -2892,12 +2912,15 @@ static int _nfs4_do_open(struct inode *dir,
if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
(opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
- nfs4_exclusive_attrset(opendata, sattr, &label);
+ unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label);
/*
* send create attributes which were not set by open
* with an extra setattr.
*/
- if (sattr->ia_valid & NFS4_VALID_ATTRS) {
+ if (attrs || label) {
+ unsigned ia_old = sattr->ia_valid;
+
+ sattr->ia_valid = attrs;
nfs_fattr_init(opendata->o_res.f_attr);
status = nfs4_do_setattr(state->inode, cred,
opendata->o_res.f_attr, sattr,
@@ -2907,6 +2930,7 @@ static int _nfs4_do_open(struct inode *dir,
opendata->o_res.f_attr);
nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
}
+ sattr->ia_valid = ia_old;
}
}
if (opened && opendata->file_created)
@@ -3874,6 +3898,10 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
if (IS_ERR(label))
return PTR_ERR(label);
+ /* Return any delegations if we're going to change ACLs */
+ if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
+ nfs4_inode_make_writeable(inode);
+
status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL, label);
if (status == 0) {
nfs_setattr_update_inode(inode, sattr, fattr);
@@ -4048,7 +4076,6 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_accessargs args = {
.fh = NFS_FH(inode),
- .bitmask = server->cache_consistency_bitmask,
.access = entry->mask,
};
struct nfs4_accessres res = {
@@ -4062,14 +4089,18 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry
};
int status = 0;
- res.fattr = nfs_alloc_fattr();
- if (res.fattr == NULL)
- return -ENOMEM;
+ if (!nfs_have_delegated_attributes(inode)) {
+ res.fattr = nfs_alloc_fattr();
+ if (res.fattr == NULL)
+ return -ENOMEM;
+ args.bitmask = server->cache_consistency_bitmask;
+ }
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
if (!status) {
nfs_access_set_mask(entry, res.access);
- nfs_refresh_inode(inode, res.fattr);
+ if (res.fattr)
+ nfs_refresh_inode(inode, res.fattr);
}
nfs_free_fattr(res.fattr);
return status;
@@ -4199,10 +4230,32 @@ static int _nfs4_proc_remove(struct inode *dir, const struct qstr *name)
return status;
}
-static int nfs4_proc_remove(struct inode *dir, const struct qstr *name)
+static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry)
+{
+ struct nfs4_exception exception = { };
+ struct inode *inode = d_inode(dentry);
+ int err;
+
+ if (inode) {
+ if (inode->i_nlink == 1)
+ nfs4_inode_return_delegation(inode);
+ else
+ nfs4_inode_make_writeable(inode);
+ }
+ do {
+ err = _nfs4_proc_remove(dir, &dentry->d_name);
+ trace_nfs4_remove(dir, &dentry->d_name, err);
+ err = nfs4_handle_exception(NFS_SERVER(dir), err,
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
+static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name)
{
struct nfs4_exception exception = { };
int err;
+
do {
err = _nfs4_proc_remove(dir, name);
trace_nfs4_remove(dir, name, err);
@@ -4212,17 +4265,20 @@ static int nfs4_proc_remove(struct inode *dir, const struct qstr *name)
return err;
}
-static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
+static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct dentry *dentry)
{
- struct nfs_server *server = NFS_SERVER(dir);
struct nfs_removeargs *args = msg->rpc_argp;
struct nfs_removeres *res = msg->rpc_resp;
+ struct inode *inode = d_inode(dentry);
- res->server = server;
+ res->server = NFS_SB(dentry->d_sb);
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
nfs4_init_sequence(&args->seq_args, &res->seq_res, 1);
nfs_fattr_init(res->dir_attr);
+
+ if (inode)
+ nfs4_inode_return_delegation(inode);
}
static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
@@ -4248,14 +4304,21 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
return 1;
}
-static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
+static void nfs4_proc_rename_setup(struct rpc_message *msg,
+ struct dentry *old_dentry,
+ struct dentry *new_dentry)
{
- struct nfs_server *server = NFS_SERVER(dir);
struct nfs_renameargs *arg = msg->rpc_argp;
struct nfs_renameres *res = msg->rpc_resp;
+ struct inode *old_inode = d_inode(old_dentry);
+ struct inode *new_inode = d_inode(new_dentry);
+ if (old_inode)
+ nfs4_inode_make_writeable(old_inode);
+ if (new_inode)
+ nfs4_inode_return_delegation(new_inode);
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
- res->server = server;
+ res->server = NFS_SB(old_dentry->d_sb);
nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1);
}
@@ -4317,6 +4380,8 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct
}
arg.bitmask = nfs4_bitmask(server, res.label);
+ nfs4_inode_make_writeable(inode);
+
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (!status) {
update_changeattr(dir, &res.cinfo, res.fattr->time_start);
@@ -5310,7 +5375,7 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
i = buf_to_pages_noslab(buf, buflen, arg.acl_pages);
if (i < 0)
return i;
- nfs4_inode_return_delegation(inode);
+ nfs4_inode_make_writeable(inode);
ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
/*
@@ -5325,7 +5390,8 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
* so mark the attribute cache invalid.
*/
spin_lock(&inode->i_lock);
- NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
+ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_CHANGE
+ | NFS_INO_INVALID_CTIME;
spin_unlock(&inode->i_lock);
nfs_access_zap_cache(inode);
nfs_zap_acl_cache(inode);
@@ -6621,22 +6687,24 @@ static int
nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key)
{
int ret;
- struct cb_notify_lock_args *cbnl = key;
struct nfs4_lock_waiter *waiter = wait->private;
- struct nfs_lowner *lowner = &cbnl->cbnl_owner,
- *wowner = waiter->owner;
- /* Only wake if the callback was for the same owner */
- if (lowner->clientid != wowner->clientid ||
- lowner->id != wowner->id ||
- lowner->s_dev != wowner->s_dev)
- return 0;
+ /* NULL key means to wake up everyone */
+ if (key) {
+ struct cb_notify_lock_args *cbnl = key;
+ struct nfs_lowner *lowner = &cbnl->cbnl_owner,
+ *wowner = waiter->owner;
- /* Make sure it's for the right inode */
- if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
- return 0;
+ /* Only wake if the callback was for the same owner. */
+ if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev)
+ return 0;
- waiter->notified = true;
+ /* Make sure it's for the right inode */
+ if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
+ return 0;
+
+ waiter->notified = true;
+ }
/* override "private" so we can use default_wake_function */
wait->private = waiter->task;
@@ -6673,6 +6741,7 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
add_wait_queue(q, &wait);
while(!signalled()) {
+ waiter.notified = false;
status = nfs4_proc_setlk(state, cmd, request);
if ((status != -EAGAIN) || IS_SETLK(cmd))
break;
@@ -8414,6 +8483,8 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf
{
switch(task->tk_status) {
case 0:
+ wake_up_all(&clp->cl_lock_waitq);
+ /* Fallthrough */
case -NFS4ERR_COMPLETE_ALREADY:
case -NFS4ERR_WRONG_CRED: /* What to do here? */
break;
@@ -9593,7 +9664,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.link = nfs4_proc_link,
.symlink = nfs4_proc_symlink,
.mkdir = nfs4_proc_mkdir,
- .rmdir = nfs4_proc_remove,
+ .rmdir = nfs4_proc_rmdir,
.readdir = nfs4_proc_readdir,
.mknod = nfs4_proc_mknod,
.statfs = nfs4_proc_statfs,
@@ -9614,7 +9685,6 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.close_context = nfs4_close_context,
.open_context = nfs4_atomic_open,
.have_delegation = nfs4_have_delegation,
- .return_delegation = nfs4_inode_return_delegation,
.alloc_client = nfs4_alloc_client,
.init_client = nfs4_init_client,
.free_client = nfs4_free_client,
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 91a4d4eeb235..c10a422efe6f 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -428,7 +428,6 @@ nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
struct rb_node **p = &server->state_owners.rb_node,
*parent = NULL;
struct nfs4_state_owner *sp;
- int err;
while (*p != NULL) {
parent = *p;
@@ -445,9 +444,6 @@ nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
return sp;
}
}
- err = ida_get_new(&server->openowner_id, &new->so_seqid.owner_id);
- if (err)
- return ERR_PTR(err);
rb_link_node(&new->so_server_node, parent, p);
rb_insert_color(&new->so_server_node, &server->state_owners);
return new;
@@ -460,7 +456,6 @@ nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp)
if (!RB_EMPTY_NODE(&sp->so_server_node))
rb_erase(&sp->so_server_node, &server->state_owners);
- ida_remove(&server->openowner_id, sp->so_seqid.owner_id);
}
static void
@@ -495,6 +490,12 @@ nfs4_alloc_state_owner(struct nfs_server *server,
sp = kzalloc(sizeof(*sp), gfp_flags);
if (!sp)
return NULL;
+ sp->so_seqid.owner_id = ida_simple_get(&server->openowner_id, 0, 0,
+ gfp_flags);
+ if (sp->so_seqid.owner_id < 0) {
+ kfree(sp);
+ return NULL;
+ }
sp->so_server = server;
sp->so_cred = get_rpccred(cred);
spin_lock_init(&sp->so_lock);
@@ -526,6 +527,7 @@ static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
{
nfs4_destroy_seqid_counter(&sp->so_seqid);
put_rpccred(sp->so_cred);
+ ida_simple_remove(&sp->so_server->openowner_id, sp->so_seqid.owner_id);
kfree(sp);
}
@@ -576,13 +578,9 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
new = nfs4_alloc_state_owner(server, cred, gfp_flags);
if (new == NULL)
goto out;
- do {
- if (ida_pre_get(&server->openowner_id, gfp_flags) == 0)
- break;
- spin_lock(&clp->cl_lock);
- sp = nfs4_insert_state_owner_locked(new);
- spin_unlock(&clp->cl_lock);
- } while (sp == ERR_PTR(-EAGAIN));
+ spin_lock(&clp->cl_lock);
+ sp = nfs4_insert_state_owner_locked(new);
+ spin_unlock(&clp->cl_lock);
if (sp != new)
nfs4_free_state_owner(new);
out:
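The nfs4state.c hunks above drop the ida_pre_get()/ida_get_new() retry loop and allocate the open-owner id directly in nfs4_alloc_state_owner(). A minimal sketch of the ida_simple_get()/ida_simple_remove() pairing that replaces it, assuming the struct nfs4_state_owner fields shown above; example_alloc_owner_id() is illustrative only.

/*
 * Sketch: ida_simple_get() allocates an id (and may sleep according to
 * gfp_flags), so no pre-allocation/retry loop is needed; the id is
 * released with ida_simple_remove() when the owner is freed.
 */
static int example_alloc_owner_id(struct nfs_server *server,
				  struct nfs4_state_owner *sp, gfp_t gfp_flags)
{
	int id = ida_simple_get(&server->openowner_id, 0, 0, gfp_flags);

	if (id < 0)
		return id;
	sp->so_seqid.owner_id = id;
	return 0;
}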
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index b993ad282de2..9b7392032321 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -98,6 +98,7 @@ static int nfs4_stat_to_errno(int);
((3+NFS4_FHSIZE) >> 2))
#define nfs4_fattr_bitmap_maxsz 4
#define encode_getattr_maxsz (op_encode_hdr_maxsz + nfs4_fattr_bitmap_maxsz)
+#define nfstime4_maxsz (3)
#define nfs4_name_maxsz (1 + ((3 + NFS4_MAXNAMLEN) >> 2))
#define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2))
#define nfs4_owner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
@@ -112,7 +113,8 @@ static int nfs4_stat_to_errno(int);
#define decode_mdsthreshold_maxsz (1 + 1 + nfs4_fattr_bitmap_maxsz + 1 + 8)
/* This is based on getfattr, which uses the most attributes: */
#define nfs4_fattr_value_maxsz (1 + (1 + 2 + 2 + 4 + 2 + 1 + 1 + 2 + 2 + \
- 3 + 3 + 3 + nfs4_owner_maxsz + \
+ 3*nfstime4_maxsz + \
+ nfs4_owner_maxsz + \
nfs4_group_maxsz + nfs4_label_maxsz + \
decode_mdsthreshold_maxsz))
#define nfs4_fattr_maxsz (nfs4_fattr_bitmap_maxsz + \
@@ -123,7 +125,8 @@ static int nfs4_stat_to_errno(int);
nfs4_owner_maxsz + \
nfs4_group_maxsz + \
nfs4_label_maxsz + \
- 4 + 4)
+ 1 + nfstime4_maxsz + \
+ 1 + nfstime4_maxsz)
#define encode_savefh_maxsz (op_encode_hdr_maxsz)
#define decode_savefh_maxsz (op_decode_hdr_maxsz)
#define encode_restorefh_maxsz (op_encode_hdr_maxsz)
@@ -957,6 +960,35 @@ static void encode_uint64(struct xdr_stream *xdr, u64 n)
WARN_ON_ONCE(xdr_stream_encode_u64(xdr, n) < 0);
}
+static ssize_t xdr_encode_bitmap4(struct xdr_stream *xdr,
+ const __u32 *bitmap, size_t len)
+{
+ ssize_t ret;
+
+ /* Trim empty words */
+ while (len > 0 && bitmap[len-1] == 0)
+ len--;
+ ret = xdr_stream_encode_uint32_array(xdr, bitmap, len);
+ if (WARN_ON_ONCE(ret < 0))
+ return ret;
+ return len;
+}
+
+static size_t mask_bitmap4(const __u32 *bitmap, const __u32 *mask,
+ __u32 *res, size_t len)
+{
+ size_t i;
+ __u32 tmp;
+
+ while (len > 0 && (bitmap[len-1] == 0 || mask[len-1] == 0))
+ len--;
+ for (i = len; i-- > 0;) {
+ tmp = bitmap[i] & mask[i];
+ res[i] = tmp;
+ }
+ return len;
+}
+
static void encode_nfs4_seqid(struct xdr_stream *xdr,
const struct nfs_seqid *seqid)
{
@@ -1011,6 +1043,14 @@ static void encode_nfs4_verifier(struct xdr_stream *xdr, const nfs4_verifier *ve
encode_opaque_fixed(xdr, verf->data, NFS4_VERIFIER_SIZE);
}
+static __be32 *
+xdr_encode_nfstime4(__be32 *p, const struct timespec *t)
+{
+ p = xdr_encode_hyper(p, (__s64)t->tv_sec);
+ *p++ = cpu_to_be32(t->tv_nsec);
+ return p;
+}
+
static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
const struct nfs4_label *label,
const umode_t *umask,
@@ -1022,9 +1062,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
int owner_namelen = 0;
int owner_grouplen = 0;
__be32 *p;
- unsigned i;
uint32_t len = 0;
- uint32_t bmval_len;
uint32_t bmval[3] = { 0 };
/*
@@ -1072,7 +1110,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
if (attrmask[1] & FATTR4_WORD1_TIME_ACCESS_SET) {
if (iap->ia_valid & ATTR_ATIME_SET) {
bmval[1] |= FATTR4_WORD1_TIME_ACCESS_SET;
- len += 16;
+ len += 4 + (nfstime4_maxsz << 2);
} else if (iap->ia_valid & ATTR_ATIME) {
bmval[1] |= FATTR4_WORD1_TIME_ACCESS_SET;
len += 4;
@@ -1081,7 +1119,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
if (attrmask[1] & FATTR4_WORD1_TIME_MODIFY_SET) {
if (iap->ia_valid & ATTR_MTIME_SET) {
bmval[1] |= FATTR4_WORD1_TIME_MODIFY_SET;
- len += 16;
+ len += 4 + (nfstime4_maxsz << 2);
} else if (iap->ia_valid & ATTR_MTIME) {
bmval[1] |= FATTR4_WORD1_TIME_MODIFY_SET;
len += 4;
@@ -1093,19 +1131,8 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
bmval[2] |= FATTR4_WORD2_SECURITY_LABEL;
}
- if (bmval[2] != 0)
- bmval_len = 3;
- else if (bmval[1] != 0)
- bmval_len = 2;
- else
- bmval_len = 1;
-
- p = reserve_space(xdr, 4 + (bmval_len << 2) + 4 + len);
-
- *p++ = cpu_to_be32(bmval_len);
- for (i = 0; i < bmval_len; i++)
- *p++ = cpu_to_be32(bmval[i]);
- *p++ = cpu_to_be32(len);
+ xdr_encode_bitmap4(xdr, bmval, ARRAY_SIZE(bmval));
+ xdr_stream_encode_opaque_inline(xdr, (void **)&p, len);
if (bmval[0] & FATTR4_WORD0_SIZE)
p = xdr_encode_hyper(p, iap->ia_size);
@@ -1118,16 +1145,14 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) {
if (iap->ia_valid & ATTR_ATIME_SET) {
*p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
- p = xdr_encode_hyper(p, (s64)iap->ia_atime.tv_sec);
- *p++ = cpu_to_be32(iap->ia_atime.tv_nsec);
+ p = xdr_encode_nfstime4(p, &iap->ia_atime);
} else
*p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
}
if (bmval[1] & FATTR4_WORD1_TIME_MODIFY_SET) {
if (iap->ia_valid & ATTR_MTIME_SET) {
*p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
- p = xdr_encode_hyper(p, (s64)iap->ia_mtime.tv_sec);
- *p++ = cpu_to_be32(iap->ia_mtime.tv_nsec);
+ p = xdr_encode_nfstime4(p, &iap->ia_mtime);
} else
*p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
}
@@ -1199,85 +1224,45 @@ static void encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *
create->server, create->server->attr_bitmask);
}
-static void encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap, struct compound_hdr *hdr)
-{
- __be32 *p;
-
- encode_op_hdr(xdr, OP_GETATTR, decode_getattr_maxsz, hdr);
- p = reserve_space(xdr, 8);
- *p++ = cpu_to_be32(1);
- *p = cpu_to_be32(bitmap);
-}
-
-static void encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm1, struct compound_hdr *hdr)
-{
- __be32 *p;
-
- encode_op_hdr(xdr, OP_GETATTR, decode_getattr_maxsz, hdr);
- p = reserve_space(xdr, 12);
- *p++ = cpu_to_be32(2);
- *p++ = cpu_to_be32(bm0);
- *p = cpu_to_be32(bm1);
-}
-
-static void
-encode_getattr_three(struct xdr_stream *xdr,
- uint32_t bm0, uint32_t bm1, uint32_t bm2,
- struct compound_hdr *hdr)
+static void encode_getattr(struct xdr_stream *xdr,
+ const __u32 *bitmap, const __u32 *mask, size_t len,
+ struct compound_hdr *hdr)
{
- __be32 *p;
+ __u32 masked_bitmap[nfs4_fattr_bitmap_maxsz];
encode_op_hdr(xdr, OP_GETATTR, decode_getattr_maxsz, hdr);
- if (bm2) {
- p = reserve_space(xdr, 16);
- *p++ = cpu_to_be32(3);
- *p++ = cpu_to_be32(bm0);
- *p++ = cpu_to_be32(bm1);
- *p = cpu_to_be32(bm2);
- } else if (bm1) {
- p = reserve_space(xdr, 12);
- *p++ = cpu_to_be32(2);
- *p++ = cpu_to_be32(bm0);
- *p = cpu_to_be32(bm1);
- } else {
- p = reserve_space(xdr, 8);
- *p++ = cpu_to_be32(1);
- *p = cpu_to_be32(bm0);
+ if (mask) {
+ if (WARN_ON_ONCE(len > ARRAY_SIZE(masked_bitmap)))
+ len = ARRAY_SIZE(masked_bitmap);
+ len = mask_bitmap4(bitmap, mask, masked_bitmap, len);
+ bitmap = masked_bitmap;
}
+ xdr_encode_bitmap4(xdr, bitmap, len);
}
static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
{
- encode_getattr_three(xdr, bitmask[0] & nfs4_fattr_bitmap[0],
- bitmask[1] & nfs4_fattr_bitmap[1],
- bitmask[2] & nfs4_fattr_bitmap[2],
- hdr);
+ encode_getattr(xdr, nfs4_fattr_bitmap, bitmask,
+ ARRAY_SIZE(nfs4_fattr_bitmap), hdr);
}
static void encode_getfattr_open(struct xdr_stream *xdr, const u32 *bitmask,
const u32 *open_bitmap,
struct compound_hdr *hdr)
{
- encode_getattr_three(xdr,
- bitmask[0] & open_bitmap[0],
- bitmask[1] & open_bitmap[1],
- bitmask[2] & open_bitmap[2],
- hdr);
+ encode_getattr(xdr, open_bitmap, bitmask, 3, hdr);
}
static void encode_fsinfo(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
{
- encode_getattr_three(xdr,
- bitmask[0] & nfs4_fsinfo_bitmap[0],
- bitmask[1] & nfs4_fsinfo_bitmap[1],
- bitmask[2] & nfs4_fsinfo_bitmap[2],
- hdr);
+ encode_getattr(xdr, nfs4_fsinfo_bitmap, bitmask,
+ ARRAY_SIZE(nfs4_fsinfo_bitmap), hdr);
}
static void encode_fs_locations(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
{
- encode_getattr_two(xdr, bitmask[0] & nfs4_fs_locations_bitmap[0],
- bitmask[1] & nfs4_fs_locations_bitmap[1], hdr);
+ encode_getattr(xdr, nfs4_fs_locations_bitmap, bitmask,
+ ARRAY_SIZE(nfs4_fs_locations_bitmap), hdr);
}
static void encode_getfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
@@ -2116,7 +2101,8 @@ static void nfs4_xdr_enc_access(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_access(xdr, args->access, &hdr);
- encode_getfattr(xdr, args->bitmask, &hdr);
+ if (args->bitmask)
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
@@ -2558,13 +2544,17 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
+ const __u32 nfs4_acl_bitmap[1] = {
+ [0] = FATTR4_WORD0_ACL,
+ };
uint32_t replen;
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
replen = hdr.replen + op_decode_hdr_maxsz;
- encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
+ encode_getattr(xdr, nfs4_acl_bitmap, NULL,
+ ARRAY_SIZE(nfs4_acl_bitmap), &hdr);
xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
args->acl_pages, 0, args->acl_len);
@@ -2643,8 +2633,8 @@ static void nfs4_xdr_enc_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
- encode_getattr_one(xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0],
- &hdr);
+ encode_getattr(xdr, nfs4_pathconf_bitmap, args->bitmask,
+ ARRAY_SIZE(nfs4_pathconf_bitmap), &hdr);
encode_nops(&hdr);
}
@@ -2662,8 +2652,8 @@ static void nfs4_xdr_enc_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
- encode_getattr_two(xdr, args->bitmask[0] & nfs4_statfs_bitmap[0],
- args->bitmask[1] & nfs4_statfs_bitmap[1], &hdr);
+ encode_getattr(xdr, nfs4_statfs_bitmap, args->bitmask,
+ ARRAY_SIZE(nfs4_statfs_bitmap), &hdr);
encode_nops(&hdr);
}
@@ -2683,7 +2673,7 @@ static void nfs4_xdr_enc_server_caps(struct rpc_rqst *req,
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fhandle, &hdr);
- encode_getattr_three(xdr, bitmask[0], bitmask[1], bitmask[2], &hdr);
+ encode_getattr(xdr, bitmask, NULL, 3, &hdr);
encode_nops(&hdr);
}
@@ -3217,34 +3207,27 @@ static int decode_ace(struct xdr_stream *xdr, void *ace)
return -EIO;
}
-static int decode_attr_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
+static ssize_t
+decode_bitmap4(struct xdr_stream *xdr, uint32_t *bitmap, size_t sz)
{
- uint32_t bmlen;
- __be32 *p;
-
- p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p))
- goto out_overflow;
- bmlen = be32_to_cpup(p);
+ ssize_t ret;
- bitmap[0] = bitmap[1] = bitmap[2] = 0;
- p = xdr_inline_decode(xdr, (bmlen << 2));
- if (unlikely(!p))
- goto out_overflow;
- if (bmlen > 0) {
- bitmap[0] = be32_to_cpup(p++);
- if (bmlen > 1) {
- bitmap[1] = be32_to_cpup(p++);
- if (bmlen > 2)
- bitmap[2] = be32_to_cpup(p);
- }
- }
- return 0;
-out_overflow:
+ ret = xdr_stream_decode_uint32_array(xdr, bitmap, sz);
+ if (likely(ret >= 0))
+ return ret;
+ if (ret == -EMSGSIZE)
+ return sz;
print_overflow_msg(__func__, xdr);
return -EIO;
}
+static int decode_attr_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
+{
+ ssize_t ret;
+ ret = decode_bitmap4(xdr, bitmap, 3);
+ return ret < 0 ? ret : 0;
+}
+
static int decode_attr_length(struct xdr_stream *xdr, uint32_t *attrlen, unsigned int *savep)
{
__be32 *p;
@@ -3980,7 +3963,7 @@ static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap,
bitmap[1] &= ~FATTR4_WORD1_OWNER;
if (owner_name != NULL) {
- len = decode_nfs4_string(xdr, owner_name, GFP_NOWAIT);
+ len = decode_nfs4_string(xdr, owner_name, GFP_NOIO);
if (len <= 0)
goto out;
dprintk("%s: name=%s\n", __func__, owner_name->data);
@@ -4015,7 +3998,7 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
bitmap[1] &= ~FATTR4_WORD1_OWNER_GROUP;
if (group_name != NULL) {
- len = decode_nfs4_string(xdr, group_name, GFP_NOWAIT);
+ len = decode_nfs4_string(xdr, group_name, GFP_NOIO);
if (len <= 0)
goto out;
dprintk("%s: name=%s\n", __func__, group_name->data);
@@ -4155,19 +4138,25 @@ out_overflow:
return -EIO;
}
+static __be32 *
+xdr_decode_nfstime4(__be32 *p, struct timespec *t)
+{
+ __u64 sec;
+
+ p = xdr_decode_hyper(p, &sec);
+ t->tv_sec = (time_t)sec;
+ t->tv_nsec = be32_to_cpup(p++);
+ return p;
+}
+
static int decode_attr_time(struct xdr_stream *xdr, struct timespec *time)
{
__be32 *p;
- uint64_t sec;
- uint32_t nsec;
- p = xdr_inline_decode(xdr, 12);
+ p = xdr_inline_decode(xdr, nfstime4_maxsz << 2);
if (unlikely(!p))
goto out_overflow;
- p = xdr_decode_hyper(p, &sec);
- nsec = be32_to_cpup(p);
- time->tv_sec = (time_t)sec;
- time->tv_nsec = (long)nsec;
+ xdr_decode_nfstime4(p, time);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
@@ -5470,21 +5459,13 @@ decode_savefh(struct xdr_stream *xdr)
static int decode_setattr(struct xdr_stream *xdr)
{
- __be32 *p;
- uint32_t bmlen;
int status;
status = decode_op_hdr(xdr, OP_SETATTR);
if (status)
return status;
- p = xdr_inline_decode(xdr, 4);
- if (unlikely(!p))
- goto out_overflow;
- bmlen = be32_to_cpup(p);
- p = xdr_inline_decode(xdr, bmlen << 2);
- if (likely(p))
+ if (decode_bitmap4(xdr, NULL, 0) >= 0)
return 0;
-out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
@@ -6255,7 +6236,8 @@ static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_access(xdr, &res->supported, &res->access);
if (status != 0)
goto out;
- decode_getfattr(xdr, res->fattr, res->server);
+ if (res->fattr)
+ decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@@ -7535,6 +7517,7 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
unsigned int savep;
uint32_t bitmap[3] = {0};
uint32_t len;
+ uint64_t new_cookie;
__be32 *p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
@@ -7551,8 +7534,7 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
p = xdr_inline_decode(xdr, 12);
if (unlikely(!p))
goto out_overflow;
- entry->prev_cookie = entry->cookie;
- p = xdr_decode_hyper(p, &entry->cookie);
+ p = xdr_decode_hyper(p, &new_cookie);
entry->len = be32_to_cpup(p);
p = xdr_inline_decode(xdr, entry->len);
@@ -7586,6 +7568,9 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
if (entry->fattr->valid & NFS_ATTR_FATTR_TYPE)
entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
+ entry->prev_cookie = entry->cookie;
+ entry->cookie = new_cookie;
+
return 0;
out_overflow:
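The new xdr_encode_bitmap4() and mask_bitmap4() helpers above replace the fixed one/two/three-word GETATTR encoders by trimming trailing zero words before the bitmap goes on the wire. A small self-contained sketch of that trimming rule; bitmap4_trim_len() exists only for illustration.

/*
 * Illustration only: drop trailing zero words so the encoded bitmap4
 * is as short as possible, e.g. {W0, W1, 0} encodes as a 2-word bitmap.
 */
static size_t bitmap4_trim_len(const __u32 *bitmap, size_t len)
{
	while (len > 0 && bitmap[len - 1] == 0)
		len--;
	return len;
}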
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index f7fd9192d4bc..4e93d6308733 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -300,11 +300,11 @@ out:
}
static int
-nfs_proc_remove(struct inode *dir, const struct qstr *name)
+nfs_proc_remove(struct inode *dir, struct dentry *dentry)
{
struct nfs_removeargs arg = {
.fh = NFS_FH(dir),
- .name = *name,
+ .name = dentry->d_name,
};
struct rpc_message msg = {
.rpc_proc = &nfs_procedures[NFSPROC_REMOVE],
@@ -312,7 +312,7 @@ nfs_proc_remove(struct inode *dir, const struct qstr *name)
};
int status;
- dprintk("NFS call remove %s\n", name->name);
+ dprintk("NFS call remove %pd2\n",dentry);
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
nfs_mark_for_revalidate(dir);
@@ -321,7 +321,7 @@ nfs_proc_remove(struct inode *dir, const struct qstr *name)
}
static void
-nfs_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
+nfs_proc_unlink_setup(struct rpc_message *msg, struct dentry *dentry)
{
msg->rpc_proc = &nfs_procedures[NFSPROC_REMOVE];
}
@@ -338,7 +338,9 @@ static int nfs_proc_unlink_done(struct rpc_task *task, struct inode *dir)
}
static void
-nfs_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
+nfs_proc_rename_setup(struct rpc_message *msg,
+ struct dentry *old_dentry,
+ struct dentry *new_dentry)
{
msg->rpc_proc = &nfs_procedures[NFSPROC_RENAME];
}
@@ -671,12 +673,6 @@ static int nfs_have_delegation(struct inode *inode, fmode_t flags)
return 0;
}
-static int nfs_return_delegation(struct inode *inode)
-{
- nfs_wb_all(inode);
- return 0;
-}
-
static const struct inode_operations nfs_dir_inode_operations = {
.create = nfs_create,
.lookup = nfs_lookup,
@@ -741,7 +737,6 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
.lock_check_bounds = nfs_lock_check_bounds,
.close_context = nfs_close_context,
.have_delegation = nfs_have_delegation,
- .return_delegation = nfs_return_delegation,
.alloc_client = nfs_alloc_client,
.init_client = nfs_init_client,
.free_client = nfs_free_client,
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 630b4a3c1a93..bf54fc9ae135 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -105,7 +105,7 @@ static void nfs_do_call_unlink(struct nfs_unlinkdata *data)
data->args.fh = NFS_FH(dir);
nfs_fattr_init(data->res.dir_attr);
- NFS_PROTO(dir)->unlink_setup(&msg, dir);
+ NFS_PROTO(dir)->unlink_setup(&msg, data->dentry);
task_setup_data.rpc_client = NFS_CLIENT(dir);
task = rpc_run_task(&task_setup_data);
@@ -386,7 +386,7 @@ nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
nfs_sb_active(old_dir->i_sb);
- NFS_PROTO(data->old_dir)->rename_setup(&msg, old_dir);
+ NFS_PROTO(data->old_dir)->rename_setup(&msg, old_dentry, new_dentry);
return rpc_run_task(&task_setup_data);
}
@@ -463,9 +463,6 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
fileid = NFS_FILEID(d_inode(dentry));
- /* Return delegation in anticipation of the rename */
- NFS_PROTO(d_inode(dentry))->return_delegation(d_inode(dentry));
-
sdentry = NULL;
do {
int slen;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 6579f3b367bd..0193053bc139 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -231,6 +231,7 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c
if (i_size >= end)
goto out;
i_size_write(inode, end);
+ NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
spin_unlock(&inode->i_lock);
@@ -1562,8 +1563,11 @@ static int nfs_writeback_done(struct rpc_task *task,
}
/* Deal with the suid/sgid bit corner case */
- if (nfs_should_remove_suid(inode))
- nfs_mark_for_revalidate(inode);
+ if (nfs_should_remove_suid(inode)) {
+ spin_lock(&inode->i_lock);
+ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
+ spin_unlock(&inode->i_lock);
+ }
return 0;
}
diff --git a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig
index ce6ff5a0a6e4..17032631c5cf 100644
--- a/fs/overlayfs/Kconfig
+++ b/fs/overlayfs/Kconfig
@@ -86,3 +86,20 @@ config OVERLAY_FS_NFS_EXPORT
case basis with the "nfs_export=on" mount option.
Say N unless you fully understand the consequences.
+
+config OVERLAY_FS_XINO_AUTO
+ bool "Overlayfs: auto enable inode number mapping"
+ default n
+ depends on OVERLAY_FS
+ help
+ If this config option is enabled then overlay filesystems will use
+ unused high bits in underlying filesystem inode numbers to map all
+ inodes to a unified address space. The mapped 64bit inode numbers
+ might not be compatible with applications that expect 32bit inodes.
+
+ If compatibility with applications that expect 32bit inodes is not an
+ issue, then it is safe and recommended to say Y here.
+
+ For more information, see Documentation/filesystems/overlayfs.txt
+
+ If unsure, say N.
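The help text above describes storing a per-layer fsid in otherwise-unused high bits of the 64-bit inode number. A hedged sketch of that mapping, mirroring what ovl_map_dev_ino() does later in this series; xino_map() is an illustrative helper, not part of the patch.

/*
 * Sketch of the xino mapping (illustration only): the fsid is packed
 * into the top 'xinobits' bits of st_ino, provided the real inode
 * number does not already use those bits.
 */
static inline u64 xino_map(u64 real_ino, int fsid, unsigned int xinobits)
{
	unsigned int shift = 64 - xinobits;

	if (real_ino >> shift)		/* high bits already in use */
		return real_ino;	/* cannot map; fall back */
	return real_ino | ((u64)fsid << shift);
}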
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index d855f508fa20..8bede0742619 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -232,7 +232,7 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
return err;
}
-struct ovl_fh *ovl_encode_fh(struct dentry *real, bool is_upper)
+struct ovl_fh *ovl_encode_real_fh(struct dentry *real, bool is_upper)
{
struct ovl_fh *fh;
int fh_type, fh_len, dwords;
@@ -300,7 +300,7 @@ int ovl_set_origin(struct dentry *dentry, struct dentry *lower,
* up and a pure upper inode.
*/
if (ovl_can_decode_fh(lower->d_sb)) {
- fh = ovl_encode_fh(lower, false);
+ fh = ovl_encode_real_fh(lower, false);
if (IS_ERR(fh))
return PTR_ERR(fh);
}
@@ -321,7 +321,7 @@ static int ovl_set_upper_fh(struct dentry *upper, struct dentry *index)
const struct ovl_fh *fh;
int err;
- fh = ovl_encode_fh(upper, true);
+ fh = ovl_encode_real_fh(upper, true);
if (IS_ERR(fh))
return PTR_ERR(fh);
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index 87bd4148f4fb..425a94672300 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -228,8 +228,8 @@ static int ovl_d_to_fh(struct dentry *dentry, char *buf, int buflen)
goto fail;
/* Encode an upper or lower file handle */
- fh = ovl_encode_fh(enc_lower ? ovl_dentry_lower(dentry) :
- ovl_dentry_upper(dentry), !enc_lower);
+ fh = ovl_encode_real_fh(enc_lower ? ovl_dentry_lower(dentry) :
+ ovl_dentry_upper(dentry), !enc_lower);
err = PTR_ERR(fh);
if (IS_ERR(fh))
goto fail;
@@ -267,8 +267,8 @@ static int ovl_dentry_to_fh(struct dentry *dentry, u32 *fid, int *max_len)
return OVL_FILEID;
}
-static int ovl_encode_inode_fh(struct inode *inode, u32 *fid, int *max_len,
- struct inode *parent)
+static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len,
+ struct inode *parent)
{
struct dentry *dentry;
int type;
@@ -305,15 +305,12 @@ static struct dentry *ovl_obtain_alias(struct super_block *sb,
if (d_is_dir(upper ?: lower))
return ERR_PTR(-EIO);
- inode = ovl_get_inode(sb, dget(upper), lower, index, !!lower);
+ inode = ovl_get_inode(sb, dget(upper), lowerpath, index, !!lower);
if (IS_ERR(inode)) {
dput(upper);
return ERR_CAST(inode);
}
- if (index)
- ovl_set_flag(OVL_INDEX, inode);
-
dentry = d_find_any_alias(inode);
if (!dentry) {
dentry = d_alloc_anon(inode->i_sb);
@@ -685,7 +682,7 @@ static struct dentry *ovl_upper_fh_to_d(struct super_block *sb,
if (!ofs->upper_mnt)
return ERR_PTR(-EACCES);
- upper = ovl_decode_fh(fh, ofs->upper_mnt);
+ upper = ovl_decode_real_fh(fh, ofs->upper_mnt, true);
if (IS_ERR_OR_NULL(upper))
return upper;
@@ -703,25 +700,39 @@ static struct dentry *ovl_lower_fh_to_d(struct super_block *sb,
struct ovl_path *stack = &origin;
struct dentry *dentry = NULL;
struct dentry *index = NULL;
- struct inode *inode = NULL;
- bool is_deleted = false;
+ struct inode *inode;
int err;
- /* First lookup indexed upper by fh */
+ /* First lookup overlay inode in inode cache by origin fh */
+ err = ovl_check_origin_fh(ofs, fh, false, NULL, &stack);
+ if (err)
+ return ERR_PTR(err);
+
+ if (!d_is_dir(origin.dentry) ||
+ !(origin.dentry->d_flags & DCACHE_DISCONNECTED)) {
+ inode = ovl_lookup_inode(sb, origin.dentry, false);
+ err = PTR_ERR(inode);
+ if (IS_ERR(inode))
+ goto out_err;
+ if (inode) {
+ dentry = d_find_any_alias(inode);
+ iput(inode);
+ if (dentry)
+ goto out;
+ }
+ }
+
+ /* Then lookup indexed upper/whiteout by origin fh */
if (ofs->indexdir) {
index = ovl_get_index_fh(ofs, fh);
err = PTR_ERR(index);
if (IS_ERR(index)) {
- if (err != -ESTALE)
- return ERR_PTR(err);
-
- /* Found a whiteout index - treat as deleted inode */
- is_deleted = true;
index = NULL;
+ goto out_err;
}
}
- /* Then try to get upper dir by index */
+ /* Then try to get a connected upper dir by index */
if (index && d_is_dir(index)) {
struct dentry *upper = ovl_index_upper(ofs, index);
@@ -734,24 +745,19 @@ static struct dentry *ovl_lower_fh_to_d(struct super_block *sb,
goto out;
}
- /* Then lookup origin by fh */
- err = ovl_check_origin_fh(ofs, fh, NULL, &stack);
- if (err) {
- goto out_err;
- } else if (index) {
- err = ovl_verify_origin(index, origin.dentry, false);
+ /* Otherwise, get a connected non-upper dir or disconnected non-dir */
+ if (d_is_dir(origin.dentry) &&
+ (origin.dentry->d_flags & DCACHE_DISCONNECTED)) {
+ dput(origin.dentry);
+ origin.dentry = NULL;
+ err = ovl_check_origin_fh(ofs, fh, true, NULL, &stack);
if (err)
goto out_err;
- } else if (is_deleted) {
- /* Lookup deleted non-dir by origin inode */
- if (!d_is_dir(origin.dentry))
- inode = ovl_lookup_inode(sb, origin.dentry, false);
- err = -ESTALE;
- if (!inode || atomic_read(&inode->i_count) == 1)
+ }
+ if (index) {
+ err = ovl_verify_origin(index, origin.dentry, false);
+ if (err)
goto out_err;
-
- /* Deleted but still open? */
- index = dget(ovl_i_dentry_upper(inode));
}
dentry = ovl_get_dentry(sb, NULL, &origin, index);
@@ -759,7 +765,6 @@ static struct dentry *ovl_lower_fh_to_d(struct super_block *sb,
out:
dput(origin.dentry);
dput(index);
- iput(inode);
return dentry;
out_err:
@@ -829,7 +834,7 @@ static struct dentry *ovl_get_parent(struct dentry *dentry)
}
const struct export_operations ovl_export_operations = {
- .encode_fh = ovl_encode_inode_fh,
+ .encode_fh = ovl_encode_fh,
.fh_to_dentry = ovl_fh_to_dentry,
.fh_to_parent = ovl_fh_to_parent,
.get_name = ovl_get_name,
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 3b1bd469accd..6e3815fb006b 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -16,13 +16,6 @@
#include "overlayfs.h"
-static dev_t ovl_get_pseudo_dev(struct dentry *dentry)
-{
- struct ovl_entry *oe = dentry->d_fsdata;
-
- return oe->lowerstack[0].layer->pseudo_dev;
-}
-
int ovl_setattr(struct dentry *dentry, struct iattr *attr)
{
int err;
@@ -66,6 +59,69 @@ out:
return err;
}
+static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat,
+ struct ovl_layer *lower_layer)
+{
+ bool samefs = ovl_same_sb(dentry->d_sb);
+ unsigned int xinobits = ovl_xino_bits(dentry->d_sb);
+
+ if (samefs) {
+ /*
+ * When all layers are on the same fs, all real inode
+ * numbers are unique, so we use the overlay st_dev,
+ * which is friendly to du -x.
+ */
+ stat->dev = dentry->d_sb->s_dev;
+ return 0;
+ } else if (xinobits) {
+ unsigned int shift = 64 - xinobits;
+ /*
+ * All inode numbers of underlying fs should not be using the
+ * high xinobits, so we use high xinobits to partition the
+ * overlay st_ino address space. The high bits holds the fsid
+ * (upper fsid is 0). This way overlay inode numbers are unique
+ * and all inodes use overlay st_dev. Inode numbers are also
+ * persistent for a given layer configuration.
+ */
+ if (stat->ino >> shift) {
+ pr_warn_ratelimited("overlayfs: inode number too big (%pd2, ino=%llu, xinobits=%d)\n",
+ dentry, stat->ino, xinobits);
+ } else {
+ if (lower_layer)
+ stat->ino |= ((u64)lower_layer->fsid) << shift;
+
+ stat->dev = dentry->d_sb->s_dev;
+ return 0;
+ }
+ }
+
+ /* The inode could not be mapped to a unified st_ino address space */
+ if (S_ISDIR(dentry->d_inode->i_mode)) {
+ /*
+ * Always use the overlay st_dev for directories, so 'find
+ * -xdev' will scan the entire overlay mount and won't cross the
+ * overlay mount boundaries.
+ *
+ * If not all layers are on the same fs the pair {real st_ino;
+ * overlay st_dev} is not unique, so use the non-persistent
+ * overlay st_ino for directories.
+ */
+ stat->dev = dentry->d_sb->s_dev;
+ stat->ino = dentry->d_inode->i_ino;
+ } else if (lower_layer && lower_layer->fsid) {
+ /*
+ * For non-samefs setup, if we cannot map all layers st_ino
+ * to a unified address space, we need to make sure that st_dev
+ * is unique per lower fs. Upper layer uses real st_dev and
+ * lower layers use the unique anonymous bdev assigned to the
+ * lower fs.
+ */
+ stat->dev = lower_layer->fs->pseudo_dev;
+ }
+
+ return 0;
+}
+
int ovl_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags)
{
@@ -75,6 +131,7 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
const struct cred *old_cred;
bool is_dir = S_ISDIR(dentry->d_inode->i_mode);
bool samefs = ovl_same_sb(dentry->d_sb);
+ struct ovl_layer *lower_layer = NULL;
int err;
type = ovl_path_real(dentry, &realpath);
@@ -84,14 +141,18 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
goto out;
/*
- * For non-dir or same fs, we use st_ino of the copy up origin, if we
- * know it. This guaranties constant st_dev/st_ino across copy up.
+ * For non-dir or same fs, we use st_ino of the copy up origin.
+ * This guarantees constant st_dev/st_ino across copy up.
+ * With xino feature and non-samefs, we use st_ino of the copy up
+ * origin masked with high bits that represent the layer id.
*
- * If filesystem supports NFS export ops, this also guaranties
+ * If the lower filesystem supports NFS file handles, this also guarantees
* persistent st_ino across mount cycle.
*/
- if (!is_dir || samefs) {
- if (OVL_TYPE_ORIGIN(type)) {
+ if (!is_dir || samefs || ovl_xino_bits(dentry->d_sb)) {
+ if (!OVL_TYPE_UPPER(type)) {
+ lower_layer = ovl_layer_lower(dentry);
+ } else if (OVL_TYPE_ORIGIN(type)) {
struct kstat lowerstat;
u32 lowermask = STATX_INO | (!is_dir ? STATX_NLINK : 0);
@@ -118,43 +179,17 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
*/
if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) ||
(!ovl_verify_lower(dentry->d_sb) &&
- (is_dir || lowerstat.nlink == 1)))
+ (is_dir || lowerstat.nlink == 1))) {
stat->ino = lowerstat.ino;
-
- if (samefs)
- WARN_ON_ONCE(stat->dev != lowerstat.dev);
- else
- stat->dev = ovl_get_pseudo_dev(dentry);
- }
- if (samefs) {
- /*
- * When all layers are on the same fs, all real inode
- * number are unique, so we use the overlay st_dev,
- * which is friendly to du -x.
- */
- stat->dev = dentry->d_sb->s_dev;
- } else if (!OVL_TYPE_UPPER(type)) {
- /*
- * For non-samefs setup, to make sure that st_dev/st_ino
- * pair is unique across the system, we use a unique
- * anonymous st_dev for lower layer inode.
- */
- stat->dev = ovl_get_pseudo_dev(dentry);
+ lower_layer = ovl_layer_lower(dentry);
+ }
}
- } else {
- /*
- * Always use the overlay st_dev for directories, so 'find
- * -xdev' will scan the entire overlay mount and won't cross the
- * overlay mount boundaries.
- *
- * If not all layers are on the same fs the pair {real st_ino;
- * overlay st_dev} is not unique, so use the non persistent
- * overlay st_ino for directories.
- */
- stat->dev = dentry->d_sb->s_dev;
- stat->ino = dentry->d_inode->i_ino;
}
+ err = ovl_map_dev_ino(dentry, stat, lower_layer);
+ if (err)
+ goto out;
+
/*
* It's probably not worth it to count subdirs to get the
* correct link count. nlink=1 seems to pacify 'find' and
@@ -383,24 +418,18 @@ int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags)
int ovl_update_time(struct inode *inode, struct timespec *ts, int flags)
{
- struct dentry *alias;
- struct path upperpath;
-
- if (!(flags & S_ATIME))
- return 0;
-
- alias = d_find_any_alias(inode);
- if (!alias)
- return 0;
-
- ovl_path_upper(alias, &upperpath);
- if (upperpath.dentry) {
- touch_atime(&upperpath);
- inode->i_atime = d_inode(upperpath.dentry)->i_atime;
+ if (flags & S_ATIME) {
+ struct ovl_fs *ofs = inode->i_sb->s_fs_info;
+ struct path upperpath = {
+ .mnt = ofs->upper_mnt,
+ .dentry = ovl_upperdentry_dereference(OVL_I(inode)),
+ };
+
+ if (upperpath.dentry) {
+ touch_atime(&upperpath);
+ inode->i_atime = d_inode(upperpath.dentry)->i_atime;
+ }
}
-
- dput(alias);
-
return 0;
}
@@ -459,9 +488,27 @@ static inline void ovl_lockdep_annotate_inode_mutex_key(struct inode *inode)
#endif
}
-static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev)
+static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev,
+ unsigned long ino, int fsid)
{
- inode->i_ino = get_next_ino();
+ int xinobits = ovl_xino_bits(inode->i_sb);
+
+ /*
+ * When NFS export is enabled and d_ino is consistent with st_ino
+ * (samefs or i_ino has enough bits to encode the layer), set i_ino
+ * to the same value used for d_ino, because nfsd readdirplus compares
+ * d_ino values to the i_ino values of child entries. When called from
+ * ovl_new_inode(), the ino arg is 0, so i_ino will be updated to the
+ * real upper inode's i_ino on ovl_inode_init() or ovl_inode_update().
+ */
+ if (inode->i_sb->s_export_op &&
+ (ovl_same_sb(inode->i_sb) || xinobits)) {
+ inode->i_ino = ino;
+ if (xinobits && fsid && !(ino >> (64 - xinobits)))
+ inode->i_ino |= (unsigned long)fsid << (64 - xinobits);
+ } else {
+ inode->i_ino = get_next_ino();
+ }
inode->i_mode = mode;
inode->i_flags |= S_NOCMTIME;
#ifdef CONFIG_FS_POSIX_ACL
@@ -597,7 +644,7 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev)
inode = new_inode(sb);
if (inode)
- ovl_fill_inode(inode, mode, rdev);
+ ovl_fill_inode(inode, mode, rdev, 0, 0);
return inode;
}
@@ -703,13 +750,16 @@ static bool ovl_hash_bylower(struct super_block *sb, struct dentry *upper,
}
struct inode *ovl_get_inode(struct super_block *sb, struct dentry *upperdentry,
- struct dentry *lowerdentry, struct dentry *index,
+ struct ovl_path *lowerpath, struct dentry *index,
unsigned int numlower)
{
struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL;
struct inode *inode;
+ struct dentry *lowerdentry = lowerpath ? lowerpath->dentry : NULL;
bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry, index);
+ int fsid = bylower ? lowerpath->layer->fsid : 0;
bool is_dir;
+ unsigned long ino = 0;
if (!realinode)
realinode = d_inode(lowerdentry);
@@ -748,18 +798,22 @@ struct inode *ovl_get_inode(struct super_block *sb, struct dentry *upperdentry,
if (!is_dir)
nlink = ovl_get_nlink(lowerdentry, upperdentry, nlink);
set_nlink(inode, nlink);
+ ino = key->i_ino;
} else {
/* Lower hardlink that will be broken on copy up */
inode = new_inode(sb);
if (!inode)
goto out_nomem;
}
- ovl_fill_inode(inode, realinode->i_mode, realinode->i_rdev);
+ ovl_fill_inode(inode, realinode->i_mode, realinode->i_rdev, ino, fsid);
ovl_inode_init(inode, upperdentry, lowerdentry);
if (upperdentry && ovl_is_impuredir(upperdentry))
ovl_set_flag(OVL_IMPURE, inode);
+ if (index)
+ ovl_set_flag(OVL_INDEX, inode);
+
/* Check for non-merge dir that may have whiteouts */
if (is_dir) {
if (((upperdentry && lowerdentry) || numlower > 1) ||
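The ovl_map_dev_ino()/ovl_fill_inode() changes above pack a per-layer fsid into the otherwise unused high bits of the real inode number. A minimal userspace sketch of that arithmetic, with illustrative names only (xino_pack is not a kernel function):

#include <stdint.h>
#include <stdio.h>

/*
 * Pack fsid into the high xinobits of a real inode number. If the real
 * inode number already uses those bits, keep it unchanged; this mirrors
 * the pr_warn_ratelimited fallback in ovl_map_dev_ino().
 */
static uint64_t xino_pack(uint64_t ino, unsigned int xinobits, unsigned int fsid)
{
        if (ino >> (64 - xinobits))
                return ino;
        return ino | ((uint64_t)fsid << (64 - xinobits));
}

int main(void)
{
        unsigned int xinobits = 2;      /* enough for fsid 0..3 */

        printf("upper  ino 100 -> 0x%llx\n",
               (unsigned long long)xino_pack(100, xinobits, 0));
        printf("lower1 ino 100 -> 0x%llx\n",
               (unsigned long long)xino_pack(100, xinobits, 1));
        printf("lower2 ino 100 -> 0x%llx\n",
               (unsigned long long)xino_pack(100, xinobits, 2));
        return 0;
}

With xinobits=2 the upper inode keeps ino 100, while the two lower copies become 0x4000000000000064 and 0x8000000000000064, so the {st_dev; st_ino} pair stays unique across layers under the single overlay st_dev.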
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 70fcfcc684cc..2dba29eadde6 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -56,6 +56,15 @@ static int ovl_check_redirect(struct dentry *dentry, struct ovl_lookup_data *d,
if (s == next)
goto invalid;
}
+ /*
+ * One of the ancestor path elements in an absolute path
+ * lookup in ovl_lookup_layer() could have been opaque and
+ * that will stop further lookup in lower layers (d->stop=true).
+ * But we have found an absolute redirect in a descendant path
+ * element, and that should force continued lookup in lower
+ * layers (reset d->stop).
+ */
+ d->stop = false;
} else {
if (strchr(buf, '/') != NULL)
goto invalid;
@@ -171,7 +180,8 @@ invalid:
goto out;
}
-struct dentry *ovl_decode_fh(struct ovl_fh *fh, struct vfsmount *mnt)
+struct dentry *ovl_decode_real_fh(struct ovl_fh *fh, struct vfsmount *mnt,
+ bool connected)
{
struct dentry *real;
int bytes;
@@ -186,7 +196,7 @@ struct dentry *ovl_decode_fh(struct ovl_fh *fh, struct vfsmount *mnt)
bytes = (fh->len - offsetof(struct ovl_fh, fid));
real = exportfs_decode_fh(mnt, (struct fid *)fh->fid,
bytes >> 2, (int)fh->type,
- ovl_acceptable, mnt);
+ connected ? ovl_acceptable : NULL, mnt);
if (IS_ERR(real)) {
/*
* Treat stale file handle to lower file as "origin unknown".
@@ -220,6 +230,7 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
{
struct dentry *this;
int err;
+ bool last_element = !post[0];
this = lookup_one_len_unlocked(name, base, namelen);
if (IS_ERR(this)) {
@@ -245,11 +256,23 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
d->stop = true;
if (d->is_dir)
goto put_and_out;
+
+ /*
+ * NB: handle failure to lookup non-last element when non-dir
+ * redirects become possible
+ */
+ WARN_ON(!last_element);
goto out;
}
- d->is_dir = true;
- if (!d->last && ovl_is_opaquedir(this)) {
- d->stop = d->opaque = true;
+ if (last_element)
+ d->is_dir = true;
+ if (d->last)
+ goto out;
+
+ if (ovl_is_opaquedir(this)) {
+ d->stop = true;
+ if (last_element)
+ d->opaque = true;
goto out;
}
err = ovl_check_redirect(this, d, prelen, post);
@@ -310,14 +333,15 @@ static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d,
}
-int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh,
+int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
struct dentry *upperdentry, struct ovl_path **stackp)
{
struct dentry *origin = NULL;
int i;
for (i = 0; i < ofs->numlower; i++) {
- origin = ovl_decode_fh(fh, ofs->lower_layers[i].mnt);
+ origin = ovl_decode_real_fh(fh, ofs->lower_layers[i].mnt,
+ connected);
if (origin)
break;
}
@@ -361,7 +385,7 @@ static int ovl_check_origin(struct ovl_fs *ofs, struct dentry *upperdentry,
if (IS_ERR_OR_NULL(fh))
return PTR_ERR(fh);
- err = ovl_check_origin_fh(ofs, fh, upperdentry, stackp);
+ err = ovl_check_origin_fh(ofs, fh, false, upperdentry, stackp);
kfree(fh);
if (err) {
@@ -415,7 +439,7 @@ int ovl_verify_set_fh(struct dentry *dentry, const char *name,
struct ovl_fh *fh;
int err;
- fh = ovl_encode_fh(real, is_upper);
+ fh = ovl_encode_real_fh(real, is_upper);
err = PTR_ERR(fh);
if (IS_ERR(fh))
goto fail;
@@ -451,7 +475,7 @@ struct dentry *ovl_index_upper(struct ovl_fs *ofs, struct dentry *index)
if (IS_ERR_OR_NULL(fh))
return ERR_CAST(fh);
- upper = ovl_decode_fh(fh, ofs->upper_mnt);
+ upper = ovl_decode_real_fh(fh, ofs->upper_mnt, true);
kfree(fh);
if (IS_ERR_OR_NULL(upper))
@@ -558,7 +582,7 @@ int ovl_verify_index(struct ovl_fs *ofs, struct dentry *index)
/* Check if non-dir index is orphan and don't warn before cleaning it */
if (!d_is_dir(index) && d_inode(index)->i_nlink == 1) {
- err = ovl_check_origin_fh(ofs, fh, index, &stack);
+ err = ovl_check_origin_fh(ofs, fh, false, index, &stack);
if (err)
goto fail;
@@ -619,7 +643,7 @@ int ovl_get_index_name(struct dentry *origin, struct qstr *name)
struct ovl_fh *fh;
int err;
- fh = ovl_encode_fh(origin, false);
+ fh = ovl_encode_real_fh(origin, false);
if (IS_ERR(fh))
return PTR_ERR(fh);
@@ -815,7 +839,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
.is_dir = false,
.opaque = false,
.stop = false,
- .last = !poe->numlower,
+ .last = ofs->config.redirect_follow ? false : !poe->numlower,
.redirect = NULL,
};
@@ -873,7 +897,11 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
for (i = 0; !d.stop && i < poe->numlower; i++) {
struct ovl_path lower = poe->lowerstack[i];
- d.last = i == poe->numlower - 1;
+ if (!ofs->config.redirect_follow)
+ d.last = i == poe->numlower - 1;
+ else
+ d.last = lower.layer->idx == roe->numlower;
+
err = ovl_lookup_layer(lower.dentry, &d, &this);
if (err)
goto out_put;
@@ -976,17 +1004,18 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
upperdentry = dget(index);
if (upperdentry || ctr) {
- if (ctr)
- origin = stack[0].dentry;
- inode = ovl_get_inode(dentry->d_sb, upperdentry, origin, index,
+ inode = ovl_get_inode(dentry->d_sb, upperdentry, stack, index,
ctr);
err = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_free_oe;
+ /*
+ * NB: handle redirected hard links when non-dir redirects
+ * become possible
+ */
+ WARN_ON(OVL_I(inode)->redirect);
OVL_I(inode)->redirect = upperredirect;
- if (index)
- ovl_set_flag(OVL_INDEX, inode);
}
revert_creds(old_cred);
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 225ff1171147..e0b7de799f6b 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -202,7 +202,7 @@ void ovl_drop_write(struct dentry *dentry);
struct dentry *ovl_workdir(struct dentry *dentry);
const struct cred *ovl_override_creds(struct super_block *sb);
struct super_block *ovl_same_sb(struct super_block *sb);
-bool ovl_can_decode_fh(struct super_block *sb);
+int ovl_can_decode_fh(struct super_block *sb);
struct dentry *ovl_indexdir(struct super_block *sb);
bool ovl_index_all(struct super_block *sb);
bool ovl_verify_lower(struct super_block *sb);
@@ -215,6 +215,7 @@ void ovl_path_lower(struct dentry *dentry, struct path *path);
enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
struct dentry *ovl_dentry_upper(struct dentry *dentry);
struct dentry *ovl_dentry_lower(struct dentry *dentry);
+struct ovl_layer *ovl_layer_lower(struct dentry *dentry);
struct dentry *ovl_dentry_real(struct dentry *dentry);
struct dentry *ovl_i_dentry_upper(struct inode *inode);
struct inode *ovl_inode_upper(struct inode *inode);
@@ -263,11 +264,19 @@ static inline bool ovl_is_impuredir(struct dentry *dentry)
return ovl_check_dir_xattr(dentry, OVL_XATTR_IMPURE);
}
+static inline unsigned int ovl_xino_bits(struct super_block *sb)
+{
+ struct ovl_fs *ofs = sb->s_fs_info;
+
+ return ofs->xino_bits;
+}
+
/* namei.c */
int ovl_check_fh_len(struct ovl_fh *fh, int fh_len);
-struct dentry *ovl_decode_fh(struct ovl_fh *fh, struct vfsmount *mnt);
-int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh,
+struct dentry *ovl_decode_real_fh(struct ovl_fh *fh, struct vfsmount *mnt,
+ bool connected);
+int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
struct dentry *upperdentry, struct ovl_path **stackp);
int ovl_verify_set_fh(struct dentry *dentry, const char *name,
struct dentry *real, bool is_upper, bool set);
@@ -329,7 +338,7 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev);
struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real,
bool is_upper);
struct inode *ovl_get_inode(struct super_block *sb, struct dentry *upperdentry,
- struct dentry *lowerdentry, struct dentry *index,
+ struct ovl_path *lowerpath, struct dentry *index,
unsigned int numlower);
static inline void ovl_copyattr(struct inode *from, struct inode *to)
{
@@ -361,7 +370,7 @@ int ovl_copy_up(struct dentry *dentry);
int ovl_copy_up_flags(struct dentry *dentry, int flags);
int ovl_copy_xattr(struct dentry *old, struct dentry *new);
int ovl_set_attr(struct dentry *upper, struct kstat *stat);
-struct ovl_fh *ovl_encode_fh(struct dentry *real, bool is_upper);
+struct ovl_fh *ovl_encode_real_fh(struct dentry *real, bool is_upper);
int ovl_set_origin(struct dentry *dentry, struct dentry *lower,
struct dentry *upper);
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index bfef6edcc111..41655a7d6894 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -18,13 +18,21 @@ struct ovl_config {
const char *redirect_mode;
bool index;
bool nfs_export;
+ int xino;
+};
+
+struct ovl_sb {
+ struct super_block *sb;
+ dev_t pseudo_dev;
};
struct ovl_layer {
struct vfsmount *mnt;
- dev_t pseudo_dev;
- /* Index of this layer in fs root (upper == 0) */
+ struct ovl_sb *fs;
+ /* Index of this layer in fs root (upper idx == 0) */
int idx;
+ /* One fsid per unique underlying sb (upper fsid == 0) */
+ int fsid;
};
struct ovl_path {
@@ -35,8 +43,11 @@ struct ovl_path {
/* private information held for overlayfs's superblock */
struct ovl_fs {
struct vfsmount *upper_mnt;
- unsigned numlower;
+ unsigned int numlower;
+ /* Number of unique lower sbs that differ from the upper sb */
+ unsigned int numlowerfs;
struct ovl_layer *lower_layers;
+ struct ovl_sb *lower_fs;
/* workbasedir is the path at workdir= mount option */
struct dentry *workbasedir;
/* workdir is the 'work' directory under workbasedir */
@@ -50,11 +61,11 @@ struct ovl_fs {
const struct cred *creator_cred;
bool tmpfile;
bool noxattr;
- /* sb common to all layers */
- struct super_block *same_sb;
/* Did we take the inuse lock? */
bool upperdir_locked;
bool workdir_locked;
+ /* Inode numbers in all layers do not use the high xino_bits */
+ unsigned int xino_bits;
};
/* private information held for every overlayfs dentry */
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index c11f5c0906c3..ef1fe42ff7bb 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -120,6 +120,10 @@ static bool ovl_calc_d_ino(struct ovl_readdir_data *rdd,
if (!rdd->dentry)
return false;
+ /* Always recalc d_ino when remapping lower inode numbers */
+ if (ovl_xino_bits(rdd->dentry->d_sb))
+ return true;
+
/* Always recalc d_ino for parent */
if (strcmp(p->name, "..") == 0)
return true;
@@ -435,6 +439,19 @@ static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
return cache;
}
+/* Map inode number to lower fs unique range */
+static u64 ovl_remap_lower_ino(u64 ino, int xinobits, int fsid,
+ const char *name, int namelen)
+{
+ if (ino >> (64 - xinobits)) {
+ pr_warn_ratelimited("overlayfs: d_ino too big (%.*s, ino=%llu, xinobits=%d)\n",
+ namelen, name, ino, xinobits);
+ return ino;
+ }
+
+ return ino | ((u64)fsid) << (64 - xinobits);
+}
+
/*
* Set d_ino for upper entries. Non-upper entries should always report
* the uppermost real inode ino and should not call this function.
@@ -452,9 +469,10 @@ static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
struct dentry *this = NULL;
enum ovl_path_type type;
u64 ino = p->real_ino;
+ int xinobits = ovl_xino_bits(dir->d_sb);
int err = 0;
- if (!ovl_same_sb(dir->d_sb))
+ if (!ovl_same_sb(dir->d_sb) && !xinobits)
goto out;
if (p->name[0] == '.') {
@@ -491,6 +509,10 @@ get:
WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
ino = stat.ino;
+ } else if (xinobits && !OVL_TYPE_UPPER(type)) {
+ ino = ovl_remap_lower_ino(ino, xinobits,
+ ovl_layer_lower(this)->fsid,
+ p->name, p->len);
}
out:
@@ -618,6 +640,8 @@ struct ovl_readdir_translate {
struct ovl_dir_cache *cache;
struct dir_context ctx;
u64 parent_ino;
+ int fsid;
+ int xinobits;
};
static int ovl_fill_real(struct dir_context *ctx, const char *name,
@@ -628,14 +652,17 @@ static int ovl_fill_real(struct dir_context *ctx, const char *name,
container_of(ctx, struct ovl_readdir_translate, ctx);
struct dir_context *orig_ctx = rdt->orig_ctx;
- if (rdt->parent_ino && strcmp(name, "..") == 0)
+ if (rdt->parent_ino && strcmp(name, "..") == 0) {
ino = rdt->parent_ino;
- else if (rdt->cache) {
+ } else if (rdt->cache) {
struct ovl_cache_entry *p;
p = ovl_cache_entry_find(&rdt->cache->root, name, namelen);
if (p)
ino = p->ino;
+ } else if (rdt->xinobits) {
+ ino = ovl_remap_lower_ino(ino, rdt->xinobits, rdt->fsid,
+ name, namelen);
}
return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type);
@@ -646,11 +673,16 @@ static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
int err;
struct ovl_dir_file *od = file->private_data;
struct dentry *dir = file->f_path.dentry;
+ struct ovl_layer *lower_layer = ovl_layer_lower(dir);
struct ovl_readdir_translate rdt = {
.ctx.actor = ovl_fill_real,
.orig_ctx = ctx,
+ .xinobits = ovl_xino_bits(dir->d_sb),
};
+ if (rdt.xinobits && lower_layer)
+ rdt.fsid = lower_layer->fsid;
+
if (OVL_TYPE_MERGE(ovl_path_type(dir->d_parent))) {
struct kstat stat;
struct path statpath = file->f_path;
@@ -693,9 +725,10 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
* dir is impure then need to adjust d_ino for copied up
* entries.
*/
- if (ovl_same_sb(dentry->d_sb) &&
- (ovl_test_flag(OVL_IMPURE, d_inode(dentry)) ||
- OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent)))) {
+ if (ovl_xino_bits(dentry->d_sb) ||
+ (ovl_same_sb(dentry->d_sb) &&
+ (ovl_test_flag(OVL_IMPURE, d_inode(dentry)) ||
+ OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent))))) {
return ovl_iterate_real(file, ctx);
}
return iterate_dir(od->realfile, ctx);
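ovl_fill_real() above wraps the caller's dir_context so every entry's d_ino can be remapped before it is forwarded. A self-contained sketch of that wrap-and-forward callback pattern (the types and names are simplified stand-ins, not the kernel dir_context API):

#include <stdint.h>
#include <stdio.h>

typedef int (*actor_t)(void *ctx, const char *name, uint64_t ino);

struct translate_ctx {
        actor_t orig_actor;     /* the caller's callback */
        void *orig_ctx;
        unsigned int xinobits;
        unsigned int fsid;
};

/* Remap the inode number, then hand the entry to the original actor. */
static int fill_translated(void *ctx, const char *name, uint64_t ino)
{
        struct translate_ctx *t = ctx;

        if (t->xinobits && !(ino >> (64 - t->xinobits)))
                ino |= (uint64_t)t->fsid << (64 - t->xinobits);
        return t->orig_actor(t->orig_ctx, name, ino);
}

static int print_entry(void *ctx, const char *name, uint64_t ino)
{
        (void)ctx;
        printf("%-8s ino=0x%llx\n", name, (unsigned long long)ino);
        return 0;
}

int main(void)
{
        struct translate_ctx t = {
                .orig_actor = print_entry,
                .xinobits = 2,
                .fsid = 1,
        };

        /* Simulate iterating a lower directory through the wrapper. */
        fill_translated(&t, "a.txt", 100);
        fill_translated(&t, "b.txt", 200);
        return 0;
}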
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 7c24619ae7fc..e8551c97de51 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -17,6 +17,7 @@
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/posix_acl_xattr.h>
+#include <linux/exportfs.h>
#include "overlayfs.h"
MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
@@ -50,6 +51,11 @@ module_param_named(nfs_export, ovl_nfs_export_def, bool, 0644);
MODULE_PARM_DESC(ovl_nfs_export_def,
"Default to on or off for the NFS export feature");
+static bool ovl_xino_auto_def = IS_ENABLED(CONFIG_OVERLAY_FS_XINO_AUTO);
+module_param_named(xino_auto, ovl_xino_auto_def, bool, 0644);
+MODULE_PARM_DESC(ovl_xino_auto_def,
+ "Auto enable xino feature");
+
static void ovl_entry_stack_free(struct ovl_entry *oe)
{
unsigned int i;
@@ -236,11 +242,12 @@ static void ovl_free_fs(struct ovl_fs *ofs)
if (ofs->upperdir_locked)
ovl_inuse_unlock(ofs->upper_mnt->mnt_root);
mntput(ofs->upper_mnt);
- for (i = 0; i < ofs->numlower; i++) {
+ for (i = 0; i < ofs->numlower; i++)
mntput(ofs->lower_layers[i].mnt);
- free_anon_bdev(ofs->lower_layers[i].pseudo_dev);
- }
+ for (i = 0; i < ofs->numlowerfs; i++)
+ free_anon_bdev(ofs->lower_fs[i].pseudo_dev);
kfree(ofs->lower_layers);
+ kfree(ofs->lower_fs);
kfree(ofs->config.lowerdir);
kfree(ofs->config.upperdir);
@@ -325,6 +332,23 @@ static const char *ovl_redirect_mode_def(void)
return ovl_redirect_dir_def ? "on" : "off";
}
+enum {
+ OVL_XINO_OFF,
+ OVL_XINO_AUTO,
+ OVL_XINO_ON,
+};
+
+static const char * const ovl_xino_str[] = {
+ "off",
+ "auto",
+ "on",
+};
+
+static inline int ovl_xino_def(void)
+{
+ return ovl_xino_auto_def ? OVL_XINO_AUTO : OVL_XINO_OFF;
+}
+
/**
* ovl_show_options
*
@@ -350,6 +374,8 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
if (ofs->config.nfs_export != ovl_nfs_export_def)
seq_printf(m, ",nfs_export=%s", ofs->config.nfs_export ?
"on" : "off");
+ if (ofs->config.xino != ovl_xino_def())
+ seq_printf(m, ",xino=%s", ovl_xino_str[ofs->config.xino]);
return 0;
}
@@ -384,6 +410,9 @@ enum {
OPT_INDEX_OFF,
OPT_NFS_EXPORT_ON,
OPT_NFS_EXPORT_OFF,
+ OPT_XINO_ON,
+ OPT_XINO_OFF,
+ OPT_XINO_AUTO,
OPT_ERR,
};
@@ -397,6 +426,9 @@ static const match_table_t ovl_tokens = {
{OPT_INDEX_OFF, "index=off"},
{OPT_NFS_EXPORT_ON, "nfs_export=on"},
{OPT_NFS_EXPORT_OFF, "nfs_export=off"},
+ {OPT_XINO_ON, "xino=on"},
+ {OPT_XINO_OFF, "xino=off"},
+ {OPT_XINO_AUTO, "xino=auto"},
{OPT_ERR, NULL}
};
@@ -511,6 +543,18 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
config->nfs_export = false;
break;
+ case OPT_XINO_ON:
+ config->xino = OVL_XINO_ON;
+ break;
+
+ case OPT_XINO_OFF:
+ config->xino = OVL_XINO_OFF;
+ break;
+
+ case OPT_XINO_AUTO:
+ config->xino = OVL_XINO_AUTO;
+ break;
+
default:
pr_err("overlayfs: unrecognized mount option \"%s\" or missing value\n", p);
return -EINVAL;
@@ -700,6 +744,7 @@ static int ovl_check_namelen(struct path *path, struct ovl_fs *ofs,
static int ovl_lower_dir(const char *name, struct path *path,
struct ovl_fs *ofs, int *stack_depth, bool *remote)
{
+ int fh_type;
int err;
err = ovl_mount_dir_noesc(name, path);
@@ -719,15 +764,19 @@ static int ovl_lower_dir(const char *name, struct path *path,
* The inodes index feature and NFS export need to encode and decode
* file handles, so they require that all layers support them.
*/
+ fh_type = ovl_can_decode_fh(path->dentry->d_sb);
if ((ofs->config.nfs_export ||
- (ofs->config.index && ofs->config.upperdir)) &&
- !ovl_can_decode_fh(path->dentry->d_sb)) {
+ (ofs->config.index && ofs->config.upperdir)) && !fh_type) {
ofs->config.index = false;
ofs->config.nfs_export = false;
pr_warn("overlayfs: fs on '%s' does not support file handles, falling back to index=off,nfs_export=off.\n",
name);
}
+ /* Check if lower fs has 32bit inode numbers */
+ if (fh_type != FILEID_INO32_GEN)
+ ofs->xino_bits = 0;
+
return 0;
out_put:
@@ -951,6 +1000,7 @@ static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath)
{
struct vfsmount *mnt = ofs->upper_mnt;
struct dentry *temp;
+ int fh_type;
int err;
err = mnt_want_write(mnt);
@@ -1000,12 +1050,16 @@ static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath)
}
/* Check if upper/work fs supports file handles */
- if (ofs->config.index &&
- !ovl_can_decode_fh(ofs->workdir->d_sb)) {
+ fh_type = ovl_can_decode_fh(ofs->workdir->d_sb);
+ if (ofs->config.index && !fh_type) {
ofs->config.index = false;
pr_warn("overlayfs: upper fs does not support file handles, falling back to index=off.\n");
}
+ /* Check if upper fs has 32bit inode numbers */
+ if (fh_type != FILEID_INO32_GEN)
+ ofs->xino_bits = 0;
+
/* NFS export of r/w mount depends on index */
if (ofs->config.nfs_export && !ofs->config.index) {
pr_warn("overlayfs: NFS export requires \"index=on\", falling back to nfs_export=off.\n");
@@ -1108,6 +1162,35 @@ out:
return err;
}
+/* Get a unique fsid for the layer */
+static int ovl_get_fsid(struct ovl_fs *ofs, struct super_block *sb)
+{
+ unsigned int i;
+ dev_t dev;
+ int err;
+
+ /* fsid 0 is reserved for the upper fs even with a non-upper overlay */
+ if (ofs->upper_mnt && ofs->upper_mnt->mnt_sb == sb)
+ return 0;
+
+ for (i = 0; i < ofs->numlowerfs; i++) {
+ if (ofs->lower_fs[i].sb == sb)
+ return i + 1;
+ }
+
+ err = get_anon_bdev(&dev);
+ if (err) {
+ pr_err("overlayfs: failed to get anonymous bdev for lowerpath\n");
+ return err;
+ }
+
+ ofs->lower_fs[ofs->numlowerfs].sb = sb;
+ ofs->lower_fs[ofs->numlowerfs].pseudo_dev = dev;
+ ofs->numlowerfs++;
+
+ return ofs->numlowerfs;
+}
+
static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack,
unsigned int numlower)
{
@@ -1119,23 +1202,27 @@ static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack,
GFP_KERNEL);
if (ofs->lower_layers == NULL)
goto out;
+
+ ofs->lower_fs = kcalloc(numlower, sizeof(struct ovl_sb),
+ GFP_KERNEL);
+ if (ofs->lower_fs == NULL)
+ goto out;
+
for (i = 0; i < numlower; i++) {
struct vfsmount *mnt;
- dev_t dev;
+ int fsid;
- err = get_anon_bdev(&dev);
- if (err) {
- pr_err("overlayfs: failed to get anonymous bdev for lowerpath\n");
+ err = fsid = ovl_get_fsid(ofs, stack[i].mnt->mnt_sb);
+ if (err < 0)
goto out;
- }
mnt = clone_private_mount(&stack[i]);
err = PTR_ERR(mnt);
if (IS_ERR(mnt)) {
pr_err("overlayfs: failed to clone lowerpath\n");
- free_anon_bdev(dev);
goto out;
}
+
/*
* Make lower layers R/O. That way fchmod/fchown on lower file
* will fail instead of modifying lower fs.
@@ -1143,16 +1230,41 @@ static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack,
mnt->mnt_flags |= MNT_READONLY | MNT_NOATIME;
ofs->lower_layers[ofs->numlower].mnt = mnt;
- ofs->lower_layers[ofs->numlower].pseudo_dev = dev;
ofs->lower_layers[ofs->numlower].idx = i + 1;
+ ofs->lower_layers[ofs->numlower].fsid = fsid;
+ if (fsid) {
+ ofs->lower_layers[ofs->numlower].fs =
+ &ofs->lower_fs[fsid - 1];
+ }
ofs->numlower++;
+ }
+
+ /*
+ * When all layers are on the same fs, overlay can use real inode numbers.
+ * With the mount option "xino=on", the mounter declares that there are
+ * enough free high bits in the underlying fs to hold the unique fsid.
+ * If overlayfs does encounter underlying inodes using the high xino
+ * bits reserved for fsid, it emits a warning and uses the original
+ * inode number.
+ */
+ if (!ofs->numlowerfs || (ofs->numlowerfs == 1 && !ofs->upper_mnt)) {
+ ofs->xino_bits = 0;
+ ofs->config.xino = OVL_XINO_OFF;
+ } else if (ofs->config.xino == OVL_XINO_ON && !ofs->xino_bits) {
+ /*
+ * This is a roundup of the number of bits needed for numlowerfs+1
+ * fsid values (i.e. ilog2(numlowerfs+1 - 1) + 1). fsid 0 is reserved
+ * for the upper fs even with a non-upper overlay.
+ */
+ BUILD_BUG_ON(ilog2(OVL_MAX_STACK) > 31);
+ ofs->xino_bits = ilog2(ofs->numlowerfs) + 1;
+ }
- /* Check if all lower layers are on same sb */
- if (i == 0)
- ofs->same_sb = mnt->mnt_sb;
- else if (ofs->same_sb != mnt->mnt_sb)
- ofs->same_sb = NULL;
+ if (ofs->xino_bits) {
+ pr_info("overlayfs: \"xino\" feature enabled using %d upper inode bits.\n",
+ ofs->xino_bits);
}
+
err = 0;
out:
return err;
@@ -1263,6 +1375,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
ofs->config.index = ovl_index_def;
ofs->config.nfs_export = ovl_nfs_export_def;
+ ofs->config.xino = ovl_xino_def();
err = ovl_parse_opt((char *) data, &ofs->config);
if (err)
goto out_err;
@@ -1276,6 +1389,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
sb->s_stack_depth = 0;
sb->s_maxbytes = MAX_LFS_FILESIZE;
+ /* Assume underlying fs uses 32bit inodes unless proven otherwise */
+ if (ofs->config.xino != OVL_XINO_OFF)
+ ofs->xino_bits = BITS_PER_LONG - 32;
+
if (ofs->config.upperdir) {
if (!ofs->config.workdir) {
pr_err("overlayfs: missing 'workdir'\n");
@@ -1305,8 +1422,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
/* If the upper fs is nonexistent, we mark overlayfs r/o too */
if (!ofs->upper_mnt)
sb->s_flags |= SB_RDONLY;
- else if (ofs->upper_mnt->mnt_sb != ofs->same_sb)
- ofs->same_sb = NULL;
if (!(ovl_force_readonly(ofs)) && ofs->config.index) {
err = ovl_get_indexdir(ofs, oe, &upperpath);
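ovl_get_lower_layers() above reserves xino_bits = ilog2(numlowerfs) + 1 high bits so that fsid values 0..numlowerfs all fit, with fsid 0 kept for the upper fs. A rough userspace check of that sizing rule (xino_bits_needed is an illustrative helper, not kernel code):

#include <stdio.h>

/* Smallest number of bits that can hold fsid values 0..nlowerfs. */
static unsigned int xino_bits_needed(unsigned int nlowerfs)
{
        unsigned int bits = 0;

        while ((1u << bits) < nlowerfs + 1)
                bits++;
        return bits;
}

int main(void)
{
        unsigned int n;

        for (n = 1; n <= 8; n++)
                printf("%u unique lower fs -> %u xino bits\n",
                       n, xino_bits_needed(n));
        return 0;
}

This matches ilog2(numlowerfs) + 1 across the range: one lower fs needs 1 bit, two or three need 2 bits, four to seven need 3 bits, and so on. Before the layers are scanned, ovl_fill_super() optimistically reserves BITS_PER_LONG - 32 bits, and each layer whose file handle type is not FILEID_INO32_GEN drops xino_bits back to 0.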
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 930784a26623..6f1078028c66 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -47,13 +47,29 @@ struct super_block *ovl_same_sb(struct super_block *sb)
{
struct ovl_fs *ofs = sb->s_fs_info;
- return ofs->same_sb;
+ if (!ofs->numlowerfs)
+ return ofs->upper_mnt->mnt_sb;
+ else if (ofs->numlowerfs == 1 && !ofs->upper_mnt)
+ return ofs->lower_fs[0].sb;
+ else
+ return NULL;
}
-bool ovl_can_decode_fh(struct super_block *sb)
+/*
+ * Check if the underlying fs supports file handles and try to determine the
+ * encoding type, in order to deduce the maximum inode number used by the fs.
+ *
+ * Return 0 if file handles are not supported.
+ * Return 1 (FILEID_INO32_GEN) if fs uses the default 32bit inode encoding.
+ * Return -1 if fs uses a non-default encoding with unknown inode size.
+ */
+int ovl_can_decode_fh(struct super_block *sb)
{
- return (sb->s_export_op && sb->s_export_op->fh_to_dentry &&
- !uuid_is_null(&sb->s_uuid));
+ if (!sb->s_export_op || !sb->s_export_op->fh_to_dentry ||
+ uuid_is_null(&sb->s_uuid))
+ return 0;
+
+ return sb->s_export_op->encode_fh ? -1 : FILEID_INO32_GEN;
}
struct dentry *ovl_indexdir(struct super_block *sb)
@@ -172,6 +188,13 @@ struct dentry *ovl_dentry_lower(struct dentry *dentry)
return oe->numlower ? oe->lowerstack[0].dentry : NULL;
}
+struct ovl_layer *ovl_layer_lower(struct dentry *dentry)
+{
+ struct ovl_entry *oe = dentry->d_fsdata;
+
+ return oe->numlower ? oe->lowerstack[0].layer : NULL;
+}
+
struct dentry *ovl_dentry_real(struct dentry *dentry)
{
return ovl_dentry_upper(dentry) ?: ovl_dentry_lower(dentry);
@@ -279,12 +302,16 @@ void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect)
void ovl_inode_init(struct inode *inode, struct dentry *upperdentry,
struct dentry *lowerdentry)
{
+ struct inode *realinode = d_inode(upperdentry ?: lowerdentry);
+
if (upperdentry)
OVL_I(inode)->__upperdentry = upperdentry;
if (lowerdentry)
OVL_I(inode)->lower = igrab(d_inode(lowerdentry));
- ovl_copyattr(d_inode(upperdentry ?: lowerdentry), inode);
+ ovl_copyattr(realinode, inode);
+ if (!inode->i_ino)
+ inode->i_ino = realinode->i_ino;
}
void ovl_inode_update(struct inode *inode, struct dentry *upperdentry)
@@ -299,6 +326,8 @@ void ovl_inode_update(struct inode *inode, struct dentry *upperdentry)
smp_wmb();
OVL_I(inode)->__upperdentry = upperdentry;
if (inode_unhashed(inode)) {
+ if (!inode->i_ino)
+ inode->i_ino = upperinode->i_ino;
inode->i_private = upperinode;
__insert_inode_hash(inode, (unsigned long) upperinode);
}
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 04c4804cbdef..2078e70e1595 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -15,6 +15,7 @@
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/mount.h>
@@ -217,6 +218,26 @@ void proc_free_inum(unsigned int inum)
ida_simple_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
}
+static int proc_misc_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ if (atomic_read(&PDE(d_inode(dentry))->in_use) < 0)
+ return 0; /* revalidate */
+ return 1;
+}
+
+static int proc_misc_d_delete(const struct dentry *dentry)
+{
+ return atomic_read(&PDE(d_inode(dentry))->in_use) < 0;
+}
+
+static const struct dentry_operations proc_misc_dentry_ops = {
+ .d_revalidate = proc_misc_d_revalidate,
+ .d_delete = proc_misc_d_delete,
+};
+
/*
* Don't create negative dentries here, return -ENOENT by hand
* instead.
@@ -234,7 +255,7 @@ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry,
inode = proc_get_inode(dir->i_sb, de);
if (!inode)
return ERR_PTR(-ENOMEM);
- d_set_d_op(dentry, &simple_dentry_operations);
+ d_set_d_op(dentry, &proc_misc_dentry_ops);
d_add(dentry, inode);
return NULL;
}
diff --git a/fs/super.c b/fs/super.c
index 672538ca9831..5fa9a8d8d865 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -37,6 +37,7 @@
#include <linux/user_namespace.h>
#include "internal.h"
+static int thaw_super_locked(struct super_block *sb);
static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);
@@ -574,6 +575,28 @@ void drop_super_exclusive(struct super_block *sb)
}
EXPORT_SYMBOL(drop_super_exclusive);
+static void __iterate_supers(void (*f)(struct super_block *))
+{
+ struct super_block *sb, *p = NULL;
+
+ spin_lock(&sb_lock);
+ list_for_each_entry(sb, &super_blocks, s_list) {
+ if (hlist_unhashed(&sb->s_instances))
+ continue;
+ sb->s_count++;
+ spin_unlock(&sb_lock);
+
+ f(sb);
+
+ spin_lock(&sb_lock);
+ if (p)
+ __put_super(p);
+ p = sb;
+ }
+ if (p)
+ __put_super(p);
+ spin_unlock(&sb_lock);
+}
/**
* iterate_supers - call function for all active superblocks
* @f: function to call
@@ -881,33 +904,22 @@ cancel_readonly:
return retval;
}
-static void do_emergency_remount(struct work_struct *work)
+static void do_emergency_remount_callback(struct super_block *sb)
{
- struct super_block *sb, *p = NULL;
-
- spin_lock(&sb_lock);
- list_for_each_entry(sb, &super_blocks, s_list) {
- if (hlist_unhashed(&sb->s_instances))
- continue;
- sb->s_count++;
- spin_unlock(&sb_lock);
- down_write(&sb->s_umount);
- if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) &&
- !sb_rdonly(sb)) {
- /*
- * What lock protects sb->s_flags??
- */
- do_remount_sb(sb, SB_RDONLY, NULL, 1);
- }
- up_write(&sb->s_umount);
- spin_lock(&sb_lock);
- if (p)
- __put_super(p);
- p = sb;
+ down_write(&sb->s_umount);
+ if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) &&
+ !sb_rdonly(sb)) {
+ /*
+ * What lock protects sb->s_flags??
+ */
+ do_remount_sb(sb, SB_RDONLY, NULL, 1);
}
- if (p)
- __put_super(p);
- spin_unlock(&sb_lock);
+ up_write(&sb->s_umount);
+}
+
+static void do_emergency_remount(struct work_struct *work)
+{
+ __iterate_supers(do_emergency_remount_callback);
kfree(work);
printk("Emergency Remount complete\n");
}
@@ -923,6 +935,40 @@ void emergency_remount(void)
}
}
+static void do_thaw_all_callback(struct super_block *sb)
+{
+ down_write(&sb->s_umount);
+ if (sb->s_root && sb->s_flags & SB_BORN) {
+ emergency_thaw_bdev(sb);
+ thaw_super_locked(sb);
+ } else {
+ up_write(&sb->s_umount);
+ }
+}
+
+static void do_thaw_all(struct work_struct *work)
+{
+ __iterate_supers(do_thaw_all_callback);
+ kfree(work);
+ printk(KERN_WARNING "Emergency Thaw complete\n");
+}
+
+/**
+ * emergency_thaw_all -- forcibly thaw every frozen filesystem
+ *
+ * Used for emergency unfreeze of all filesystems via SysRq
+ */
+void emergency_thaw_all(void)
+{
+ struct work_struct *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK(work, do_thaw_all);
+ schedule_work(work);
+ }
+}
+
/*
* Unnamed block devices are dummy devices used by virtual
* filesystems which don't use real block-devices. -- jrs
@@ -1492,11 +1538,10 @@ EXPORT_SYMBOL(freeze_super);
*
* Unlocks the filesystem and marks it writeable again after freeze_super().
*/
-int thaw_super(struct super_block *sb)
+static int thaw_super_locked(struct super_block *sb)
{
int error;
- down_write(&sb->s_umount);
if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
up_write(&sb->s_umount);
return -EINVAL;
@@ -1527,4 +1572,10 @@ out:
deactivate_locked_super(sb);
return 0;
}
+
+int thaw_super(struct super_block *sb)
+{
+ down_write(&sb->s_umount);
+ return thaw_super_locked(sb);
+}
EXPORT_SYMBOL(thaw_super);
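The fs/super.c changes above pull the duplicated "walk every superblock with a held reference" loop into __iterate_supers() and pass the per-sb work as a callback, which both emergency remount and the new emergency thaw path reuse. A stand-alone sketch of the same refactoring pattern on a plain list (the struct and function names here are illustrative only):

#include <stdio.h>

struct sbx {
        int id;
        int frozen;
        struct sbx *next;
};

/*
 * Shared walker: visit every entry and apply the callback. In the kernel
 * version this is also where the sb_lock handling and s_count reference
 * juggling live, so the callers stay simple.
 */
static void iterate(struct sbx *head, void (*f)(struct sbx *))
{
        struct sbx *s;

        for (s = head; s; s = s->next)
                f(s);
}

static void remount_ro_cb(struct sbx *s)
{
        printf("sb %d: remount read-only\n", s->id);
}

static void thaw_cb(struct sbx *s)
{
        if (s->frozen) {
                s->frozen = 0;
                printf("sb %d: thawed\n", s->id);
        }
}

int main(void)
{
        struct sbx c = { 3, 1, NULL };
        struct sbx b = { 2, 0, &c };
        struct sbx a = { 1, 1, &b };

        iterate(&a, remount_ro_cb);     /* emergency remount path */
        iterate(&a, thaw_cb);           /* emergency thaw path */
        return 0;
}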
diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c
index 2dcf3d473fec..9571616b5dda 100644
--- a/fs/ubifs/find.c
+++ b/fs/ubifs/find.c
@@ -632,7 +632,7 @@ static int scan_for_idx_cb(struct ubifs_info *c,
*/
static const struct ubifs_lprops *scan_for_leb_for_idx(struct ubifs_info *c)
{
- struct ubifs_lprops *lprops;
+ const struct ubifs_lprops *lprops;
struct scan_data data;
int err;
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
index 6c3a1abd0e22..f5a46844340c 100644
--- a/fs/ubifs/lprops.c
+++ b/fs/ubifs/lprops.c
@@ -244,7 +244,6 @@ static void remove_from_lpt_heap(struct ubifs_info *c,
/**
* lpt_heap_replace - replace lprops in a category heap.
* @c: UBIFS file-system description object
- * @old_lprops: LEB properties to replace
* @new_lprops: LEB properties with which to replace
* @cat: LEB category
*
@@ -254,7 +253,6 @@ static void remove_from_lpt_heap(struct ubifs_info *c,
* lprops. This function does that.
*/
static void lpt_heap_replace(struct ubifs_info *c,
- struct ubifs_lprops *old_lprops,
struct ubifs_lprops *new_lprops, int cat)
{
struct ubifs_lpt_heap *heap;
@@ -362,7 +360,7 @@ void ubifs_replace_cat(struct ubifs_info *c, struct ubifs_lprops *old_lprops,
case LPROPS_DIRTY:
case LPROPS_DIRTY_IDX:
case LPROPS_FREE:
- lpt_heap_replace(c, old_lprops, new_lprops, cat);
+ lpt_heap_replace(c, new_lprops, cat);
break;
case LPROPS_UNCAT:
case LPROPS_EMPTY:
diff --git a/fs/ubifs/scan.c b/fs/ubifs/scan.c
index aab87340d3de..16f03d9929e5 100644
--- a/fs/ubifs/scan.c
+++ b/fs/ubifs/scan.c
@@ -175,7 +175,6 @@ struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum,
void ubifs_end_scan(const struct ubifs_info *c, struct ubifs_scan_leb *sleb,
int lnum, int offs)
{
- lnum = lnum;
dbg_scan("stop scanning LEB %d at offset %d", lnum, offs);
ubifs_assert(offs % c->min_io_size == 0);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index b16ef162344a..6c397a389105 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1737,8 +1737,11 @@ static void ubifs_remount_ro(struct ubifs_info *c)
dbg_save_space_info(c);
- for (i = 0; i < c->jhead_cnt; i++)
- ubifs_wbuf_sync(&c->jheads[i].wbuf);
+ for (i = 0; i < c->jhead_cnt; i++) {
+ err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
+ if (err)
+ ubifs_ro_mode(c, err);
+ }
c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
@@ -1804,8 +1807,11 @@ static void ubifs_put_super(struct super_block *sb)
int err;
/* Synchronize write-buffers */
- for (i = 0; i < c->jhead_cnt; i++)
- ubifs_wbuf_sync(&c->jheads[i].wbuf);
+ for (i = 0; i < c->jhead_cnt; i++) {
+ err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
+ if (err)
+ ubifs_ro_mode(c, err);
+ }
/*
* We are being cleanly unmounted which means the
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 39387bdd225d..4bcc095fe44a 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -1947,7 +1947,7 @@ void
xfs_alloc_compute_maxlevels(
xfs_mount_t *mp) /* file system mount structure */
{
- mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp, mp->m_alloc_mnr,
+ mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
(mp->m_sb.sb_agblocks + 1) / 2);
}
@@ -1959,7 +1959,6 @@ xfs_alloc_compute_maxlevels(
*/
xfs_extlen_t
xfs_alloc_longest_free_extent(
- struct xfs_mount *mp,
struct xfs_perag *pag,
xfs_extlen_t need,
xfs_extlen_t reserved)
@@ -2038,8 +2037,7 @@ xfs_alloc_space_available(
/* do we have enough contiguous free space for the allocation? */
alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
- longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free,
- reservation);
+ longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
if (longest < alloc_len)
return false;
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index a311a2414a6b..cbf789ea5a4e 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -116,9 +116,8 @@ xfs_alloc_allow_busy_reuse(int datatype)
unsigned int xfs_alloc_set_aside(struct xfs_mount *mp);
unsigned int xfs_alloc_ag_max_usable(struct xfs_mount *mp);
-xfs_extlen_t xfs_alloc_longest_free_extent(struct xfs_mount *mp,
- struct xfs_perag *pag, xfs_extlen_t need,
- xfs_extlen_t reserved);
+xfs_extlen_t xfs_alloc_longest_free_extent(struct xfs_perag *pag,
+ xfs_extlen_t need, xfs_extlen_t reserved);
unsigned int xfs_alloc_min_freelist(struct xfs_mount *mp,
struct xfs_perag *pag);
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 3b03d886df66..6a7c2f03ea11 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3225,7 +3225,7 @@ xfs_bmap_longest_free_extent(
}
}
- longest = xfs_alloc_longest_free_extent(mp, pag,
+ longest = xfs_alloc_longest_free_extent(pag,
xfs_alloc_min_freelist(mp, pag),
xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
if (*blen < longest)
@@ -5667,7 +5667,6 @@ xfs_bmap_collapse_extents(
xfs_fileoff_t *next_fsb,
xfs_fileoff_t offset_shift_fsb,
bool *done,
- xfs_fileoff_t stop_fsb,
xfs_fsblock_t *firstblock,
struct xfs_defer_ops *dfops)
{
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index f3be6416260b..2b766b37096d 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -228,7 +228,7 @@ void xfs_bmap_del_extent_cow(struct xfs_inode *ip,
uint xfs_default_attroffset(struct xfs_inode *ip);
int xfs_bmap_collapse_extents(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
- bool *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock,
+ bool *done, xfs_fsblock_t *firstblock,
struct xfs_defer_ops *dfops);
int xfs_bmap_insert_extents(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index edc0193358a5..ac7d66427e42 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -4531,7 +4531,6 @@ xfs_btree_sblock_verify(
*/
uint
xfs_btree_compute_maxlevels(
- struct xfs_mount *mp,
uint *limits,
unsigned long len)
{
@@ -4839,7 +4838,6 @@ xfs_btree_query_all(
*/
xfs_extlen_t
xfs_btree_calc_size(
- struct xfs_mount *mp,
uint *limits,
unsigned long long len)
{
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index 58e30c0975c3..9227159a751e 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -481,10 +481,8 @@ xfs_failaddr_t xfs_btree_lblock_v5hdr_verify(struct xfs_buf *bp,
xfs_failaddr_t xfs_btree_lblock_verify(struct xfs_buf *bp,
unsigned int max_recs);
-uint xfs_btree_compute_maxlevels(struct xfs_mount *mp, uint *limits,
- unsigned long len);
-xfs_extlen_t xfs_btree_calc_size(struct xfs_mount *mp, uint *limits,
- unsigned long long len);
+uint xfs_btree_compute_maxlevels(uint *limits, unsigned long len);
+xfs_extlen_t xfs_btree_calc_size(uint *limits, unsigned long long len);
/* return codes */
#define XFS_BTREE_QUERY_RANGE_CONTINUE 0 /* keep iterating */
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 0e2cf5f0be1f..de627fa19168 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -2406,7 +2406,7 @@ xfs_ialloc_compute_maxlevels(
uint inodes;
inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG;
- mp->m_in_maxlevels = xfs_btree_compute_maxlevels(mp, mp->m_inobt_mnr,
+ mp->m_in_maxlevels = xfs_btree_compute_maxlevels(mp->m_inobt_mnr,
inodes);
}
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index a2dd7f4a2719..367e9a0726e6 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -556,7 +556,7 @@ xfs_inobt_max_size(
if (mp->m_inobt_mxr[0] == 0)
return 0;
- return xfs_btree_calc_size(mp, mp->m_inobt_mnr,
+ return xfs_btree_calc_size(mp->m_inobt_mnr,
(uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
XFS_INODES_PER_CHUNK);
}
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index bee68c23d612..560e28473024 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -351,7 +351,6 @@ xfs_refcount_merge_center_extents(
struct xfs_refcount_irec *center,
struct xfs_refcount_irec *right,
unsigned long long extlen,
- xfs_agblock_t *agbno,
xfs_extlen_t *aglen)
{
int error;
@@ -471,7 +470,6 @@ xfs_refcount_merge_right_extent(
struct xfs_btree_cur *cur,
struct xfs_refcount_irec *right,
struct xfs_refcount_irec *cright,
- xfs_agblock_t *agbno,
xfs_extlen_t *aglen)
{
int error;
@@ -749,7 +747,7 @@ xfs_refcount_merge_extents(
ulen < MAXREFCEXTLEN) {
*shape_changed = true;
return xfs_refcount_merge_center_extents(cur, &left, &cleft,
- &right, ulen, agbno, aglen);
+ &right, ulen, aglen);
}
/* Try to merge left and cleft. */
@@ -778,7 +776,7 @@ xfs_refcount_merge_extents(
ulen < MAXREFCEXTLEN) {
*shape_changed = true;
return xfs_refcount_merge_right_extent(cur, &right, &cright,
- agbno, aglen);
+ aglen);
}
return error;
@@ -1356,9 +1354,7 @@ xfs_refcount_adjust_cow_extents(
struct xfs_btree_cur *cur,
xfs_agblock_t agbno,
xfs_extlen_t aglen,
- enum xfs_refc_adjust_op adj,
- struct xfs_defer_ops *dfops,
- struct xfs_owner_info *oinfo)
+ enum xfs_refc_adjust_op adj)
{
struct xfs_refcount_irec ext, tmp;
int error;
@@ -1437,8 +1433,7 @@ xfs_refcount_adjust_cow(
struct xfs_btree_cur *cur,
xfs_agblock_t agbno,
xfs_extlen_t aglen,
- enum xfs_refc_adjust_op adj,
- struct xfs_defer_ops *dfops)
+ enum xfs_refc_adjust_op adj)
{
bool shape_changed;
int error;
@@ -1465,8 +1460,7 @@ xfs_refcount_adjust_cow(
goto out_error;
/* Now that we've taken care of the ends, adjust the middle extents */
- error = xfs_refcount_adjust_cow_extents(cur, agbno, aglen, adj,
- dfops, NULL);
+ error = xfs_refcount_adjust_cow_extents(cur, agbno, aglen, adj);
if (error)
goto out_error;
@@ -1493,7 +1487,7 @@ __xfs_refcount_cow_alloc(
/* Add refcount btree reservation */
return xfs_refcount_adjust_cow(rcur, agbno, aglen,
- XFS_REFCOUNT_ADJUST_COW_ALLOC, dfops);
+ XFS_REFCOUNT_ADJUST_COW_ALLOC);
}
/*
@@ -1511,7 +1505,7 @@ __xfs_refcount_cow_free(
/* Remove refcount btree reservation */
return xfs_refcount_adjust_cow(rcur, agbno, aglen,
- XFS_REFCOUNT_ADJUST_COW_FREE, dfops);
+ XFS_REFCOUNT_ADJUST_COW_FREE);
}
/* Record a CoW staging extent in the refcount btree. */
@@ -1568,7 +1562,7 @@ struct xfs_refcount_recovery {
/* Stuff an extent on the recovery list. */
STATIC int
xfs_refcount_recover_extent(
- struct xfs_btree_cur *cur,
+ struct xfs_btree_cur *cur,
union xfs_btree_rec *rec,
void *priv)
{
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index 265fdcefcbae..375abfeb6267 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -373,7 +373,6 @@ xfs_refcountbt_init_cursor(
*/
int
xfs_refcountbt_maxrecs(
- struct xfs_mount *mp,
int blocklen,
bool leaf)
{
@@ -390,7 +389,7 @@ void
xfs_refcountbt_compute_maxlevels(
struct xfs_mount *mp)
{
- mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(mp,
+ mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(
mp->m_refc_mnr, mp->m_sb.sb_agblocks);
}
@@ -400,7 +399,7 @@ xfs_refcountbt_calc_size(
struct xfs_mount *mp,
unsigned long long len)
{
- return xfs_btree_calc_size(mp, mp->m_refc_mnr, len);
+ return xfs_btree_calc_size(mp->m_refc_mnr, len);
}
/*
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.h b/fs/xfs/libxfs/xfs_refcount_btree.h
index 9db008b955b7..2bc4694ef146 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.h
+++ b/fs/xfs/libxfs/xfs_refcount_btree.h
@@ -60,8 +60,7 @@ struct xfs_mount;
extern struct xfs_btree_cur *xfs_refcountbt_init_cursor(struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_buf *agbp, xfs_agnumber_t agno,
struct xfs_defer_ops *dfops);
-extern int xfs_refcountbt_maxrecs(struct xfs_mount *mp, int blocklen,
- bool leaf);
+extern int xfs_refcountbt_maxrecs(int blocklen, bool leaf);
extern void xfs_refcountbt_compute_maxlevels(struct xfs_mount *mp);
extern xfs_extlen_t xfs_refcountbt_calc_size(struct xfs_mount *mp,
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index 79822cf6ebe3..fba8d2718017 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -376,7 +376,6 @@ xfs_rmap_free_check_owner(
struct xfs_mount *mp,
uint64_t ltoff,
struct xfs_rmap_irec *rec,
- xfs_fsblock_t bno,
xfs_filblks_t len,
uint64_t owner,
uint64_t offset,
@@ -519,7 +518,7 @@ xfs_rmap_unmap(
bno + len, out_error);
/* Check owner information. */
- error = xfs_rmap_free_check_owner(mp, ltoff, &ltrec, bno, len, owner,
+ error = xfs_rmap_free_check_owner(mp, ltoff, &ltrec, len, owner,
offset, flags);
if (error)
goto out_error;
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index 8b0d0de1cd11..d756e0b84abf 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -499,7 +499,6 @@ xfs_rmapbt_init_cursor(
*/
int
xfs_rmapbt_maxrecs(
- struct xfs_mount *mp,
int blocklen,
int leaf)
{
@@ -534,7 +533,7 @@ xfs_rmapbt_compute_maxlevels(
if (xfs_sb_version_hasreflink(&mp->m_sb))
mp->m_rmap_maxlevels = XFS_BTREE_MAXLEVELS;
else
- mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(mp,
+ mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(
mp->m_rmap_mnr, mp->m_sb.sb_agblocks);
}
@@ -544,7 +543,7 @@ xfs_rmapbt_calc_size(
struct xfs_mount *mp,
unsigned long long len)
{
- return xfs_btree_calc_size(mp, mp->m_rmap_mnr, len);
+ return xfs_btree_calc_size(mp->m_rmap_mnr, len);
}
/*
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.h b/fs/xfs/libxfs/xfs_rmap_btree.h
index 19c08e933049..d68d96eed7ea 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.h
+++ b/fs/xfs/libxfs/xfs_rmap_btree.h
@@ -55,7 +55,7 @@ struct xfs_mount;
struct xfs_btree_cur *xfs_rmapbt_init_cursor(struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_buf *bp,
xfs_agnumber_t agno);
-int xfs_rmapbt_maxrecs(struct xfs_mount *mp, int blocklen, int leaf);
+int xfs_rmapbt_maxrecs(int blocklen, int leaf);
extern void xfs_rmapbt_compute_maxlevels(struct xfs_mount *mp);
extern xfs_extlen_t xfs_rmapbt_calc_size(struct xfs_mount *mp,
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 53433cc024fd..d9b94bd5f689 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -756,15 +756,13 @@ xfs_sb_mount_common(
mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2;
mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2;
- mp->m_rmap_mxr[0] = xfs_rmapbt_maxrecs(mp, sbp->sb_blocksize, 1);
- mp->m_rmap_mxr[1] = xfs_rmapbt_maxrecs(mp, sbp->sb_blocksize, 0);
+ mp->m_rmap_mxr[0] = xfs_rmapbt_maxrecs(sbp->sb_blocksize, 1);
+ mp->m_rmap_mxr[1] = xfs_rmapbt_maxrecs(sbp->sb_blocksize, 0);
mp->m_rmap_mnr[0] = mp->m_rmap_mxr[0] / 2;
mp->m_rmap_mnr[1] = mp->m_rmap_mxr[1] / 2;
- mp->m_refc_mxr[0] = xfs_refcountbt_maxrecs(mp, sbp->sb_blocksize,
- true);
- mp->m_refc_mxr[1] = xfs_refcountbt_maxrecs(mp, sbp->sb_blocksize,
- false);
+ mp->m_refc_mxr[0] = xfs_refcountbt_maxrecs(sbp->sb_blocksize, true);
+ mp->m_refc_mxr[1] = xfs_refcountbt_maxrecs(sbp->sb_blocksize, false);
mp->m_refc_mnr[0] = mp->m_refc_mxr[0] / 2;
mp->m_refc_mnr[1] = mp->m_refc_mxr[1] / 2;
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index 5f17641f040f..3bccdf73e141 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -734,8 +734,7 @@ xfs_calc_clear_agi_bucket_reservation(
* the xfs_disk_dquot_t: sizeof(struct xfs_disk_dquot)
*/
STATIC uint
-xfs_calc_qm_setqlim_reservation(
- struct xfs_mount *mp)
+xfs_calc_qm_setqlim_reservation(void)
{
return xfs_calc_buf_res(1, sizeof(struct xfs_disk_dquot));
}
@@ -772,8 +771,7 @@ xfs_calc_qm_quotaoff_reservation(
* the xfs_qoff_logitem_t: sizeof(struct xfs_qoff_logitem) * 2
*/
STATIC uint
-xfs_calc_qm_quotaoff_end_reservation(
- struct xfs_mount *mp)
+xfs_calc_qm_quotaoff_end_reservation(void)
{
return sizeof(struct xfs_qoff_logitem) * 2;
}
@@ -877,14 +875,14 @@ xfs_trans_resv_calc(
* The following transactions are logged in logical format with
* a default log count.
*/
- resp->tr_qm_setqlim.tr_logres = xfs_calc_qm_setqlim_reservation(mp);
+ resp->tr_qm_setqlim.tr_logres = xfs_calc_qm_setqlim_reservation();
resp->tr_qm_setqlim.tr_logcount = XFS_DEFAULT_LOG_COUNT;
resp->tr_qm_quotaoff.tr_logres = xfs_calc_qm_quotaoff_reservation(mp);
resp->tr_qm_quotaoff.tr_logcount = XFS_DEFAULT_LOG_COUNT;
resp->tr_qm_equotaoff.tr_logres =
- xfs_calc_qm_quotaoff_end_reservation(mp);
+ xfs_calc_qm_quotaoff_end_reservation();
resp->tr_qm_equotaoff.tr_logcount = XFS_DEFAULT_LOG_COUNT;
resp->tr_sb.tr_logres = xfs_calc_sb_reservation(mp);
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index e5fb008d75e8..2203465e63ea 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -53,6 +53,25 @@ xfs_bui_item_free(
kmem_zone_free(xfs_bui_zone, buip);
}
+/*
+ * Freeing the BUI requires that we remove it from the AIL if it has already
+ * been placed there. However, the BUI may not yet have been placed in the AIL
+ * when called by xfs_bui_release() from BUD processing due to the ordering of
+ * committed vs unpin operations in bulk insert operations. Hence the reference
+ * count to ensure only the last caller frees the BUI.
+ */
+void
+xfs_bui_release(
+ struct xfs_bui_log_item *buip)
+{
+ ASSERT(atomic_read(&buip->bui_refcount) > 0);
+ if (atomic_dec_and_test(&buip->bui_refcount)) {
+ xfs_trans_ail_remove(&buip->bui_item, SHUTDOWN_LOG_IO_ERROR);
+ xfs_bui_item_free(buip);
+ }
+}
+
+
STATIC void
xfs_bui_item_size(
struct xfs_log_item *lip,
@@ -142,7 +161,7 @@ xfs_bui_item_unlock(
struct xfs_log_item *lip)
{
if (lip->li_flags & XFS_LI_ABORTED)
- xfs_bui_item_free(BUI_ITEM(lip));
+ xfs_bui_release(BUI_ITEM(lip));
}
/*
@@ -206,24 +225,6 @@ xfs_bui_init(
return buip;
}
-/*
- * Freeing the BUI requires that we remove it from the AIL if it has already
- * been placed there. However, the BUI may not yet have been placed in the AIL
- * when called by xfs_bui_release() from BUD processing due to the ordering of
- * committed vs unpin operations in bulk insert operations. Hence the reference
- * count to ensure only the last caller frees the BUI.
- */
-void
-xfs_bui_release(
- struct xfs_bui_log_item *buip)
-{
- ASSERT(atomic_read(&buip->bui_refcount) > 0);
- if (atomic_dec_and_test(&buip->bui_refcount)) {
- xfs_trans_ail_remove(&buip->bui_item, SHUTDOWN_LOG_IO_ERROR);
- xfs_bui_item_free(buip);
- }
-}
-
static inline struct xfs_bud_log_item *BUD_ITEM(struct xfs_log_item *lip)
{
return container_of(lip, struct xfs_bud_log_item, bud_item);
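The relocated xfs_bui_release() above (and xfs_efi_release() further down) is a last-reference-frees helper: both the AIL and the intent-done processing may hold the item, so only the caller that drops the count to zero removes and frees it. A minimal sketch of that pattern with C11 atomics, using made-up names rather than the XFS types:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct log_item {
        atomic_int refcount;
        /* payload omitted */
};

static void log_item_free(struct log_item *it)
{
        printf("last reference dropped, freeing item\n");
        free(it);
}

/* Only the caller that drops the final reference frees the item. */
static void log_item_release(struct log_item *it)
{
        assert(atomic_load(&it->refcount) > 0);
        if (atomic_fetch_sub(&it->refcount, 1) == 1)
                log_item_free(it);
}

int main(void)
{
        struct log_item *it = malloc(sizeof(*it));

        if (!it)
                return 1;
        atomic_init(&it->refcount, 2);  /* e.g. intent item + done item */
        log_item_release(it);           /* still alive after the first put */
        log_item_release(it);           /* freed here */
        return 0;
}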
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 05dee8fdd895..8cd8c412f52d 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1326,7 +1326,6 @@ xfs_collapse_file_space(
int error;
struct xfs_defer_ops dfops;
xfs_fsblock_t first_block;
- xfs_fileoff_t stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
xfs_fileoff_t next_fsb = XFS_B_TO_FSB(mp, offset + len);
xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
@@ -1361,7 +1360,7 @@ xfs_collapse_file_space(
xfs_defer_init(&dfops, &first_block);
error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
- &done, stop_fsb, &first_block, &dfops);
+ &done, &first_block, &dfops);
if (error)
goto out_bmap_cancel;
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index ac669a10c62f..55661cbdb51b 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1754,7 +1754,6 @@ xfs_buftarg_shrink_count(
void
xfs_free_buftarg(
- struct xfs_mount *mp,
struct xfs_buftarg *btp)
{
unregister_shrinker(&btp->bt_shrinker);
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 2f4c91452861..edced162a674 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -388,7 +388,7 @@ xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
*/
extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
struct block_device *, struct dax_device *);
-extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
+extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index b2cde5426182..7b68e6c9a474 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -50,19 +50,19 @@ xfs_trim_extents(
pag = xfs_perag_get(mp, agno);
- error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
- if (error || !agbp)
- goto out_put_perag;
-
- cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
-
/*
* Force out the log. This means any transactions that might have freed
- * space before we took the AGF buffer lock are now on disk, and the
+ * space before we take the AGF buffer lock are now on disk, and the
* volatile disk cache is flushed.
*/
xfs_log_force(mp, XFS_LOG_SYNC);
+ error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+ if (error || !agbp)
+ goto out_put_perag;
+
+ cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
+
/*
* Look up the longest btree in the AGF and start with it.
*/
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 64da90655e95..b5b1e567b9f4 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -51,6 +51,24 @@ xfs_efi_item_free(
}
/*
+ * Freeing the efi requires that we remove it from the AIL if it has already
+ * been placed there. However, the EFI may not yet have been placed in the AIL
+ * when called by xfs_efi_release() from EFD processing due to the ordering of
+ * committed vs unpin operations in bulk insert operations. Hence the reference
+ * count to ensure only the last caller frees the EFI.
+ */
+void
+xfs_efi_release(
+ struct xfs_efi_log_item *efip)
+{
+ ASSERT(atomic_read(&efip->efi_refcount) > 0);
+ if (atomic_dec_and_test(&efip->efi_refcount)) {
+ xfs_trans_ail_remove(&efip->efi_item, SHUTDOWN_LOG_IO_ERROR);
+ xfs_efi_item_free(efip);
+ }
+}
+
+/*
* This returns the number of iovecs needed to log the given efi item.
* We only need 1 iovec for an efi item. It just logs the efi_log_format
* structure.
@@ -151,7 +169,7 @@ xfs_efi_item_unlock(
struct xfs_log_item *lip)
{
if (lip->li_flags & XFS_LI_ABORTED)
- xfs_efi_item_free(EFI_ITEM(lip));
+ xfs_efi_release(EFI_ITEM(lip));
}
/*
@@ -279,24 +297,6 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt)
return -EFSCORRUPTED;
}
-/*
- * Freeing the efi requires that we remove it from the AIL if it has already
- * been placed there. However, the EFI may not yet have been placed in the AIL
- * when called by xfs_efi_release() from EFD processing due to the ordering of
- * committed vs unpin operations in bulk insert operations. Hence the reference
- * count to ensure only the last caller frees the EFI.
- */
-void
-xfs_efi_release(
- struct xfs_efi_log_item *efip)
-{
- ASSERT(atomic_read(&efip->efi_refcount) > 0);
- if (atomic_dec_and_test(&efip->efi_refcount)) {
- xfs_trans_ail_remove(&efip->efi_item, SHUTDOWN_LOG_IO_ERROR);
- xfs_efi_item_free(efip);
- }
-}
-
static inline struct xfs_efd_log_item *EFD_ITEM(struct xfs_log_item *lip)
{
return container_of(lip, struct xfs_efd_log_item, efd_item);
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index 043ca3808ea2..3f8722e51dbe 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -34,7 +34,6 @@
struct xfs_fstrm_item {
struct xfs_mru_cache_elem mru;
- struct xfs_inode *ip;
xfs_agnumber_t ag; /* AG in use for this directory */
};
@@ -122,14 +121,15 @@ xfs_filestream_put_ag(
static void
xfs_fstrm_free_func(
+ void *data,
struct xfs_mru_cache_elem *mru)
{
+ struct xfs_mount *mp = data;
struct xfs_fstrm_item *item =
container_of(mru, struct xfs_fstrm_item, mru);
- xfs_filestream_put_ag(item->ip->i_mount, item->ag);
-
- trace_xfs_filestream_free(item->ip, item->ag);
+ xfs_filestream_put_ag(mp, item->ag);
+ trace_xfs_filestream_free(mp, mru->key, item->ag);
kmem_free(item);
}
@@ -165,7 +165,7 @@ xfs_filestream_pick_ag(
trylock = XFS_ALLOC_FLAG_TRYLOCK;
for (nscan = 0; 1; nscan++) {
- trace_xfs_filestream_scan(ip, ag);
+ trace_xfs_filestream_scan(mp, ip->i_ino, ag);
pag = xfs_perag_get(mp, ag);
@@ -198,7 +198,7 @@ xfs_filestream_pick_ag(
goto next_ag;
}
- longest = xfs_alloc_longest_free_extent(mp, pag,
+ longest = xfs_alloc_longest_free_extent(pag,
xfs_alloc_min_freelist(mp, pag),
xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
if (((minlen && longest >= minlen) ||
@@ -265,7 +265,6 @@ next_ag:
goto out_put_ag;
item->ag = *agp;
- item->ip = ip;
err = xfs_mru_cache_insert(mp->m_filestream, ip->i_ino, &item->mru);
if (err) {
@@ -333,7 +332,7 @@ xfs_filestream_lookup_ag(
ag = container_of(mru, struct xfs_fstrm_item, mru)->ag;
xfs_mru_cache_done(mp->m_filestream);
- trace_xfs_filestream_lookup(ip, ag);
+ trace_xfs_filestream_lookup(mp, ip->i_ino, ag);
goto out;
}
@@ -399,7 +398,7 @@ xfs_filestream_new_ag(
* Only free the item here so we skip over the old AG earlier.
*/
if (mru)
- xfs_fstrm_free_func(mru);
+ xfs_fstrm_free_func(mp, mru);
IRELE(pip);
exit:
@@ -426,8 +425,8 @@ xfs_filestream_mount(
* timer tunable to within about 10 percent. This requires at least 10
* groups.
*/
- return xfs_mru_cache_create(&mp->m_filestream, xfs_fstrm_centisecs * 10,
- 10, xfs_fstrm_free_func);
+ return xfs_mru_cache_create(&mp->m_filestream, mp,
+ xfs_fstrm_centisecs * 10, 10, xfs_fstrm_free_func);
}
void
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 3e3aab3888fa..2b70c8b4cee2 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -972,10 +972,8 @@ xfs_dir_ialloc(
xfs_nlink_t nlink,
dev_t rdev,
prid_t prid, /* project id */
- xfs_inode_t **ipp, /* pointer to inode; it will be
+ xfs_inode_t **ipp) /* pointer to inode; it will be
locked. */
- int *committed)
-
{
xfs_trans_t *tp;
xfs_inode_t *ip;
@@ -1050,8 +1048,6 @@ xfs_dir_ialloc(
}
code = xfs_trans_roll(&tp);
- if (committed != NULL)
- *committed = 1;
/*
* Re-attach the quota info that we detached from prev trx.
@@ -1088,9 +1084,6 @@ xfs_dir_ialloc(
}
ASSERT(!ialloc_context && ip);
- } else {
- if (committed != NULL)
- *committed = 0;
}
*ipp = ip;
@@ -1217,8 +1210,7 @@ xfs_create(
* entry pointing to them, but a directory also the "." entry
* pointing to itself.
*/
- error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip,
- NULL);
+ error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip);
if (error)
goto out_trans_cancel;
@@ -1309,7 +1301,6 @@ xfs_create(
int
xfs_create_tmpfile(
struct xfs_inode *dp,
- struct dentry *dentry,
umode_t mode,
struct xfs_inode **ipp)
{
@@ -1351,7 +1342,7 @@ xfs_create_tmpfile(
if (error)
goto out_trans_cancel;
- error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, prid, &ip, NULL);
+ error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, prid, &ip);
if (error)
goto out_trans_cancel;
@@ -1611,13 +1602,15 @@ xfs_itruncate_extents(
goto out;
}
- /* Remove all pending CoW reservations. */
- error = xfs_reflink_cancel_cow_blocks(ip, &tp, first_unmap_block,
- last_block, true);
- if (error)
- goto out;
+ if (whichfork == XFS_DATA_FORK) {
+ /* Remove all pending CoW reservations. */
+ error = xfs_reflink_cancel_cow_blocks(ip, &tp,
+ first_unmap_block, last_block, true);
+ if (error)
+ goto out;
- xfs_itruncate_clear_reflink_flags(ip);
+ xfs_itruncate_clear_reflink_flags(ip);
+ }
/*
* Always re-log the inode so that our permanent transaction can keep
@@ -2903,7 +2896,7 @@ xfs_rename_alloc_whiteout(
struct xfs_inode *tmpfile;
int error;
- error = xfs_create_tmpfile(dp, NULL, S_IFCHR | WHITEOUT_MODE, &tmpfile);
+ error = xfs_create_tmpfile(dp, S_IFCHR | WHITEOUT_MODE, &tmpfile);
if (error)
return error;
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 132d8aa2afc4..1eebc53df7d7 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -393,8 +393,8 @@ int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
struct xfs_inode **ipp, struct xfs_name *ci_name);
int xfs_create(struct xfs_inode *dp, struct xfs_name *name,
umode_t mode, dev_t rdev, struct xfs_inode **ipp);
-int xfs_create_tmpfile(struct xfs_inode *dp, struct dentry *dentry,
- umode_t mode, struct xfs_inode **ipp);
+int xfs_create_tmpfile(struct xfs_inode *dp, umode_t mode,
+ struct xfs_inode **ipp);
int xfs_remove(struct xfs_inode *dp, struct xfs_name *name,
struct xfs_inode *ip);
int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
@@ -431,7 +431,7 @@ xfs_extlen_t xfs_get_cowextsz_hint(struct xfs_inode *ip);
int xfs_dir_ialloc(struct xfs_trans **, struct xfs_inode *, umode_t,
xfs_nlink_t, dev_t, prid_t,
- struct xfs_inode **, int *);
+ struct xfs_inode **);
/* from xfs_file.c */
enum xfs_prealloc_flags {
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 154725b1b813..a3ed3c811dfa 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -177,7 +177,7 @@ xfs_generic_create(
if (!tmpfile) {
error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
} else {
- error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
+ error = xfs_create_tmpfile(XFS_I(dir), mode, &ip);
}
if (unlikely(error))
goto out_free_acl;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index b9c9c848146b..2fcd9ed5d075 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -560,7 +560,6 @@ xfs_log_done(
*/
int
xfs_log_notify(
- struct xfs_mount *mp,
struct xlog_in_core *iclog,
xfs_log_callback_t *cb)
{
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 7e2d62922a16..fa8ad31d587f 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -141,8 +141,7 @@ int xfs_log_mount_cancel(struct xfs_mount *);
xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
void xfs_log_space_wake(struct xfs_mount *mp);
-int xfs_log_notify(struct xfs_mount *mp,
- struct xlog_in_core *iclog,
+int xfs_log_notify(struct xlog_in_core *iclog,
struct xfs_log_callback *callback_entry);
int xfs_log_release_iclog(struct xfs_mount *mp,
struct xlog_in_core *iclog);
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index cb376ac8a595..4668403b1741 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -848,7 +848,7 @@ restart:
/* attach all the transactions w/ busy extents to iclog */
ctx->log_cb.cb_func = xlog_cil_committed;
ctx->log_cb.cb_arg = ctx;
- error = xfs_log_notify(log->l_mp, commit_iclog, &ctx->log_cb);
+ error = xfs_log_notify(commit_iclog, &ctx->log_cb);
if (error)
goto out_abort;
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index f8a674d7f092..70eea7ae2876 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -112,6 +112,7 @@ struct xfs_mru_cache {
xfs_mru_cache_free_func_t free_func; /* Function pointer for freeing. */
struct delayed_work work; /* Workqueue data for reaping. */
unsigned int queued; /* work has been queued */
+ void *data;
};
static struct workqueue_struct *xfs_mru_reap_wq;
@@ -259,7 +260,7 @@ _xfs_mru_cache_clear_reap_list(
list_for_each_entry_safe(elem, next, &tmp, list_node) {
list_del_init(&elem->list_node);
- mru->free_func(elem);
+ mru->free_func(mru->data, elem);
}
spin_lock(&mru->lock);
@@ -326,6 +327,7 @@ xfs_mru_cache_uninit(void)
int
xfs_mru_cache_create(
struct xfs_mru_cache **mrup,
+ void *data,
unsigned int lifetime_ms,
unsigned int grp_count,
xfs_mru_cache_free_func_t free_func)
@@ -369,7 +371,7 @@ xfs_mru_cache_create(
mru->grp_time = grp_time;
mru->free_func = free_func;
-
+ mru->data = data;
*mrup = mru;
exit:
@@ -492,7 +494,7 @@ xfs_mru_cache_delete(
elem = xfs_mru_cache_remove(mru, key);
if (elem)
- mru->free_func(elem);
+ mru->free_func(mru->data, elem);
}
/*
diff --git a/fs/xfs/xfs_mru_cache.h b/fs/xfs/xfs_mru_cache.h
index fb5245ba5ff7..b3f3fbdfcc47 100644
--- a/fs/xfs/xfs_mru_cache.h
+++ b/fs/xfs/xfs_mru_cache.h
@@ -26,13 +26,13 @@ struct xfs_mru_cache_elem {
};
/* Function pointer type for callback to free a client's data pointer. */
-typedef void (*xfs_mru_cache_free_func_t)(struct xfs_mru_cache_elem *elem);
+typedef void (*xfs_mru_cache_free_func_t)(void *, struct xfs_mru_cache_elem *);
int xfs_mru_cache_init(void);
void xfs_mru_cache_uninit(void);
-int xfs_mru_cache_create(struct xfs_mru_cache **mrup, unsigned int lifetime_ms,
- unsigned int grp_count,
- xfs_mru_cache_free_func_t free_func);
+int xfs_mru_cache_create(struct xfs_mru_cache **mrup, void *data,
+ unsigned int lifetime_ms, unsigned int grp_count,
+ xfs_mru_cache_free_func_t free_func);
void xfs_mru_cache_destroy(struct xfs_mru_cache *mru);
int xfs_mru_cache_insert(struct xfs_mru_cache *mru, unsigned long key,
struct xfs_mru_cache_elem *elem);
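The xfs_mru_cache changes above thread an opaque data pointer from xfs_mru_cache_create() through to the free callback, so xfs_fstrm_item no longer needs an inode back-pointer just to reach the mount. A standalone userspace sketch of that callback shape (the names below are invented for illustration; only the (void *data, elem) signature mirrors the new xfs_mru_cache_free_func_t):

/*
 * Illustrative userspace sketch, not kernel code: the cache hands both its
 * private "data" pointer and the embedded element back to the free function.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mru_elem {
	unsigned long key;
};

/* mirrors xfs_mru_cache_free_func_t: (void *data, struct elem *) */
typedef void (*free_func_t)(void *data, struct mru_elem *elem);

struct my_ctx {
	const char *name;	/* stands in for struct xfs_mount */
};

struct my_item {
	struct mru_elem mru;	/* embedded element, like xfs_fstrm_item */
	int ag;
};

static void my_free_func(void *data, struct mru_elem *elem)
{
	struct my_ctx *ctx = data;
	struct my_item *item = container_of(elem, struct my_item, mru);

	printf("%s: freeing key %lu (ag %d)\n", ctx->name, elem->key, item->ag);
	free(item);
}

int main(void)
{
	struct my_ctx ctx = { .name = "mount0" };
	struct my_item *item = calloc(1, sizeof(*item));
	free_func_t cb = my_free_func;

	item->mru.key = 42;
	item->ag = 3;
	cb(&ctx, &item->mru);	/* what the cache does on reap or delete */
	return 0;
}

The callback recovers per-cache state from the data argument and its own item from the embedded element via container_of(), the same layout xfs_fstrm_free_func() now relies on.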
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 5b848f4b637f..ec39ae274c78 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -748,7 +748,6 @@ xfs_qm_qino_alloc(
{
xfs_trans_t *tp;
int error;
- int committed;
bool need_alloc = true;
*ip = NULL;
@@ -788,8 +787,7 @@ xfs_qm_qino_alloc(
return error;
if (need_alloc) {
- error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip,
- &committed);
+ error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip);
if (error) {
xfs_trans_cancel(tp);
return error;
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
index 7a39f40645f7..15c9393dd7a7 100644
--- a/fs/xfs/xfs_refcount_item.c
+++ b/fs/xfs/xfs_refcount_item.c
@@ -52,6 +52,25 @@ xfs_cui_item_free(
kmem_zone_free(xfs_cui_zone, cuip);
}
+/*
+ * Freeing the CUI requires that we remove it from the AIL if it has already
+ * been placed there. However, the CUI may not yet have been placed in the AIL
+ * when called by xfs_cui_release() from CUD processing due to the ordering of
+ * committed vs unpin operations in bulk insert operations. Hence the reference
+ * count to ensure only the last caller frees the CUI.
+ */
+void
+xfs_cui_release(
+ struct xfs_cui_log_item *cuip)
+{
+ ASSERT(atomic_read(&cuip->cui_refcount) > 0);
+ if (atomic_dec_and_test(&cuip->cui_refcount)) {
+ xfs_trans_ail_remove(&cuip->cui_item, SHUTDOWN_LOG_IO_ERROR);
+ xfs_cui_item_free(cuip);
+ }
+}
+
+
STATIC void
xfs_cui_item_size(
struct xfs_log_item *lip,
@@ -141,7 +160,7 @@ xfs_cui_item_unlock(
struct xfs_log_item *lip)
{
if (lip->li_flags & XFS_LI_ABORTED)
- xfs_cui_item_free(CUI_ITEM(lip));
+ xfs_cui_release(CUI_ITEM(lip));
}
/*
@@ -211,24 +230,6 @@ xfs_cui_init(
return cuip;
}
-/*
- * Freeing the CUI requires that we remove it from the AIL if it has already
- * been placed there. However, the CUI may not yet have been placed in the AIL
- * when called by xfs_cui_release() from CUD processing due to the ordering of
- * committed vs unpin operations in bulk insert operations. Hence the reference
- * count to ensure only the last caller frees the CUI.
- */
-void
-xfs_cui_release(
- struct xfs_cui_log_item *cuip)
-{
- ASSERT(atomic_read(&cuip->cui_refcount) > 0);
- if (atomic_dec_and_test(&cuip->cui_refcount)) {
- xfs_trans_ail_remove(&cuip->cui_item, SHUTDOWN_LOG_IO_ERROR);
- xfs_cui_item_free(cuip);
- }
-}
-
static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
{
return container_of(lip, struct xfs_cud_log_item, cud_item);
diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c
index 49d3124863a8..06a07846c9b3 100644
--- a/fs/xfs/xfs_rmap_item.c
+++ b/fs/xfs/xfs_rmap_item.c
@@ -52,6 +52,24 @@ xfs_rui_item_free(
kmem_zone_free(xfs_rui_zone, ruip);
}
+/*
+ * Freeing the RUI requires that we remove it from the AIL if it has already
+ * been placed there. However, the RUI may not yet have been placed in the AIL
+ * when called by xfs_rui_release() from RUD processing due to the ordering of
+ * committed vs unpin operations in bulk insert operations. Hence the reference
+ * count to ensure only the last caller frees the RUI.
+ */
+void
+xfs_rui_release(
+ struct xfs_rui_log_item *ruip)
+{
+ ASSERT(atomic_read(&ruip->rui_refcount) > 0);
+ if (atomic_dec_and_test(&ruip->rui_refcount)) {
+ xfs_trans_ail_remove(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
+ xfs_rui_item_free(ruip);
+ }
+}
+
STATIC void
xfs_rui_item_size(
struct xfs_log_item *lip,
@@ -141,7 +159,7 @@ xfs_rui_item_unlock(
struct xfs_log_item *lip)
{
if (lip->li_flags & XFS_LI_ABORTED)
- xfs_rui_item_free(RUI_ITEM(lip));
+ xfs_rui_release(RUI_ITEM(lip));
}
/*
@@ -233,24 +251,6 @@ xfs_rui_copy_format(
return 0;
}
-/*
- * Freeing the RUI requires that we remove it from the AIL if it has already
- * been placed there. However, the RUI may not yet have been placed in the AIL
- * when called by xfs_rui_release() from RUD processing due to the ordering of
- * committed vs unpin operations in bulk insert operations. Hence the reference
- * count to ensure only the last caller frees the RUI.
- */
-void
-xfs_rui_release(
- struct xfs_rui_log_item *ruip)
-{
- ASSERT(atomic_read(&ruip->rui_refcount) > 0);
- if (atomic_dec_and_test(&ruip->rui_refcount)) {
- xfs_trans_ail_remove(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
- xfs_rui_item_free(ruip);
- }
-}
-
static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
return container_of(lip, struct xfs_rud_log_item, rud_item);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 612c1d5348b3..d71424052917 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -722,7 +722,7 @@ xfs_close_devices(
struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;
- xfs_free_buftarg(mp, mp->m_logdev_targp);
+ xfs_free_buftarg(mp->m_logdev_targp);
xfs_blkdev_put(logdev);
fs_put_dax(dax_logdev);
}
@@ -730,11 +730,11 @@ xfs_close_devices(
struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;
- xfs_free_buftarg(mp, mp->m_rtdev_targp);
+ xfs_free_buftarg(mp->m_rtdev_targp);
xfs_blkdev_put(rtdev);
fs_put_dax(dax_rtdev);
}
- xfs_free_buftarg(mp, mp->m_ddev_targp);
+ xfs_free_buftarg(mp->m_ddev_targp);
fs_put_dax(dax_ddev);
}
@@ -808,9 +808,9 @@ xfs_open_devices(
out_free_rtdev_targ:
if (mp->m_rtdev_targp)
- xfs_free_buftarg(mp, mp->m_rtdev_targp);
+ xfs_free_buftarg(mp->m_rtdev_targp);
out_free_ddev_targ:
- xfs_free_buftarg(mp, mp->m_ddev_targp);
+ xfs_free_buftarg(mp->m_ddev_targp);
out_close_rtdev:
xfs_blkdev_put(rtdev);
fs_put_dax(dax_rtdev);
@@ -1247,7 +1247,6 @@ xfs_quiesce_attr(
STATIC int
xfs_test_remount_options(
struct super_block *sb,
- struct xfs_mount *mp,
char *options)
{
int error = 0;
@@ -1278,7 +1277,7 @@ xfs_fs_remount(
int error;
/* First, check for complete junk; i.e. invalid options */
- error = xfs_test_remount_options(sb, mp, options);
+ error = xfs_test_remount_options(sb, options);
if (error)
return error;
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 2e9e793a8f9d..5b66ac12913c 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -264,7 +264,7 @@ xfs_symlink(
* Allocate an inode for the symlink.
*/
error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
- prid, &ip, NULL);
+ prid, &ip);
if (error)
goto out_trans_cancel;
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index a982c0b623d0..8955254b900e 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -506,8 +506,8 @@ DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release);
DEFINE_BUF_ITEM_EVENT(xfs_trans_binval);
DECLARE_EVENT_CLASS(xfs_filestream_class,
- TP_PROTO(struct xfs_inode *ip, xfs_agnumber_t agno),
- TP_ARGS(ip, agno),
+ TP_PROTO(struct xfs_mount *mp, xfs_ino_t ino, xfs_agnumber_t agno),
+ TP_ARGS(mp, ino, agno),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_ino_t, ino)
@@ -515,10 +515,10 @@ DECLARE_EVENT_CLASS(xfs_filestream_class,
__field(int, streams)
),
TP_fast_assign(
- __entry->dev = VFS_I(ip)->i_sb->s_dev;
- __entry->ino = ip->i_ino;
+ __entry->dev = mp->m_super->s_dev;
+ __entry->ino = ino;
__entry->agno = agno;
- __entry->streams = xfs_filestream_peek_ag(ip->i_mount, agno);
+ __entry->streams = xfs_filestream_peek_ag(mp, agno);
),
TP_printk("dev %d:%d ino 0x%llx agno %u streams %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
@@ -528,8 +528,8 @@ DECLARE_EVENT_CLASS(xfs_filestream_class,
)
#define DEFINE_FILESTREAM_EVENT(name) \
DEFINE_EVENT(xfs_filestream_class, name, \
- TP_PROTO(struct xfs_inode *ip, xfs_agnumber_t agno), \
- TP_ARGS(ip, agno))
+ TP_PROTO(struct xfs_mount *mp, xfs_ino_t ino, xfs_agnumber_t agno), \
+ TP_ARGS(mp, ino, agno))
DEFINE_FILESTREAM_EVENT(xfs_filestream_free);
DEFINE_FILESTREAM_EVENT(xfs_filestream_lookup);
DEFINE_FILESTREAM_EVENT(xfs_filestream_scan);
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index d591bb77f592..40a916efd7c0 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -254,6 +254,8 @@ int acpi_processor_pstate_control(void);
/* note: this locks both the calling module and the processor module
if a _PPC object exists, rmmod is disallowed then */
int acpi_processor_notify_smm(struct module *calling_module);
+int acpi_processor_get_psd(acpi_handle handle,
+ struct acpi_psd_package *pdomain);
/* parsing the _P* objects. */
extern int acpi_processor_get_performance_info(struct acpi_processor *pr);
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 04c4cc6fd820..66d1d45fa2e1 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -25,6 +25,50 @@
#define mmiowb() do {} while (0)
#endif
+#ifndef __io_br
+#define __io_br() barrier()
+#endif
+
+/* prevent prefetching of coherent DMA data ahead of a dma-complete */
+#ifndef __io_ar
+#ifdef rmb
+#define __io_ar() rmb()
+#else
+#define __io_ar() barrier()
+#endif
+#endif
+
+/* flush writes to coherent DMA data before possibly triggering a DMA read */
+#ifndef __io_bw
+#ifdef wmb
+#define __io_bw() wmb()
+#else
+#define __io_bw() barrier()
+#endif
+#endif
+
+/* serialize device access against a spin_unlock, usually handled there. */
+#ifndef __io_aw
+#define __io_aw() barrier()
+#endif
+
+#ifndef __io_pbw
+#define __io_pbw() __io_bw()
+#endif
+
+#ifndef __io_paw
+#define __io_paw() __io_aw()
+#endif
+
+#ifndef __io_pbr
+#define __io_pbr() __io_br()
+#endif
+
+#ifndef __io_par
+#define __io_par() __io_ar()
+#endif
+
+
/*
* __raw_{read,write}{b,w,l,q}() access memory in native endianness.
*
@@ -110,7 +154,12 @@ static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
- return __raw_readb(addr);
+ u8 val;
+
+ __io_br();
+ val = __raw_readb(addr);
+ __io_ar();
+ return val;
}
#endif
@@ -118,7 +167,12 @@ static inline u8 readb(const volatile void __iomem *addr)
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
- return __le16_to_cpu(__raw_readw(addr));
+ u16 val;
+
+ __io_br();
+ val = __le16_to_cpu(__raw_readw(addr));
+ __io_ar();
+ return val;
}
#endif
@@ -126,7 +180,12 @@ static inline u16 readw(const volatile void __iomem *addr)
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
- return __le32_to_cpu(__raw_readl(addr));
+ u32 val;
+
+ __io_br();
+ val = __le32_to_cpu(__raw_readl(addr));
+ __io_ar();
+ return val;
}
#endif
@@ -135,7 +194,12 @@ static inline u32 readl(const volatile void __iomem *addr)
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
- return __le64_to_cpu(__raw_readq(addr));
+ u64 val;
+
+ __io_br();
+ val = __le64_to_cpu(__raw_readq(addr));
+ __io_ar();
+ return val;
}
#endif
#endif /* CONFIG_64BIT */
@@ -144,7 +208,9 @@ static inline u64 readq(const volatile void __iomem *addr)
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
+ __io_bw();
__raw_writeb(value, addr);
+ __io_aw();
}
#endif
@@ -152,7 +218,9 @@ static inline void writeb(u8 value, volatile void __iomem *addr)
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
+ __io_bw();
__raw_writew(cpu_to_le16(value), addr);
+ __io_aw();
}
#endif
@@ -160,7 +228,9 @@ static inline void writew(u16 value, volatile void __iomem *addr)
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
+ __io_bw();
__raw_writel(__cpu_to_le32(value), addr);
+ __io_aw();
}
#endif
@@ -169,7 +239,9 @@ static inline void writel(u32 value, volatile void __iomem *addr)
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
+ __io_bw();
__raw_writeq(__cpu_to_le64(value), addr);
+ __io_aw();
}
#endif
#endif /* CONFIG_64BIT */
@@ -180,35 +252,67 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
* accesses.
*/
#ifndef readb_relaxed
-#define readb_relaxed readb
+#define readb_relaxed readb_relaxed
+static inline u8 readb_relaxed(const volatile void __iomem *addr)
+{
+ return __raw_readb(addr);
+}
#endif
#ifndef readw_relaxed
-#define readw_relaxed readw
+#define readw_relaxed readw_relaxed
+static inline u16 readw_relaxed(const volatile void __iomem *addr)
+{
+ return __le16_to_cpu(__raw_readw(addr));
+}
#endif
#ifndef readl_relaxed
-#define readl_relaxed readl
+#define readl_relaxed readl_relaxed
+static inline u32 readl_relaxed(const volatile void __iomem *addr)
+{
+ return __le32_to_cpu(__raw_readl(addr));
+}
#endif
#if defined(readq) && !defined(readq_relaxed)
-#define readq_relaxed readq
+#define readq_relaxed readq_relaxed
+static inline u64 readq_relaxed(const volatile void __iomem *addr)
+{
+ return __le64_to_cpu(__raw_readq(addr));
+}
#endif
#ifndef writeb_relaxed
-#define writeb_relaxed writeb
+#define writeb_relaxed writeb_relaxed
+static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
+{
+ __raw_writeb(value, addr);
+}
#endif
#ifndef writew_relaxed
-#define writew_relaxed writew
+#define writew_relaxed writew_relaxed
+static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
+{
+ __raw_writew(cpu_to_le16(value), addr);
+}
#endif
#ifndef writel_relaxed
-#define writel_relaxed writel
+#define writel_relaxed writel_relaxed
+static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
+{
+ __raw_writel(__cpu_to_le32(value), addr);
+}
#endif
#if defined(writeq) && !defined(writeq_relaxed)
-#define writeq_relaxed writeq
+#define writeq_relaxed writeq_relaxed
+static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
+{
+ __raw_writeq(__cpu_to_le64(value), addr);
+}
#endif
/*
@@ -363,7 +467,12 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer,
#define inb inb
static inline u8 inb(unsigned long addr)
{
- return readb(PCI_IOBASE + addr);
+ u8 val;
+
+ __io_pbr();
+ val = __raw_readb(PCI_IOBASE + addr);
+ __io_par();
+ return val;
}
#endif
@@ -371,7 +480,12 @@ static inline u8 inb(unsigned long addr)
#define inw inw
static inline u16 inw(unsigned long addr)
{
- return readw(PCI_IOBASE + addr);
+ u16 val;
+
+ __io_pbr();
+ val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr));
+ __io_par();
+ return val;
}
#endif
@@ -379,7 +493,12 @@ static inline u16 inw(unsigned long addr)
#define inl inl
static inline u32 inl(unsigned long addr)
{
- return readl(PCI_IOBASE + addr);
+ u32 val;
+
+ __io_pbr();
+ val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr));
+ __io_par();
+ return val;
}
#endif
@@ -387,7 +506,9 @@ static inline u32 inl(unsigned long addr)
#define outb outb
static inline void outb(u8 value, unsigned long addr)
{
- writeb(value, PCI_IOBASE + addr);
+ __io_pbw();
+ __raw_writeb(value, PCI_IOBASE + addr);
+ __io_paw();
}
#endif
@@ -395,7 +516,9 @@ static inline void outb(u8 value, unsigned long addr)
#define outw outw
static inline void outw(u16 value, unsigned long addr)
{
- writew(value, PCI_IOBASE + addr);
+ __io_pbw();
+ __raw_writew(cpu_to_le16(value), PCI_IOBASE + addr);
+ __io_paw();
}
#endif
@@ -403,7 +526,9 @@ static inline void outw(u16 value, unsigned long addr)
#define outl outl
static inline void outl(u32 value, unsigned long addr)
{
- writel(value, PCI_IOBASE + addr);
+ __io_pbw();
+ __raw_writel(cpu_to_le32(value), PCI_IOBASE + addr);
+ __io_paw();
}
#endif
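The asm-generic/io.h changes wrap the generic readX()/writeX() and inX()/outX() accessors in overridable __io_br()/__io_ar()/__io_bw()/__io_aw() (and __io_p*()) hooks, and turn the _relaxed variants into barrier-free inline functions. A hypothetical arch header fragment showing how an architecture might plug in its own barriers (not part of this patch; the specific hook choices are assumptions for illustration):

/*
 * Hypothetical arch/foo/include/asm/io.h (illustrative only).  The hooks must
 * be defined before asm-generic/io.h is included; anything left undefined
 * keeps the defaults added above (rmb()/wmb() when available, else barrier()).
 */
#ifndef __ASM_FOO_IO_H
#define __ASM_FOO_IO_H

#include <asm/barrier.h>

/* order an MMIO read before later reads from coherent DMA memory */
#define __io_ar()	rmb()
/* flush writes to coherent DMA memory before the MMIO doorbell write */
#define __io_bw()	wmb()
/* __io_br() and __io_aw() are left to the barrier() defaults */

#include <asm-generic/io.h>

#endif /* __ASM_FOO_IO_H */

Drivers keep using readl()/writel() unchanged; only architectures that need stronger ordering around MMIO define the hooks, while readl_relaxed() and friends stay barrier-free.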
diff --git a/include/dt-bindings/clock/axg-clkc.h b/include/dt-bindings/clock/axg-clkc.h
index 941ac70e7f30..555937a25504 100644
--- a/include/dt-bindings/clock/axg-clkc.h
+++ b/include/dt-bindings/clock/axg-clkc.h
@@ -67,5 +67,6 @@
#define CLKID_AO_I2C 58
#define CLKID_SD_EMMC_B_CLK0 59
#define CLKID_SD_EMMC_C_CLK0 60
+#define CLKID_HIFI_PLL 69
#endif /* __AXG_CLKC_H */
diff --git a/include/dt-bindings/clock/histb-clock.h b/include/dt-bindings/clock/histb-clock.h
index 067f5e501b0c..fab30b3f78b2 100644
--- a/include/dt-bindings/clock/histb-clock.h
+++ b/include/dt-bindings/clock/histb-clock.h
@@ -22,18 +22,18 @@
#define HISTB_OSC_CLK 0
#define HISTB_APB_CLK 1
#define HISTB_AHB_CLK 2
-#define HISTB_UART1_CLK 3
-#define HISTB_UART2_CLK 4
-#define HISTB_UART3_CLK 5
-#define HISTB_I2C0_CLK 6
-#define HISTB_I2C1_CLK 7
-#define HISTB_I2C2_CLK 8
-#define HISTB_I2C3_CLK 9
-#define HISTB_I2C4_CLK 10
-#define HISTB_I2C5_CLK 11
-#define HISTB_SPI0_CLK 12
-#define HISTB_SPI1_CLK 13
-#define HISTB_SPI2_CLK 14
+#define HISTB_UART1_CLK 3
+#define HISTB_UART2_CLK 4
+#define HISTB_UART3_CLK 5
+#define HISTB_I2C0_CLK 6
+#define HISTB_I2C1_CLK 7
+#define HISTB_I2C2_CLK 8
+#define HISTB_I2C3_CLK 9
+#define HISTB_I2C4_CLK 10
+#define HISTB_I2C5_CLK 11
+#define HISTB_SPI0_CLK 12
+#define HISTB_SPI1_CLK 13
+#define HISTB_SPI2_CLK 14
#define HISTB_SCI_CLK 15
#define HISTB_FMC_CLK 16
#define HISTB_MMC_BIU_CLK 17
@@ -43,7 +43,7 @@
#define HISTB_SDIO0_BIU_CLK 21
#define HISTB_SDIO0_CIU_CLK 22
#define HISTB_SDIO0_DRV_CLK 23
-#define HISTB_SDIO0_SAMPLE_CLK 24
+#define HISTB_SDIO0_SAMPLE_CLK 24
#define HISTB_PCIE_AUX_CLK 25
#define HISTB_PCIE_PIPE_CLK 26
#define HISTB_PCIE_SYS_CLK 27
@@ -53,21 +53,22 @@
#define HISTB_ETH1_MAC_CLK 31
#define HISTB_ETH1_MACIF_CLK 32
#define HISTB_COMBPHY1_CLK 33
-#define HISTB_USB2_BUS_CLK 34
-#define HISTB_USB2_PHY_CLK 35
-#define HISTB_USB2_UTMI_CLK 36
-#define HISTB_USB2_12M_CLK 37
-#define HISTB_USB2_48M_CLK 38
-#define HISTB_USB2_OTG_UTMI_CLK 39
-#define HISTB_USB2_PHY1_REF_CLK 40
-#define HISTB_USB2_PHY2_REF_CLK 41
+#define HISTB_USB2_BUS_CLK 34
+#define HISTB_USB2_PHY_CLK 35
+#define HISTB_USB2_UTMI_CLK 36
+#define HISTB_USB2_12M_CLK 37
+#define HISTB_USB2_48M_CLK 38
+#define HISTB_USB2_OTG_UTMI_CLK 39
+#define HISTB_USB2_PHY1_REF_CLK 40
+#define HISTB_USB2_PHY2_REF_CLK 41
+#define HISTB_COMBPHY0_CLK 42
/* clocks provided by mcu CRG */
-#define HISTB_MCE_CLK 1
-#define HISTB_IR_CLK 2
-#define HISTB_TIMER01_CLK 3
-#define HISTB_LEDC_CLK 4
-#define HISTB_UART0_CLK 5
-#define HISTB_LSADC_CLK 6
+#define HISTB_MCE_CLK 1
+#define HISTB_IR_CLK 2
+#define HISTB_TIMER01_CLK 3
+#define HISTB_LEDC_CLK 4
+#define HISTB_UART0_CLK 5
+#define HISTB_LSADC_CLK 6
#endif /* __DTS_HISTB_CLOCK_H */
diff --git a/include/dt-bindings/clock/imx6sll-clock.h b/include/dt-bindings/clock/imx6sll-clock.h
new file mode 100644
index 000000000000..151111e68f4f
--- /dev/null
+++ b/include/dt-bindings/clock/imx6sll-clock.h
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP.
+ *
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX6SLL_H
+#define __DT_BINDINGS_CLOCK_IMX6SLL_H
+
+#define IMX6SLL_CLK_DUMMY 0
+#define IMX6SLL_CLK_CKIL 1
+#define IMX6SLL_CLK_OSC 2
+#define IMX6SLL_PLL1_BYPASS_SRC 3
+#define IMX6SLL_PLL2_BYPASS_SRC 4
+#define IMX6SLL_PLL3_BYPASS_SRC 5
+#define IMX6SLL_PLL4_BYPASS_SRC 6
+#define IMX6SLL_PLL5_BYPASS_SRC 7
+#define IMX6SLL_PLL6_BYPASS_SRC 8
+#define IMX6SLL_PLL7_BYPASS_SRC 9
+#define IMX6SLL_CLK_PLL1 10
+#define IMX6SLL_CLK_PLL2 11
+#define IMX6SLL_CLK_PLL3 12
+#define IMX6SLL_CLK_PLL4 13
+#define IMX6SLL_CLK_PLL5 14
+#define IMX6SLL_CLK_PLL6 15
+#define IMX6SLL_CLK_PLL7 16
+#define IMX6SLL_PLL1_BYPASS 17
+#define IMX6SLL_PLL2_BYPASS 18
+#define IMX6SLL_PLL3_BYPASS 19
+#define IMX6SLL_PLL4_BYPASS 20
+#define IMX6SLL_PLL5_BYPASS 21
+#define IMX6SLL_PLL6_BYPASS 22
+#define IMX6SLL_PLL7_BYPASS 23
+#define IMX6SLL_CLK_PLL1_SYS 24
+#define IMX6SLL_CLK_PLL2_BUS 25
+#define IMX6SLL_CLK_PLL3_USB_OTG 26
+#define IMX6SLL_CLK_PLL4_AUDIO 27
+#define IMX6SLL_CLK_PLL5_VIDEO 28
+#define IMX6SLL_CLK_PLL6_ENET 29
+#define IMX6SLL_CLK_PLL7_USB_HOST 30
+#define IMX6SLL_CLK_USBPHY1 31
+#define IMX6SLL_CLK_USBPHY2 32
+#define IMX6SLL_CLK_USBPHY1_GATE 33
+#define IMX6SLL_CLK_USBPHY2_GATE 34
+#define IMX6SLL_CLK_PLL2_PFD0 35
+#define IMX6SLL_CLK_PLL2_PFD1 36
+#define IMX6SLL_CLK_PLL2_PFD2 37
+#define IMX6SLL_CLK_PLL2_PFD3 38
+#define IMX6SLL_CLK_PLL3_PFD0 39
+#define IMX6SLL_CLK_PLL3_PFD1 40
+#define IMX6SLL_CLK_PLL3_PFD2 41
+#define IMX6SLL_CLK_PLL3_PFD3 42
+#define IMX6SLL_CLK_PLL4_POST_DIV 43
+#define IMX6SLL_CLK_PLL4_AUDIO_DIV 44
+#define IMX6SLL_CLK_PLL5_POST_DIV 45
+#define IMX6SLL_CLK_PLL5_VIDEO_DIV 46
+#define IMX6SLL_CLK_PLL2_198M 47
+#define IMX6SLL_CLK_PLL3_120M 48
+#define IMX6SLL_CLK_PLL3_80M 49
+#define IMX6SLL_CLK_PLL3_60M 50
+#define IMX6SLL_CLK_STEP 51
+#define IMX6SLL_CLK_PLL1_SW 52
+#define IMX6SLL_CLK_AXI_ALT_SEL 53
+#define IMX6SLL_CLK_AXI_SEL 54
+#define IMX6SLL_CLK_PERIPH_PRE 55
+#define IMX6SLL_CLK_PERIPH2_PRE 56
+#define IMX6SLL_CLK_PERIPH_CLK2_SEL 57
+#define IMX6SLL_CLK_PERIPH2_CLK2_SEL 58
+#define IMX6SLL_CLK_PERCLK_SEL 59
+#define IMX6SLL_CLK_USDHC1_SEL 60
+#define IMX6SLL_CLK_USDHC2_SEL 61
+#define IMX6SLL_CLK_USDHC3_SEL 62
+#define IMX6SLL_CLK_SSI1_SEL 63
+#define IMX6SLL_CLK_SSI2_SEL 64
+#define IMX6SLL_CLK_SSI3_SEL 65
+#define IMX6SLL_CLK_PXP_SEL 66
+#define IMX6SLL_CLK_LCDIF_PRE_SEL 67
+#define IMX6SLL_CLK_LCDIF_SEL 68
+#define IMX6SLL_CLK_EPDC_PRE_SEL 69
+#define IMX6SLL_CLK_SPDIF_SEL 70
+#define IMX6SLL_CLK_ECSPI_SEL 71
+#define IMX6SLL_CLK_UART_SEL 72
+#define IMX6SLL_CLK_ARM 73
+#define IMX6SLL_CLK_PERIPH 74
+#define IMX6SLL_CLK_PERIPH2 75
+#define IMX6SLL_CLK_PERIPH2_CLK2 76
+#define IMX6SLL_CLK_PERIPH_CLK2 77
+#define IMX6SLL_CLK_MMDC_PODF 78
+#define IMX6SLL_CLK_AXI_PODF 79
+#define IMX6SLL_CLK_AHB 80
+#define IMX6SLL_CLK_IPG 81
+#define IMX6SLL_CLK_PERCLK 82
+#define IMX6SLL_CLK_USDHC1_PODF 83
+#define IMX6SLL_CLK_USDHC2_PODF 84
+#define IMX6SLL_CLK_USDHC3_PODF 85
+#define IMX6SLL_CLK_SSI1_PRED 86
+#define IMX6SLL_CLK_SSI2_PRED 87
+#define IMX6SLL_CLK_SSI3_PRED 88
+#define IMX6SLL_CLK_SSI1_PODF 89
+#define IMX6SLL_CLK_SSI2_PODF 90
+#define IMX6SLL_CLK_SSI3_PODF 91
+#define IMX6SLL_CLK_PXP_PODF 92
+#define IMX6SLL_CLK_LCDIF_PRED 93
+#define IMX6SLL_CLK_LCDIF_PODF 94
+#define IMX6SLL_CLK_EPDC_SEL 95
+#define IMX6SLL_CLK_EPDC_PODF 96
+#define IMX6SLL_CLK_SPDIF_PRED 97
+#define IMX6SLL_CLK_SPDIF_PODF 98
+#define IMX6SLL_CLK_ECSPI_PODF 99
+#define IMX6SLL_CLK_UART_PODF 100
+
+/* CCGR 0 */
+#define IMX6SLL_CLK_AIPSTZ1 101
+#define IMX6SLL_CLK_AIPSTZ2 102
+#define IMX6SLL_CLK_DCP 103
+#define IMX6SLL_CLK_UART2_IPG 104
+#define IMX6SLL_CLK_UART2_SERIAL 105
+
+/* CCGR 1 */
+#define IMX6SLL_CLK_ECSPI1 106
+#define IMX6SLL_CLK_ECSPI2 107
+#define IMX6SLL_CLK_ECSPI3 108
+#define IMX6SLL_CLK_ECSPI4 109
+#define IMX6SLL_CLK_UART3_IPG 110
+#define IMX6SLL_CLK_UART3_SERIAL 111
+#define IMX6SLL_CLK_UART4_IPG 112
+#define IMX6SLL_CLK_UART4_SERIAL 113
+#define IMX6SLL_CLK_EPIT1 114
+#define IMX6SLL_CLK_EPIT2 115
+#define IMX6SLL_CLK_GPT_BUS 116
+#define IMX6SLL_CLK_GPT_SERIAL 117
+
+/* CCGR2 */
+#define IMX6SLL_CLK_CSI 118
+#define IMX6SLL_CLK_I2C1 119
+#define IMX6SLL_CLK_I2C2 120
+#define IMX6SLL_CLK_I2C3 121
+#define IMX6SLL_CLK_OCOTP 122
+#define IMX6SLL_CLK_LCDIF_APB 123
+#define IMX6SLL_CLK_PXP 124
+
+/* CCGR3 */
+#define IMX6SLL_CLK_UART5_IPG 125
+#define IMX6SLL_CLK_UART5_SERIAL 126
+#define IMX6SLL_CLK_EPDC_AXI 127
+#define IMX6SLL_CLK_EPDC_PIX 128
+#define IMX6SLL_CLK_LCDIF_PIX 129
+#define IMX6SLL_CLK_WDOG1 130
+#define IMX6SLL_CLK_MMDC_P0_FAST 131
+#define IMX6SLL_CLK_MMDC_P0_IPG 132
+#define IMX6SLL_CLK_OCRAM 133
+
+/* CCGR4 */
+#define IMX6SLL_CLK_PWM1 134
+#define IMX6SLL_CLK_PWM2 135
+#define IMX6SLL_CLK_PWM3 136
+#define IMX6SLL_CLK_PWM4 137
+
+/* CCGR 5 */
+#define IMX6SLL_CLK_ROM 138
+#define IMX6SLL_CLK_SDMA 139
+#define IMX6SLL_CLK_KPP 140
+#define IMX6SLL_CLK_WDOG2 141
+#define IMX6SLL_CLK_SPBA 142
+#define IMX6SLL_CLK_SPDIF 143
+#define IMX6SLL_CLK_SPDIF_GCLK 144
+#define IMX6SLL_CLK_SSI1 145
+#define IMX6SLL_CLK_SSI1_IPG 146
+#define IMX6SLL_CLK_SSI2 147
+#define IMX6SLL_CLK_SSI2_IPG 148
+#define IMX6SLL_CLK_SSI3 149
+#define IMX6SLL_CLK_SSI3_IPG 150
+#define IMX6SLL_CLK_UART1_IPG 151
+#define IMX6SLL_CLK_UART1_SERIAL 152
+
+/* CCGR 6 */
+#define IMX6SLL_CLK_USBOH3 153
+#define IMX6SLL_CLK_USDHC1 154
+#define IMX6SLL_CLK_USDHC2 155
+#define IMX6SLL_CLK_USDHC3 156
+
+#define IMX6SLL_CLK_IPP_DI0 157
+#define IMX6SLL_CLK_IPP_DI1 158
+#define IMX6SLL_CLK_LDB_DI0_SEL 159
+#define IMX6SLL_CLK_LDB_DI0_DIV_3_5 160
+#define IMX6SLL_CLK_LDB_DI0_DIV_7 161
+#define IMX6SLL_CLK_LDB_DI0_DIV_SEL 162
+#define IMX6SLL_CLK_LDB_DI0 163
+#define IMX6SLL_CLK_LDB_DI1_SEL 164
+#define IMX6SLL_CLK_LDB_DI1_DIV_3_5 165
+#define IMX6SLL_CLK_LDB_DI1_DIV_7 166
+#define IMX6SLL_CLK_LDB_DI1_DIV_SEL 167
+#define IMX6SLL_CLK_LDB_DI1 168
+#define IMX6SLL_CLK_EXTERN_AUDIO_SEL 169
+#define IMX6SLL_CLK_EXTERN_AUDIO_PRED 170
+#define IMX6SLL_CLK_EXTERN_AUDIO_PODF 171
+#define IMX6SLL_CLK_EXTERN_AUDIO 172
+
+#define IMX6SLL_CLK_END 173
+
+#endif /* __DT_BINDINGS_CLOCK_IMX6SLL_H */
diff --git a/include/dt-bindings/clock/mt2701-clk.h b/include/dt-bindings/clock/mt2701-clk.h
index 551f7600ab58..24e93dfcee9f 100644
--- a/include/dt-bindings/clock/mt2701-clk.h
+++ b/include/dt-bindings/clock/mt2701-clk.h
@@ -176,7 +176,8 @@
#define CLK_TOP_AUD_EXT1 156
#define CLK_TOP_AUD_EXT2 157
#define CLK_TOP_NFI1X_PAD 158
-#define CLK_TOP_NR 159
+#define CLK_TOP_AXISEL_D4 159
+#define CLK_TOP_NR 160
/* APMIXEDSYS */
diff --git a/include/dt-bindings/clock/mt2712-clk.h b/include/dt-bindings/clock/mt2712-clk.h
index 48a8e797a617..76265836a1e1 100644
--- a/include/dt-bindings/clock/mt2712-clk.h
+++ b/include/dt-bindings/clock/mt2712-clk.h
@@ -222,7 +222,13 @@
#define CLK_TOP_APLL_DIV_PDN5 183
#define CLK_TOP_APLL_DIV_PDN6 184
#define CLK_TOP_APLL_DIV_PDN7 185
-#define CLK_TOP_NR_CLK 186
+#define CLK_TOP_APLL1_D3 186
+#define CLK_TOP_APLL1_REF_SEL 187
+#define CLK_TOP_APLL2_REF_SEL 188
+#define CLK_TOP_NFI2X_EN 189
+#define CLK_TOP_NFIECC_EN 190
+#define CLK_TOP_NFI1X_CK_EN 191
+#define CLK_TOP_NR_CLK 192
/* INFRACFG */
@@ -281,7 +287,9 @@
#define CLK_PERI_MSDC30_3_EN 41
#define CLK_PERI_MSDC50_0_HCLK_EN 42
#define CLK_PERI_MSDC50_3_HCLK_EN 43
-#define CLK_PERI_NR_CLK 44
+#define CLK_PERI_MSDC30_0_QTR_EN 44
+#define CLK_PERI_MSDC30_3_QTR_EN 45
+#define CLK_PERI_NR_CLK 46
/* MCUCFG */
diff --git a/include/dt-bindings/clock/mt7622-clk.h b/include/dt-bindings/clock/mt7622-clk.h
index 3e514ed51d15..e9d77f0e8bce 100644
--- a/include/dt-bindings/clock/mt7622-clk.h
+++ b/include/dt-bindings/clock/mt7622-clk.h
@@ -235,7 +235,8 @@
#define CLK_AUDIO_MEM_ASRC3 43
#define CLK_AUDIO_MEM_ASRC4 44
#define CLK_AUDIO_MEM_ASRC5 45
-#define CLK_AUDIO_NR_CLK 46
+#define CLK_AUDIO_AFE_CONN 46
+#define CLK_AUDIO_NR_CLK 47
/* SSUSBSYS */
diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h
index b8337a5fa347..c585b82b9c05 100644
--- a/include/dt-bindings/clock/qcom,rpmcc.h
+++ b/include/dt-bindings/clock/qcom,rpmcc.h
@@ -40,6 +40,11 @@
#define RPM_SMI_CLK 22
#define RPM_SMI_A_CLK 23
#define RPM_PLL4_CLK 24
+#define RPM_XO_D0 25
+#define RPM_XO_D1 26
+#define RPM_XO_A0 27
+#define RPM_XO_A1 28
+#define RPM_XO_A2 29
/* SMD RPM clocks */
#define RPM_SMD_XO_CLK_SRC 0
diff --git a/include/dt-bindings/clock/r8a77965-cpg-mssr.h b/include/dt-bindings/clock/r8a77965-cpg-mssr.h
new file mode 100644
index 000000000000..6d3b5a9a6084
--- /dev/null
+++ b/include/dt-bindings/clock/r8a77965-cpg-mssr.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A77965_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A77965_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a77965 CPG Core Clocks */
+#define R8A77965_CLK_Z 0
+#define R8A77965_CLK_ZR 1
+#define R8A77965_CLK_ZG 2
+#define R8A77965_CLK_ZTR 3
+#define R8A77965_CLK_ZTRD2 4
+#define R8A77965_CLK_ZT 5
+#define R8A77965_CLK_ZX 6
+#define R8A77965_CLK_S0D1 7
+#define R8A77965_CLK_S0D2 8
+#define R8A77965_CLK_S0D3 9
+#define R8A77965_CLK_S0D4 10
+#define R8A77965_CLK_S0D6 11
+#define R8A77965_CLK_S0D8 12
+#define R8A77965_CLK_S0D12 13
+#define R8A77965_CLK_S1D1 14
+#define R8A77965_CLK_S1D2 15
+#define R8A77965_CLK_S1D4 16
+#define R8A77965_CLK_S2D1 17
+#define R8A77965_CLK_S2D2 18
+#define R8A77965_CLK_S2D4 19
+#define R8A77965_CLK_S3D1 20
+#define R8A77965_CLK_S3D2 21
+#define R8A77965_CLK_S3D4 22
+#define R8A77965_CLK_LB 23
+#define R8A77965_CLK_CL 24
+#define R8A77965_CLK_ZB3 25
+#define R8A77965_CLK_ZB3D2 26
+#define R8A77965_CLK_CR 27
+#define R8A77965_CLK_CRD2 28
+#define R8A77965_CLK_SD0H 29
+#define R8A77965_CLK_SD0 30
+#define R8A77965_CLK_SD1H 31
+#define R8A77965_CLK_SD1 32
+#define R8A77965_CLK_SD2H 33
+#define R8A77965_CLK_SD2 34
+#define R8A77965_CLK_SD3H 35
+#define R8A77965_CLK_SD3 36
+#define R8A77965_CLK_SSP2 37
+#define R8A77965_CLK_SSP1 38
+#define R8A77965_CLK_SSPRS 39
+#define R8A77965_CLK_RPC 40
+#define R8A77965_CLK_RPCD2 41
+#define R8A77965_CLK_MSO 42
+#define R8A77965_CLK_CANFD 43
+#define R8A77965_CLK_HDMI 44
+#define R8A77965_CLK_CSI0 45
+#define R8A77965_CLK_CP 46
+#define R8A77965_CLK_CPEX 47
+#define R8A77965_CLK_R 48
+#define R8A77965_CLK_OSC 49
+
+#endif /* __DT_BINDINGS_CLOCK_R8A77965_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a77980-cpg-mssr.h b/include/dt-bindings/clock/r8a77980-cpg-mssr.h
new file mode 100644
index 000000000000..a4c0d76c392e
--- /dev/null
+++ b/include/dt-bindings/clock/r8a77980-cpg-mssr.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018 Cogent Embedded, Inc.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A77980_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A77980_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a77980 CPG Core Clocks */
+#define R8A77980_CLK_Z2 0
+#define R8A77980_CLK_ZR 1
+#define R8A77980_CLK_ZTR 2
+#define R8A77980_CLK_ZTRD2 3
+#define R8A77980_CLK_ZT 4
+#define R8A77980_CLK_ZX 5
+#define R8A77980_CLK_S0D1 6
+#define R8A77980_CLK_S0D2 7
+#define R8A77980_CLK_S0D3 8
+#define R8A77980_CLK_S0D4 9
+#define R8A77980_CLK_S0D6 10
+#define R8A77980_CLK_S0D12 11
+#define R8A77980_CLK_S0D24 12
+#define R8A77980_CLK_S1D1 13
+#define R8A77980_CLK_S1D2 14
+#define R8A77980_CLK_S1D4 15
+#define R8A77980_CLK_S2D1 16
+#define R8A77980_CLK_S2D2 17
+#define R8A77980_CLK_S2D4 18
+#define R8A77980_CLK_S3D1 19
+#define R8A77980_CLK_S3D2 20
+#define R8A77980_CLK_S3D4 21
+#define R8A77980_CLK_LB 22
+#define R8A77980_CLK_CL 23
+#define R8A77980_CLK_ZB3 24
+#define R8A77980_CLK_ZB3D2 25
+#define R8A77980_CLK_ZB3D4 26
+#define R8A77980_CLK_SD0H 27
+#define R8A77980_CLK_SD0 28
+#define R8A77980_CLK_RPC 29
+#define R8A77980_CLK_RPCD2 30
+#define R8A77980_CLK_MSO 31
+#define R8A77980_CLK_CANFD 32
+#define R8A77980_CLK_CSI0 33
+#define R8A77980_CLK_CP 34
+#define R8A77980_CLK_CPEX 35
+#define R8A77980_CLK_R 36
+#define R8A77980_CLK_OSC 37
+
+#endif /* __DT_BINDINGS_CLOCK_R8A77980_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/rk3328-cru.h b/include/dt-bindings/clock/rk3328-cru.h
index d2b26a4b43eb..a82a0109faff 100644
--- a/include/dt-bindings/clock/rk3328-cru.h
+++ b/include/dt-bindings/clock/rk3328-cru.h
@@ -193,7 +193,6 @@
#define HCLK_VPU_PRE 324
#define HCLK_VIO_PRE 325
#define HCLK_VPU 326
-#define HCLK_VIO 327
#define HCLK_BUS_PRE 328
#define HCLK_PERI_PRE 329
#define HCLK_H264 330
diff --git a/include/dt-bindings/clock/sprd,sc9860-clk.h b/include/dt-bindings/clock/sprd,sc9860-clk.h
index 4cb202f090c2..f2ab4631df0d 100644
--- a/include/dt-bindings/clock/sprd,sc9860-clk.h
+++ b/include/dt-bindings/clock/sprd,sc9860-clk.h
@@ -229,7 +229,26 @@
#define CLK_SDIO1_2X_EN 65
#define CLK_SDIO2_2X_EN 66
#define CLK_EMMC_2X_EN 67
-#define CLK_AON_GATE_NUM (CLK_EMMC_2X_EN + 1)
+#define CLK_ARCH_RTC_EB 68
+#define CLK_KPB_RTC_EB 69
+#define CLK_AON_SYST_RTC_EB 70
+#define CLK_AP_SYST_RTC_EB 71
+#define CLK_AON_TMR_RTC_EB 72
+#define CLK_AP_TMR0_RTC_EB 73
+#define CLK_EIC_RTC_EB 74
+#define CLK_EIC_RTCDV5_EB 75
+#define CLK_AP_WDG_RTC_EB 76
+#define CLK_AP_TMR1_RTC_EB 77
+#define CLK_AP_TMR2_RTC_EB 78
+#define CLK_DCXO_TMR_RTC_EB 79
+#define CLK_BB_CAL_RTC_EB 80
+#define CLK_AVS_BIG_RTC_EB 81
+#define CLK_AVS_LIT_RTC_EB 82
+#define CLK_AVS_GPU0_RTC_EB 83
+#define CLK_AVS_GPU1_RTC_EB 84
+#define CLK_GPU_TS_EB 85
+#define CLK_RTCDV10_EB 86
+#define CLK_AON_GATE_NUM (CLK_RTCDV10_EB + 1)
#define CLK_LIT_MCU 0
#define CLK_BIG_MCU 1
diff --git a/include/dt-bindings/clock/stm32fx-clock.h b/include/dt-bindings/clock/stm32fx-clock.h
index 49bb3c203e5c..58d8b515be55 100644
--- a/include/dt-bindings/clock/stm32fx-clock.h
+++ b/include/dt-bindings/clock/stm32fx-clock.h
@@ -33,11 +33,12 @@
#define CLK_SAI2 11
#define CLK_I2SQ_PDIV 12
#define CLK_SAIQ_PDIV 13
-
-#define END_PRIMARY_CLK 14
-
#define CLK_HSI 14
#define CLK_SYSCLK 15
+#define CLK_F469_DSI 16
+
+#define END_PRIMARY_CLK 17
+
#define CLK_HDMI_CEC 16
#define CLK_SPDIF 17
#define CLK_USART1 18
diff --git a/include/dt-bindings/clock/stm32mp1-clks.h b/include/dt-bindings/clock/stm32mp1-clks.h
new file mode 100644
index 000000000000..86e3ec662ef4
--- /dev/null
+++ b/include/dt-bindings/clock/stm32mp1-clks.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
+ */
+
+#ifndef _DT_BINDINGS_STM32MP1_CLKS_H_
+#define _DT_BINDINGS_STM32MP1_CLKS_H_
+
+/* OSCILLATOR clocks */
+#define CK_HSE 0
+#define CK_CSI 1
+#define CK_LSI 2
+#define CK_LSE 3
+#define CK_HSI 4
+#define CK_HSE_DIV2 5
+
+/* Bus clocks */
+#define TIM2 6
+#define TIM3 7
+#define TIM4 8
+#define TIM5 9
+#define TIM6 10
+#define TIM7 11
+#define TIM12 12
+#define TIM13 13
+#define TIM14 14
+#define LPTIM1 15
+#define SPI2 16
+#define SPI3 17
+#define USART2 18
+#define USART3 19
+#define UART4 20
+#define UART5 21
+#define UART7 22
+#define UART8 23
+#define I2C1 24
+#define I2C2 25
+#define I2C3 26
+#define I2C5 27
+#define SPDIF 28
+#define CEC 29
+#define DAC12 30
+#define MDIO 31
+#define TIM1 32
+#define TIM8 33
+#define TIM15 34
+#define TIM16 35
+#define TIM17 36
+#define SPI1 37
+#define SPI4 38
+#define SPI5 39
+#define USART6 40
+#define SAI1 41
+#define SAI2 42
+#define SAI3 43
+#define DFSDM 44
+#define FDCAN 45
+#define LPTIM2 46
+#define LPTIM3 47
+#define LPTIM4 48
+#define LPTIM5 49
+#define SAI4 50
+#define SYSCFG 51
+#define VREF 52
+#define TMPSENS 53
+#define PMBCTRL 54
+#define HDP 55
+#define LTDC 56
+#define DSI 57
+#define IWDG2 58
+#define USBPHY 59
+#define STGENRO 60
+#define SPI6 61
+#define I2C4 62
+#define I2C6 63
+#define USART1 64
+#define RTCAPB 65
+#define TZC 66
+#define TZPC 67
+#define IWDG1 68
+#define BSEC 69
+#define STGEN 70
+#define DMA1 71
+#define DMA2 72
+#define DMAMUX 73
+#define ADC12 74
+#define USBO 75
+#define SDMMC3 76
+#define DCMI 77
+#define CRYP2 78
+#define HASH2 79
+#define RNG2 80
+#define CRC2 81
+#define HSEM 82
+#define IPCC 83
+#define GPIOA 84
+#define GPIOB 85
+#define GPIOC 86
+#define GPIOD 87
+#define GPIOE 88
+#define GPIOF 89
+#define GPIOG 90
+#define GPIOH 91
+#define GPIOI 92
+#define GPIOJ 93
+#define GPIOK 94
+#define GPIOZ 95
+#define CRYP1 96
+#define HASH1 97
+#define RNG1 98
+#define BKPSRAM 99
+#define MDMA 100
+#define GPU 101
+#define ETHCK 102
+#define ETHTX 103
+#define ETHRX 104
+#define ETHMAC 105
+#define FMC 106
+#define QSPI 107
+#define SDMMC1 108
+#define SDMMC2 109
+#define CRC1 110
+#define USBH 111
+#define ETHSTP 112
+
+/* Kernel clocks */
+#define SDMMC1_K 118
+#define SDMMC2_K 119
+#define SDMMC3_K 120
+#define FMC_K 121
+#define QSPI_K 122
+#define ETHCK_K 123
+#define RNG1_K 124
+#define RNG2_K 125
+#define GPU_K 126
+#define USBPHY_K 127
+#define STGEN_K 128
+#define SPDIF_K 129
+#define SPI1_K 130
+#define SPI2_K 131
+#define SPI3_K 132
+#define SPI4_K 133
+#define SPI5_K 134
+#define SPI6_K 135
+#define CEC_K 136
+#define I2C1_K 137
+#define I2C2_K 138
+#define I2C3_K 139
+#define I2C4_K 140
+#define I2C5_K 141
+#define I2C6_K 142
+#define LPTIM1_K 143
+#define LPTIM2_K 144
+#define LPTIM3_K 145
+#define LPTIM4_K 146
+#define LPTIM5_K 147
+#define USART1_K 148
+#define USART2_K 149
+#define USART3_K 150
+#define UART4_K 151
+#define UART5_K 152
+#define USART6_K 153
+#define UART7_K 154
+#define UART8_K 155
+#define DFSDM_K 156
+#define FDCAN_K 157
+#define SAI1_K 158
+#define SAI2_K 159
+#define SAI3_K 160
+#define SAI4_K 161
+#define ADC12_K 162
+#define DSI_K 163
+#define DSI_PX 164
+#define ADFSDM_K 165
+#define USBO_K 166
+#define LTDC_PX 167
+#define DAC12_K 168
+#define ETHPTP_K 169
+
+/* PLL */
+#define PLL1 176
+#define PLL2 177
+#define PLL3 178
+#define PLL4 179
+
+/* ODF */
+#define PLL1_P 180
+#define PLL1_Q 181
+#define PLL1_R 182
+#define PLL2_P 183
+#define PLL2_Q 184
+#define PLL2_R 185
+#define PLL3_P 186
+#define PLL3_Q 187
+#define PLL3_R 188
+#define PLL4_P 189
+#define PLL4_Q 190
+#define PLL4_R 191
+
+/* AUX */
+#define RTC 192
+
+/* MCLK */
+#define CK_PER 193
+#define CK_MPU 194
+#define CK_AXI 195
+#define CK_MCU 196
+
+/* Time base */
+#define TIM2_K 197
+#define TIM3_K 198
+#define TIM4_K 199
+#define TIM5_K 200
+#define TIM6_K 201
+#define TIM7_K 202
+#define TIM12_K 203
+#define TIM13_K 204
+#define TIM14_K 205
+#define TIM1_K 206
+#define TIM8_K 207
+#define TIM15_K 208
+#define TIM16_K 209
+#define TIM17_K 210
+
+/* MCO clocks */
+#define CK_MCO1 211
+#define CK_MCO2 212
+
+/* TRACE & DEBUG clocks */
+#define DBG 213
+#define CK_DBG 214
+#define CK_TRACE 215
+
+/* DDR */
+#define DDRC1 220
+#define DDRC1LP 221
+#define DDRC2 222
+#define DDRC2LP 223
+#define DDRPHYC 224
+#define DDRPHYCLP 225
+#define DDRCAPB 226
+#define DDRCAPBLP 227
+#define AXIDCG 228
+#define DDRPHYCAPB 229
+#define DDRPHYCAPBLP 230
+#define DDRPERFM 231
+
+#define STM32MP1_LAST_CLK 232
+
+#define LTDC_K LTDC_PX
+#define ETHMAC_K ETHCK_K
+
+#endif /* _DT_BINDINGS_STM32MP1_CLKS_H_ */
diff --git a/include/dt-bindings/clock/stratix10-clock.h b/include/dt-bindings/clock/stratix10-clock.h
new file mode 100644
index 000000000000..0ac1c90a18bf
--- /dev/null
+++ b/include/dt-bindings/clock/stratix10-clock.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017, Intel Corporation
+ */
+
+#ifndef __STRATIX10_CLOCK_H
+#define __STRATIX10_CLOCK_H
+
+/* fixed rate clocks */
+#define STRATIX10_OSC1 0
+#define STRATIX10_CB_INTOSC_HS_DIV2_CLK 1
+#define STRATIX10_CB_INTOSC_LS_CLK 2
+#define STRATIX10_F2S_FREE_CLK 3
+
+/* fixed factor clocks */
+#define STRATIX10_L4_SYS_FREE_CLK 4
+#define STRATIX10_MPU_PERIPH_CLK 5
+#define STRATIX10_MPU_L2RAM_CLK 6
+#define STRATIX10_SDMMC_CIU_CLK 7
+
+/* PLL clocks */
+#define STRATIX10_MAIN_PLL_CLK 8
+#define STRATIX10_PERIPH_PLL_CLK 9
+#define STRATIX10_BOOT_CLK 10
+
+/* Periph clocks */
+#define STRATIX10_MAIN_MPU_BASE_CLK 11
+#define STRATIX10_MAIN_NOC_BASE_CLK 12
+#define STRATIX10_MAIN_EMACA_CLK 13
+#define STRATIX10_MAIN_EMACB_CLK 14
+#define STRATIX10_MAIN_EMAC_PTP_CLK 15
+#define STRATIX10_MAIN_GPIO_DB_CLK 16
+#define STRATIX10_MAIN_SDMMC_CLK 17
+#define STRATIX10_MAIN_S2F_USR0_CLK 18
+#define STRATIX10_MAIN_S2F_USR1_CLK 19
+#define STRATIX10_MAIN_PSI_REF_CLK 20
+
+#define STRATIX10_PERI_MPU_BASE_CLK 21
+#define STRATIX10_PERI_NOC_BASE_CLK 22
+#define STRATIX10_PERI_EMACA_CLK 23
+#define STRATIX10_PERI_EMACB_CLK 24
+#define STRATIX10_PERI_EMAC_PTP_CLK 25
+#define STRATIX10_PERI_GPIO_DB_CLK 26
+#define STRATIX10_PERI_SDMMC_CLK 27
+#define STRATIX10_PERI_S2F_USR0_CLK 28
+#define STRATIX10_PERI_S2F_USR1_CLK 29
+#define STRATIX10_PERI_PSI_REF_CLK 30
+
+#define STRATIX10_MPU_FREE_CLK 31
+#define STRATIX10_NOC_FREE_CLK 32
+#define STRATIX10_S2F_USR0_CLK 33
+#define STRATIX10_NOC_CLK 34
+#define STRATIX10_EMAC_A_FREE_CLK 35
+#define STRATIX10_EMAC_B_FREE_CLK 36
+#define STRATIX10_EMAC_PTP_FREE_CLK 37
+#define STRATIX10_GPIO_DB_FREE_CLK 38
+#define STRATIX10_SDMMC_FREE_CLK 39
+#define STRATIX10_S2F_USER1_FREE_CLK 40
+#define STRATIX10_PSI_REF_FREE_CLK 41
+
+/* Gate clocks */
+#define STRATIX10_MPU_CLK 42
+#define STRATIX10_L4_MAIN_CLK 43
+#define STRATIX10_L4_MP_CLK 44
+#define STRATIX10_L4_SP_CLK 45
+#define STRATIX10_CS_AT_CLK 46
+#define STRATIX10_CS_TRACE_CLK 47
+#define STRATIX10_CS_PDBG_CLK 48
+#define STRATIX10_CS_TIMER_CLK 49
+#define STRATIX10_S2F_USER0_CLK 50
+#define STRATIX10_S2F_USER1_CLK 51
+#define STRATIX10_EMAC0_CLK 52
+#define STRATIX10_EMAC1_CLK 53
+#define STRATIX10_EMAC2_CLK 54
+#define STRATIX10_EMAC_PTP_CLK 55
+#define STRATIX10_GPIO_DB_CLK 56
+#define STRATIX10_SDMMC_CLK 57
+#define STRATIX10_PSI_REF_CLK 58
+#define STRATIX10_USB_CLK 59
+#define STRATIX10_SPI_M_CLK 60
+#define STRATIX10_NAND_CLK 61
+#define STRATIX10_NUM_CLKS 62
+
+#endif /* __STRATIX10_CLOCK_H */
diff --git a/include/dt-bindings/clock/sun50i-h6-ccu.h b/include/dt-bindings/clock/sun50i-h6-ccu.h
new file mode 100644
index 000000000000..a1545cd60e75
--- /dev/null
+++ b/include/dt-bindings/clock/sun50i-h6-ccu.h
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: (GPL-2.0+ or MIT)
+/*
+ * Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN50I_H6_H_
+#define _DT_BINDINGS_CLK_SUN50I_H6_H_
+
+#define CLK_PLL_PERIPH0 3
+
+#define CLK_CPUX 21
+
+#define CLK_APB1 26
+
+#define CLK_DE 29
+#define CLK_BUS_DE 30
+#define CLK_DEINTERLACE 31
+#define CLK_BUS_DEINTERLACE 32
+#define CLK_GPU 33
+#define CLK_BUS_GPU 34
+#define CLK_CE 35
+#define CLK_BUS_CE 36
+#define CLK_VE 37
+#define CLK_BUS_VE 38
+#define CLK_EMCE 39
+#define CLK_BUS_EMCE 40
+#define CLK_VP9 41
+#define CLK_BUS_VP9 42
+#define CLK_BUS_DMA 43
+#define CLK_BUS_MSGBOX 44
+#define CLK_BUS_SPINLOCK 45
+#define CLK_BUS_HSTIMER 46
+#define CLK_AVS 47
+#define CLK_BUS_DBG 48
+#define CLK_BUS_PSI 49
+#define CLK_BUS_PWM 50
+#define CLK_BUS_IOMMU 51
+
+#define CLK_MBUS_DMA 53
+#define CLK_MBUS_VE 54
+#define CLK_MBUS_CE 55
+#define CLK_MBUS_TS 56
+#define CLK_MBUS_NAND 57
+#define CLK_MBUS_CSI 58
+#define CLK_MBUS_DEINTERLACE 59
+
+#define CLK_NAND0 61
+#define CLK_NAND1 62
+#define CLK_BUS_NAND 63
+#define CLK_MMC0 64
+#define CLK_MMC1 65
+#define CLK_MMC2 66
+#define CLK_BUS_MMC0 67
+#define CLK_BUS_MMC1 68
+#define CLK_BUS_MMC2 69
+#define CLK_BUS_UART0 70
+#define CLK_BUS_UART1 71
+#define CLK_BUS_UART2 72
+#define CLK_BUS_UART3 73
+#define CLK_BUS_I2C0 74
+#define CLK_BUS_I2C1 75
+#define CLK_BUS_I2C2 76
+#define CLK_BUS_I2C3 77
+#define CLK_BUS_SCR0 78
+#define CLK_BUS_SCR1 79
+#define CLK_SPI0 80
+#define CLK_SPI1 81
+#define CLK_BUS_SPI0 82
+#define CLK_BUS_SPI1 83
+#define CLK_BUS_EMAC 84
+#define CLK_TS 85
+#define CLK_BUS_TS 86
+#define CLK_IR_TX 87
+#define CLK_BUS_IR_TX 88
+#define CLK_BUS_THS 89
+#define CLK_I2S3 90
+#define CLK_I2S0 91
+#define CLK_I2S1 92
+#define CLK_I2S2 93
+#define CLK_BUS_I2S0 94
+#define CLK_BUS_I2S1 95
+#define CLK_BUS_I2S2 96
+#define CLK_BUS_I2S3 97
+#define CLK_SPDIF 98
+#define CLK_BUS_SPDIF 99
+#define CLK_DMIC 100
+#define CLK_BUS_DMIC 101
+#define CLK_AUDIO_HUB 102
+#define CLK_BUS_AUDIO_HUB 103
+#define CLK_USB_OHCI0 104
+#define CLK_USB_PHY0 105
+#define CLK_USB_PHY1 106
+#define CLK_USB_OHCI3 107
+#define CLK_USB_PHY3 108
+#define CLK_USB_HSIC_12M 109
+#define CLK_USB_HSIC 110
+#define CLK_BUS_OHCI0 111
+#define CLK_BUS_OHCI3 112
+#define CLK_BUS_EHCI0 113
+#define CLK_BUS_XHCI 114
+#define CLK_BUS_EHCI3 115
+#define CLK_BUS_OTG 116
+#define CLK_PCIE_REF_100M 117
+#define CLK_PCIE_REF 118
+#define CLK_PCIE_REF_OUT 119
+#define CLK_PCIE_MAXI 120
+#define CLK_PCIE_AUX 121
+#define CLK_BUS_PCIE 122
+#define CLK_HDMI 123
+#define CLK_HDMI_SLOW 124
+#define CLK_HDMI_CEC 125
+#define CLK_BUS_HDMI 126
+#define CLK_BUS_TCON_TOP 127
+#define CLK_TCON_LCD0 128
+#define CLK_BUS_TCON_LCD0 129
+#define CLK_TCON_TV0 130
+#define CLK_BUS_TCON_TV0 131
+#define CLK_CSI_CCI 132
+#define CLK_CSI_TOP 133
+#define CLK_CSI_MCLK 134
+#define CLK_BUS_CSI 135
+#define CLK_HDCP 136
+#define CLK_BUS_HDCP 137
+
+#endif /* _DT_BINDINGS_CLK_SUN50I_H6_H_ */
diff --git a/include/dt-bindings/clock/sun8i-h3-ccu.h b/include/dt-bindings/clock/sun8i-h3-ccu.h
index e139fe5c62ec..c5f7e9a70968 100644
--- a/include/dt-bindings/clock/sun8i-h3-ccu.h
+++ b/include/dt-bindings/clock/sun8i-h3-ccu.h
@@ -43,6 +43,8 @@
#ifndef _DT_BINDINGS_CLK_SUN8I_H3_H_
#define _DT_BINDINGS_CLK_SUN8I_H3_H_
+#define CLK_PLL_VIDEO 6
+
#define CLK_PLL_PERIPH0 9
#define CLK_CPUX 14
diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h
index 6422314e46eb..6b77e721f6b1 100644
--- a/include/dt-bindings/clock/tegra210-car.h
+++ b/include/dt-bindings/clock/tegra210-car.h
@@ -95,7 +95,7 @@
#define TEGRA210_CLK_CSITE 73
/* 74 */
/* 75 */
-/* 76 */
+#define TEGRA210_CLK_LA 76
/* 77 */
#define TEGRA210_CLK_SOC_THERM 78
#define TEGRA210_CLK_DTV 79
diff --git a/include/dt-bindings/reset/sun50i-h6-ccu.h b/include/dt-bindings/reset/sun50i-h6-ccu.h
new file mode 100644
index 000000000000..81106f455097
--- /dev/null
+++ b/include/dt-bindings/reset/sun50i-h6-ccu.h
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: (GPL-2.0+ or MIT)
+/*
+ * Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
+ */
+
+#ifndef _DT_BINDINGS_RESET_SUN50I_H6_H_
+#define _DT_BINDINGS_RESET_SUN50I_H6_H_
+
+#define RST_MBUS 0
+#define RST_BUS_DE 1
+#define RST_BUS_DEINTERLACE 2
+#define RST_BUS_GPU 3
+#define RST_BUS_CE 4
+#define RST_BUS_VE 5
+#define RST_BUS_EMCE 6
+#define RST_BUS_VP9 7
+#define RST_BUS_DMA 8
+#define RST_BUS_MSGBOX 9
+#define RST_BUS_SPINLOCK 10
+#define RST_BUS_HSTIMER 11
+#define RST_BUS_DBG 12
+#define RST_BUS_PSI 13
+#define RST_BUS_PWM 14
+#define RST_BUS_IOMMU 15
+#define RST_BUS_DRAM 16
+#define RST_BUS_NAND 17
+#define RST_BUS_MMC0 18
+#define RST_BUS_MMC1 19
+#define RST_BUS_MMC2 20
+#define RST_BUS_UART0 21
+#define RST_BUS_UART1 22
+#define RST_BUS_UART2 23
+#define RST_BUS_UART3 24
+#define RST_BUS_I2C0 25
+#define RST_BUS_I2C1 26
+#define RST_BUS_I2C2 27
+#define RST_BUS_I2C3 28
+#define RST_BUS_SCR0 29
+#define RST_BUS_SCR1 30
+#define RST_BUS_SPI0 31
+#define RST_BUS_SPI1 32
+#define RST_BUS_EMAC 33
+#define RST_BUS_TS 34
+#define RST_BUS_IR_TX 35
+#define RST_BUS_THS 36
+#define RST_BUS_I2S0 37
+#define RST_BUS_I2S1 38
+#define RST_BUS_I2S2 39
+#define RST_BUS_I2S3 40
+#define RST_BUS_SPDIF 41
+#define RST_BUS_DMIC 42
+#define RST_BUS_AUDIO_HUB 43
+#define RST_USB_PHY0 44
+#define RST_USB_PHY1 45
+#define RST_USB_PHY3 46
+#define RST_USB_HSIC 47
+#define RST_BUS_OHCI0 48
+#define RST_BUS_OHCI3 49
+#define RST_BUS_EHCI0 50
+#define RST_BUS_XHCI 51
+#define RST_BUS_EHCI3 52
+#define RST_BUS_OTG 53
+#define RST_BUS_PCIE 54
+#define RST_PCIE_POWERUP 55
+#define RST_BUS_HDMI 56
+#define RST_BUS_HDMI_SUB 57
+#define RST_BUS_TCON_TOP 58
+#define RST_BUS_TCON_LCD0 59
+#define RST_BUS_TCON_TV0 60
+#define RST_BUS_CSI 61
+#define RST_BUS_HDCP 62
+
+#endif /* _DT_BINDINGS_RESET_SUN50I_H6_H_ */
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 2f7a29242b87..38cd77b39a64 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -26,7 +26,8 @@
#define IORT_IRQ_MASK(irq) (irq & 0xffffffffULL)
#define IORT_IRQ_TRIGGER_MASK(irq) ((irq >> 32) & 0xffffffffULL)
-int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node);
+int iort_register_domain_token(int trans_id, phys_addr_t base,
+ struct fwnode_handle *fw_node);
void iort_deregister_domain_token(int trans_id);
struct fwnode_handle *iort_find_domain_token(int trans_id);
#ifdef CONFIG_ACPI_IORT
@@ -38,6 +39,7 @@ int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
/* IOMMU interface */
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size);
const struct iommu_ops *iort_iommu_configure(struct device *dev);
+int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head);
#else
static inline void acpi_iort_init(void) { }
static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
@@ -52,6 +54,9 @@ static inline void iort_dma_setup(struct device *dev, u64 *dma_addr,
static inline const struct iommu_ops *iort_iommu_configure(
struct device *dev)
{ return NULL; }
+static inline
+int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
+{ return 0; }
#endif
#endif /* __ACPI_IORT_H__ */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 09da0f124699..f6be4b0b6c18 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -28,6 +28,7 @@ void bdi_put(struct backing_dev_info *bdi);
__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
+__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
va_list args);
int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8efcf49796a3..e3986f4b3461 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -183,7 +183,6 @@ enum {
BLK_MQ_S_STOPPED = 0,
BLK_MQ_S_TAG_ACTIVE = 1,
BLK_MQ_S_SCHED_RESTART = 2,
- BLK_MQ_S_START_ON_RUN = 3,
BLK_MQ_MAX_DEPTH = 10240,
@@ -270,7 +269,6 @@ void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
-void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index f711be6e8c44..210a890008f9 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -399,6 +399,7 @@ struct clk_divider {
spinlock_t *lock;
};
+#define clk_div_mask(width) ((1 << (width)) - 1)
#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
#define CLK_DIVIDER_ONE_BASED BIT(0)
@@ -419,6 +420,10 @@ long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
unsigned long rate, unsigned long *prate,
const struct clk_div_table *table,
u8 width, unsigned long flags);
+long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
+ unsigned long rate, unsigned long *prate,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags, unsigned int val);
int divider_get_val(unsigned long rate, unsigned long parent_rate,
const struct clk_div_table *table, u8 width,
unsigned long flags);
@@ -449,8 +454,9 @@ void clk_hw_unregister_divider(struct clk_hw *hw);
*
* @hw: handle between common and hardware-specific interfaces
* @reg: register controlling multiplexer
+ * @table: array of register values corresponding to the parent index
* @shift: shift to multiplexer bit field
- * @width: width of mutliplexer bit field
+ * @mask: mask of multiplexer bit field
* @flags: hardware-specific flags
* @lock: register lock
*
@@ -510,6 +516,10 @@ struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
void __iomem *reg, u8 shift, u32 mask,
u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
+ unsigned int val);
+unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index);
+
void clk_unregister_mux(struct clk *clk);
void clk_hw_unregister_mux(struct clk_hw *hw);
@@ -774,6 +784,17 @@ static inline long divider_round_rate(struct clk_hw *hw, unsigned long rate,
rate, prate, table, width, flags);
}
+static inline long divider_ro_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate,
+ const struct clk_div_table *table,
+ u8 width, unsigned long flags,
+ unsigned int val)
+{
+ return divider_ro_round_rate_parent(hw, clk_hw_get_parent(hw),
+ rate, prate, table, width, flags,
+ val);
+}
+
/*
* FIXME clock api without lock protection
*/
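
The clk_div_mask() helper added above simply builds the bitmask for a divider field of the given width, and divider_ro_round_rate() is a thin wrapper forwarding to divider_ro_round_rate_parent() with the current parent. As an illustration only — the function name and register layout below are hypothetical, not part of this patch — a divider implementation would typically use the mask like this when reading the current divider value back from hardware:

#include <linux/clk-provider.h>
#include <linux/io.h>

/* Hypothetical helper: extract the raw divider field from a register. */
static unsigned int example_read_div_val(void __iomem *reg, u8 shift, u8 width)
{
	u32 val = readl(reg);

	return (val >> shift) & clk_div_mask(width);
}
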
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 4c4ef9f34db3..0dbd0885b2c2 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -209,7 +209,7 @@ static inline int clk_prepare(struct clk *clk)
return 0;
}
-static inline int clk_bulk_prepare(int num_clks, struct clk_bulk_data *clks)
+static inline int __must_check clk_bulk_prepare(int num_clks, struct clk_bulk_data *clks)
{
might_sleep();
return 0;
@@ -603,8 +603,8 @@ static inline struct clk *clk_get(struct device *dev, const char *id)
return NULL;
}
-static inline int clk_bulk_get(struct device *dev, int num_clks,
- struct clk_bulk_data *clks)
+static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks)
{
return 0;
}
@@ -614,8 +614,8 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id)
return NULL;
}
-static inline int devm_clk_bulk_get(struct device *dev, int num_clks,
- struct clk_bulk_data *clks)
+static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks)
{
return 0;
}
@@ -645,7 +645,7 @@ static inline int clk_enable(struct clk *clk)
return 0;
}
-static inline int clk_bulk_enable(int num_clks, struct clk_bulk_data *clks)
+static inline int __must_check clk_bulk_enable(int num_clks, struct clk_bulk_data *clks)
{
return 0;
}
@@ -719,8 +719,8 @@ static inline void clk_disable_unprepare(struct clk *clk)
clk_unprepare(clk);
}
-static inline int clk_bulk_prepare_enable(int num_clks,
- struct clk_bulk_data *clks)
+static inline int __must_check clk_bulk_prepare_enable(int num_clks,
+ struct clk_bulk_data *clks)
{
int ret;
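
With __must_check now applied to the !CONFIG_HAVE_CLK stubs as well, a caller that ignores the return value of the bulk helpers warns on every configuration, not just when the clock framework is built in. A minimal consumer sketch, assuming a driver with two hypothetical clock consumer names ("bus" and "mod"):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>

static int example_probe(struct device *dev)
{
	/* Hypothetical clock consumer names; handles are filled in by the get. */
	struct clk_bulk_data clks[] = {
		{ .id = "bus" },
		{ .id = "mod" },
	};
	int ret;

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(clks), clks);
	if (ret)
		return ret;

	/* Ignoring this return value now triggers a __must_check warning. */
	return clk_bulk_prepare_enable(ARRAY_SIZE(clks), clks);
}
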
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index d23c9cf26993..afb9edfa5d58 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -128,5 +128,6 @@ extern void tegra210_sata_pll_hw_sequence_start(void);
extern void tegra210_set_sata_pll_seq_sw(bool state);
extern void tegra210_put_utmipll_in_iddq(void);
extern void tegra210_put_utmipll_out_iddq(void);
+extern int tegra210_clk_handle_mbist_war(unsigned int id);
#endif /* __LINUX_CLK_TEGRA_H_ */
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 7e3bceee3489..a8faa38b1ed6 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -212,6 +212,7 @@ enum {
* struct ti_clk_ll_ops - low-level ops for clocks
* @clk_readl: pointer to register read function
* @clk_writel: pointer to register write function
+ * @clk_rmw: pointer to register read-modify-write function
* @clkdm_clk_enable: pointer to clockdomain enable function
* @clkdm_clk_disable: pointer to clockdomain disable function
* @clkdm_lookup: pointer to clockdomain lookup function
@@ -227,6 +228,7 @@ enum {
struct ti_clk_ll_ops {
u32 (*clk_readl)(const struct clk_omap_reg *reg);
void (*clk_writel)(u32 val, const struct clk_omap_reg *reg);
+ void (*clk_rmw)(u32 val, u32 mask, const struct clk_omap_reg *reg);
int (*clkdm_clk_enable)(struct clockdomain *clkdm, struct clk *clk);
int (*clkdm_clk_disable)(struct clockdomain *clkdm,
struct clk *clk);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index f188eab10570..081281ad5772 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -24,6 +24,17 @@
#include <asm/siginfo.h>
#include <asm/signal.h>
+#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+/*
+ * It may be useful for an architecture to override the definitions of the
+ * COMPAT_SYSCALL_DEFINE0 and COMPAT_SYSCALL_DEFINEx() macros, in particular
+ * to use a different calling convention for syscalls. To allow for that,
+ * the prototypes for the compat_sys_*() functions below will *not* be included
+ * if CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled.
+ */
+#include <asm/syscall_wrapper.h>
+#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+
#ifndef COMPAT_USE_64BIT_TIME
#define COMPAT_USE_64BIT_TIME 0
#endif
@@ -32,10 +43,12 @@
#define __SC_DELOUSE(t,v) ((__force t)(unsigned long)(v))
#endif
+#ifndef COMPAT_SYSCALL_DEFINE0
#define COMPAT_SYSCALL_DEFINE0(name) \
asmlinkage long compat_sys_##name(void); \
ALLOW_ERROR_INJECTION(compat_sys_##name, ERRNO); \
asmlinkage long compat_sys_##name(void)
+#endif /* COMPAT_SYSCALL_DEFINE0 */
#define COMPAT_SYSCALL_DEFINE1(name, ...) \
COMPAT_SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
@@ -50,18 +63,25 @@
#define COMPAT_SYSCALL_DEFINE6(name, ...) \
COMPAT_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__)
-#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
- asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
- asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))\
- __attribute__((alias(__stringify(compat_SyS##name)))); \
- ALLOW_ERROR_INJECTION(compat_sys##name, ERRNO); \
- static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
- asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__));\
- asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))\
- { \
- return C_SYSC##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
- } \
- static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+/*
+ * The asmlinkage stub is aliased to a function named __se_compat_sys_*() which
+ * sign-extends 32-bit ints to longs whenever needed. The actual work is
+ * done within __do_compat_sys_*().
+ */
+#ifndef COMPAT_SYSCALL_DEFINEx
+#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+ asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
+ __attribute__((alias(__stringify(__se_compat_sys##name)))); \
+ ALLOW_ERROR_INJECTION(compat_sys##name, ERRNO); \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
+ asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+ } \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+#endif /* COMPAT_SYSCALL_DEFINEx */
#ifndef compat_user_stack_pointer
#define compat_user_stack_pointer() current_user_stack_pointer()
@@ -519,7 +539,12 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long);
* Please note that these prototypes here are only provided for information
* purposes, for static analysis, and for linking from the syscall table.
* These functions should not be called elsewhere from kernel code.
+ *
+ * As the syscall calling convention may be different from the default
+ * for architectures overriding the syscall calling convention, do not
+ * include the prototypes if CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled.
*/
+#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p);
asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr,
u32 __user *iocb);
@@ -957,6 +982,8 @@ asmlinkage long compat_sys_stime(compat_time_t __user *tptr);
/* obsolete: net/socket.c */
asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
+#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+
/*
* For most but not all architectures, "am I in a compat syscall?" and
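
For orientation, here is roughly what the reworked macro now produces for a hypothetical COMPAT_SYSCALL_DEFINE1(example, unsigned int, arg) — simplified by hand, with the duplicate declaration and the error-injection annotation left out; none of these symbols exist in the tree:

asmlinkage long compat_sys_example(unsigned int arg)
	__attribute__((alias("__se_compat_sys_example")));

static inline long __do_compat_sys_example(unsigned int arg);

/* Receives register-width arguments and "delouses" them via __SC_DELOUSE. */
asmlinkage long __se_compat_sys_example(long arg)
{
	return __do_compat_sys_example((unsigned int)(unsigned long)arg);
}

/* The body written by the syscall author ends up here. */
static inline long __do_compat_sys_example(unsigned int arg)
{
	return arg ? 0 : -EINVAL;
}
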
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 1fe49724da9e..87f48dd932eb 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -960,8 +960,6 @@ extern void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
extern struct freq_attr *cpufreq_generic_attr[];
-int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table);
int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);
unsigned int cpufreq_generic_get(unsigned int cpu);
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index a806e94c482f..1eefabf1621f 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -135,7 +135,8 @@ extern bool cpuidle_not_available(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern int cpuidle_select(struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
+ struct cpuidle_device *dev,
+ bool *stop_tick);
extern int cpuidle_enter(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index);
extern void cpuidle_reflect(struct cpuidle_device *dev, int index);
@@ -167,7 +168,7 @@ static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return true; }
static inline int cpuidle_select(struct cpuidle_driver *drv,
- struct cpuidle_device *dev)
+ struct cpuidle_device *dev, bool *stop_tick)
{return -ENODEV; }
static inline int cpuidle_enter(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index)
@@ -250,7 +251,8 @@ struct cpuidle_governor {
struct cpuidle_device *dev);
int (*select) (struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
+ struct cpuidle_device *dev,
+ bool *stop_tick);
void (*reflect) (struct cpuidle_device *dev, int index);
};
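
Both ->select() hooks gain a stop_tick output so a governor can tell the idle loop whether stopping the scheduler tick is worthwhile. A hypothetical governor callback — not the actual menu governor logic — might combine it with the reworked tick_nohz_get_sleep_length() like this:

#include <linux/cpuidle.h>
#include <linux/tick.h>

static int example_select(struct cpuidle_driver *drv,
			  struct cpuidle_device *dev, bool *stop_tick)
{
	ktime_t delta_next;
	ktime_t sleep = tick_nohz_get_sleep_length(&delta_next);

	/* Made-up policy: only stop the tick for >1ms of expected idle time. */
	*stop_tick = ktime_to_us(sleep) > 1000;

	return 0;	/* index of the chosen idle state */
}
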
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 92efaf1f8977..760d8da1b6c7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1667,7 +1667,7 @@ typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
unsigned);
struct dir_context {
- const filldir_t actor;
+ filldir_t actor;
loff_t pos;
};
@@ -2445,6 +2445,7 @@ extern int sync_blockdev(struct block_device *bdev);
extern void kill_bdev(struct block_device *);
extern struct super_block *freeze_bdev(struct block_device *);
extern void emergency_thaw_all(void);
+extern void emergency_thaw_bdev(struct super_block *sb);
extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
extern int fsync_bdev(struct block_device *);
@@ -2470,6 +2471,11 @@ static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
return 0;
}
+static inline int emergency_thaw_bdev(struct super_block *sb)
+{
+ return 0;
+}
+
static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg)
{
}
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 78f456fcd242..a2656c3ebe81 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -424,6 +424,7 @@ static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
}
extern u64 hrtimer_get_next_event(void);
+extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);
extern bool hrtimer_active(const struct hrtimer *timer);
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 8dad3dd26eae..ef169d67df92 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -209,12 +209,12 @@
#define DMA_FECTL_IM (((u32)1) << 31)
/* FSTS_REG */
-#define DMA_FSTS_PPF ((u32)2)
-#define DMA_FSTS_PFO ((u32)1)
-#define DMA_FSTS_IQE (1 << 4)
-#define DMA_FSTS_ICE (1 << 5)
-#define DMA_FSTS_ITE (1 << 6)
-#define DMA_FSTS_PRO (1 << 7)
+#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */
+#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */
+#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */
+#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */
+#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */
+#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
/* FRCD_REG, 32 bits access */
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 41b8c5757859..19938ee6eb31 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -465,23 +465,23 @@ static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
}
-static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size)
+static inline size_t iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
{
- return -ENODEV;
+ return 0;
}
-static inline int iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova,
- int gfp_order)
+static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
+ unsigned long iova, int gfp_order)
{
- return -ENODEV;
+ return 0;
}
static inline size_t iommu_map_sg(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot)
{
- return -ENODEV;
+ return 0;
}
static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 9385aa57497b..a27cf6652327 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -62,8 +62,11 @@ extern int register_refined_jiffies(long clock_tick_rate);
/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */
#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ)
-/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
-#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
+/* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */
+#define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ)
+
+/* USER_TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
+#define USER_TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
#ifndef __jiffy_arch_data
#define __jiffy_arch_data
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 0ebcbeb21056..9e4e638fb505 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -99,21 +99,25 @@ struct compat_kexec_segment {
#ifdef CONFIG_KEXEC_FILE
struct purgatory_info {
- /* Pointer to elf header of read only purgatory */
- Elf_Ehdr *ehdr;
-
- /* Pointer to purgatory sechdrs which are modifiable */
+ /*
+ * Pointer to elf header at the beginning of kexec_purgatory.
+ * Note: kexec_purgatory is read only
+ */
+ const Elf_Ehdr *ehdr;
+ /*
+ * Temporary, modifiable buffer for sechdrs used for relocation.
+ * This memory can be freed post image load.
+ */
Elf_Shdr *sechdrs;
/*
- * Temporary buffer location where purgatory is loaded and relocated
- * This memory can be freed post image load
+ * Temporary, modifiable buffer for stripped purgatory used for
+ * relocation. This memory can be freed post image load.
*/
void *purgatory_buf;
-
- /* Address where purgatory is finally loaded and is executed from */
- unsigned long purgatory_load_addr;
};
+struct kimage;
+
typedef int (kexec_probe_t)(const char *kernel_buf, unsigned long kernel_size);
typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf,
unsigned long kernel_len, char *initrd,
@@ -135,6 +139,11 @@ struct kexec_file_ops {
#endif
};
+extern const struct kexec_file_ops * const kexec_file_loaders[];
+
+int kexec_image_probe_default(struct kimage *image, void *buf,
+ unsigned long buf_len);
+
/**
* struct kexec_buf - parameters for finding a place for a buffer in memory
* @image: kexec image in which memory to search.
@@ -159,10 +168,44 @@ struct kexec_buf {
bool top_down;
};
+int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf);
+int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
+ void *buf, unsigned int size,
+ bool get_value);
+void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
+
+int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+ Elf_Shdr *section,
+ const Elf_Shdr *relsec,
+ const Elf_Shdr *symtab);
+int __weak arch_kexec_apply_relocations(struct purgatory_info *pi,
+ Elf_Shdr *section,
+ const Elf_Shdr *relsec,
+ const Elf_Shdr *symtab);
+
int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
int (*func)(struct resource *, void *));
extern int kexec_add_buffer(struct kexec_buf *kbuf);
int kexec_locate_mem_hole(struct kexec_buf *kbuf);
+
+/* Alignment required for elf header segment */
+#define ELF_CORE_HEADER_ALIGN 4096
+
+struct crash_mem_range {
+ u64 start, end;
+};
+
+struct crash_mem {
+ unsigned int max_nr_ranges;
+ unsigned int nr_ranges;
+ struct crash_mem_range ranges[0];
+};
+
+extern int crash_exclude_mem_range(struct crash_mem *mem,
+ unsigned long long mstart,
+ unsigned long long mend);
+extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
+ void **addr, unsigned long *sz);
#endif /* CONFIG_KEXEC_FILE */
struct kimage {
@@ -209,7 +252,7 @@ struct kimage {
unsigned long cmdline_buf_len;
/* File operations provided by image loader */
- struct kexec_file_ops *fops;
+ const struct kexec_file_ops *fops;
/* Image loader handling the kernel can store a pointer here */
void *image_loader_data;
@@ -226,14 +269,6 @@ extern void machine_kexec_cleanup(struct kimage *image);
extern int kernel_kexec(void);
extern struct page *kimage_alloc_control_pages(struct kimage *image,
unsigned int order);
-extern int kexec_load_purgatory(struct kimage *image, unsigned long min,
- unsigned long max, int top_down,
- unsigned long *load_addr);
-extern int kexec_purgatory_get_set_symbol(struct kimage *image,
- const char *name, void *buf,
- unsigned int size, bool get_value);
-extern void *kexec_purgatory_get_symbol_addr(struct kimage *image,
- const char *name);
extern void __crash_kexec(struct pt_regs *);
extern void crash_kexec(struct pt_regs *);
int kexec_should_crash(struct task_struct *);
@@ -273,16 +308,6 @@ int crash_shrink_memory(unsigned long new_size);
size_t crash_get_memory_size(void);
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
-int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
- unsigned long buf_len);
-void * __weak arch_kexec_kernel_image_load(struct kimage *image);
-int __weak arch_kimage_file_post_load_cleanup(struct kimage *image);
-int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
- unsigned long buf_len);
-int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr,
- Elf_Shdr *sechdrs, unsigned int relsec);
-int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
- unsigned int relsec);
void arch_kexec_protect_crashkres(void);
void arch_kexec_unprotect_crashkres(void);
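
The purgatory and crash-dump helpers that used to be declared per architecture are now exposed here for common use. A hedged sketch of how an architecture's kexec_file loader might drive the new crash_mem helpers — the function name, range count and all addresses below are made up for illustration:

#include <linux/kexec.h>
#include <linux/slab.h>

static int example_prepare_headers(void **addr, unsigned long *sz)
{
	unsigned int max_ranges = 16;	/* arbitrary for this sketch */
	struct crash_mem *cmem;
	int ret;

	cmem = kzalloc(sizeof(*cmem) +
		       max_ranges * sizeof(struct crash_mem_range), GFP_KERNEL);
	if (!cmem)
		return -ENOMEM;

	cmem->max_nr_ranges = max_ranges;
	cmem->nr_ranges = 1;
	cmem->ranges[0].start = 0x100000;	/* hypothetical System RAM range */
	cmem->ranges[0].end = 0x7fffffff;

	/* Punch out the region reserved for the crash kernel itself. */
	ret = crash_exclude_mem_range(cmem, 0x20000000, 0x2fffffff);
	if (!ret)
		ret = crash_prepare_elf64_headers(cmem, 1 /* kernel_map */,
						  addr, sz);

	kfree(cmem);
	return ret;
}
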
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 2eac32095113..99f17cc8e163 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -37,6 +37,7 @@ struct lockref {
extern void lockref_get(struct lockref *);
extern int lockref_put_return(struct lockref *);
extern int lockref_get_not_zero(struct lockref *);
+extern int lockref_put_not_zero(struct lockref *);
extern int lockref_get_or_lock(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index c61535979b8f..2d4e23c9ea0a 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -183,6 +183,7 @@ struct cros_ec_debugfs;
* @ec_dev: cros_ec_device structure to talk to the physical device
* @dev: pointer to the platform device
* @debug_info: cros_ec_debugfs structure for debugging information
+ * @has_kb_wake_angle: true if at least 2 accelerometers are connected to the EC.
* @cmd_offset: offset to apply for each command.
*/
struct cros_ec_dev {
@@ -191,6 +192,7 @@ struct cros_ec_dev {
struct cros_ec_device *ec_dev;
struct device *dev;
struct cros_ec_debugfs *debug_info;
+ bool has_kb_wake_angle;
u16 cmd_offset;
u32 features[2];
};
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 2b96e630e3b6..f2edd9969b40 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -2948,6 +2948,9 @@ struct ec_response_usb_pd_control_v1 {
#define EC_CMD_USB_PD_PORTS 0x102
+/* Maximum number of PD ports on a device, num_ports will be <= this */
+#define EC_USB_PD_MAX_PORTS 8
+
struct ec_response_usb_pd_ports {
uint8_t num_ports;
} __packed;
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 48fb2b43c35a..7d361be2e24f 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -502,6 +502,7 @@ enum dmi_field {
DMI_CHASSIS_SERIAL,
DMI_CHASSIS_ASSET_TAG,
DMI_STRING_MAX,
+ DMI_OEM_STRING, /* special case - will not be in dmi_ident */
};
struct dmi_strmatch {
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 38187c68063d..2f129bbfaae8 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -198,14 +198,24 @@ struct nfs_inode {
/*
* Cache validity bit flags
*/
-#define NFS_INO_INVALID_ATTR 0x0001 /* cached attrs are invalid */
-#define NFS_INO_INVALID_DATA 0x0002 /* cached data is invalid */
-#define NFS_INO_INVALID_ATIME 0x0004 /* cached atime is invalid */
-#define NFS_INO_INVALID_ACCESS 0x0008 /* cached access cred invalid */
-#define NFS_INO_INVALID_ACL 0x0010 /* cached acls are invalid */
-#define NFS_INO_REVAL_PAGECACHE 0x0020 /* must revalidate pagecache */
-#define NFS_INO_REVAL_FORCED 0x0040 /* force revalidation ignoring a delegation */
-#define NFS_INO_INVALID_LABEL 0x0080 /* cached label is invalid */
+#define NFS_INO_INVALID_DATA BIT(1) /* cached data is invalid */
+#define NFS_INO_INVALID_ATIME BIT(2) /* cached atime is invalid */
+#define NFS_INO_INVALID_ACCESS BIT(3) /* cached access cred invalid */
+#define NFS_INO_INVALID_ACL BIT(4) /* cached acls are invalid */
+#define NFS_INO_REVAL_PAGECACHE BIT(5) /* must revalidate pagecache */
+#define NFS_INO_REVAL_FORCED BIT(6) /* force revalidation ignoring a delegation */
+#define NFS_INO_INVALID_LABEL BIT(7) /* cached label is invalid */
+#define NFS_INO_INVALID_CHANGE BIT(8) /* cached change is invalid */
+#define NFS_INO_INVALID_CTIME BIT(9) /* cached ctime is invalid */
+#define NFS_INO_INVALID_MTIME BIT(10) /* cached mtime is invalid */
+#define NFS_INO_INVALID_SIZE BIT(11) /* cached size is invalid */
+#define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */
+
+#define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \
+ | NFS_INO_INVALID_CTIME \
+ | NFS_INO_INVALID_MTIME \
+ | NFS_INO_INVALID_SIZE \
+ | NFS_INO_INVALID_OTHER) /* inode metadata is invalid */
/*
* Bit offsets in flags field
@@ -292,10 +302,11 @@ static inline void nfs_mark_for_revalidate(struct inode *inode)
struct nfs_inode *nfsi = NFS_I(inode);
spin_lock(&inode->i_lock);
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR |
- NFS_INO_REVAL_PAGECACHE |
- NFS_INO_INVALID_ACCESS |
- NFS_INO_INVALID_ACL;
+ nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_INVALID_CHANGE
+ | NFS_INO_INVALID_CTIME;
if (S_ISDIR(inode->i_mode))
nfsi->cache_validity |= NFS_INO_INVALID_DATA;
spin_unlock(&inode->i_lock);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 6959968dc36a..34d28564ecf3 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1590,11 +1590,13 @@ struct nfs_rpc_ops {
unsigned int);
int (*create) (struct inode *, struct dentry *,
struct iattr *, int);
- int (*remove) (struct inode *, const struct qstr *);
- void (*unlink_setup) (struct rpc_message *, struct inode *dir);
+ int (*remove) (struct inode *, struct dentry *);
+ void (*unlink_setup) (struct rpc_message *, struct dentry *);
void (*unlink_rpc_prepare) (struct rpc_task *, struct nfs_unlinkdata *);
int (*unlink_done) (struct rpc_task *, struct inode *);
- void (*rename_setup) (struct rpc_message *msg, struct inode *dir);
+ void (*rename_setup) (struct rpc_message *msg,
+ struct dentry *old_dentry,
+ struct dentry *new_dentry);
void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *);
int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir);
int (*link) (struct inode *, struct inode *, const struct qstr *);
@@ -1633,7 +1635,6 @@ struct nfs_rpc_ops {
struct iattr *iattr,
int *);
int (*have_delegation)(struct inode *, fmode_t);
- int (*return_delegation)(struct inode *);
struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);
struct nfs_client *(*init_client) (struct nfs_client *,
const struct nfs_client_initdata *);
diff --git a/include/linux/platform_data/atmel_mxt_ts.h b/include/linux/platform_data/atmel_mxt_ts.h
deleted file mode 100644
index 695035a8d7fb..000000000000
--- a/include/linux/platform_data/atmel_mxt_ts.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Atmel maXTouch Touchscreen driver
- *
- * Copyright (C) 2010 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H
-#define __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H
-
-#include <linux/types.h>
-
-enum mxt_suspend_mode {
- MXT_SUSPEND_DEEP_SLEEP = 0,
- MXT_SUSPEND_T9_CTRL = 1,
-};
-
-/* The platform data for the Atmel maXTouch touchscreen driver */
-struct mxt_platform_data {
- unsigned long irqflags;
- u8 t19_num_keys;
- const unsigned int *t19_keymap;
- enum mxt_suspend_mode suspend_mode;
-};
-
-#endif /* __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H */
diff --git a/include/linux/platform_data/clk-da8xx-cfgchip.h b/include/linux/platform_data/clk-da8xx-cfgchip.h
new file mode 100644
index 000000000000..de0f77d38669
--- /dev/null
+++ b/include/linux/platform_data/clk-da8xx-cfgchip.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * clk-da8xx-cfgchip - TI DaVinci DA8xx CFGCHIP clock driver
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_CLK_DA8XX_CFGCHIP_H__
+#define __LINUX_PLATFORM_DATA_CLK_DA8XX_CFGCHIP_H__
+
+#include <linux/regmap.h>
+
+/**
+ * da8xx_cfgchip_clk_platform_data
+ * @cfgchip: CFGCHIP syscon regmap
+ */
+struct da8xx_cfgchip_clk_platform_data {
+ struct regmap *cfgchip;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_CLK_DA8XX_CFGCHIP_H__ */
diff --git a/include/linux/platform_data/clk-davinci-pll.h b/include/linux/platform_data/clk-davinci-pll.h
new file mode 100644
index 000000000000..e55dab1d578b
--- /dev/null
+++ b/include/linux/platform_data/clk-davinci-pll.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLL clock driver for TI Davinci SoCs
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__
+#define __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__
+
+#include <linux/regmap.h>
+
+/**
+ * davinci_pll_platform_data
+ * @cfgchip: CFGCHIP syscon regmap
+ */
+struct davinci_pll_platform_data {
+ struct regmap *cfgchip;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__ */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f228c6033832..b3d697f3b573 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -300,7 +300,7 @@ struct util_est {
unsigned int enqueued;
unsigned int ewma;
#define UTIL_EST_WEIGHT_SHIFT 2
-};
+} __attribute__((__aligned__(sizeof(u64))));
/*
* The load_avg/util_avg accumulates an infinite geometric series
@@ -364,7 +364,7 @@ struct sched_avg {
unsigned long runnable_load_avg;
unsigned long util_avg;
struct util_est util_est;
-};
+} ____cacheline_aligned;
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
@@ -435,7 +435,7 @@ struct sched_entity {
* Put into separate cache line so it does not
* collide with read-mostly values above.
*/
- struct sched_avg avg ____cacheline_aligned_in_smp;
+ struct sched_avg avg;
#endif
};
diff --git a/arch/x86/purgatory/sha256.h b/include/linux/sha256.h
index 2867d9825a57..244fe01a65fb 100644
--- a/arch/x86/purgatory/sha256.h
+++ b/include/linux/sha256.h
@@ -13,9 +13,18 @@
#include <linux/types.h>
#include <crypto/sha.h>
+/*
+ * Stand-alone implementation of the SHA256 algorithm. It is designed to
+ * have as few dependencies as possible so it can be used in the
+ * kexec_file purgatory. In other cases you should use the implementation in
+ * crypto/.
+ *
+ * For details see lib/sha256.c
+ */
+
extern int sha256_init(struct sha256_state *sctx);
extern int sha256_update(struct sha256_state *sctx, const u8 *input,
- unsigned int length);
+ unsigned int length);
extern int sha256_final(struct sha256_state *sctx, u8 *hash);
#endif /* SHA256_H */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index ed761f751ecb..9b11b6a0978c 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -217,5 +217,12 @@ void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
const struct sockaddr *sap);
void rpc_cleanup_clids(void);
+
+static inline int rpc_reply_expected(struct rpc_task *task)
+{
+ return (task->tk_msg.rpc_proc != NULL) &&
+ (task->tk_msg.rpc_proc->p_decode != NULL);
+}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_CLNT_H */
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index d950223c64b1..2bd68177a442 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -253,6 +253,12 @@ xdr_stream_remaining(const struct xdr_stream *xdr)
return xdr->nwords << 2;
}
+ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr,
+ size_t size);
+ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
+ size_t maxlen, gfp_t gfp_flags);
+ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str,
+ size_t size);
ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
size_t maxlen, gfp_t gfp_flags);
/**
@@ -313,6 +319,31 @@ xdr_stream_encode_u64(struct xdr_stream *xdr, __u64 n)
}
/**
+ * xdr_stream_encode_opaque_inline - Encode opaque xdr data
+ * @xdr: pointer to xdr_stream
+ * @ptr: pointer to void pointer
+ * @len: size of object
+ *
+ * Return values:
+ * On success, returns length in bytes of XDR buffer consumed
+ * %-EMSGSIZE on XDR buffer overflow
+ */
+static inline ssize_t
+xdr_stream_encode_opaque_inline(struct xdr_stream *xdr, void **ptr, size_t len)
+{
+ size_t count = sizeof(__u32) + xdr_align_size(len);
+ __be32 *p = xdr_reserve_space(xdr, count);
+
+ if (unlikely(!p)) {
+ *ptr = NULL;
+ return -EMSGSIZE;
+ }
+ xdr_encode_opaque(p, NULL, len);
+ *ptr = ++p;
+ return count;
+}
+
+/**
* xdr_stream_encode_opaque_fixed - Encode fixed length opaque xdr data
* @xdr: pointer to xdr_stream
* @ptr: pointer to opaque data object
@@ -356,6 +387,31 @@ xdr_stream_encode_opaque(struct xdr_stream *xdr, const void *ptr, size_t len)
}
/**
+ * xdr_stream_encode_uint32_array - Encode variable length array of integers
+ * @xdr: pointer to xdr_stream
+ * @array: array of integers
+ * @array_size: number of elements in @array
+ *
+ * Return values:
+ * On success, returns length in bytes of XDR buffer consumed
+ * %-EMSGSIZE on XDR buffer overflow
+ */
+static inline ssize_t
+xdr_stream_encode_uint32_array(struct xdr_stream *xdr,
+ const __u32 *array, size_t array_size)
+{
+ ssize_t ret = (array_size+1) * sizeof(__u32);
+ __be32 *p = xdr_reserve_space(xdr, ret);
+
+ if (unlikely(!p))
+ return -EMSGSIZE;
+ *p++ = cpu_to_be32(array_size);
+ for (; array_size > 0; p++, array++, array_size--)
+ *p = cpu_to_be32p(array);
+ return ret;
+}
+
+/**
* xdr_stream_decode_u32 - Decode a 32-bit integer
* @xdr: pointer to xdr_stream
* @ptr: location to store integer
@@ -432,6 +488,44 @@ xdr_stream_decode_opaque_inline(struct xdr_stream *xdr, void **ptr, size_t maxle
}
return len;
}
+
+/**
+ * xdr_stream_decode_uint32_array - Decode variable length array of integers
+ * @xdr: pointer to xdr_stream
+ * @array: location to store the integer array or NULL
+ * @array_size: number of elements to store
+ *
+ * Return values:
+ * On success, returns number of elements stored in @array
+ * %-EBADMSG on XDR buffer overflow
+ * %-EMSGSIZE if the size of the array exceeds @array_size
+ */
+static inline ssize_t
+xdr_stream_decode_uint32_array(struct xdr_stream *xdr,
+ __u32 *array, size_t array_size)
+{
+ __be32 *p;
+ __u32 len;
+ ssize_t retval;
+
+ if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
+ return -EBADMSG;
+ p = xdr_inline_decode(xdr, len * sizeof(*p));
+ if (unlikely(!p))
+ return -EBADMSG;
+ if (array == NULL)
+ return len;
+ if (len <= array_size) {
+ if (len < array_size)
+ memset(array+len, 0, (array_size-len)*sizeof(*array));
+ array_size = len;
+ retval = len;
+ } else
+ retval = -EMSGSIZE;
+ for (; array_size > 0; p++, array++, array_size--)
+ *array = be32_to_cpup(p);
+ return retval;
+}
#endif /* __KERNEL__ */
#endif /* _SUNRPC_XDR_H_ */
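
A short usage sketch for the two uint32-array helpers defined above. The xdr_stream is assumed to have been set up by the caller (e.g. with xdr_init_encode()/xdr_init_decode()); the bitmap contents and function names are arbitrary:

#include <linux/kernel.h>
#include <linux/sunrpc/xdr.h>

static int example_encode_bitmap(struct xdr_stream *xdr)
{
	__u32 bitmap[2] = { 0x3, 0x0 };	/* arbitrary example values */
	ssize_t ret;

	ret = xdr_stream_encode_uint32_array(xdr, bitmap, ARRAY_SIZE(bitmap));
	return ret < 0 ? ret : 0;	/* -EMSGSIZE if the buffer is full */
}

static int example_decode_bitmap(struct xdr_stream *xdr)
{
	__u32 bitmap[2];
	ssize_t ret;

	ret = xdr_stream_decode_uint32_array(xdr, bitmap, ARRAY_SIZE(bitmap));
	if (ret < 0)
		return ret;	/* -EBADMSG on truncation, -EMSGSIZE if too large */
	return 0;
}
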
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 7fad83881ce1..5fea0fb420df 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -197,7 +197,7 @@ struct rpc_xprt {
struct list_head free; /* free slots */
unsigned int max_reqs; /* max number of slots */
unsigned int min_reqs; /* min number of slots */
- atomic_t num_reqs; /* total slots */
+ unsigned int num_reqs; /* total slots */
unsigned long state; /* transport state */
unsigned char resvport : 1; /* use a reserved port */
atomic_t swapper; /* we're swapping over this
@@ -373,6 +373,7 @@ void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action);
void xprt_write_space(struct rpc_xprt *xprt);
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result);
struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid);
+void xprt_update_rtt(struct rpc_task *task);
void xprt_complete_rqst(struct rpc_task *task, int copied);
void xprt_pin_rqst(struct rpc_rqst *req);
void xprt_unpin_rqst(struct rpc_rqst *req);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index b961184f597a..70fcda1a9049 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -81,6 +81,17 @@ union bpf_attr;
#include <linux/key.h>
#include <trace/syscall.h>
+#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+/*
+ * It may be useful for an architecture to override the definitions of the
+ * SYSCALL_DEFINE0() and __SYSCALL_DEFINEx() macros, in particular to use a
+ * different calling convention for syscalls. To allow for that, the prototypes
+ * for the sys_*() functions below will *not* be included if
+ * CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled.
+ */
+#include <asm/syscall_wrapper.h>
+#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+
/*
* __MAP - apply a macro to syscall arguments
* __MAP(n, m, t1, a1, t2, a2, ..., tn, an) will expand to
@@ -91,7 +102,7 @@ union bpf_attr;
* for SYSCALL_DEFINE<n>/COMPAT_SYSCALL_DEFINE<n>
*/
#define __MAP0(m,...)
-#define __MAP1(m,t,a) m(t,a)
+#define __MAP1(m,t,a,...) m(t,a)
#define __MAP2(m,t,a,...) m(t,a), __MAP1(m,__VA_ARGS__)
#define __MAP3(m,t,a,...) m(t,a), __MAP2(m,__VA_ARGS__)
#define __MAP4(m,t,a,...) m(t,a), __MAP3(m,__VA_ARGS__)
@@ -189,11 +200,13 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
}
#endif
+#ifndef SYSCALL_DEFINE0
#define SYSCALL_DEFINE0(sname) \
SYSCALL_METADATA(_##sname, 0); \
asmlinkage long sys_##sname(void); \
ALLOW_ERROR_INJECTION(sys_##sname, ERRNO); \
asmlinkage long sys_##sname(void)
+#endif /* SYSCALL_DEFINE0 */
#define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
#define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__)
@@ -209,20 +222,28 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
__SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
#define __PROTECT(...) asmlinkage_protect(__VA_ARGS__)
+
+/*
+ * The asmlinkage stub is aliased to a function named __se_sys_*() which
+ * sign-extends 32-bit ints to longs whenever needed. The actual work is
+ * done within __do_sys_*().
+ */
+#ifndef __SYSCALL_DEFINEx
#define __SYSCALL_DEFINEx(x, name, ...) \
asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
- __attribute__((alias(__stringify(SyS##name)))); \
+ __attribute__((alias(__stringify(__se_sys##name)))); \
ALLOW_ERROR_INJECTION(sys##name, ERRNO); \
- static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
- asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
- asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
+ asmlinkage long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ asmlinkage long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
{ \
- long ret = SYSC##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
+ long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__));\
__MAP(x,__SC_TEST,__VA_ARGS__); \
__PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
return ret; \
} \
- static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+#endif /* __SYSCALL_DEFINEx */
/*
* Called before coming back to user-mode. Returning to user-mode with an
@@ -252,7 +273,12 @@ static inline void addr_limit_user_check(void)
* Please note that these prototypes here are only provided for information
* purposes, for static analysis, and for linking from the syscall table.
* These functions should not be called elsewhere from kernel code.
+ *
+ * As the syscall calling convention may be different from the default
+ * for architectures overriding the syscall calling convention, do not
+ * include the prototypes if CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled.
*/
+#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
asmlinkage long sys_io_setup(unsigned nr_reqs, aio_context_t __user *ctx);
asmlinkage long sys_io_destroy(aio_context_t ctx);
asmlinkage long sys_io_submit(aio_context_t, long,
@@ -1076,6 +1102,8 @@ asmlinkage long sys_old_mmap(struct mmap_arg_struct __user *arg);
*/
asmlinkage long sys_ni_syscall(void);
+#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+
/*
* Kernel code should not call syscalls (i.e., sys_xyzyyz()) directly.
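
Analogous to the compat case above, a hypothetical SYSCALL_DEFINE1(example, int, fd) now roughly expands to the following — simplified by hand, with the metadata, error-injection and __SC_TEST/__PROTECT pieces elided; none of these symbols exist in the tree:

asmlinkage long sys_example(int fd)
	__attribute__((alias("__se_sys_example")));

static inline long __do_sys_example(int fd);

/* Receives register-width arguments; narrow types are cast back here. */
asmlinkage long __se_sys_example(long fd)
{
	return __do_sys_example((int)fd);
}

/* The author-supplied body lands here. */
static inline long __do_sys_example(int fd)
{
	return fd < 0 ? -EINVAL : 0;
}
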
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 8c5302374eaa..7834be668d80 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -148,6 +148,7 @@ struct thermal_cooling_device {
struct device device;
struct device_node *np;
void *devdata;
+ void *stats;
const struct thermal_cooling_device_ops *ops;
bool updated; /* true if the cooling device does not need update */
struct mutex lock; /* protect thermal_instances list */
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 7f8c9a127f5a..55388ab45fd4 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -115,27 +115,46 @@ enum tick_dep_bits {
extern bool tick_nohz_enabled;
extern bool tick_nohz_tick_stopped(void);
extern bool tick_nohz_tick_stopped_cpu(int cpu);
+extern void tick_nohz_idle_stop_tick(void);
+extern void tick_nohz_idle_retain_tick(void);
+extern void tick_nohz_idle_restart_tick(void);
extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
-extern ktime_t tick_nohz_get_sleep_length(void);
+extern bool tick_nohz_idle_got_tick(void);
+extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
extern unsigned long tick_nohz_get_idle_calls(void);
extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
+
+static inline void tick_nohz_idle_stop_tick_protected(void)
+{
+ local_irq_disable();
+ tick_nohz_idle_stop_tick();
+ local_irq_enable();
+}
+
#else /* !CONFIG_NO_HZ_COMMON */
#define tick_nohz_enabled (0)
static inline int tick_nohz_tick_stopped(void) { return 0; }
static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; }
+static inline void tick_nohz_idle_stop_tick(void) { }
+static inline void tick_nohz_idle_retain_tick(void) { }
+static inline void tick_nohz_idle_restart_tick(void) { }
static inline void tick_nohz_idle_enter(void) { }
static inline void tick_nohz_idle_exit(void) { }
+static inline bool tick_nohz_idle_got_tick(void) { return false; }
-static inline ktime_t tick_nohz_get_sleep_length(void)
+static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
- return NSEC_PER_SEC / HZ;
+ *delta_next = TICK_NSEC;
+ return *delta_next;
}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
+
+static inline void tick_nohz_idle_stop_tick_protected(void) { }
#endif /* !CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 82c219dfd3bb..9737fbec7019 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -31,6 +31,7 @@ struct timespec64 get_monotonic_coarse64(void);
extern void getrawmonotonic64(struct timespec64 *ts);
extern void ktime_get_ts64(struct timespec64 *ts);
extern time64_t ktime_get_seconds(void);
+extern time64_t __ktime_get_real_seconds(void);
extern time64_t ktime_get_real_seconds(void);
extern void ktime_get_active_ts64(struct timespec64 *ts);
diff --git a/include/net/slhc_vj.h b/include/net/slhc_vj.h
index 8716d5942b65..8fcf8908a694 100644
--- a/include/net/slhc_vj.h
+++ b/include/net/slhc_vj.h
@@ -127,6 +127,7 @@ typedef __u32 int32;
*/
struct cstate {
byte_t cs_this; /* connection id number (xmit) */
+ bool initialized; /* true if initialized */
struct cstate *next; /* next in ring (xmit) */
struct iphdr cs_ip; /* ip/tcp hdr from most recent packet */
struct tcphdr cs_tcp;
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index 63815f66b274..f0820554caa9 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -49,6 +49,7 @@ enum afs_fs_operation {
afs_FS_ExtendLock = 157, /* AFS Extend a file lock */
afs_FS_ReleaseLock = 158, /* AFS Release a file lock */
afs_FS_Lookup = 161, /* AFS lookup file in directory */
+ afs_FS_InlineBulkStatus = 65536, /* AFS Fetch multiple file statuses with errors */
afs_FS_FetchData64 = 65537, /* AFS Fetch file data */
afs_FS_StoreData64 = 65538, /* AFS Store file data */
afs_FS_GiveUpAllCallBacks = 65539, /* AFS Give up all our callbacks on a server */
@@ -62,6 +63,27 @@ enum afs_vl_operation {
afs_VL_GetCapabilities = 65537, /* AFS Get VL server capabilities */
};
+enum afs_edit_dir_op {
+ afs_edit_dir_create,
+ afs_edit_dir_create_error,
+ afs_edit_dir_create_inval,
+ afs_edit_dir_create_nospc,
+ afs_edit_dir_delete,
+ afs_edit_dir_delete_error,
+ afs_edit_dir_delete_inval,
+ afs_edit_dir_delete_noent,
+};
+
+enum afs_edit_dir_reason {
+ afs_edit_dir_for_create,
+ afs_edit_dir_for_link,
+ afs_edit_dir_for_mkdir,
+ afs_edit_dir_for_rename,
+ afs_edit_dir_for_rmdir,
+ afs_edit_dir_for_symlink,
+ afs_edit_dir_for_unlink,
+};
+
#endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */
/*
@@ -93,6 +115,7 @@ enum afs_vl_operation {
EM(afs_FS_ExtendLock, "FS.ExtendLock") \
EM(afs_FS_ReleaseLock, "FS.ReleaseLock") \
EM(afs_FS_Lookup, "FS.Lookup") \
+ EM(afs_FS_InlineBulkStatus, "FS.InlineBulkStatus") \
EM(afs_FS_FetchData64, "FS.FetchData64") \
EM(afs_FS_StoreData64, "FS.StoreData64") \
EM(afs_FS_GiveUpAllCallBacks, "FS.GiveUpAllCallBacks") \
@@ -104,6 +127,25 @@ enum afs_vl_operation {
EM(afs_YFSVL_GetEndpoints, "YFSVL.GetEndpoints") \
E_(afs_VL_GetCapabilities, "VL.GetCapabilities")
+#define afs_edit_dir_ops \
+ EM(afs_edit_dir_create, "create") \
+ EM(afs_edit_dir_create_error, "c_fail") \
+ EM(afs_edit_dir_create_inval, "c_invl") \
+ EM(afs_edit_dir_create_nospc, "c_nspc") \
+ EM(afs_edit_dir_delete, "delete") \
+ EM(afs_edit_dir_delete_error, "d_err ") \
+ EM(afs_edit_dir_delete_inval, "d_invl") \
+ E_(afs_edit_dir_delete_noent, "d_nent")
+
+#define afs_edit_dir_reasons \
+ EM(afs_edit_dir_for_create, "Create") \
+ EM(afs_edit_dir_for_link, "Link ") \
+ EM(afs_edit_dir_for_mkdir, "MkDir ") \
+ EM(afs_edit_dir_for_rename, "Rename") \
+ EM(afs_edit_dir_for_rmdir, "RmDir ") \
+ EM(afs_edit_dir_for_symlink, "Symlnk") \
+ E_(afs_edit_dir_for_unlink, "Unlink")
+
/*
* Export enum symbols via userspace.
@@ -116,6 +158,8 @@ enum afs_vl_operation {
afs_call_traces;
afs_fs_operations;
afs_vl_operations;
+afs_edit_dir_ops;
+afs_edit_dir_reasons;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -462,6 +506,75 @@ TRACE_EVENT(afs_call_state,
__entry->ret, __entry->abort)
);
+TRACE_EVENT(afs_edit_dir,
+ TP_PROTO(struct afs_vnode *dvnode,
+ enum afs_edit_dir_reason why,
+ enum afs_edit_dir_op op,
+ unsigned int block,
+ unsigned int slot,
+ unsigned int f_vnode,
+ unsigned int f_unique,
+ const char *name),
+
+ TP_ARGS(dvnode, why, op, block, slot, f_vnode, f_unique, name),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, vnode )
+ __field(unsigned int, unique )
+ __field(enum afs_edit_dir_reason, why )
+ __field(enum afs_edit_dir_op, op )
+ __field(unsigned int, block )
+ __field(unsigned short, slot )
+ __field(unsigned int, f_vnode )
+ __field(unsigned int, f_unique )
+ __array(char, name, 18 )
+ ),
+
+ TP_fast_assign(
+ int __len = strlen(name);
+ __len = min(__len, 17);
+ __entry->vnode = dvnode->fid.vnode;
+ __entry->unique = dvnode->fid.unique;
+ __entry->why = why;
+ __entry->op = op;
+ __entry->block = block;
+ __entry->slot = slot;
+ __entry->f_vnode = f_vnode;
+ __entry->f_unique = f_unique;
+ memcpy(__entry->name, name, __len);
+ __entry->name[__len] = 0;
+ ),
+
+ TP_printk("d=%x:%x %s %s %u[%u] f=%x:%x %s",
+ __entry->vnode, __entry->unique,
+ __print_symbolic(__entry->why, afs_edit_dir_reasons),
+ __print_symbolic(__entry->op, afs_edit_dir_ops),
+ __entry->block, __entry->slot,
+ __entry->f_vnode, __entry->f_unique,
+ __entry->name)
+ );
+
+TRACE_EVENT(afs_protocol_error,
+ TP_PROTO(struct afs_call *call, int error, const void *where),
+
+ TP_ARGS(call, error, where),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call )
+ __field(int, error )
+ __field(const void *, where )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call ? call->debug_id : 0;
+ __entry->error = error;
+ __entry->where = where;
+ ),
+
+ TP_printk("c=%08x r=%d sp=%pSR",
+ __entry->call, __entry->error, __entry->where)
+ );
+
#endif /* _TRACE_AFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 922cb8968fb2..335d87242439 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -50,9 +50,9 @@ DEFINE_EVENT(rpc_task_status, rpc_bind_status,
);
TRACE_EVENT(rpc_connect_status,
- TP_PROTO(struct rpc_task *task, int status),
+ TP_PROTO(const struct rpc_task *task),
- TP_ARGS(task, status),
+ TP_ARGS(task),
TP_STRUCT__entry(
__field(unsigned int, task_id)
@@ -63,7 +63,7 @@ TRACE_EVENT(rpc_connect_status,
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
- __entry->status = status;
+ __entry->status = task->tk_status;
),
TP_printk("task:%u@%u status=%d",
@@ -103,9 +103,9 @@ TRACE_EVENT(rpc_request,
DECLARE_EVENT_CLASS(rpc_task_running,
- TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
+ TP_PROTO(const struct rpc_task *task, const void *action),
- TP_ARGS(clnt, task, action),
+ TP_ARGS(task, action),
TP_STRUCT__entry(
__field(unsigned int, task_id)
@@ -117,7 +117,8 @@ DECLARE_EVENT_CLASS(rpc_task_running,
),
TP_fast_assign(
- __entry->client_id = clnt ? clnt->cl_clid : -1;
+ __entry->client_id = task->tk_client ?
+ task->tk_client->cl_clid : -1;
__entry->task_id = task->tk_pid;
__entry->action = action;
__entry->runstate = task->tk_runstate;
@@ -136,33 +137,33 @@ DECLARE_EVENT_CLASS(rpc_task_running,
DEFINE_EVENT(rpc_task_running, rpc_task_begin,
- TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
+ TP_PROTO(const struct rpc_task *task, const void *action),
- TP_ARGS(clnt, task, action)
+ TP_ARGS(task, action)
);
DEFINE_EVENT(rpc_task_running, rpc_task_run_action,
- TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
+ TP_PROTO(const struct rpc_task *task, const void *action),
- TP_ARGS(clnt, task, action)
+ TP_ARGS(task, action)
);
DEFINE_EVENT(rpc_task_running, rpc_task_complete,
- TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
+ TP_PROTO(const struct rpc_task *task, const void *action),
- TP_ARGS(clnt, task, action)
+ TP_ARGS(task, action)
);
DECLARE_EVENT_CLASS(rpc_task_queued,
- TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
+ TP_PROTO(const struct rpc_task *task, const struct rpc_wait_queue *q),
- TP_ARGS(clnt, task, q),
+ TP_ARGS(task, q),
TP_STRUCT__entry(
__field(unsigned int, task_id)
@@ -175,7 +176,8 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
),
TP_fast_assign(
- __entry->client_id = clnt ? clnt->cl_clid : -1;
+ __entry->client_id = task->tk_client ?
+ task->tk_client->cl_clid : -1;
__entry->task_id = task->tk_pid;
__entry->timeout = task->tk_timeout;
__entry->runstate = task->tk_runstate;
@@ -196,18 +198,63 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
DEFINE_EVENT(rpc_task_queued, rpc_task_sleep,
- TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
+ TP_PROTO(const struct rpc_task *task, const struct rpc_wait_queue *q),
- TP_ARGS(clnt, task, q)
+ TP_ARGS(task, q)
);
DEFINE_EVENT(rpc_task_queued, rpc_task_wakeup,
- TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
+ TP_PROTO(const struct rpc_task *task, const struct rpc_wait_queue *q),
+
+ TP_ARGS(task, q)
+
+);
+
+TRACE_EVENT(rpc_stats_latency,
+
+ TP_PROTO(
+ const struct rpc_task *task,
+ ktime_t backlog,
+ ktime_t rtt,
+ ktime_t execute
+ ),
- TP_ARGS(clnt, task, q)
+ TP_ARGS(task, backlog, rtt, execute),
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(int, version)
+ __string(progname, task->tk_client->cl_program->name)
+ __string(procname, rpc_proc_name(task))
+ __field(unsigned long, backlog)
+ __field(unsigned long, rtt)
+ __field(unsigned long, execute)
+ __string(addr,
+ task->tk_xprt->address_strings[RPC_DISPLAY_ADDR])
+ __string(port,
+ task->tk_xprt->address_strings[RPC_DISPLAY_PORT])
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
+ __entry->version = task->tk_client->cl_vers;
+ __assign_str(progname, task->tk_client->cl_program->name)
+ __assign_str(procname, rpc_proc_name(task))
+ __entry->backlog = ktime_to_us(backlog);
+ __entry->rtt = ktime_to_us(rtt);
+ __entry->execute = ktime_to_us(execute);
+ __assign_str(addr,
+ task->tk_xprt->address_strings[RPC_DISPLAY_ADDR]);
+ __assign_str(port,
+ task->tk_xprt->address_strings[RPC_DISPLAY_PORT]);
+ ),
+
+ TP_printk("peer=[%s]:%s xid=0x%08x %sv%d %s backlog=%lu rtt=%lu execute=%lu",
+ __get_str(addr), __get_str(port), __entry->xid,
+ __get_str(progname), __entry->version, __get_str(procname),
+ __entry->backlog, __entry->rtt, __entry->execute)
);
/*
@@ -406,6 +453,27 @@ DEFINE_EVENT(rpc_xprt_event, xprt_complete_rqst,
TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
TP_ARGS(xprt, xid, status));
+TRACE_EVENT(xprt_ping,
+ TP_PROTO(const struct rpc_xprt *xprt, int status),
+
+ TP_ARGS(xprt, status),
+
+ TP_STRUCT__entry(
+ __field(int, status)
+ __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR])
+ __string(port, xprt->address_strings[RPC_DISPLAY_PORT])
+ ),
+
+ TP_fast_assign(
+ __entry->status = status;
+ __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
+ __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
+ ),
+
+ TP_printk("peer=[%s]:%s status=%d",
+ __get_str(addr), __get_str(port), __entry->status)
+);
+
TRACE_EVENT(xs_tcp_data_ready,
TP_PROTO(struct rpc_xprt *xprt, int err, unsigned int total),
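The reworked task events above need only the rpc_task (the client id is now derived from task->tk_client), and the two new events take just what the caller measured. A minimal caller sketch, assuming <trace/events/sunrpc.h> is already included in a sunrpc .c file; the function names are invented for illustration:

static void example_report_latency(const struct rpc_task *task,
				   ktime_t backlog, ktime_t rtt,
				   ktime_t execute)
{
	/* Fires the rpc_stats_latency event declared above. */
	trace_rpc_stats_latency(task, backlog, rtt, execute);
}

static void example_report_ping(const struct rpc_xprt *xprt, int status)
{
	/* Fires the xprt_ping event declared above. */
	trace_xprt_ping(xprt, status);
}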
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index 544208fd3db1..558b902f18d4 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -211,7 +211,8 @@ typedef struct siginfo {
#define __FPE_INVASC 12 /* invalid ASCII digit */
#define __FPE_INVDEC 13 /* invalid decimal digit */
#define FPE_FLTUNK 14 /* undiagnosed floating-point exception */
-#define NSIGFPE 14
+#define FPE_CONDTRAP 15 /* trap on condition */
+#define NSIGFPE 15
/*
* SIGSEGV si_codes
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index 4e8b8304b793..40297a3181ed 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -53,7 +53,9 @@ struct virtio_balloon_config {
#define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */
#define VIRTIO_BALLOON_S_AVAIL 6 /* Available memory as in /proc */
#define VIRTIO_BALLOON_S_CACHES 7 /* Disk caches */
-#define VIRTIO_BALLOON_S_NR 8
+#define VIRTIO_BALLOON_S_HTLB_PGALLOC 8 /* Hugetlb page allocations */
+#define VIRTIO_BALLOON_S_HTLB_PGFAIL 9 /* Hugetlb page allocation failures */
+#define VIRTIO_BALLOON_S_NR 10
/*
* Memory statistics structure.
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
index 9b0eb574f0d1..6d1384abfbdf 100644
--- a/include/xen/interface/features.h
+++ b/include/xen/interface/features.h
@@ -42,6 +42,9 @@
/* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
#define XENFEAT_mmu_pt_update_preserve_ad 5
+/* x86: Does this Xen host support the MMU_{CLEAR,COPY}_PAGE hypercall? */
+#define XENFEAT_highmem_assist 6
+
/*
* If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel
* available pte bits.
@@ -60,6 +63,26 @@
/* operation as Dom0 is supported */
#define XENFEAT_dom0 11
+/* Xen also maps grant references at pfn = mfn.
+ * This feature flag is deprecated and should not be used.
+#define XENFEAT_grant_map_identity 12
+ */
+
+/* Guest can use XENMEMF_vnode to specify virtual node for memory op. */
+#define XENFEAT_memory_op_vnode_supported 13
+
+/* arm: Hypervisor supports ARM SMC calling convention. */
+#define XENFEAT_ARM_SMCCC_supported 14
+
+/*
+ * x86/PVH: If set, ACPI RSDP can be placed at any address. Otherwise RSDP
+ * must be located in lower 1MB, as required by ACPI Specification for IA-PC
+ * systems.
+ * This feature flag is only consulted if XEN_ELFNOTE_GUEST_OS contains
+ * the "linux" string.
+ */
+#define XENFEAT_linux_rsdp_unrestricted 15
+
#define XENFEAT_NR_SUBMAPS 1
#endif /* __XEN_PUBLIC_FEATURES_H__ */
diff --git a/init/Kconfig b/init/Kconfig
index 9d167a5b4a94..f013afc74b11 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1923,3 +1923,13 @@ source "kernel/Kconfig.locks"
config ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
bool
+
+# It may be useful for an architecture to override the definitions of the
+# SYSCALL_DEFINE() and __SYSCALL_DEFINEx() macros in <linux/syscalls.h>
+# and the COMPAT_ variants in <linux/compat.h>, in particular to use a
+# different calling convention for syscalls. They can also override the
+# macros for not-implemented syscalls in kernel/sys_ni.c and
+# kernel/time/posix-stubs.c. All these overrides need to be available in
+# <asm/syscall_wrapper.h>.
+config ARCH_HAS_SYSCALL_WRAPPER
+ def_bool n
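The help text above only names the hooks. A hedged sketch of what an architecture-provided <asm/syscall_wrapper.h> could contain follows; the "__arch_sys_" prefix and the exact macro bodies are invented for illustration, and the generic fallbacks in kernel/sys_ni.c and kernel/time/posix-stubs.c later in this patch step aside when these are defined:

/* Example stub for a syscall that is compiled out on this architecture. */
#define COND_SYSCALL(name)						\
	asmlinkage long __weak __arch_sys_##name(void)			\
	{								\
		return sys_ni_syscall();				\
	}

/* Example alias for a POSIX-timer syscall that is not built in. */
#define SYS_NI(name)							\
	SYSCALL_ALIAS(__arch_sys_##name, sys_ni_posix_timers)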
diff --git a/ipc/shm.c b/ipc/shm.c
index 5639345dbec9..3cf48988d68c 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -225,6 +225,12 @@ static int __shm_open(struct vm_area_struct *vma)
if (IS_ERR(shp))
return PTR_ERR(shp);
+ if (shp->shm_file != sfd->file) {
+ /* ID was reused */
+ shm_unlock(shp);
+ return -EINVAL;
+ }
+
shp->shm_atim = ktime_get_real_seconds();
ipc_update_pid(&shp->shm_lprid, task_tgid(current));
shp->shm_nattch++;
@@ -455,8 +461,9 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
int ret;
/*
- * In case of remap_file_pages() emulation, the file can represent
- * removed IPC ID: propogate shm_lock() error to caller.
+ * In case of remap_file_pages() emulation, the file can represent an
+ * IPC ID that was removed, and possibly even reused by another shm
+ * segment already. Propagate this case as an error to caller.
*/
ret = __shm_open(vma);
if (ret)
@@ -480,6 +487,7 @@ static int shm_release(struct inode *ino, struct file *file)
struct shm_file_data *sfd = shm_file_data(file);
put_ipc_ns(sfd->ns);
+ fput(sfd->file);
shm_file_data(file) = NULL;
kfree(sfd);
return 0;
@@ -1445,7 +1453,16 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
file->f_mapping = shp->shm_file->f_mapping;
sfd->id = shp->shm_perm.id;
sfd->ns = get_ipc_ns(ns);
- sfd->file = shp->shm_file;
+ /*
+ * We need to take a reference to the real shm file to prevent the
+ * pointer from becoming stale in cases where the lifetime of the outer
+ * file extends beyond that of the shm segment. It's not usually
+ * possible, but it can happen during remap_file_pages() emulation as
+ * that unmaps the memory, then does ->mmap() via file reference only.
+ * We'll deny the ->mmap() if the shm segment was since removed, but to
+ * detect shm ID reuse we need to compare the file pointers.
+ */
+ sfd->file = get_file(shp->shm_file);
sfd->vm_ops = NULL;
err = security_mmap_file(file, prot, flags);
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index a93590cdd9e1..f7674d676889 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -454,6 +454,7 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_NUMBER(PG_lru);
VMCOREINFO_NUMBER(PG_private);
VMCOREINFO_NUMBER(PG_swapcache);
+ VMCOREINFO_NUMBER(PG_swapbacked);
VMCOREINFO_NUMBER(PG_slab);
#ifdef CONFIG_MEMORY_FAILURE
VMCOREINFO_NUMBER(PG_hwpoison);
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
index 90ff129c88a2..62c301ad0773 100644
--- a/kernel/debug/kdb/kdb_bp.c
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -242,11 +242,11 @@ static void kdb_printbp(kdb_bp_t *bp, int i)
kdb_symbol_print(bp->bp_addr, NULL, KDB_SP_DEFAULT);
if (bp->bp_enabled)
- kdb_printf("\n is enabled");
+ kdb_printf("\n is enabled ");
else
kdb_printf("\n is disabled");
- kdb_printf("\taddr at %016lx, hardtype=%d installed=%d\n",
+ kdb_printf(" addr at %016lx, hardtype=%d installed=%d\n",
bp->bp_addr, bp->bp_type, bp->bp_installed);
kdb_printf("\n");
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index dbb0781a0533..e405677ee08d 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1150,6 +1150,16 @@ void kdb_set_current_task(struct task_struct *p)
kdb_current_regs = NULL;
}
+static void drop_newline(char *buf)
+{
+ size_t len = strlen(buf);
+
+ if (len == 0)
+ return;
+ if (*(buf + len - 1) == '\n')
+ *(buf + len - 1) = '\0';
+}
+
/*
* kdb_local - The main code for kdb. This routine is invoked on a
* specific processor, it is not global. The main kdb() routine
@@ -1327,6 +1337,7 @@ do_full_getstr:
cmdptr = cmd_head;
diag = kdb_parse(cmdbuf);
if (diag == KDB_NOTFOUND) {
+ drop_newline(cmdbuf);
kdb_printf("Unknown kdb command: '%s'\n", cmdbuf);
diag = 0;
}
@@ -1566,6 +1577,7 @@ static int kdb_md(int argc, const char **argv)
int symbolic = 0;
int valid = 0;
int phys = 0;
+ int raw = 0;
kdbgetintenv("MDCOUNT", &mdcount);
kdbgetintenv("RADIX", &radix);
@@ -1575,9 +1587,10 @@ static int kdb_md(int argc, const char **argv)
repeat = mdcount * 16 / bytesperword;
if (strcmp(argv[0], "mdr") == 0) {
- if (argc != 2)
+ if (argc == 2 || (argc == 0 && last_addr != 0))
+ valid = raw = 1;
+ else
return KDB_ARGCOUNT;
- valid = 1;
} else if (isdigit(argv[0][2])) {
bytesperword = (int)(argv[0][2] - '0');
if (bytesperword == 0) {
@@ -1613,7 +1626,10 @@ static int kdb_md(int argc, const char **argv)
radix = last_radix;
bytesperword = last_bytesperword;
repeat = last_repeat;
- mdcount = ((repeat * bytesperword) + 15) / 16;
+ if (raw)
+ mdcount = repeat;
+ else
+ mdcount = ((repeat * bytesperword) + 15) / 16;
}
if (argc) {
@@ -1630,7 +1646,10 @@ static int kdb_md(int argc, const char **argv)
diag = kdbgetularg(argv[nextarg], &val);
if (!diag) {
mdcount = (int) val;
- repeat = mdcount * 16 / bytesperword;
+ if (raw)
+ repeat = mdcount;
+ else
+ repeat = mdcount * 16 / bytesperword;
}
}
if (argc >= nextarg+1) {
@@ -1640,8 +1659,15 @@ static int kdb_md(int argc, const char **argv)
}
}
- if (strcmp(argv[0], "mdr") == 0)
- return kdb_mdr(addr, mdcount);
+ if (strcmp(argv[0], "mdr") == 0) {
+ int ret;
+ last_addr = addr;
+ ret = kdb_mdr(addr, mdcount);
+ last_addr += mdcount;
+ last_repeat = mdcount;
+ last_bytesperword = bytesperword; /* to make REPEAT happy */
+ return ret;
+ }
switch (radix) {
case 10:
@@ -2473,41 +2499,6 @@ static int kdb_kill(int argc, const char **argv)
return 0;
}
-struct kdb_tm {
- int tm_sec; /* seconds */
- int tm_min; /* minutes */
- int tm_hour; /* hours */
- int tm_mday; /* day of the month */
- int tm_mon; /* month */
- int tm_year; /* year */
-};
-
-static void kdb_gmtime(struct timespec *tv, struct kdb_tm *tm)
-{
- /* This will work from 1970-2099, 2100 is not a leap year */
- static int mon_day[] = { 31, 29, 31, 30, 31, 30, 31,
- 31, 30, 31, 30, 31 };
- memset(tm, 0, sizeof(*tm));
- tm->tm_sec = tv->tv_sec % (24 * 60 * 60);
- tm->tm_mday = tv->tv_sec / (24 * 60 * 60) +
- (2 * 365 + 1); /* shift base from 1970 to 1968 */
- tm->tm_min = tm->tm_sec / 60 % 60;
- tm->tm_hour = tm->tm_sec / 60 / 60;
- tm->tm_sec = tm->tm_sec % 60;
- tm->tm_year = 68 + 4*(tm->tm_mday / (4*365+1));
- tm->tm_mday %= (4*365+1);
- mon_day[1] = 29;
- while (tm->tm_mday >= mon_day[tm->tm_mon]) {
- tm->tm_mday -= mon_day[tm->tm_mon];
- if (++tm->tm_mon == 12) {
- tm->tm_mon = 0;
- ++tm->tm_year;
- mon_day[1] = 28;
- }
- }
- ++tm->tm_mday;
-}
-
/*
* Most of this code has been lifted from kernel/timer.c::sys_sysinfo().
* I cannot call that code directly from kdb, it has an unconditional
@@ -2515,10 +2506,10 @@ static void kdb_gmtime(struct timespec *tv, struct kdb_tm *tm)
*/
static void kdb_sysinfo(struct sysinfo *val)
{
- struct timespec uptime;
- ktime_get_ts(&uptime);
+ u64 uptime = ktime_get_mono_fast_ns();
+
memset(val, 0, sizeof(*val));
- val->uptime = uptime.tv_sec;
+ val->uptime = div_u64(uptime, NSEC_PER_SEC);
val->loads[0] = avenrun[0];
val->loads[1] = avenrun[1];
val->loads[2] = avenrun[2];
@@ -2533,8 +2524,8 @@ static void kdb_sysinfo(struct sysinfo *val)
*/
static int kdb_summary(int argc, const char **argv)
{
- struct timespec now;
- struct kdb_tm tm;
+ time64_t now;
+ struct tm tm;
struct sysinfo val;
if (argc)
@@ -2548,9 +2539,9 @@ static int kdb_summary(int argc, const char **argv)
kdb_printf("domainname %s\n", init_uts_ns.name.domainname);
kdb_printf("ccversion %s\n", __stringify(CCVERSION));
- now = __current_kernel_time();
- kdb_gmtime(&now, &tm);
- kdb_printf("date %04d-%02d-%02d %02d:%02d:%02d "
+ now = __ktime_get_real_seconds();
+ time64_to_tm(now, 0, &tm);
+ kdb_printf("date %04ld-%02d-%02d %02d:%02d:%02d "
"tz_minuteswest %d\n",
1900+tm.tm_year, tm.tm_mon+1, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec,
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
index d35cc2d3a4cc..990b3cc526c8 100644
--- a/kernel/debug/kdb/kdb_support.c
+++ b/kernel/debug/kdb/kdb_support.c
@@ -129,13 +129,13 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
}
if (i >= ARRAY_SIZE(kdb_name_table)) {
debug_kfree(kdb_name_table[0]);
- memcpy(kdb_name_table, kdb_name_table+1,
+ memmove(kdb_name_table, kdb_name_table+1,
sizeof(kdb_name_table[0]) *
(ARRAY_SIZE(kdb_name_table)-1));
} else {
debug_kfree(knt1);
knt1 = kdb_name_table[i];
- memcpy(kdb_name_table+i, kdb_name_table+i+1,
+ memmove(kdb_name_table+i, kdb_name_table+i+1,
sizeof(kdb_name_table[0]) *
(ARRAY_SIZE(kdb_name_table)-i-1));
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index fc1c330c6bd6..2d5fe26551f8 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4447,6 +4447,9 @@ static void _free_event(struct perf_event *event)
if (event->ctx)
put_ctx(event->ctx);
+ if (event->hw.target)
+ put_task_struct(event->hw.target);
+
exclusive_event_destroy(event);
module_put(event->pmu->module);
@@ -8397,6 +8400,10 @@ static int perf_kprobe_event_init(struct perf_event *event)
if (event->attr.type != perf_kprobe.type)
return -ENOENT;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
/*
* no branch sampling for probe events
*/
@@ -8434,6 +8441,10 @@ static int perf_uprobe_event_init(struct perf_event *event)
if (event->attr.type != perf_uprobe.type)
return -ENOENT;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
/*
* no branch sampling for probe events
*/
@@ -9955,6 +9966,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
* and we cannot use the ctx information because we need the
* pmu before we get a ctx.
*/
+ get_task_struct(task);
event->hw.target = task;
}
@@ -10070,6 +10082,8 @@ err_ns:
perf_detach_cgroup(event);
if (event->ns)
put_pid_ns(event->ns);
+ if (event->hw.target)
+ put_task_struct(event->hw.target);
kfree(event);
return ERR_PTR(err);
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index a37a3b4b6342..f4f29b9d90ee 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -39,7 +39,7 @@ static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
}
}
-static cpumask_var_t *alloc_node_to_possible_cpumask(void)
+static cpumask_var_t *alloc_node_to_cpumask(void)
{
cpumask_var_t *masks;
int node;
@@ -62,7 +62,7 @@ out_unwind:
return NULL;
}
-static void free_node_to_possible_cpumask(cpumask_var_t *masks)
+static void free_node_to_cpumask(cpumask_var_t *masks)
{
int node;
@@ -71,7 +71,7 @@ static void free_node_to_possible_cpumask(cpumask_var_t *masks)
kfree(masks);
}
-static void build_node_to_possible_cpumask(cpumask_var_t *masks)
+static void build_node_to_cpumask(cpumask_var_t *masks)
{
int cpu;
@@ -79,14 +79,14 @@ static void build_node_to_possible_cpumask(cpumask_var_t *masks)
cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
}
-static int get_nodes_in_cpumask(cpumask_var_t *node_to_possible_cpumask,
+static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
const struct cpumask *mask, nodemask_t *nodemsk)
{
int n, nodes = 0;
/* Calculate the number of nodes in the supplied affinity mask */
for_each_node(n) {
- if (cpumask_intersects(mask, node_to_possible_cpumask[n])) {
+ if (cpumask_intersects(mask, node_to_cpumask[n])) {
node_set(n, *nodemsk);
nodes++;
}
@@ -94,73 +94,46 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_possible_cpumask,
return nodes;
}
-/**
- * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
- * @nvecs: The total number of vectors
- * @affd: Description of the affinity requirements
- *
- * Returns the masks pointer or NULL if allocation failed.
- */
-struct cpumask *
-irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
+static int irq_build_affinity_masks(const struct irq_affinity *affd,
+ int startvec, int numvecs,
+ cpumask_var_t *node_to_cpumask,
+ const struct cpumask *cpu_mask,
+ struct cpumask *nmsk,
+ struct cpumask *masks)
{
- int n, nodes, cpus_per_vec, extra_vecs, curvec;
- int affv = nvecs - affd->pre_vectors - affd->post_vectors;
- int last_affv = affv + affd->pre_vectors;
+ int n, nodes, cpus_per_vec, extra_vecs, done = 0;
+ int last_affv = affd->pre_vectors + numvecs;
+ int curvec = startvec;
nodemask_t nodemsk = NODE_MASK_NONE;
- struct cpumask *masks;
- cpumask_var_t nmsk, *node_to_possible_cpumask;
-
- /*
- * If there aren't any vectors left after applying the pre/post
- * vectors don't bother with assigning affinity.
- */
- if (!affv)
- return NULL;
-
- if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
- return NULL;
-
- masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
- if (!masks)
- goto out;
- node_to_possible_cpumask = alloc_node_to_possible_cpumask();
- if (!node_to_possible_cpumask)
- goto out;
+ if (!cpumask_weight(cpu_mask))
+ return 0;
- /* Fill out vectors at the beginning that don't need affinity */
- for (curvec = 0; curvec < affd->pre_vectors; curvec++)
- cpumask_copy(masks + curvec, irq_default_affinity);
-
- /* Stabilize the cpumasks */
- get_online_cpus();
- build_node_to_possible_cpumask(node_to_possible_cpumask);
- nodes = get_nodes_in_cpumask(node_to_possible_cpumask, cpu_possible_mask,
- &nodemsk);
+ nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);
/*
* If the number of nodes in the mask is greater than or equal the
* number of vectors we just spread the vectors across the nodes.
*/
- if (affv <= nodes) {
+ if (numvecs <= nodes) {
for_each_node_mask(n, nodemsk) {
- cpumask_copy(masks + curvec,
- node_to_possible_cpumask[n]);
- if (++curvec == last_affv)
+ cpumask_copy(masks + curvec, node_to_cpumask[n]);
+ if (++done == numvecs)
break;
+ if (++curvec == last_affv)
+ curvec = affd->pre_vectors;
}
- goto done;
+ goto out;
}
for_each_node_mask(n, nodemsk) {
int ncpus, v, vecs_to_assign, vecs_per_node;
/* Spread the vectors per node */
- vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;
+ vecs_per_node = (numvecs - (curvec - affd->pre_vectors)) / nodes;
/* Get the cpus on this node which are in the mask */
- cpumask_and(nmsk, cpu_possible_mask, node_to_possible_cpumask[n]);
+ cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
/* Calculate the number of cpus per vector */
ncpus = cpumask_weight(nmsk);
@@ -181,19 +154,96 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
}
- if (curvec >= last_affv)
+ done += v;
+ if (done >= numvecs)
break;
+ if (curvec >= last_affv)
+ curvec = affd->pre_vectors;
--nodes;
}
-done:
+out:
+ return done;
+}
+
+/**
+ * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
+ * @nvecs: The total number of vectors
+ * @affd: Description of the affinity requirements
+ *
+ * Returns the masks pointer or NULL if allocation failed.
+ */
+struct cpumask *
+irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
+{
+ int affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
+ int curvec, usedvecs;
+ cpumask_var_t nmsk, npresmsk, *node_to_cpumask;
+ struct cpumask *masks = NULL;
+
+ /*
+ * If there aren't any vectors left after applying the pre/post
+ * vectors don't bother with assigning affinity.
+ */
+ if (nvecs == affd->pre_vectors + affd->post_vectors)
+ return NULL;
+
+ if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+ return NULL;
+
+ if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
+ goto outcpumsk;
+
+ node_to_cpumask = alloc_node_to_cpumask();
+ if (!node_to_cpumask)
+ goto outnpresmsk;
+
+ masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
+ if (!masks)
+ goto outnodemsk;
+
+ /* Fill out vectors at the beginning that don't need affinity */
+ for (curvec = 0; curvec < affd->pre_vectors; curvec++)
+ cpumask_copy(masks + curvec, irq_default_affinity);
+
+ /* Stabilize the cpumasks */
+ get_online_cpus();
+ build_node_to_cpumask(node_to_cpumask);
+
+ /* Spread on present CPUs starting from affd->pre_vectors */
+ usedvecs = irq_build_affinity_masks(affd, curvec, affvecs,
+ node_to_cpumask, cpu_present_mask,
+ nmsk, masks);
+
+ /*
+ * Spread on non-present CPUs starting from the next vector to be
+ * handled. If the spreading of present CPUs already exhausted the
+ * vector space, assign the non-present CPUs to the already spread
+ * out vectors.
+ */
+ if (usedvecs >= affvecs)
+ curvec = affd->pre_vectors;
+ else
+ curvec = affd->pre_vectors + usedvecs;
+ cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
+ usedvecs += irq_build_affinity_masks(affd, curvec, affvecs,
+ node_to_cpumask, npresmsk,
+ nmsk, masks);
put_online_cpus();
/* Fill out vectors at the end that don't need affinity */
+ if (usedvecs >= affvecs)
+ curvec = affd->pre_vectors + affvecs;
+ else
+ curvec = affd->pre_vectors + usedvecs;
for (; curvec < nvecs; curvec++)
cpumask_copy(masks + curvec, irq_default_affinity);
- free_node_to_possible_cpumask(node_to_possible_cpumask);
-out:
+
+outnodemsk:
+ free_node_to_cpumask(node_to_cpumask);
+outnpresmsk:
+ free_cpumask_var(npresmsk);
+outcpumsk:
free_cpumask_var(nmsk);
return masks;
}
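For context, a driver-side usage sketch of the reworked allocator (illustrative only; in practice most drivers reach it through pci_alloc_irq_vectors_affinity(), and the field values here are made up):

static struct cpumask *example_alloc_queue_masks(int nvecs)
{
	struct irq_affinity affd = {
		.pre_vectors	= 1,	/* e.g. one admin/config vector */
		.post_vectors	= 1,	/* e.g. one error vector        */
	};

	/*
	 * The middle nvecs - 2 vectors are now spread over present CPUs
	 * first and only then over the remaining possible CPUs; the
	 * returned array (nvecs entries) is kfree()d by the caller.
	 */
	return irq_create_affinity_masks(nvecs, &affd);
}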
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index e5bcd94c1efb..75d8e7cf040e 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -22,50 +22,123 @@
#include <linux/ima.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
+#include <linux/elf.h>
+#include <linux/elfcore.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include "kexec_internal.h"
static int kexec_calculate_store_digests(struct kimage *image);
+/*
+ * Currently this is the only default function that is exported, as some
+ * architectures need it to do additional handling.
+ * In the future, other default functions may be exported too if required.
+ */
+int kexec_image_probe_default(struct kimage *image, void *buf,
+ unsigned long buf_len)
+{
+ const struct kexec_file_ops * const *fops;
+ int ret = -ENOEXEC;
+
+ for (fops = &kexec_file_loaders[0]; *fops && (*fops)->probe; ++fops) {
+ ret = (*fops)->probe(buf, buf_len);
+ if (!ret) {
+ image->fops = *fops;
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
/* Architectures can provide this probe function */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
unsigned long buf_len)
{
- return -ENOEXEC;
+ return kexec_image_probe_default(image, buf, buf_len);
+}
+
+static void *kexec_image_load_default(struct kimage *image)
+{
+ if (!image->fops || !image->fops->load)
+ return ERR_PTR(-ENOEXEC);
+
+ return image->fops->load(image, image->kernel_buf,
+ image->kernel_buf_len, image->initrd_buf,
+ image->initrd_buf_len, image->cmdline_buf,
+ image->cmdline_buf_len);
}
void * __weak arch_kexec_kernel_image_load(struct kimage *image)
{
- return ERR_PTR(-ENOEXEC);
+ return kexec_image_load_default(image);
+}
+
+static int kexec_image_post_load_cleanup_default(struct kimage *image)
+{
+ if (!image->fops || !image->fops->cleanup)
+ return 0;
+
+ return image->fops->cleanup(image->image_loader_data);
}
int __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
{
- return -EINVAL;
+ return kexec_image_post_load_cleanup_default(image);
}
#ifdef CONFIG_KEXEC_VERIFY_SIG
+static int kexec_image_verify_sig_default(struct kimage *image, void *buf,
+ unsigned long buf_len)
+{
+ if (!image->fops || !image->fops->verify_sig) {
+ pr_debug("kernel loader does not support signature verification.\n");
+ return -EKEYREJECTED;
+ }
+
+ return image->fops->verify_sig(buf, buf_len);
+}
+
int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
unsigned long buf_len)
{
- return -EKEYREJECTED;
+ return kexec_image_verify_sig_default(image, buf, buf_len);
}
#endif
-/* Apply relocations of type RELA */
+/*
+ * arch_kexec_apply_relocations_add - apply relocations of type RELA
+ * @pi: Purgatory to be relocated.
+ * @section: Section to which the relocations apply.
+ * @relsec: Section containing RELAs.
+ * @symtab: Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
int __weak
-arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
- unsigned int relsec)
+arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
+ const Elf_Shdr *relsec, const Elf_Shdr *symtab)
{
pr_err("RELA relocation unsupported.\n");
return -ENOEXEC;
}
-/* Apply relocations of type REL */
+/*
+ * arch_kexec_apply_relocations - apply relocations of type REL
+ * @pi: Purgatory to be relocated.
+ * @section: Section relocations applying to.
+ * @relsec: Section containing RELs.
+ * @symtab: Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
int __weak
-arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
- unsigned int relsec)
+arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
+ const Elf_Shdr *relsec, const Elf_Shdr *symtab)
{
pr_err("REL relocation unsupported.\n");
return -ENOEXEC;
@@ -532,6 +605,9 @@ static int kexec_calculate_store_digests(struct kimage *image)
struct kexec_sha_region *sha_regions;
struct purgatory_info *pi = &image->purgatory_info;
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_KEXEC_PURGATORY))
+ return 0;
+
zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
zero_buf_sz = PAGE_SIZE;
@@ -633,87 +709,29 @@ out:
return ret;
}
-/* Actually load purgatory. Lot of code taken from kexec-tools */
-static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
- unsigned long max, int top_down)
+#ifdef CONFIG_ARCH_HAS_KEXEC_PURGATORY
+/*
+ * kexec_purgatory_setup_kbuf - prepare buffer to load purgatory.
+ * @pi: Purgatory to be loaded.
+ * @kbuf: Buffer to setup.
+ *
+ * Allocates the memory needed for the buffer. The caller is responsible
+ * for freeing the memory after use.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static int kexec_purgatory_setup_kbuf(struct purgatory_info *pi,
+ struct kexec_buf *kbuf)
{
- struct purgatory_info *pi = &image->purgatory_info;
- unsigned long align, bss_align, bss_sz, bss_pad;
- unsigned long entry, load_addr, curr_load_addr, bss_addr, offset;
- unsigned char *buf_addr, *src;
- int i, ret = 0, entry_sidx = -1;
- const Elf_Shdr *sechdrs_c;
- Elf_Shdr *sechdrs = NULL;
- struct kexec_buf kbuf = { .image = image, .bufsz = 0, .buf_align = 1,
- .buf_min = min, .buf_max = max,
- .top_down = top_down };
-
- /*
- * sechdrs_c points to section headers in purgatory and are read
- * only. No modifications allowed.
- */
- sechdrs_c = (void *)pi->ehdr + pi->ehdr->e_shoff;
-
- /*
- * We can not modify sechdrs_c[] and its fields. It is read only.
- * Copy it over to a local copy where one can store some temporary
- * data and free it at the end. We need to modify ->sh_addr and
- * ->sh_offset fields to keep track of permanent and temporary
- * locations of sections.
- */
- sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr));
- if (!sechdrs)
- return -ENOMEM;
-
- memcpy(sechdrs, sechdrs_c, pi->ehdr->e_shnum * sizeof(Elf_Shdr));
-
- /*
- * We seem to have multiple copies of sections. First copy is which
- * is embedded in kernel in read only section. Some of these sections
- * will be copied to a temporary buffer and relocated. And these
- * sections will finally be copied to their final destination at
- * segment load time.
- *
- * Use ->sh_offset to reflect section address in memory. It will
- * point to original read only copy if section is not allocatable.
- * Otherwise it will point to temporary copy which will be relocated.
- *
- * Use ->sh_addr to contain final address of the section where it
- * will go during execution time.
- */
- for (i = 0; i < pi->ehdr->e_shnum; i++) {
- if (sechdrs[i].sh_type == SHT_NOBITS)
- continue;
-
- sechdrs[i].sh_offset = (unsigned long)pi->ehdr +
- sechdrs[i].sh_offset;
- }
-
- /*
- * Identify entry point section and make entry relative to section
- * start.
- */
- entry = pi->ehdr->e_entry;
- for (i = 0; i < pi->ehdr->e_shnum; i++) {
- if (!(sechdrs[i].sh_flags & SHF_ALLOC))
- continue;
-
- if (!(sechdrs[i].sh_flags & SHF_EXECINSTR))
- continue;
-
- /* Make entry section relative */
- if (sechdrs[i].sh_addr <= pi->ehdr->e_entry &&
- ((sechdrs[i].sh_addr + sechdrs[i].sh_size) >
- pi->ehdr->e_entry)) {
- entry_sidx = i;
- entry -= sechdrs[i].sh_addr;
- break;
- }
- }
+ const Elf_Shdr *sechdrs;
+ unsigned long bss_align;
+ unsigned long bss_sz;
+ unsigned long align;
+ int i, ret;
- /* Determine how much memory is needed to load relocatable object. */
- bss_align = 1;
- bss_sz = 0;
+ sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
+ kbuf->buf_align = bss_align = 1;
+ kbuf->bufsz = bss_sz = 0;
for (i = 0; i < pi->ehdr->e_shnum; i++) {
if (!(sechdrs[i].sh_flags & SHF_ALLOC))
@@ -721,111 +739,124 @@ static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
align = sechdrs[i].sh_addralign;
if (sechdrs[i].sh_type != SHT_NOBITS) {
- if (kbuf.buf_align < align)
- kbuf.buf_align = align;
- kbuf.bufsz = ALIGN(kbuf.bufsz, align);
- kbuf.bufsz += sechdrs[i].sh_size;
+ if (kbuf->buf_align < align)
+ kbuf->buf_align = align;
+ kbuf->bufsz = ALIGN(kbuf->bufsz, align);
+ kbuf->bufsz += sechdrs[i].sh_size;
} else {
- /* bss section */
if (bss_align < align)
bss_align = align;
bss_sz = ALIGN(bss_sz, align);
bss_sz += sechdrs[i].sh_size;
}
}
+ kbuf->bufsz = ALIGN(kbuf->bufsz, bss_align);
+ kbuf->memsz = kbuf->bufsz + bss_sz;
+ if (kbuf->buf_align < bss_align)
+ kbuf->buf_align = bss_align;
- /* Determine the bss padding required to align bss properly */
- bss_pad = 0;
- if (kbuf.bufsz & (bss_align - 1))
- bss_pad = bss_align - (kbuf.bufsz & (bss_align - 1));
-
- kbuf.memsz = kbuf.bufsz + bss_pad + bss_sz;
+ kbuf->buffer = vzalloc(kbuf->bufsz);
+ if (!kbuf->buffer)
+ return -ENOMEM;
+ pi->purgatory_buf = kbuf->buffer;
- /* Allocate buffer for purgatory */
- kbuf.buffer = vzalloc(kbuf.bufsz);
- if (!kbuf.buffer) {
- ret = -ENOMEM;
+ ret = kexec_add_buffer(kbuf);
+ if (ret)
goto out;
- }
- if (kbuf.buf_align < bss_align)
- kbuf.buf_align = bss_align;
+ return 0;
+out:
+ vfree(pi->purgatory_buf);
+ pi->purgatory_buf = NULL;
+ return ret;
+}
- /* Add buffer to segment list */
- ret = kexec_add_buffer(&kbuf);
- if (ret)
- goto out;
- pi->purgatory_load_addr = kbuf.mem;
+/*
+ * kexec_purgatory_setup_sechdrs - prepares the pi->sechdrs buffer.
+ * @pi: Purgatory to be loaded.
+ * @kbuf: Buffer prepared to store purgatory.
+ *
+ * Allocates the memory needed for the buffer. The caller is responsible
+ * for freeing the memory after use.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static int kexec_purgatory_setup_sechdrs(struct purgatory_info *pi,
+ struct kexec_buf *kbuf)
+{
+ unsigned long bss_addr;
+ unsigned long offset;
+ Elf_Shdr *sechdrs;
+ int i;
+
+ /*
+ * The section headers in kexec_purgatory are read-only. In order to
+ * have them modifiable, make a temporary copy.
+ */
+ sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr));
+ if (!sechdrs)
+ return -ENOMEM;
+ memcpy(sechdrs, (void *)pi->ehdr + pi->ehdr->e_shoff,
+ pi->ehdr->e_shnum * sizeof(Elf_Shdr));
+ pi->sechdrs = sechdrs;
- /* Load SHF_ALLOC sections */
- buf_addr = kbuf.buffer;
- load_addr = curr_load_addr = pi->purgatory_load_addr;
- bss_addr = load_addr + kbuf.bufsz + bss_pad;
+ offset = 0;
+ bss_addr = kbuf->mem + kbuf->bufsz;
+ kbuf->image->start = pi->ehdr->e_entry;
for (i = 0; i < pi->ehdr->e_shnum; i++) {
+ unsigned long align;
+ void *src, *dst;
+
if (!(sechdrs[i].sh_flags & SHF_ALLOC))
continue;
align = sechdrs[i].sh_addralign;
- if (sechdrs[i].sh_type != SHT_NOBITS) {
- curr_load_addr = ALIGN(curr_load_addr, align);
- offset = curr_load_addr - load_addr;
- /* We already modifed ->sh_offset to keep src addr */
- src = (char *) sechdrs[i].sh_offset;
- memcpy(buf_addr + offset, src, sechdrs[i].sh_size);
-
- /* Store load address and source address of section */
- sechdrs[i].sh_addr = curr_load_addr;
-
- /*
- * This section got copied to temporary buffer. Update
- * ->sh_offset accordingly.
- */
- sechdrs[i].sh_offset = (unsigned long)(buf_addr + offset);
-
- /* Advance to the next address */
- curr_load_addr += sechdrs[i].sh_size;
- } else {
+ if (sechdrs[i].sh_type == SHT_NOBITS) {
bss_addr = ALIGN(bss_addr, align);
sechdrs[i].sh_addr = bss_addr;
bss_addr += sechdrs[i].sh_size;
+ continue;
}
- }
- /* Update entry point based on load address of text section */
- if (entry_sidx >= 0)
- entry += sechdrs[entry_sidx].sh_addr;
+ offset = ALIGN(offset, align);
+ if (sechdrs[i].sh_flags & SHF_EXECINSTR &&
+ pi->ehdr->e_entry >= sechdrs[i].sh_addr &&
+ pi->ehdr->e_entry < (sechdrs[i].sh_addr
+ + sechdrs[i].sh_size)) {
+ kbuf->image->start -= sechdrs[i].sh_addr;
+ kbuf->image->start += kbuf->mem + offset;
+ }
- /* Make kernel jump to purgatory after shutdown */
- image->start = entry;
+ src = (void *)pi->ehdr + sechdrs[i].sh_offset;
+ dst = pi->purgatory_buf + offset;
+ memcpy(dst, src, sechdrs[i].sh_size);
- /* Used later to get/set symbol values */
- pi->sechdrs = sechdrs;
+ sechdrs[i].sh_addr = kbuf->mem + offset;
+ sechdrs[i].sh_offset = offset;
+ offset += sechdrs[i].sh_size;
+ }
- /*
- * Used later to identify which section is purgatory and skip it
- * from checksumming.
- */
- pi->purgatory_buf = kbuf.buffer;
- return ret;
-out:
- vfree(sechdrs);
- vfree(kbuf.buffer);
- return ret;
+ return 0;
}
static int kexec_apply_relocations(struct kimage *image)
{
int i, ret;
struct purgatory_info *pi = &image->purgatory_info;
- Elf_Shdr *sechdrs = pi->sechdrs;
+ const Elf_Shdr *sechdrs;
+
+ sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
- /* Apply relocations */
for (i = 0; i < pi->ehdr->e_shnum; i++) {
- Elf_Shdr *section, *symtab;
+ const Elf_Shdr *relsec;
+ const Elf_Shdr *symtab;
+ Elf_Shdr *section;
+
+ relsec = sechdrs + i;
- if (sechdrs[i].sh_type != SHT_RELA &&
- sechdrs[i].sh_type != SHT_REL)
+ if (relsec->sh_type != SHT_RELA &&
+ relsec->sh_type != SHT_REL)
continue;
/*
@@ -834,12 +865,12 @@ static int kexec_apply_relocations(struct kimage *image)
* symbol table. And ->sh_info contains section header
* index of section to which relocations apply.
*/
- if (sechdrs[i].sh_info >= pi->ehdr->e_shnum ||
- sechdrs[i].sh_link >= pi->ehdr->e_shnum)
+ if (relsec->sh_info >= pi->ehdr->e_shnum ||
+ relsec->sh_link >= pi->ehdr->e_shnum)
return -ENOEXEC;
- section = &sechdrs[sechdrs[i].sh_info];
- symtab = &sechdrs[sechdrs[i].sh_link];
+ section = pi->sechdrs + relsec->sh_info;
+ symtab = sechdrs + relsec->sh_link;
if (!(section->sh_flags & SHF_ALLOC))
continue;
@@ -856,12 +887,12 @@ static int kexec_apply_relocations(struct kimage *image)
* Respective architecture needs to provide support for applying
* relocations of type SHT_RELA/SHT_REL.
*/
- if (sechdrs[i].sh_type == SHT_RELA)
- ret = arch_kexec_apply_relocations_add(pi->ehdr,
- sechdrs, i);
- else if (sechdrs[i].sh_type == SHT_REL)
- ret = arch_kexec_apply_relocations(pi->ehdr,
- sechdrs, i);
+ if (relsec->sh_type == SHT_RELA)
+ ret = arch_kexec_apply_relocations_add(pi, section,
+ relsec, symtab);
+ else if (relsec->sh_type == SHT_REL)
+ ret = arch_kexec_apply_relocations(pi, section,
+ relsec, symtab);
if (ret)
return ret;
}
@@ -869,10 +900,18 @@ static int kexec_apply_relocations(struct kimage *image)
return 0;
}
-/* Load relocatable purgatory object and relocate it appropriately */
-int kexec_load_purgatory(struct kimage *image, unsigned long min,
- unsigned long max, int top_down,
- unsigned long *load_addr)
+/*
+ * kexec_load_purgatory - Load and relocate the purgatory object.
+ * @image: Image to add the purgatory to.
+ * @kbuf: Memory parameters to use.
+ *
+ * Allocates the memory needed for image->purgatory_info.sechdrs and
+ * image->purgatory_info.purgatory_buf/kbuf->buffer. The caller is
+ * responsible for freeing the memory after use.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf)
{
struct purgatory_info *pi = &image->purgatory_info;
int ret;
@@ -880,55 +919,51 @@ int kexec_load_purgatory(struct kimage *image, unsigned long min,
if (kexec_purgatory_size <= 0)
return -EINVAL;
- if (kexec_purgatory_size < sizeof(Elf_Ehdr))
- return -ENOEXEC;
-
- pi->ehdr = (Elf_Ehdr *)kexec_purgatory;
-
- if (memcmp(pi->ehdr->e_ident, ELFMAG, SELFMAG) != 0
- || pi->ehdr->e_type != ET_REL
- || !elf_check_arch(pi->ehdr)
- || pi->ehdr->e_shentsize != sizeof(Elf_Shdr))
- return -ENOEXEC;
-
- if (pi->ehdr->e_shoff >= kexec_purgatory_size
- || (pi->ehdr->e_shnum * sizeof(Elf_Shdr) >
- kexec_purgatory_size - pi->ehdr->e_shoff))
- return -ENOEXEC;
+ pi->ehdr = (const Elf_Ehdr *)kexec_purgatory;
- ret = __kexec_load_purgatory(image, min, max, top_down);
+ ret = kexec_purgatory_setup_kbuf(pi, kbuf);
if (ret)
return ret;
+ ret = kexec_purgatory_setup_sechdrs(pi, kbuf);
+ if (ret)
+ goto out_free_kbuf;
+
ret = kexec_apply_relocations(image);
if (ret)
goto out;
- *load_addr = pi->purgatory_load_addr;
return 0;
out:
vfree(pi->sechdrs);
pi->sechdrs = NULL;
-
+out_free_kbuf:
vfree(pi->purgatory_buf);
pi->purgatory_buf = NULL;
return ret;
}
-static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
- const char *name)
+/*
+ * kexec_purgatory_find_symbol - find a symbol in the purgatory
+ * @pi: Purgatory to search in.
+ * @name: Name of the symbol.
+ *
+ * Return: pointer to symbol in read-only symtab on success, NULL on error.
+ */
+static const Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
+ const char *name)
{
- Elf_Sym *syms;
- Elf_Shdr *sechdrs;
- Elf_Ehdr *ehdr;
- int i, k;
+ const Elf_Shdr *sechdrs;
+ const Elf_Ehdr *ehdr;
+ const Elf_Sym *syms;
const char *strtab;
+ int i, k;
- if (!pi->sechdrs || !pi->ehdr)
+ if (!pi->ehdr)
return NULL;
- sechdrs = pi->sechdrs;
ehdr = pi->ehdr;
+ sechdrs = (void *)ehdr + ehdr->e_shoff;
for (i = 0; i < ehdr->e_shnum; i++) {
if (sechdrs[i].sh_type != SHT_SYMTAB)
@@ -937,8 +972,8 @@ static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
if (sechdrs[i].sh_link >= ehdr->e_shnum)
/* Invalid strtab section number */
continue;
- strtab = (char *)sechdrs[sechdrs[i].sh_link].sh_offset;
- syms = (Elf_Sym *)sechdrs[i].sh_offset;
+ strtab = (void *)ehdr + sechdrs[sechdrs[i].sh_link].sh_offset;
+ syms = (void *)ehdr + sechdrs[i].sh_offset;
/* Go through symbols for a match */
for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) {
@@ -966,7 +1001,7 @@ static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
{
struct purgatory_info *pi = &image->purgatory_info;
- Elf_Sym *sym;
+ const Elf_Sym *sym;
Elf_Shdr *sechdr;
sym = kexec_purgatory_find_symbol(pi, name);
@@ -989,9 +1024,9 @@ void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
void *buf, unsigned int size, bool get_value)
{
- Elf_Sym *sym;
- Elf_Shdr *sechdrs;
struct purgatory_info *pi = &image->purgatory_info;
+ const Elf_Sym *sym;
+ Elf_Shdr *sec;
char *sym_buf;
sym = kexec_purgatory_find_symbol(pi, name);
@@ -1004,16 +1039,15 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
return -EINVAL;
}
- sechdrs = pi->sechdrs;
+ sec = pi->sechdrs + sym->st_shndx;
- if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
+ if (sec->sh_type == SHT_NOBITS) {
pr_err("symbol %s is in a bss section. Cannot %s\n", name,
get_value ? "get" : "set");
return -EINVAL;
}
- sym_buf = (unsigned char *)sechdrs[sym->st_shndx].sh_offset +
- sym->st_value;
+ sym_buf = (char *)pi->purgatory_buf + sec->sh_offset + sym->st_value;
if (get_value)
memcpy((void *)buf, sym_buf, size);
@@ -1022,3 +1056,174 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
return 0;
}
+#endif /* CONFIG_ARCH_HAS_KEXEC_PURGATORY */
+
+int crash_exclude_mem_range(struct crash_mem *mem,
+ unsigned long long mstart, unsigned long long mend)
+{
+ int i, j;
+ unsigned long long start, end;
+ struct crash_mem_range temp_range = {0, 0};
+
+ for (i = 0; i < mem->nr_ranges; i++) {
+ start = mem->ranges[i].start;
+ end = mem->ranges[i].end;
+
+ if (mstart > end || mend < start)
+ continue;
+
+ /* Truncate any area outside of range */
+ if (mstart < start)
+ mstart = start;
+ if (mend > end)
+ mend = end;
+
+ /* Found completely overlapping range */
+ if (mstart == start && mend == end) {
+ mem->ranges[i].start = 0;
+ mem->ranges[i].end = 0;
+ if (i < mem->nr_ranges - 1) {
+ /* Shift rest of the ranges to left */
+ for (j = i; j < mem->nr_ranges - 1; j++) {
+ mem->ranges[j].start =
+ mem->ranges[j+1].start;
+ mem->ranges[j].end =
+ mem->ranges[j+1].end;
+ }
+ }
+ mem->nr_ranges--;
+ return 0;
+ }
+
+ if (mstart > start && mend < end) {
+ /* Split original range */
+ mem->ranges[i].end = mstart - 1;
+ temp_range.start = mend + 1;
+ temp_range.end = end;
+ } else if (mstart != start)
+ mem->ranges[i].end = mstart - 1;
+ else
+ mem->ranges[i].start = mend + 1;
+ break;
+ }
+
+ /* If a split happened, add the split to the array */
+ if (!temp_range.end)
+ return 0;
+
+ /* Split happened */
+ if (i == mem->max_nr_ranges - 1)
+ return -ENOMEM;
+
+ /* Location where new range should go */
+ j = i + 1;
+ if (j < mem->nr_ranges) {
+ /* Move over all ranges one slot towards the end */
+ for (i = mem->nr_ranges - 1; i >= j; i--)
+ mem->ranges[i + 1] = mem->ranges[i];
+ }
+
+ mem->ranges[j].start = temp_range.start;
+ mem->ranges[j].end = temp_range.end;
+ mem->nr_ranges++;
+ return 0;
+}
+
+int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
+ void **addr, unsigned long *sz)
+{
+ Elf64_Ehdr *ehdr;
+ Elf64_Phdr *phdr;
+ unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
+ unsigned char *buf;
+ unsigned int cpu, i;
+ unsigned long long notes_addr;
+ unsigned long mstart, mend;
+
+ /* extra phdr for vmcoreinfo elf note */
+ nr_phdr = nr_cpus + 1;
+ nr_phdr += mem->nr_ranges;
+
+ /*
+ * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
+ * area (for example, ffffffff80000000 - ffffffffa0000000 on x86_64).
+ * I think this is required by tools like gdb. So same physical
+ * memory will be mapped in two elf headers. One will contain kernel
+ * text virtual addresses and the other will have __va(physical) addresses.
+ */
+
+ nr_phdr++;
+ elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
+ elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);
+
+ buf = vzalloc(elf_sz);
+ if (!buf)
+ return -ENOMEM;
+
+ ehdr = (Elf64_Ehdr *)buf;
+ phdr = (Elf64_Phdr *)(ehdr + 1);
+ memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
+ ehdr->e_ident[EI_CLASS] = ELFCLASS64;
+ ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
+ ehdr->e_ident[EI_VERSION] = EV_CURRENT;
+ ehdr->e_ident[EI_OSABI] = ELF_OSABI;
+ memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
+ ehdr->e_type = ET_CORE;
+ ehdr->e_machine = ELF_ARCH;
+ ehdr->e_version = EV_CURRENT;
+ ehdr->e_phoff = sizeof(Elf64_Ehdr);
+ ehdr->e_ehsize = sizeof(Elf64_Ehdr);
+ ehdr->e_phentsize = sizeof(Elf64_Phdr);
+
+ /* Prepare one phdr of type PT_NOTE for each present cpu */
+ for_each_present_cpu(cpu) {
+ phdr->p_type = PT_NOTE;
+ notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
+ phdr->p_offset = phdr->p_paddr = notes_addr;
+ phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
+ (ehdr->e_phnum)++;
+ phdr++;
+ }
+
+ /* Prepare one PT_NOTE header for vmcoreinfo */
+ phdr->p_type = PT_NOTE;
+ phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
+ phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
+ (ehdr->e_phnum)++;
+ phdr++;
+
+ /* Prepare PT_LOAD type program header for kernel text region */
+ if (kernel_map) {
+ phdr->p_type = PT_LOAD;
+ phdr->p_flags = PF_R|PF_W|PF_X;
+ phdr->p_vaddr = (Elf64_Addr)_text;
+ phdr->p_filesz = phdr->p_memsz = _end - _text;
+ phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
+ ehdr->e_phnum++;
+ phdr++;
+ }
+
+ /* Go through all the ranges in mem->ranges[] and prepare phdr */
+ for (i = 0; i < mem->nr_ranges; i++) {
+ mstart = mem->ranges[i].start;
+ mend = mem->ranges[i].end;
+
+ phdr->p_type = PT_LOAD;
+ phdr->p_flags = PF_R|PF_W|PF_X;
+ phdr->p_offset = mstart;
+
+ phdr->p_paddr = mstart;
+ phdr->p_vaddr = (unsigned long long) __va(mstart);
+ phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
+ phdr->p_align = 0;
+ ehdr->e_phnum++;
+ phdr++;
+ pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
+ phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
+ ehdr->e_phnum, phdr->p_offset);
+ }
+
+ *addr = buf;
+ *sz = elf_sz;
+ return 0;
+}
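A usage sketch for crash_exclude_mem_range() (illustrative: the allocation assumes crash_mem carries a trailing ranges[] array, as the helper above indexes it, and the addresses are made up):

static int example_exclude(void)
{
	struct crash_mem *mem;
	int ret;

	/* Room for one initial range plus one extra produced by a split. */
	mem = kzalloc(sizeof(*mem) + 2 * sizeof(struct crash_mem_range),
		      GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->max_nr_ranges = 2;
	mem->nr_ranges = 1;
	mem->ranges[0].start = 0x0000;
	mem->ranges[0].end   = 0xffff;

	/* Punch out 0x4000-0x7fff: leaves 0x0000-0x3fff and 0x8000-0xffff. */
	ret = crash_exclude_mem_range(mem, 0x4000, 0x7fff);

	kfree(mem);
	return ret;
}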
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 9d7503910ce2..fa39092b7aea 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -295,6 +295,7 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
* changed
*/
plist_del(node, &c->list);
+ /* fall through */
case PM_QOS_ADD_REQ:
plist_node_init(node, new_value);
plist_add(node, &c->list);
@@ -367,6 +368,7 @@ bool pm_qos_update_flags(struct pm_qos_flags *pqf,
break;
case PM_QOS_UPDATE_REQ:
pm_qos_flags_remove_req(pqf, req);
+ /* fall through */
case PM_QOS_ADD_REQ:
req->flags = val;
INIT_LIST_HEAD(&req->node);
diff --git a/kernel/resource.c b/kernel/resource.c
index e270b5048988..2af6c03858b9 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -651,7 +651,8 @@ static int __find_resource(struct resource *root, struct resource *old,
alloc.start = constraint->alignf(constraint->alignf_data, &avail,
size, constraint->align);
alloc.end = alloc.start + size - 1;
- if (resource_contains(&avail, &alloc)) {
+ if (alloc.start <= alloc.end &&
+ resource_contains(&avail, &alloc)) {
new->start = alloc.start;
new->end = alloc.end;
return 0;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e8afd6086f23..5e10aaeebfcc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -874,7 +874,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
* this case, we can save a useless back to back clock update.
*/
if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
- rq_clock_skip_update(rq, true);
+ rq_clock_skip_update(rq);
}
#ifdef CONFIG_SMP
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 2b124811947d..d2c6083304b4 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -631,10 +631,9 @@ fail:
stop_kthread:
sugov_kthread_stop(sg_policy);
-
-free_sg_policy:
mutex_unlock(&global_tunables_lock);
+free_sg_policy:
sugov_policy_free(sg_policy);
disable_fast_switch:
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index d1c7bf7c7e5b..e7b3008b85bb 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1560,7 +1560,7 @@ static void yield_task_dl(struct rq *rq)
* so we don't do microscopic update in schedule()
* and double the fastpath cost.
*/
- rq_clock_skip_update(rq, true);
+ rq_clock_skip_update(rq);
}
#ifdef CONFIG_SMP
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0951d1c58d2f..54dc31e7ab9b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7089,7 +7089,7 @@ static void yield_task_fair(struct rq *rq)
* so we don't do microscopic update in schedule()
* and double the fastpath cost.
*/
- rq_clock_skip_update(rq, true);
+ rq_clock_skip_update(rq);
}
set_skip_buddy(se);
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 2975f195e1c4..1a3e9bddd17b 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -141,13 +141,15 @@ static void cpuidle_idle_call(void)
}
/*
- * Tell the RCU framework we are entering an idle section,
- * so no more rcu read side critical sections and one more
+ * The RCU framework needs to be told that we are entering an idle
+ * section, so no more rcu read side critical sections and one more
* step to the grace period
*/
- rcu_idle_enter();
if (cpuidle_not_available(drv, dev)) {
+ tick_nohz_idle_stop_tick();
+ rcu_idle_enter();
+
default_idle_call();
goto exit_idle;
}
@@ -164,20 +166,37 @@ static void cpuidle_idle_call(void)
if (idle_should_enter_s2idle() || dev->use_deepest_state) {
if (idle_should_enter_s2idle()) {
+ rcu_idle_enter();
+
entered_state = cpuidle_enter_s2idle(drv, dev);
if (entered_state > 0) {
local_irq_enable();
goto exit_idle;
}
+
+ rcu_idle_exit();
}
+ tick_nohz_idle_stop_tick();
+ rcu_idle_enter();
+
next_state = cpuidle_find_deepest_state(drv, dev);
call_cpuidle(drv, dev, next_state);
} else {
+ bool stop_tick = true;
+
/*
* Ask the cpuidle framework to choose a convenient idle state.
*/
- next_state = cpuidle_select(drv, dev);
+ next_state = cpuidle_select(drv, dev, &stop_tick);
+
+ if (stop_tick)
+ tick_nohz_idle_stop_tick();
+ else
+ tick_nohz_idle_retain_tick();
+
+ rcu_idle_enter();
+
entered_state = call_cpuidle(drv, dev, next_state);
/*
* Give the governor an opportunity to reflect on the outcome
@@ -222,6 +241,7 @@ static void do_idle(void)
rmb();
if (cpu_is_offline(cpu)) {
+ tick_nohz_idle_stop_tick_protected();
cpuhp_report_idle_dead();
arch_cpu_idle_dead();
}
@@ -235,10 +255,12 @@ static void do_idle(void)
* broadcast device expired for us, we don't want to go deep
* idle as we know that the IPI is going to arrive right away.
*/
- if (cpu_idle_force_poll || tick_check_broadcast_expired())
+ if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
+ tick_nohz_idle_restart_tick();
cpu_idle_poll();
- else
+ } else {
cpuidle_idle_call();
+ }
arch_cpu_idle_exit();
}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 86b77987435e..7aef6b4e885a 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -839,6 +839,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
continue;
raw_spin_lock(&rq->lock);
+ update_rq_clock(rq);
+
if (rt_rq->rt_time) {
u64 runtime;
@@ -859,7 +861,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
* 'runtime'.
*/
if (rt_rq->rt_nr_running && rq->curr == rq->idle)
- rq_clock_skip_update(rq, false);
+ rq_clock_cancel_skipupdate(rq);
}
if (rt_rq->rt_time || rt_rq->rt_nr_running)
idle = 0;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c3deaee7a7a2..15750c222ca2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -976,13 +976,20 @@ static inline u64 rq_clock_task(struct rq *rq)
return rq->clock_task;
}
-static inline void rq_clock_skip_update(struct rq *rq, bool skip)
+static inline void rq_clock_skip_update(struct rq *rq)
{
lockdep_assert_held(&rq->lock);
- if (skip)
- rq->clock_update_flags |= RQCF_REQ_SKIP;
- else
- rq->clock_update_flags &= ~RQCF_REQ_SKIP;
+ rq->clock_update_flags |= RQCF_REQ_SKIP;
+}
+
+/*
+ * See rt task throttling, which is the only time a skip
+ * request is cancelled.
+ */
+static inline void rq_clock_cancel_skipupdate(struct rq *rq)
+{
+ lockdep_assert_held(&rq->lock);
+ rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}
struct rq_flags {
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 6cafc008f6db..9791364925dc 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -5,6 +5,11 @@
#include <asm/unistd.h>
+#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+/* Architectures may override COND_SYSCALL and COND_SYSCALL_COMPAT */
+#include <asm/syscall_wrapper.h>
+#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+
/* we can't #include <linux/syscalls.h> here,
but tell gcc to not warn with -Wmissing-prototypes */
asmlinkage long sys_ni_syscall(void);
@@ -17,8 +22,13 @@ asmlinkage long sys_ni_syscall(void)
return -ENOSYS;
}
+#ifndef COND_SYSCALL
#define COND_SYSCALL(name) cond_syscall(sys_##name)
+#endif /* COND_SYSCALL */
+
+#ifndef COND_SYSCALL_COMPAT
#define COND_SYSCALL_COMPAT(name) cond_syscall(compat_sys_##name)
+#endif /* COND_SYSCALL_COMPAT */
/*
* This list is kept in the same order as include/uapi/asm-generic/unistd.h.
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 9b082ce86325..eda1210ce50f 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -480,6 +480,7 @@ __next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
while ((base = __next_base((cpu_base), &(active))))
static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
+ const struct hrtimer *exclude,
unsigned int active,
ktime_t expires_next)
{
@@ -492,9 +493,22 @@ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
next = timerqueue_getnext(&base->active);
timer = container_of(next, struct hrtimer, node);
+ if (timer == exclude) {
+ /* Get to the next timer in the queue. */
+ next = timerqueue_iterate_next(next);
+ if (!next)
+ continue;
+
+ timer = container_of(next, struct hrtimer, node);
+ }
expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
if (expires < expires_next) {
expires_next = expires;
+
+ /* Skip cpu_base update if a timer is being excluded. */
+ if (exclude)
+ continue;
+
if (timer->is_soft)
cpu_base->softirq_next_timer = timer;
else
@@ -538,7 +552,8 @@ __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_
if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
cpu_base->softirq_next_timer = NULL;
- expires_next = __hrtimer_next_event_base(cpu_base, active, KTIME_MAX);
+ expires_next = __hrtimer_next_event_base(cpu_base, NULL,
+ active, KTIME_MAX);
next_timer = cpu_base->softirq_next_timer;
}
@@ -546,7 +561,8 @@ __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_
if (active_mask & HRTIMER_ACTIVE_HARD) {
active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
cpu_base->next_timer = next_timer;
- expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
+ expires_next = __hrtimer_next_event_base(cpu_base, NULL, active,
+ expires_next);
}
return expires_next;
@@ -1190,6 +1206,39 @@ u64 hrtimer_get_next_event(void)
return expires;
}
+
+/**
+ * hrtimer_next_event_without - time until next expiry event w/o one timer
+ * @exclude: timer to exclude
+ *
+ * Returns the next expiry time over all timers except for the @exclude one or
+ * KTIME_MAX if none of them is pending.
+ */
+u64 hrtimer_next_event_without(const struct hrtimer *exclude)
+{
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+ u64 expires = KTIME_MAX;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
+
+ if (__hrtimer_hres_active(cpu_base)) {
+ unsigned int active;
+
+ if (!cpu_base->softirq_activated) {
+ active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
+ expires = __hrtimer_next_event_base(cpu_base, exclude,
+ active, KTIME_MAX);
+ }
+ active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
+ expires = __hrtimer_next_event_base(cpu_base, exclude, active,
+ expires);
+ }
+
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+
+ return expires;
+}
#endif
static inline int hrtimer_clockid_to_base(clockid_t clock_id)
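A caller sketch for hrtimer_next_event_without() (illustrative; the nohz tick code is the natural consumer, excluding its own sched_timer when computing how long the CPU may sleep):

static s64 example_sleep_length_ns(struct hrtimer *own_timer)
{
	u64 next_event = hrtimer_next_event_without(own_timer);

	if (next_event == KTIME_MAX)
		return KTIME_MAX;	/* nothing else is pending */

	/* Time from now until the earliest expiry among all other timers. */
	return ktime_sub(next_event, ktime_get());
}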
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 8d70da1b9a0d..a09ded765f6c 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -31,7 +31,7 @@
/* USER_HZ period (usecs): */
-unsigned long tick_usec = TICK_USEC;
+unsigned long tick_usec = USER_TICK_USEC;
/* SHIFTED_HZ period (nsecs): */
unsigned long tick_nsec;
diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c
index 6259dbc0191a..e0dbae98db9d 100644
--- a/kernel/time/posix-stubs.c
+++ b/kernel/time/posix-stubs.c
@@ -19,6 +19,11 @@
#include <linux/posix-timers.h>
#include <linux/compat.h>
+#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+/* Architectures may override SYS_NI and COMPAT_SYS_NI */
+#include <asm/syscall_wrapper.h>
+#endif
+
asmlinkage long sys_ni_posix_timers(void)
{
pr_err_once("process %d (%s) attempted a POSIX timer syscall "
@@ -27,8 +32,13 @@ asmlinkage long sys_ni_posix_timers(void)
return -ENOSYS;
}
+#ifndef SYS_NI
#define SYS_NI(name) SYSCALL_ALIAS(sys_##name, sys_ni_posix_timers)
+#endif
+
+#ifndef COMPAT_SYS_NI
#define COMPAT_SYS_NI(name) SYSCALL_ALIAS(compat_sys_##name, sys_ni_posix_timers)
+#endif
SYS_NI(timer_create);
SYS_NI(timer_gettime);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index f3ab08caa2c3..646645e981f9 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -122,8 +122,7 @@ static ktime_t tick_init_jiffy_update(void)
return period;
}
-
-static void tick_sched_do_timer(ktime_t now)
+static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
{
int cpu = smp_processor_id();
@@ -143,6 +142,9 @@ static void tick_sched_do_timer(ktime_t now)
/* Check, if the jiffies need an update */
if (tick_do_timer_cpu == cpu)
tick_do_update_jiffies64(now);
+
+ if (ts->inidle)
+ ts->got_idle_tick = 1;
}
static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
@@ -474,7 +476,9 @@ __setup("nohz=", setup_tick_nohz);
bool tick_nohz_tick_stopped(void)
{
- return __this_cpu_read(tick_cpu_sched.tick_stopped);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
+
+ return ts->tick_stopped;
}
bool tick_nohz_tick_stopped_cpu(int cpu)
@@ -537,14 +541,11 @@ static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
sched_clock_idle_wakeup_event();
}
-static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
+static void tick_nohz_start_idle(struct tick_sched *ts)
{
- ktime_t now = ktime_get();
-
- ts->idle_entrytime = now;
+ ts->idle_entrytime = ktime_get();
ts->idle_active = 1;
sched_clock_idle_sleep_event();
- return now;
}
/**
@@ -653,13 +654,10 @@ static inline bool local_timer_softirq_pending(void)
return local_softirq_pending() & TIMER_SOFTIRQ;
}
-static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
- ktime_t now, int cpu)
+static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
{
- struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
unsigned long seq, basejiff;
- ktime_t tick;
/* Read jiffies and the time when jiffies were updated last */
do {
@@ -668,6 +666,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
basejiff = jiffies;
} while (read_seqretry(&jiffies_lock, seq));
ts->last_jiffies = basejiff;
+ ts->timer_expires_base = basemono;
/*
* Keep the periodic tick, when RCU, architecture or irq_work
@@ -712,47 +711,63 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
* next period, so no point in stopping it either, bail.
*/
if (!ts->tick_stopped) {
- tick = 0;
+ ts->timer_expires = 0;
goto out;
}
}
/*
+ * If this CPU is the one which had the do_timer() duty last, we limit
+ * the sleep time to the timekeeping max_deferment value.
+ * Otherwise we can sleep as long as we want.
+ */
+ delta = timekeeping_max_deferment();
+ if (cpu != tick_do_timer_cpu &&
+ (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last))
+ delta = KTIME_MAX;
+
+ /* Calculate the next expiry time */
+ if (delta < (KTIME_MAX - basemono))
+ expires = basemono + delta;
+ else
+ expires = KTIME_MAX;
+
+ ts->timer_expires = min_t(u64, expires, next_tick);
+
+out:
+ return ts->timer_expires;
+}
+
+static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
+{
+ struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
+ u64 basemono = ts->timer_expires_base;
+ u64 expires = ts->timer_expires;
+ ktime_t tick = expires;
+
+ /* Make sure we won't be trying to stop it twice in a row. */
+ ts->timer_expires_base = 0;
+
+ /*
* If this CPU is the one which updates jiffies, then give up
* the assignment and let it be taken by the CPU which runs
* the tick timer next, which might be this CPU as well. If we
* don't drop this here the jiffies might be stale and
* do_timer() never invoked. Keep track of the fact that it
- * was the one which had the do_timer() duty last. If this CPU
- * is the one which had the do_timer() duty last, we limit the
- * sleep time to the timekeeping max_deferment value.
- * Otherwise we can sleep as long as we want.
+ * was the one which had the do_timer() duty last.
*/
- delta = timekeeping_max_deferment();
if (cpu == tick_do_timer_cpu) {
tick_do_timer_cpu = TICK_DO_TIMER_NONE;
ts->do_timer_last = 1;
} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
- delta = KTIME_MAX;
ts->do_timer_last = 0;
- } else if (!ts->do_timer_last) {
- delta = KTIME_MAX;
}
- /* Calculate the next expiry time */
- if (delta < (KTIME_MAX - basemono))
- expires = basemono + delta;
- else
- expires = KTIME_MAX;
-
- expires = min_t(u64, expires, next_tick);
- tick = expires;
-
	/* Skip reprogram of event if it's not changed */
if (ts->tick_stopped && (expires == ts->next_tick)) {
/* Sanity check: make sure clockevent is actually programmed */
if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
- goto out;
+ return;
WARN_ON_ONCE(1);
printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
@@ -786,7 +801,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
if (unlikely(expires == KTIME_MAX)) {
if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
hrtimer_cancel(&ts->sched_timer);
- goto out;
+ return;
}
hrtimer_set_expires(&ts->sched_timer, tick);
@@ -795,15 +810,23 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
else
tick_program_event(tick, 1);
-out:
- /*
- * Update the estimated sleep length until the next timer
- * (not only the tick).
- */
- ts->sleep_length = ktime_sub(dev->next_event, now);
- return tick;
}
+static void tick_nohz_retain_tick(struct tick_sched *ts)
+{
+ ts->timer_expires_base = 0;
+}
+
+#ifdef CONFIG_NO_HZ_FULL
+static void tick_nohz_stop_sched_tick(struct tick_sched *ts, int cpu)
+{
+ if (tick_nohz_next_event(ts, cpu))
+ tick_nohz_stop_tick(ts, cpu);
+ else
+ tick_nohz_retain_tick(ts);
+}
+#endif /* CONFIG_NO_HZ_FULL */
+
static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
/* Update jiffies first */
@@ -839,7 +862,7 @@ static void tick_nohz_full_update_tick(struct tick_sched *ts)
return;
if (can_stop_full_tick(cpu, ts))
- tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
+ tick_nohz_stop_sched_tick(ts, cpu);
else if (ts->tick_stopped)
tick_nohz_restart_sched_tick(ts, ktime_get());
#endif
@@ -865,10 +888,8 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
return false;
}
- if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
- ts->sleep_length = NSEC_PER_SEC / HZ;
+ if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
return false;
- }
if (need_resched())
return false;
@@ -903,42 +924,65 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
return true;
}
-static void __tick_nohz_idle_enter(struct tick_sched *ts)
+static void __tick_nohz_idle_stop_tick(struct tick_sched *ts)
{
- ktime_t now, expires;
+ ktime_t expires;
int cpu = smp_processor_id();
- now = tick_nohz_start_idle(ts);
+ /*
+ * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the
+ * tick timer expiration time is known already.
+ */
+ if (ts->timer_expires_base)
+ expires = ts->timer_expires;
+ else if (can_stop_idle_tick(cpu, ts))
+ expires = tick_nohz_next_event(ts, cpu);
+ else
+ return;
+
+ ts->idle_calls++;
- if (can_stop_idle_tick(cpu, ts)) {
+ if (expires > 0LL) {
int was_stopped = ts->tick_stopped;
- ts->idle_calls++;
+ tick_nohz_stop_tick(ts, cpu);
- expires = tick_nohz_stop_sched_tick(ts, now, cpu);
- if (expires > 0LL) {
- ts->idle_sleeps++;
- ts->idle_expires = expires;
- }
+ ts->idle_sleeps++;
+ ts->idle_expires = expires;
if (!was_stopped && ts->tick_stopped) {
ts->idle_jiffies = ts->last_jiffies;
nohz_balance_enter_idle(cpu);
}
+ } else {
+ tick_nohz_retain_tick(ts);
}
}
/**
- * tick_nohz_idle_enter - stop the idle tick from the idle task
+ * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
*
* When the next event is more than a tick into the future, stop the idle tick
- * Called when we start the idle loop.
- *
- * The arch is responsible of calling:
+ */
+void tick_nohz_idle_stop_tick(void)
+{
+ __tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched));
+}
+
+void tick_nohz_idle_retain_tick(void)
+{
+ tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched));
+ /*
+ * Undo the effect of get_next_timer_interrupt() called from
+ * tick_nohz_next_event().
+ */
+ timer_clear_idle();
+}
+
+/**
+ * tick_nohz_idle_enter - prepare for entering idle on the current CPU
*
- * - rcu_idle_enter() after its last use of RCU before the CPU is put
- * to sleep.
- * - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
+ * Called when we start the idle loop.
*/
void tick_nohz_idle_enter(void)
{
@@ -949,8 +993,11 @@ void tick_nohz_idle_enter(void)
local_irq_disable();
ts = this_cpu_ptr(&tick_cpu_sched);
+
+ WARN_ON_ONCE(ts->timer_expires_base);
+
ts->inidle = 1;
- __tick_nohz_idle_enter(ts);
+ tick_nohz_start_idle(ts);
local_irq_enable();
}
@@ -968,21 +1015,62 @@ void tick_nohz_irq_exit(void)
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
if (ts->inidle)
- __tick_nohz_idle_enter(ts);
+ tick_nohz_start_idle(ts);
else
tick_nohz_full_update_tick(ts);
}
/**
- * tick_nohz_get_sleep_length - return the length of the current sleep
+ * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
+ */
+bool tick_nohz_idle_got_tick(void)
+{
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
+
+ if (ts->got_idle_tick) {
+ ts->got_idle_tick = 0;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * tick_nohz_get_sleep_length - return the expected length of the current sleep
+ * @delta_next: duration until the next event if the tick cannot be stopped
*
* Called from power state control code with interrupts disabled
*/
-ktime_t tick_nohz_get_sleep_length(void)
+ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
+ struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
+ int cpu = smp_processor_id();
+ /*
+ * The idle entry time is expected to be a sufficient approximation of
+ * the current time at this point.
+ */
+ ktime_t now = ts->idle_entrytime;
+ ktime_t next_event;
+
+ WARN_ON_ONCE(!ts->inidle);
+
+ *delta_next = ktime_sub(dev->next_event, now);
+
+ if (!can_stop_idle_tick(cpu, ts))
+ return *delta_next;
+
+ next_event = tick_nohz_next_event(ts, cpu);
+ if (!next_event)
+ return *delta_next;
+
+ /*
+ * If the next highres timer to expire is earlier than next_event, the
+ * idle governor needs to know that.
+ */
+ next_event = min_t(u64, next_event,
+ hrtimer_next_event_without(&ts->sched_timer));
- return ts->sleep_length;
+ return ktime_sub(next_event, now);
}
/**
@@ -1031,6 +1119,20 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
#endif
}
+static void __tick_nohz_idle_restart_tick(struct tick_sched *ts, ktime_t now)
+{
+ tick_nohz_restart_sched_tick(ts, now);
+ tick_nohz_account_idle_ticks(ts);
+}
+
+void tick_nohz_idle_restart_tick(void)
+{
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
+
+ if (ts->tick_stopped)
+ __tick_nohz_idle_restart_tick(ts, ktime_get());
+}
+
/**
* tick_nohz_idle_exit - restart the idle tick from the idle task
*
@@ -1041,24 +1143,26 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
void tick_nohz_idle_exit(void)
{
struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
+ bool idle_active, tick_stopped;
ktime_t now;
local_irq_disable();
WARN_ON_ONCE(!ts->inidle);
+ WARN_ON_ONCE(ts->timer_expires_base);
ts->inidle = 0;
+ idle_active = ts->idle_active;
+ tick_stopped = ts->tick_stopped;
- if (ts->idle_active || ts->tick_stopped)
+ if (idle_active || tick_stopped)
now = ktime_get();
- if (ts->idle_active)
+ if (idle_active)
tick_nohz_stop_idle(ts, now);
- if (ts->tick_stopped) {
- tick_nohz_restart_sched_tick(ts, now);
- tick_nohz_account_idle_ticks(ts);
- }
+ if (tick_stopped)
+ __tick_nohz_idle_restart_tick(ts, now);
local_irq_enable();
}
@@ -1074,7 +1178,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
dev->next_event = KTIME_MAX;
- tick_sched_do_timer(now);
+ tick_sched_do_timer(ts, now);
tick_sched_handle(ts, regs);
/* No need to reprogram if we are running tickless */
@@ -1169,7 +1273,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
struct pt_regs *regs = get_irq_regs();
ktime_t now = ktime_get();
- tick_sched_do_timer(now);
+ tick_sched_do_timer(ts, now);
/*
* Do not call, when we are not in irq context and have
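With the rework above, the sleep-length query reports two values; a minimal sketch of how power-state (cpuidle) code might consume it, assuming it runs with interrupts disabled on the idle path:

	/* Sketch: governor-side use of the two-value query. */
	ktime_t delta_next;
	ktime_t duration = tick_nohz_get_sleep_length(&delta_next);
	/*
	 * duration: expected idle length if the tick is stopped;
	 * delta_next: time to the next event if the tick is kept running.
	 */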
diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h
index 954b43dbf21c..6de959a854b2 100644
--- a/kernel/time/tick-sched.h
+++ b/kernel/time/tick-sched.h
@@ -38,31 +38,37 @@ enum tick_nohz_mode {
* @idle_exittime: Time when the idle state was left
* @idle_sleeptime: Sum of the time slept in idle with sched tick stopped
* @iowait_sleeptime: Sum of the time slept in idle with sched tick stopped, with IO outstanding
- * @sleep_length: Duration of the current idle sleep
+ * @timer_expires: Anticipated timer expiration time (in case sched tick is stopped)
+ * @timer_expires_base: Base time (CLOCK_MONOTONIC) for @timer_expires
 * @do_timer_last: CPU was the last one doing do_timer before going idle
+ * @got_idle_tick: Tick timer function has run with @inidle set
*/
struct tick_sched {
struct hrtimer sched_timer;
unsigned long check_clocks;
enum tick_nohz_mode nohz_mode;
+
+ unsigned int inidle : 1;
+ unsigned int tick_stopped : 1;
+ unsigned int idle_active : 1;
+ unsigned int do_timer_last : 1;
+ unsigned int got_idle_tick : 1;
+
ktime_t last_tick;
ktime_t next_tick;
- int inidle;
- int tick_stopped;
unsigned long idle_jiffies;
unsigned long idle_calls;
unsigned long idle_sleeps;
- int idle_active;
ktime_t idle_entrytime;
ktime_t idle_waketime;
ktime_t idle_exittime;
ktime_t idle_sleeptime;
ktime_t iowait_sleeptime;
- ktime_t sleep_length;
unsigned long last_jiffies;
+ u64 timer_expires;
+ u64 timer_expires_base;
u64 next_timer;
ktime_t idle_expires;
- int do_timer_last;
atomic_t tick_dep_mask;
};
diff --git a/kernel/time/timekeeping_internal.h b/kernel/time/timekeeping_internal.h
index fdbeeb02dde9..cf5c0828ee31 100644
--- a/kernel/time/timekeeping_internal.h
+++ b/kernel/time/timekeeping_internal.h
@@ -31,6 +31,4 @@ static inline u64 clocksource_delta(u64 now, u64 last, u64 mask)
}
#endif
-extern time64_t __ktime_get_real_seconds(void);
-
#endif /* _TIMEKEEPING_INTERNAL_H */
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 2c416509b834..c79193e598f5 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -252,6 +252,8 @@ int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
ret = strncpy_from_user(
func, u64_to_user_ptr(p_event->attr.kprobe_func),
KSYM_NAME_LEN);
+ if (ret == KSYM_NAME_LEN)
+ ret = -E2BIG;
if (ret < 0)
goto out;
@@ -300,6 +302,8 @@ int perf_uprobe_init(struct perf_event *p_event, bool is_retprobe)
return -ENOMEM;
ret = strncpy_from_user(
path, u64_to_user_ptr(p_event->attr.uprobe_path), PATH_MAX);
+ if (ret == PATH_MAX)
+ return -E2BIG;
if (ret < 0)
goto out;
if (path[0] == '\0') {
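The added checks rely on strncpy_from_user() returning the number of bytes copied, which equals the destination size exactly when the source did not fit (and the copy is then not NUL-terminated). A generic sketch of the pattern, with buf/ubuf as placeholder names:

	/* Sketch: treat a completely filled buffer as truncation. */
	char buf[64];
	long n = strncpy_from_user(buf, ubuf, sizeof(buf));
	if (n == sizeof(buf))
		n = -E2BIG;
	if (n < 0)
		return n;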
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 1bda4ec95e18..9b4716bb8bb0 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1704,18 +1704,16 @@ static int create_filter(struct trace_event_call *call,
struct event_filter **filterp)
{
struct filter_parse_error *pe = NULL;
- struct event_filter *filter = NULL;
int err;
- err = create_filter_start(filter_string, set_str, &pe, &filter);
+ err = create_filter_start(filter_string, set_str, &pe, filterp);
if (err)
return err;
- err = process_preds(call, filter_string, filter, pe);
+ err = process_preds(call, filter_string, *filterp, pe);
if (err && set_str)
- append_filter_err(pe, filter);
+ append_filter_err(pe, *filterp);
- *filterp = filter;
return err;
}
@@ -1739,24 +1737,22 @@ static int create_system_filter(struct trace_subsystem_dir *dir,
struct trace_array *tr,
char *filter_str, struct event_filter **filterp)
{
- struct event_filter *filter = NULL;
struct filter_parse_error *pe = NULL;
int err;
- err = create_filter_start(filter_str, true, &pe, &filter);
+ err = create_filter_start(filter_str, true, &pe, filterp);
if (!err) {
err = process_system_preds(dir, tr, pe, filter_str);
if (!err) {
/* System filters just show a default message */
- kfree(filter->filter_string);
- filter->filter_string = NULL;
+ kfree((*filterp)->filter_string);
+ (*filterp)->filter_string = NULL;
} else {
- append_filter_err(pe, filter);
+ append_filter_err(pe, *filterp);
}
}
create_filter_finish(pe);
- *filterp = filter;
return err;
}
@@ -1764,7 +1760,7 @@ static int create_system_filter(struct trace_subsystem_dir *dir,
int apply_event_filter(struct trace_event_file *file, char *filter_string)
{
struct trace_event_call *call = file->event_call;
- struct event_filter *filter;
+ struct event_filter *filter = NULL;
int err;
if (!strcmp(strstrip(filter_string), "0")) {
@@ -1817,7 +1813,7 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
{
struct event_subsystem *system = dir->subsystem;
struct trace_array *tr = dir->tr;
- struct event_filter *filter;
+ struct event_filter *filter = NULL;
int err = 0;
mutex_lock(&event_mutex);
@@ -2024,7 +2020,7 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
char *filter_str)
{
int err;
- struct event_filter *filter;
+ struct event_filter *filter = NULL;
struct trace_event_call *call;
mutex_lock(&event_mutex);
@@ -2140,7 +2136,7 @@ static struct test_filter_data_t {
#undef YES
#undef NO
-#define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t))
+#define DATA_CNT ARRAY_SIZE(test_filter_data)
static int test_pred_visited;
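ARRAY_SIZE() is preferred over the open-coded sizeof division because the kernel's definition (roughly as sketched below) also fails to compile if handed a pointer rather than an array:

	/* Approximate definition, for illustration only: */
	#define ARRAY_SIZE(arr) \
		(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))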
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 2014f4351ae0..34fd0e0ec51d 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -151,6 +151,8 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
return;
ret = strncpy_from_user(dst, src, maxlen);
+ if (ret == maxlen)
+ dst[--ret] = '\0';
if (ret < 0) { /* Failed to fetch string */
((u8 *)get_rloc_data(dest))[0] = '\0';
@@ -446,7 +448,7 @@ static int create_trace_uprobe(int argc, char **argv)
if (ret)
goto fail_address_parse;
- inode = igrab(d_inode(path.dentry));
+ inode = igrab(d_real_inode(path.dentry));
path_put(&path);
if (!inode || !S_ISREG(inode->i_mode)) {
@@ -602,24 +604,9 @@ static int probes_seq_show(struct seq_file *m, void *v)
char c = is_ret_probe(tu) ? 'r' : 'p';
int i;
- seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
- trace_event_name(&tu->tp.call));
- seq_printf(m, " %s:", tu->filename);
-
- /* Don't print "0x (null)" when offset is 0 */
- if (tu->offset) {
- seq_printf(m, "0x%px", (void *)tu->offset);
- } else {
- switch (sizeof(void *)) {
- case 4:
- seq_printf(m, "0x00000000");
- break;
- case 8:
- default:
- seq_printf(m, "0x0000000000000000");
- break;
- }
- }
+ seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
+ trace_event_name(&tu->tp.call), tu->filename,
+ (int)(sizeof(void *) * 2), tu->offset);
for (i = 0; i < tu->tp.nr_args; i++)
seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
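The consolidated seq_printf() uses the '*' width specifier, which takes the field width from the argument list, so the offset is always printed with 2 * sizeof(void *) zero-padded hex digits. A standalone userspace illustration:

	/* Sketch: pointer-width, zero-padded hex output (userspace). */
	unsigned long off = 0x1234;
	printf("0x%0*lx\n", (int)(sizeof(void *) * 2), off);
	/* prints "0x0000000000001234" on a 64-bit build */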
diff --git a/lib/lockref.c b/lib/lockref.c
index 47169ed7e964..3d468b53d4c9 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -81,6 +81,34 @@ int lockref_get_not_zero(struct lockref *lockref)
EXPORT_SYMBOL(lockref_get_not_zero);
/**
+ * lockref_put_not_zero - Decrements count unless the count would drop to zero
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count would become zero
+ */
+int lockref_put_not_zero(struct lockref *lockref)
+{
+ int retval;
+
+ CMPXCHG_LOOP(
+ new.count--;
+ if (old.count <= 1)
+ return 0;
+ ,
+ return 1;
+ );
+
+ spin_lock(&lockref->lock);
+ retval = 0;
+ if (lockref->count > 1) {
+ lockref->count--;
+ retval = 1;
+ }
+ spin_unlock(&lockref->lock);
+ return retval;
+}
+EXPORT_SYMBOL(lockref_put_not_zero);
+
+/**
* lockref_get_or_lock - Increments count unless the count is 0 or dead
* @lockref: pointer to lockref structure
* Return: 1 if count updated successfully or 0 if count was zero
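A hypothetical caller-side sketch of the new helper: drop a reference on the lockless fast path only while the object stays referenced, otherwise fall back to the spinlock for the final put (obj and release_obj() are placeholders, not an in-tree user):

	/* Illustrative pattern only: */
	if (!lockref_put_not_zero(&obj->ref)) {
		spin_lock(&obj->ref.lock);
		if (--obj->ref.count == 0)
			release_obj(obj);	/* hypothetical teardown */
		spin_unlock(&obj->ref.lock);
	}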
diff --git a/arch/x86/purgatory/sha256.c b/lib/sha256.c
index 548ca675a14a..4400c832e2aa 100644
--- a/arch/x86/purgatory/sha256.c
+++ b/lib/sha256.c
@@ -16,9 +16,9 @@
*/
#include <linux/bitops.h>
+#include <linux/sha256.h>
+#include <linux/string.h>
#include <asm/byteorder.h>
-#include "sha256.h"
-#include "../boot/string.h"
static inline u32 Ch(u32 x, u32 y, u32 z)
{
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 47aeb04c1997..fece57566d45 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -719,7 +719,7 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
goto out_warn;
*dma_handle = __phys_to_dma(dev, phys_addr);
- if (dma_coherent_ok(dev, *dma_handle, size))
+ if (!dma_coherent_ok(dev, *dma_handle, size))
goto out_unmap;
memset(phys_to_virt(phys_addr), 0, size);
@@ -1087,6 +1087,6 @@ const struct dma_map_ops swiotlb_dma_ops = {
.unmap_sg = swiotlb_unmap_sg_attrs,
.map_page = swiotlb_map_page,
.unmap_page = swiotlb_unmap_page,
- .dma_supported = swiotlb_dma_supported,
+ .dma_supported = dma_direct_supported,
};
#endif /* CONFIG_DMA_DIRECT_OPS */
diff --git a/mm/filemap.c b/mm/filemap.c
index ab77e19ab09c..9276bdb2343c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2719,7 +2719,6 @@ out:
sb_end_pagefault(inode->i_sb);
return ret;
}
-EXPORT_SYMBOL(filemap_page_mkwrite);
const struct vm_operations_struct generic_file_vm_ops = {
.fault = filemap_fault,
@@ -2750,6 +2749,10 @@ int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
return generic_file_mmap(file, vma);
}
#else
+int filemap_page_mkwrite(struct vm_fault *vmf)
+{
+ return -ENOSYS;
+}
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
return -ENOSYS;
@@ -2760,6 +2763,7 @@ int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
}
#endif /* CONFIG_MMU */
+EXPORT_SYMBOL(filemap_page_mkwrite);
EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);
diff --git a/mm/gup.c b/mm/gup.c
index f296df6cf666..76af4cfeaf68 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1740,7 +1740,9 @@ bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
/*
* Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
- * the regular GUP. It will only return non-negative values.
+ * the regular GUP.
+ * Note a difference from get_user_pages_fast(): this always returns the
+ * number of pages pinned, 0 if no pages were pinned.
*/
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
@@ -1806,9 +1808,12 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
+ if (nr_pages <= 0)
+ return 0;
+
if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
(void __user *)start, len)))
- return 0;
+ return -EFAULT;
if (gup_fast_permitted(start, nr_pages, write)) {
local_irq_disable();
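After this change get_user_pages_fast() may return a negative errno (e.g. -EFAULT for an invalid range) as well as a short positive count, while __get_user_pages_fast() keeps its 0..nr_pages convention; callers are expected to handle both, roughly as follows:

	/* Sketch of the caller-side convention after this change. */
	int nr = get_user_pages_fast(addr, nr_pages, write, pages);
	if (nr < 0)
		return nr;		/* e.g. -EFAULT */
	if (nr < nr_pages)
		;			/* short pin: retry or fall back */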
diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
index 5c8e2abeaa15..0f44759486e2 100644
--- a/mm/gup_benchmark.c
+++ b/mm/gup_benchmark.c
@@ -23,7 +23,7 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
struct page **pages;
nr_pages = gup->size / PAGE_SIZE;
- pages = kvmalloc(sizeof(void *) * nr_pages, GFP_KERNEL);
+ pages = kvzalloc(sizeof(void *) * nr_pages, GFP_KERNEL);
if (!pages)
return -ENOMEM;
@@ -41,6 +41,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
}
nr = get_user_pages_fast(addr, nr, gup->flags & 1, pages + i);
+ if (nr <= 0)
+ break;
i += nr;
}
end_time = ktime_get();
diff --git a/mm/slab.c b/mm/slab.c
index e3a9b8e23306..2f308253c3d7 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4086,7 +4086,8 @@ next:
next_reap_node();
out:
/* Set up the next iteration */
- schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
+ schedule_delayed_work_on(smp_processor_id(), work,
+ round_jiffies_relative(REAPTIMEOUT_AC));
}
void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
diff --git a/mm/util.c b/mm/util.c
index 1fc4fa7576f7..45fc3169e7b0 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -297,8 +297,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
* back to the regular GUP.
- * If the architecture not support this function, simply return with no
- * page pinned
+ * Note a difference from get_user_pages_fast(): this always returns the
+ * number of pages pinned, 0 if no pages were pinned.
+ * If the architecture does not support this function, simply return with no
+ * pages pinned.
*/
int __weak __get_user_pages_fast(unsigned long start,
int nr_pages, int write, struct page **pages)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index a8772a978224..9c169bb2444d 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -781,8 +781,14 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu)
tunnel->encap.type == TUNNEL_ENCAP_NONE) {
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ } else {
+ dev->features &= ~NETIF_F_GSO_SOFTWARE;
+ dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
}
dev->features |= NETIF_F_LLTX;
+ } else {
+ dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
+ dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
}
}
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 7523ddb2566b..0e5edd0c7926 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -28,10 +28,9 @@ obj-$(CONFIG_NF_REJECT_IPV4) += nf_reject_ipv4.o
obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o
obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o
-nf_nat_snmp_basic-y := nf_nat_snmp_basic-asn1.o nf_nat_snmp_basic_main.o
-$(obj)/nf_nat_snmp_basic_main.o: $(obj)/nf_nat_snmp_basic-asn1.h
+nf_nat_snmp_basic-y := nf_nat_snmp_basic.asn1.o nf_nat_snmp_basic_main.o
+$(obj)/nf_nat_snmp_basic_main.o: $(obj)/nf_nat_snmp_basic.asn1.h
obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
-clean-files := nf_nat_snmp_basic-asn1.c nf_nat_snmp_basic-asn1.h
obj-$(CONFIG_NF_NAT_MASQUERADE_IPV4) += nf_nat_masquerade_ipv4.o
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
index b6e277093e7e..ac110c1d55b5 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
@@ -54,7 +54,7 @@
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <linux/netfilter/nf_conntrack_snmp.h>
-#include "nf_nat_snmp_basic-asn1.h"
+#include "nf_nat_snmp_basic.asn1.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 14b67dfacc4b..0fbd3ee26165 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -335,26 +335,6 @@ err_tlock:
}
EXPORT_SYMBOL_GPL(l2tp_session_register);
-/* Lookup a tunnel by id
- */
-struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id)
-{
- struct l2tp_tunnel *tunnel;
- struct l2tp_net *pn = l2tp_pernet(net);
-
- rcu_read_lock_bh();
- list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
- if (tunnel->tunnel_id == tunnel_id) {
- rcu_read_unlock_bh();
- return tunnel;
- }
- }
- rcu_read_unlock_bh();
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
-
struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth)
{
struct l2tp_net *pn = l2tp_pernet(net);
@@ -1436,74 +1416,11 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
{
struct l2tp_tunnel *tunnel = NULL;
int err;
- struct socket *sock = NULL;
- struct sock *sk = NULL;
- struct l2tp_net *pn;
enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
- /* Get the tunnel socket from the fd, which was opened by
- * the userspace L2TP daemon. If not specified, create a
- * kernel socket.
- */
- if (fd < 0) {
- err = l2tp_tunnel_sock_create(net, tunnel_id, peer_tunnel_id,
- cfg, &sock);
- if (err < 0)
- goto err;
- } else {
- sock = sockfd_lookup(fd, &err);
- if (!sock) {
- pr_err("tunl %u: sockfd_lookup(fd=%d) returned %d\n",
- tunnel_id, fd, err);
- err = -EBADF;
- goto err;
- }
-
- /* Reject namespace mismatches */
- if (!net_eq(sock_net(sock->sk), net)) {
- pr_err("tunl %u: netns mismatch\n", tunnel_id);
- err = -EINVAL;
- goto err;
- }
- }
-
- sk = sock->sk;
-
if (cfg != NULL)
encap = cfg->encap;
- /* Quick sanity checks */
- err = -EPROTONOSUPPORT;
- if (sk->sk_type != SOCK_DGRAM) {
- pr_debug("tunl %hu: fd %d wrong socket type\n",
- tunnel_id, fd);
- goto err;
- }
- switch (encap) {
- case L2TP_ENCAPTYPE_UDP:
- if (sk->sk_protocol != IPPROTO_UDP) {
- pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
- tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
- goto err;
- }
- break;
- case L2TP_ENCAPTYPE_IP:
- if (sk->sk_protocol != IPPROTO_L2TP) {
- pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
- tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
- goto err;
- }
- break;
- }
-
- /* Check if this socket has already been prepped */
- tunnel = l2tp_tunnel(sk);
- if (tunnel != NULL) {
- /* This socket has already been prepped */
- err = -EBUSY;
- goto err;
- }
-
tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
if (tunnel == NULL) {
err = -ENOMEM;
@@ -1520,72 +1437,126 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
rwlock_init(&tunnel->hlist_lock);
tunnel->acpt_newsess = true;
- /* The net we belong to */
- tunnel->l2tp_net = net;
- pn = l2tp_pernet(net);
-
if (cfg != NULL)
tunnel->debug = cfg->debug;
- /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
tunnel->encap = encap;
- if (encap == L2TP_ENCAPTYPE_UDP) {
- struct udp_tunnel_sock_cfg udp_cfg = { };
-
- udp_cfg.sk_user_data = tunnel;
- udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
- udp_cfg.encap_rcv = l2tp_udp_encap_recv;
- udp_cfg.encap_destroy = l2tp_udp_encap_destroy;
-
- setup_udp_tunnel_sock(net, sock, &udp_cfg);
- } else {
- sk->sk_user_data = tunnel;
- }
- /* Bump the reference count. The tunnel context is deleted
- * only when this drops to zero. A reference is also held on
- * the tunnel socket to ensure that it is not released while
- * the tunnel is extant. Must be done before sk_destruct is
- * set.
- */
refcount_set(&tunnel->ref_count, 1);
- sock_hold(sk);
- tunnel->sock = sk;
tunnel->fd = fd;
- /* Hook on the tunnel socket destructor so that we can cleanup
- * if the tunnel socket goes away.
- */
- tunnel->old_sk_destruct = sk->sk_destruct;
- sk->sk_destruct = &l2tp_tunnel_destruct;
- lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
-
- sk->sk_allocation = GFP_ATOMIC;
-
/* Init delete workqueue struct */
INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
- /* Add tunnel to our list */
INIT_LIST_HEAD(&tunnel->list);
- spin_lock_bh(&pn->l2tp_tunnel_list_lock);
- list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
- spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
err = 0;
err:
if (tunnelp)
*tunnelp = tunnel;
- /* If tunnel's socket was created by the kernel, it doesn't
- * have a file.
- */
- if (sock && sock->file)
- sockfd_put(sock);
-
return err;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
+static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
+ enum l2tp_encap_type encap)
+{
+ if (!net_eq(sock_net(sk), net))
+ return -EINVAL;
+
+ if (sk->sk_type != SOCK_DGRAM)
+ return -EPROTONOSUPPORT;
+
+ if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
+ (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
+ return -EPROTONOSUPPORT;
+
+ if (sk->sk_user_data)
+ return -EBUSY;
+
+ return 0;
+}
+
+int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+ struct l2tp_tunnel_cfg *cfg)
+{
+ struct l2tp_tunnel *tunnel_walk;
+ struct l2tp_net *pn;
+ struct socket *sock;
+ struct sock *sk;
+ int ret;
+
+ if (tunnel->fd < 0) {
+ ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
+ tunnel->peer_tunnel_id, cfg,
+ &sock);
+ if (ret < 0)
+ goto err;
+ } else {
+ sock = sockfd_lookup(tunnel->fd, &ret);
+ if (!sock)
+ goto err;
+
+ ret = l2tp_validate_socket(sock->sk, net, tunnel->encap);
+ if (ret < 0)
+ goto err_sock;
+ }
+
+ sk = sock->sk;
+
+ sock_hold(sk);
+ tunnel->sock = sk;
+ tunnel->l2tp_net = net;
+
+ pn = l2tp_pernet(net);
+
+ spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+ list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) {
+ if (tunnel_walk->tunnel_id == tunnel->tunnel_id) {
+ spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+
+ ret = -EEXIST;
+ goto err_sock;
+ }
+ }
+ list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
+ spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+
+ if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
+ struct udp_tunnel_sock_cfg udp_cfg = {
+ .sk_user_data = tunnel,
+ .encap_type = UDP_ENCAP_L2TPINUDP,
+ .encap_rcv = l2tp_udp_encap_recv,
+ .encap_destroy = l2tp_udp_encap_destroy,
+ };
+
+ setup_udp_tunnel_sock(net, sock, &udp_cfg);
+ } else {
+ sk->sk_user_data = tunnel;
+ }
+
+ tunnel->old_sk_destruct = sk->sk_destruct;
+ sk->sk_destruct = &l2tp_tunnel_destruct;
+ lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class,
+ "l2tp_sock");
+ sk->sk_allocation = GFP_ATOMIC;
+
+ if (tunnel->fd >= 0)
+ sockfd_put(sock);
+
+ return 0;
+
+err_sock:
+ if (tunnel->fd < 0)
+ sock_release(sock);
+ else
+ sockfd_put(sock);
+err:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
+
/* This function is used by the netlink TUNNEL_DELETE command.
*/
void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
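Tunnel setup is now a two-step sequence: l2tp_tunnel_create() allocates an unpublished tunnel object, and l2tp_tunnel_register() validates the socket and inserts the tunnel into the per-net list under the list lock, so duplicate IDs are rejected atomically. The netlink and pppol2tp hunks below follow this pattern; condensed:

	/* Sketch of the new create + register sequence. */
	err = l2tp_tunnel_create(net, fd, version, tunnel_id,
				 peer_tunnel_id, &cfg, &tunnel);
	if (err < 0)
		return err;

	err = l2tp_tunnel_register(tunnel, net, &cfg);
	if (err < 0) {
		kfree(tunnel);	/* never published, plain kfree is safe */
		return err;
	}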
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 2718d0b284d0..ba33cbec71eb 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -220,12 +220,14 @@ struct l2tp_session *l2tp_session_get(const struct net *net,
struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth);
struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
const char *ifname);
-struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id);
struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth);
int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
struct l2tp_tunnel **tunnelp);
+int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
+ struct l2tp_tunnel_cfg *cfg);
+
void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
struct l2tp_session *l2tp_session_create(int priv_size,
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index e7ea9c4b89ff..b05dbd9ffcb2 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -236,12 +236,6 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
if (info->attrs[L2TP_ATTR_DEBUG])
cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
- tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel != NULL) {
- ret = -EEXIST;
- goto out;
- }
-
ret = -EINVAL;
switch (cfg.encap) {
case L2TP_ENCAPTYPE_UDP:
@@ -251,9 +245,19 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
break;
}
- if (ret >= 0)
- ret = l2tp_tunnel_notify(&l2tp_nl_family, info,
- tunnel, L2TP_CMD_TUNNEL_CREATE);
+ if (ret < 0)
+ goto out;
+
+ l2tp_tunnel_inc_refcount(tunnel);
+ ret = l2tp_tunnel_register(tunnel, net, &cfg);
+ if (ret < 0) {
+ kfree(tunnel);
+ goto out;
+ }
+ ret = l2tp_tunnel_notify(&l2tp_nl_family, info, tunnel,
+ L2TP_CMD_TUNNEL_CREATE);
+ l2tp_tunnel_dec_refcount(tunnel);
+
out:
return ret;
}
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index d6deca11da19..896bbca9bdaa 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -698,6 +698,15 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel);
if (error < 0)
goto end;
+
+ l2tp_tunnel_inc_refcount(tunnel);
+ error = l2tp_tunnel_register(tunnel, sock_net(sk),
+ &tcfg);
+ if (error < 0) {
+ kfree(tunnel);
+ goto end;
+ }
+ drop_tunnel = true;
}
} else {
/* Error if we can't find the tunnel */
diff --git a/net/rds/send.c b/net/rds/send.c
index acad04243b41..94c7f74909be 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Oracle. All rights reserved.
+ * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -1017,10 +1017,15 @@ static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
if (conn->c_npaths == 0 && hash != 0) {
rds_send_ping(conn, 0);
- if (conn->c_npaths == 0) {
- wait_event_interruptible(conn->c_hs_waitq,
- (conn->c_npaths != 0));
- }
+ /* The underlying connection is not up yet. Need to wait
+ * until it is up to be sure that the non-zero c_path can be
+ * used. But if we are interrupted, we have to use the zero
+ * c_path in case the connection ends up being non-MP capable.
+ */
+ if (conn->c_npaths == 0)
+ if (wait_event_interruptible(conn->c_hs_waitq,
+ conn->c_npaths != 0))
+ hash = 0;
if (conn->c_npaths == 1)
hash = 0;
}
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 806395687bb6..c2266f387213 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1887,7 +1887,7 @@ call_connect_status(struct rpc_task *task)
dprint_status(task);
- trace_rpc_connect_status(task, status);
+ trace_rpc_connect_status(task);
task->tk_status = 0;
switch (status) {
case -ECONNREFUSED:
@@ -2014,6 +2014,9 @@ call_transmit_status(struct rpc_task *task)
case -EPERM:
if (RPC_IS_SOFTCONN(task)) {
xprt_end_transmit(task);
+ if (!task->tk_msg.rpc_proc->p_proc)
+ trace_xprt_ping(task->tk_xprt,
+ task->tk_status);
rpc_exit(task, task->tk_status);
break;
}
@@ -2112,6 +2115,9 @@ call_status(struct rpc_task *task)
struct rpc_rqst *req = task->tk_rqstp;
int status;
+ if (!task->tk_msg.rpc_proc->p_proc)
+ trace_xprt_ping(task->tk_xprt, task->tk_status);
+
if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
task->tk_status = req->rq_reply_bytes_recvd;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index d9db2eab3a8d..3fe5d60ab0e2 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -276,7 +276,7 @@ static void rpc_set_active(struct rpc_task *task)
{
rpc_task_set_debuginfo(task);
set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
- trace_rpc_task_begin(task->tk_client, task, NULL);
+ trace_rpc_task_begin(task, NULL);
}
/*
@@ -291,7 +291,7 @@ static int rpc_complete_task(struct rpc_task *task)
unsigned long flags;
int ret;
- trace_rpc_task_complete(task->tk_client, task, NULL);
+ trace_rpc_task_complete(task, NULL);
spin_lock_irqsave(&wq->lock, flags);
clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
@@ -358,7 +358,7 @@ static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
task->tk_pid, rpc_qname(q), jiffies);
- trace_rpc_task_sleep(task->tk_client, task, q);
+ trace_rpc_task_sleep(task, q);
__rpc_add_wait_queue(q, task, queue_priority);
@@ -428,7 +428,7 @@ static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
return;
}
- trace_rpc_task_wakeup(task->tk_client, task, queue);
+ trace_rpc_task_wakeup(task, queue);
__rpc_remove_wait_queue(queue, task);
@@ -780,7 +780,7 @@ static void __rpc_execute(struct rpc_task *task)
}
if (!do_action)
break;
- trace_rpc_task_run_action(task->tk_client, task, do_action);
+ trace_rpc_task_run_action(task, do_action);
do_action(task);
/*
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 1e671333c3d5..f68aa46c9dd7 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -24,6 +24,8 @@
#include <linux/sunrpc/metrics.h>
#include <linux/rcupdate.h>
+#include <trace/events/sunrpc.h>
+
#include "netns.h"
#define RPCDBG_FACILITY RPCDBG_MISC
@@ -148,7 +150,7 @@ void rpc_count_iostats_metrics(const struct rpc_task *task,
struct rpc_iostats *op_metrics)
{
struct rpc_rqst *req = task->tk_rqstp;
- ktime_t delta, now;
+ ktime_t backlog, execute, now;
if (!op_metrics || !req)
return;
@@ -164,16 +166,20 @@ void rpc_count_iostats_metrics(const struct rpc_task *task,
op_metrics->om_bytes_sent += req->rq_xmit_bytes_sent;
op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd;
+ backlog = 0;
if (ktime_to_ns(req->rq_xtime)) {
- delta = ktime_sub(req->rq_xtime, task->tk_start);
- op_metrics->om_queue = ktime_add(op_metrics->om_queue, delta);
+ backlog = ktime_sub(req->rq_xtime, task->tk_start);
+ op_metrics->om_queue = ktime_add(op_metrics->om_queue, backlog);
}
+
op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, req->rq_rtt);
- delta = ktime_sub(now, task->tk_start);
- op_metrics->om_execute = ktime_add(op_metrics->om_execute, delta);
+ execute = ktime_sub(now, task->tk_start);
+ op_metrics->om_execute = ktime_add(op_metrics->om_execute, execute);
spin_unlock(&op_metrics->om_lock);
+
+ trace_rpc_stats_latency(req->rq_task, backlog, req->rq_rtt, execute);
}
EXPORT_SYMBOL_GPL(rpc_count_iostats_metrics);
diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h
index f2b7cb540e61..09a0315ea77b 100644
--- a/net/sunrpc/sunrpc.h
+++ b/net/sunrpc/sunrpc.h
@@ -37,12 +37,6 @@ struct rpc_buffer {
char data[];
};
-static inline int rpc_reply_expected(struct rpc_task *task)
-{
- return (task->tk_msg.rpc_proc != NULL) &&
- (task->tk_msg.rpc_proc->p_decode != NULL);
-}
-
static inline int sock_is_loopback(struct sock *sk)
{
struct dst_entry *dst;
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index e34f4ee7f2b6..30afbd236656 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1519,6 +1519,88 @@ out:
EXPORT_SYMBOL_GPL(xdr_process_buf);
/**
+ * xdr_stream_decode_opaque - Decode variable length opaque
+ * @xdr: pointer to xdr_stream
+ * @ptr: location to store opaque data
+ * @size: size of storage buffer @ptr
+ *
+ * Return values:
+ * On success, returns size of object stored in *@ptr
+ * %-EBADMSG on XDR buffer overflow
+ * %-EMSGSIZE on overflow of storage buffer @ptr
+ */
+ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size)
+{
+ ssize_t ret;
+ void *p;
+
+ ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
+ if (ret <= 0)
+ return ret;
+ memcpy(ptr, p, ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque);
+
+/**
+ * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
+ * @xdr: pointer to xdr_stream
+ * @ptr: location to store pointer to opaque data
+ * @maxlen: maximum acceptable object size
+ * @gfp_flags: GFP mask to use
+ *
+ * Return values:
+ * On success, returns size of object stored in *@ptr
+ * %-EBADMSG on XDR buffer overflow
+ * %-EMSGSIZE if the size of the object would exceed @maxlen
+ * %-ENOMEM on memory allocation failure
+ */
+ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
+ size_t maxlen, gfp_t gfp_flags)
+{
+ ssize_t ret;
+ void *p;
+
+ ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
+ if (ret > 0) {
+ *ptr = kmemdup(p, ret, gfp_flags);
+ if (*ptr != NULL)
+ return ret;
+ ret = -ENOMEM;
+ }
+ *ptr = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup);
+
+/**
+ * xdr_stream_decode_string - Decode variable length string
+ * @xdr: pointer to xdr_stream
+ * @str: location to store string
+ * @size: size of storage buffer @str
+ *
+ * Return values:
+ * On success, returns length of NUL-terminated string stored in *@str
+ * %-EBADMSG on XDR buffer overflow
+ * %-EMSGSIZE on overflow of storage buffer @str
+ */
+ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size)
+{
+ ssize_t ret;
+ void *p;
+
+ ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
+ if (ret > 0) {
+ memcpy(str, p, ret);
+ str[ret] = '\0';
+ return strlen(str);
+ }
+ *str = '\0';
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xdr_stream_decode_string);
+
+/**
* xdr_stream_decode_string_dup - Decode and duplicate variable length string
* @xdr: pointer to xdr_stream
* @str: location to store pointer to string
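A minimal sketch of decoding a bounded XDR string with the new helper; the size argument caps the accepted object and -EMSGSIZE is returned when the on-the-wire string is larger (one byte is reserved here for the terminating NUL):

	/* Sketch: decode an XDR string into a fixed-size buffer. */
	char name[64];
	ssize_t len = xdr_stream_decode_string(xdr, name, sizeof(name) - 1);
	if (len < 0)
		return len;	/* -EBADMSG or -EMSGSIZE */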
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 8f0ad4f268da..70f005044f06 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -826,6 +826,7 @@ static void xprt_connect_status(struct rpc_task *task)
* @xprt: transport on which the original request was transmitted
* @xid: RPC XID of incoming reply
*
+ * Caller holds xprt->recv_lock.
*/
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
@@ -834,6 +835,7 @@ struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
list_for_each_entry(entry, &xprt->recv, rq_list)
if (entry->rq_xid == xid) {
trace_xprt_lookup_rqst(xprt, xid, 0);
+ entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
return entry;
}
@@ -889,7 +891,13 @@ __must_hold(&req->rq_xprt->recv_lock)
}
}
-static void xprt_update_rtt(struct rpc_task *task)
+/**
+ * xprt_update_rtt - Update RPC RTT statistics
+ * @task: RPC request that recently completed
+ *
+ * Caller holds xprt->recv_lock.
+ */
+void xprt_update_rtt(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_rtt *rtt = task->tk_client->cl_rtt;
@@ -902,13 +910,14 @@ static void xprt_update_rtt(struct rpc_task *task)
rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
}
}
+EXPORT_SYMBOL_GPL(xprt_update_rtt);
/**
* xprt_complete_rqst - called when reply processing is complete
* @task: RPC request that recently completed
* @copied: actual number of bytes received from the transport
*
- * Caller holds transport lock.
+ * Caller holds xprt->recv_lock.
*/
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
@@ -920,9 +929,6 @@ void xprt_complete_rqst(struct rpc_task *task, int copied)
trace_xprt_complete_rqst(xprt, req->rq_xid, copied);
xprt->stat.recvs++;
- req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
- if (xprt->ops->timer != NULL)
- xprt_update_rtt(task);
list_del_init(&req->rq_list);
req->rq_private_buf.len = copied;
@@ -1003,7 +1009,7 @@ void xprt_transmit(struct rpc_task *task)
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
unsigned int connect_cookie;
- int status, numreqs;
+ int status;
dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
@@ -1027,7 +1033,6 @@ void xprt_transmit(struct rpc_task *task)
return;
connect_cookie = xprt->connect_cookie;
- req->rq_xtime = ktime_get();
status = xprt->ops->send_request(task);
trace_xprt_transmit(xprt, req->rq_xid, status);
if (status != 0) {
@@ -1042,9 +1047,6 @@ void xprt_transmit(struct rpc_task *task)
xprt->ops->set_retrans_timeout(task);
- numreqs = atomic_read(&xprt->num_reqs);
- if (numreqs > xprt->stat.max_slots)
- xprt->stat.max_slots = numreqs;
xprt->stat.sends++;
xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
xprt->stat.bklog_u += xprt->backlog.qlen;
@@ -1106,14 +1108,15 @@ static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
struct rpc_rqst *req = ERR_PTR(-EAGAIN);
- if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
+ if (xprt->num_reqs >= xprt->max_reqs)
goto out;
+ ++xprt->num_reqs;
spin_unlock(&xprt->reserve_lock);
req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
spin_lock(&xprt->reserve_lock);
if (req != NULL)
goto out;
- atomic_dec(&xprt->num_reqs);
+ --xprt->num_reqs;
req = ERR_PTR(-ENOMEM);
out:
return req;
@@ -1121,7 +1124,8 @@ out:
static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
- if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
+ if (xprt->num_reqs > xprt->min_reqs) {
+ --xprt->num_reqs;
kfree(req);
return true;
}
@@ -1157,6 +1161,8 @@ void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
spin_unlock(&xprt->reserve_lock);
return;
out_init_req:
+ xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
+ xprt->num_reqs);
task->tk_status = 0;
task->tk_rqstp = req;
xprt_request_init(task, xprt);
@@ -1224,7 +1230,7 @@ struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
else
xprt->max_reqs = num_prealloc;
xprt->min_reqs = num_prealloc;
- atomic_set(&xprt->num_reqs, num_prealloc);
+ xprt->num_reqs = num_prealloc;
return xprt;
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index ed1a4a3065ee..47ebac949769 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -44,13 +44,6 @@ static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
if (IS_ERR(req))
return PTR_ERR(req);
- rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE,
- DMA_TO_DEVICE, GFP_KERNEL);
- if (IS_ERR(rb))
- goto out_fail;
- req->rl_rdmabuf = rb;
- xdr_buf_init(&req->rl_hdrbuf, rb->rg_base, rdmab_length(rb));
-
size = r_xprt->rx_data.inline_rsize;
rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
if (IS_ERR(rb))
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index d5f95bb39300..5cc68a824f45 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -191,7 +191,7 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
mr = rpcrdma_mr_get(r_xprt);
if (!mr)
- return ERR_PTR(-ENOBUFS);
+ return ERR_PTR(-EAGAIN);
pageoff = offset_in_page(seg1->mr_offset);
seg1->mr_offset -= pageoff; /* start of page */
@@ -251,6 +251,16 @@ out_maperr:
return ERR_PTR(-EIO);
}
+/* Post Send WR containing the RPC Call message.
+ */
+static int
+fmr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
+{
+ struct ib_send_wr *bad_wr;
+
+ return ib_post_send(ia->ri_id->qp, &req->rl_sendctx->sc_wr, &bad_wr);
+}
+
/* Invalidate all memory regions that were registered for "req".
*
* Sleeps until it is safe for the host CPU to access the
@@ -305,6 +315,7 @@ out_reset:
const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
.ro_map = fmr_op_map,
+ .ro_send = fmr_op_send,
.ro_unmap_sync = fmr_op_unmap_sync,
.ro_recover_mr = fmr_op_recover_mr,
.ro_open = fmr_op_open,
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 90f688f19783..c5743a0960be 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -357,8 +357,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
struct rpcrdma_mr *mr;
struct ib_mr *ibmr;
struct ib_reg_wr *reg_wr;
- struct ib_send_wr *bad_wr;
- int rc, i, n;
+ int i, n;
u8 key;
mr = NULL;
@@ -367,7 +366,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
rpcrdma_mr_defer_recovery(mr);
mr = rpcrdma_mr_get(r_xprt);
if (!mr)
- return ERR_PTR(-ENOBUFS);
+ return ERR_PTR(-EAGAIN);
} while (mr->frwr.fr_state != FRWR_IS_INVALID);
frwr = &mr->frwr;
frwr->fr_state = FRWR_IS_VALID;
@@ -407,22 +406,12 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
ib_update_fast_reg_key(ibmr, ++key);
reg_wr = &frwr->fr_regwr;
- reg_wr->wr.next = NULL;
- reg_wr->wr.opcode = IB_WR_REG_MR;
- frwr->fr_cqe.done = frwr_wc_fastreg;
- reg_wr->wr.wr_cqe = &frwr->fr_cqe;
- reg_wr->wr.num_sge = 0;
- reg_wr->wr.send_flags = 0;
reg_wr->mr = ibmr;
reg_wr->key = ibmr->rkey;
reg_wr->access = writing ?
IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
IB_ACCESS_REMOTE_READ;
- rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
- if (rc)
- goto out_senderr;
-
mr->mr_handle = ibmr->rkey;
mr->mr_length = ibmr->length;
mr->mr_offset = ibmr->iova;
@@ -442,11 +431,40 @@ out_mapmr_err:
frwr->fr_mr, n, mr->mr_nents);
rpcrdma_mr_defer_recovery(mr);
return ERR_PTR(-EIO);
+}
-out_senderr:
- pr_err("rpcrdma: FRWR registration ib_post_send returned %i\n", rc);
- rpcrdma_mr_defer_recovery(mr);
- return ERR_PTR(-ENOTCONN);
+/* Post Send WR containing the RPC Call message.
+ *
+ * For FRMR, chain any FastReg WRs to the Send WR. Only a
+ * single ib_post_send call is needed to register memory
+ * and then post the Send WR.
+ */
+static int
+frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
+{
+ struct ib_send_wr *post_wr, *bad_wr;
+ struct rpcrdma_mr *mr;
+
+ post_wr = &req->rl_sendctx->sc_wr;
+ list_for_each_entry(mr, &req->rl_registered, mr_list) {
+ struct rpcrdma_frwr *frwr;
+
+ frwr = &mr->frwr;
+
+ frwr->fr_cqe.done = frwr_wc_fastreg;
+ frwr->fr_regwr.wr.next = post_wr;
+ frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
+ frwr->fr_regwr.wr.num_sge = 0;
+ frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
+ frwr->fr_regwr.wr.send_flags = 0;
+
+ post_wr = &frwr->fr_regwr.wr;
+ }
+
+ /* If ib_post_send fails, the next ->send_request for
+ * @req will queue these MWs for recovery.
+ */
+ return ib_post_send(ia->ri_id->qp, post_wr, &bad_wr);
}
/* Handle a remotely invalidated mr on the @mrs list
@@ -561,6 +579,7 @@ reset_mrs:
const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
.ro_map = frwr_op_map,
+ .ro_send = frwr_op_send,
.ro_reminv = frwr_op_reminv,
.ro_unmap_sync = frwr_op_unmap_sync,
.ro_recover_mr = frwr_op_recover_mr,
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index f0855a959a27..e8adad33d0bb 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -365,7 +365,7 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
false, &mr);
if (IS_ERR(seg))
- return PTR_ERR(seg);
+ goto out_maperr;
rpcrdma_mr_push(mr, &req->rl_registered);
if (encode_read_segment(xdr, mr, pos) < 0)
@@ -377,6 +377,11 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
} while (nsegs);
return 0;
+
+out_maperr:
+ if (PTR_ERR(seg) == -EAGAIN)
+ xprt_wait_for_buffer_space(rqst->rq_task, NULL);
+ return PTR_ERR(seg);
}
/* Register and XDR encode the Write list. Supports encoding a list
@@ -423,7 +428,7 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
true, &mr);
if (IS_ERR(seg))
- return PTR_ERR(seg);
+ goto out_maperr;
rpcrdma_mr_push(mr, &req->rl_registered);
if (encode_rdma_segment(xdr, mr) < 0)
@@ -440,6 +445,11 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
*segcount = cpu_to_be32(nchunks);
return 0;
+
+out_maperr:
+ if (PTR_ERR(seg) == -EAGAIN)
+ xprt_wait_for_buffer_space(rqst->rq_task, NULL);
+ return PTR_ERR(seg);
}
/* Register and XDR encode the Reply chunk. Supports encoding an array
@@ -481,7 +491,7 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
true, &mr);
if (IS_ERR(seg))
- return PTR_ERR(seg);
+ goto out_maperr;
rpcrdma_mr_push(mr, &req->rl_registered);
if (encode_rdma_segment(xdr, mr) < 0)
@@ -498,6 +508,11 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
*segcount = cpu_to_be32(nchunks);
return 0;
+
+out_maperr:
+ if (PTR_ERR(seg) == -EAGAIN)
+ xprt_wait_for_buffer_space(rqst->rq_task, NULL);
+ return PTR_ERR(seg);
}
/**
@@ -724,8 +739,8 @@ rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
* Returns:
* %0 if the RPC was sent successfully,
* %-ENOTCONN if the connection was lost,
- * %-EAGAIN if not enough pages are available for on-demand reply buffer,
- * %-ENOBUFS if no MRs are available to register chunks,
+ * %-EAGAIN if the caller should call again with the same arguments,
+ * %-ENOBUFS if the caller should call again after a delay,
* %-EMSGSIZE if the transport header is too small,
* %-EIO if a permanent problem occurred while marshaling.
*/
@@ -868,10 +883,7 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
return 0;
out_err:
- if (ret != -ENOBUFS) {
- pr_err("rpcrdma: header marshaling failed (%d)\n", ret);
- r_xprt->rx_stats.failed_marshal_count++;
- }
+ r_xprt->rx_stats.failed_marshal_count++;
return ret;
}
@@ -1366,7 +1378,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
- queue_work_on(req->rl_cpu, rpcrdma_receive_wq, &rep->rr_work);
+ queue_work(rpcrdma_receive_wq, &rep->rr_work);
return;
out_badstatus:
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 4b1ecfe979cf..cc1aad325496 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -52,7 +52,6 @@
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sunrpc/addr.h>
-#include <linux/smp.h>
#include "xprt_rdma.h"
@@ -237,8 +236,6 @@ rpcrdma_connect_worker(struct work_struct *work)
struct rpc_xprt *xprt = &r_xprt->rx_xprt;
spin_lock_bh(&xprt->transport_lock);
- if (++xprt->connect_cookie == 0) /* maintain a reserved value */
- ++xprt->connect_cookie;
if (ep->rep_connected > 0) {
if (!xprt_test_and_set_connected(xprt))
xprt_wake_pending_tasks(xprt, 0);
@@ -540,29 +537,6 @@ xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
}
}
-/* Allocate a fixed-size buffer in which to construct and send the
- * RPC-over-RDMA header for this request.
- */
-static bool
-rpcrdma_get_rdmabuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
- gfp_t flags)
-{
- size_t size = RPCRDMA_HDRBUF_SIZE;
- struct rpcrdma_regbuf *rb;
-
- if (req->rl_rdmabuf)
- return true;
-
- rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, flags);
- if (IS_ERR(rb))
- return false;
-
- r_xprt->rx_stats.hardway_register_count += size;
- req->rl_rdmabuf = rb;
- xdr_buf_init(&req->rl_hdrbuf, rb->rg_base, rdmab_length(rb));
- return true;
-}
-
static bool
rpcrdma_get_sendbuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
size_t size, gfp_t flags)
@@ -644,15 +618,11 @@ xprt_rdma_allocate(struct rpc_task *task)
if (RPC_IS_SWAPPER(task))
flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
- if (!rpcrdma_get_rdmabuf(r_xprt, req, flags))
- goto out_fail;
if (!rpcrdma_get_sendbuf(r_xprt, req, rqst->rq_callsize, flags))
goto out_fail;
if (!rpcrdma_get_recvbuf(r_xprt, req, rqst->rq_rcvsize, flags))
goto out_fail;
- req->rl_cpu = smp_processor_id();
- req->rl_connect_cookie = 0; /* our reserved value */
rpcrdma_set_xprtdata(rqst, req);
rqst->rq_buffer = req->rl_sendbuf->rg_base;
rqst->rq_rbuffer = req->rl_recvbuf->rg_base;
@@ -694,7 +664,8 @@ xprt_rdma_free(struct rpc_task *task)
* Returns:
* %0 if the RPC message has been sent
* %-ENOTCONN if the caller should reconnect and call again
- * %-ENOBUFS if the caller should call again later
+ * %-EAGAIN if the caller should call again
+ * %-ENOBUFS if the caller should call again after a delay
* %-EIO if a permanent error occurred and the request was not
* sent. Do not try to send this message again.
*/
@@ -723,9 +694,9 @@ xprt_rdma_send_request(struct rpc_task *task)
rpcrdma_recv_buffer_get(req);
/* Must suppress retransmit to maintain credits */
- if (req->rl_connect_cookie == xprt->connect_cookie)
+ if (rqst->rq_connect_cookie == xprt->connect_cookie)
goto drop_connection;
- req->rl_connect_cookie = xprt->connect_cookie;
+ rqst->rq_xtime = ktime_get();
__set_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);
if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
@@ -733,6 +704,12 @@ xprt_rdma_send_request(struct rpc_task *task)
rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;
rqst->rq_bytes_sent = 0;
+
+ /* An RPC with no reply will throw off credit accounting,
+ * so drop the connection to reset the credit grant.
+ */
+ if (!rpc_reply_expected(task))
+ goto drop_connection;
return 0;
failed_marshal:
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index e6f84a6434a0..fe5eaca2d197 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -250,11 +250,11 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
wait_for_completion(&ia->ri_remove_done);
ia->ri_id = NULL;
- ia->ri_pd = NULL;
ia->ri_device = NULL;
/* Return 1 to ensure the core destroys the id. */
return 1;
case RDMA_CM_EVENT_ESTABLISHED:
+ ++xprt->rx_xprt.connect_cookie;
connstate = 1;
rpcrdma_update_connect_private(xprt, &event->param.conn);
goto connected;
@@ -273,6 +273,7 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
connstate = -EAGAIN;
goto connected;
case RDMA_CM_EVENT_DISCONNECTED:
+ ++xprt->rx_xprt.connect_cookie;
connstate = -ECONNABORTED;
connected:
xprt->rx_buf.rb_credits = 1;
@@ -445,7 +446,9 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
ia->ri_id->qp = NULL;
}
ib_free_cq(ep->rep_attr.recv_cq);
+ ep->rep_attr.recv_cq = NULL;
ib_free_cq(ep->rep_attr.send_cq);
+ ep->rep_attr.send_cq = NULL;
/* The ULP is responsible for ensuring all DMA
* mappings and MRs are gone.
@@ -458,6 +461,8 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
rpcrdma_dma_unmap_regbuf(req->rl_recvbuf);
}
rpcrdma_mrs_destroy(buf);
+ ib_dealloc_pd(ia->ri_pd);
+ ia->ri_pd = NULL;
/* Allow waiters to continue */
complete(&ia->ri_remove_done);
@@ -589,11 +594,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
/* Client offers RDMA Read but does not initiate */
ep->rep_remote_cma.initiator_depth = 0;
- if (ia->ri_device->attrs.max_qp_rd_atom > 32) /* arbitrary but <= 255 */
- ep->rep_remote_cma.responder_resources = 32;
- else
- ep->rep_remote_cma.responder_resources =
- ia->ri_device->attrs.max_qp_rd_atom;
+ ep->rep_remote_cma.responder_resources =
+ min_t(int, U8_MAX, ia->ri_device->attrs.max_qp_rd_atom);
/* Limit transport retries so client can detect server
* GID changes quickly. RPC layer handles re-establishing
@@ -628,14 +630,16 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
cancel_delayed_work_sync(&ep->rep_connect_worker);
- if (ia->ri_id->qp) {
+ if (ia->ri_id && ia->ri_id->qp) {
rpcrdma_ep_disconnect(ep, ia);
rdma_destroy_qp(ia->ri_id);
ia->ri_id->qp = NULL;
}
- ib_free_cq(ep->rep_attr.recv_cq);
- ib_free_cq(ep->rep_attr.send_cq);
+ if (ep->rep_attr.recv_cq)
+ ib_free_cq(ep->rep_attr.recv_cq);
+ if (ep->rep_attr.send_cq)
+ ib_free_cq(ep->rep_attr.send_cq);
}
/* Re-establish a connection after a device removal event.
@@ -1024,7 +1028,7 @@ rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
LIST_HEAD(free);
LIST_HEAD(all);
- for (count = 0; count < 32; count++) {
+ for (count = 0; count < 3; count++) {
struct rpcrdma_mr *mr;
int rc;
@@ -1049,8 +1053,9 @@ rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
list_splice(&all, &buf->rb_all);
r_xprt->rx_stats.mrs_allocated += count;
spin_unlock(&buf->rb_mrlock);
-
trace_xprtrdma_createmrs(r_xprt, count);
+
+ xprt_write_space(&r_xprt->rx_xprt);
}
static void
@@ -1068,17 +1073,27 @@ struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
+ struct rpcrdma_regbuf *rb;
struct rpcrdma_req *req;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (req == NULL)
return ERR_PTR(-ENOMEM);
+ rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE,
+ DMA_TO_DEVICE, GFP_KERNEL);
+ if (IS_ERR(rb)) {
+ kfree(req);
+ return ERR_PTR(-ENOMEM);
+ }
+ req->rl_rdmabuf = rb;
+ xdr_buf_init(&req->rl_hdrbuf, rb->rg_base, rdmab_length(rb));
+ req->rl_buffer = buffer;
+ INIT_LIST_HEAD(&req->rl_registered);
+
spin_lock(&buffer->rb_reqslock);
list_add(&req->rl_all, &buffer->rb_allreqs);
spin_unlock(&buffer->rb_reqslock);
- req->rl_buffer = &r_xprt->rx_buf;
- INIT_LIST_HEAD(&req->rl_registered);
return req;
}
@@ -1535,7 +1550,6 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
struct rpcrdma_req *req)
{
struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr;
- struct ib_send_wr *send_wr_fail;
int rc;
if (req->rl_reply) {
@@ -1554,7 +1568,7 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
--ep->rep_send_count;
}
- rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail);
+ rc = ia->ri_ops->ro_send(ia, req);
trace_xprtrdma_post_send(req, rc);
if (rc)
return -ENOTCONN;
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 69883a960a3f..3d3b423fa9c1 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -334,8 +334,6 @@ enum {
struct rpcrdma_buffer;
struct rpcrdma_req {
struct list_head rl_list;
- int rl_cpu;
- unsigned int rl_connect_cookie;
struct rpcrdma_buffer *rl_buffer;
struct rpcrdma_rep *rl_reply;
struct xdr_stream rl_stream;
@@ -474,6 +472,8 @@ struct rpcrdma_memreg_ops {
(*ro_map)(struct rpcrdma_xprt *,
struct rpcrdma_mr_seg *, int, bool,
struct rpcrdma_mr **);
+ int (*ro_send)(struct rpcrdma_ia *ia,
+ struct rpcrdma_req *req);
void (*ro_reminv)(struct rpcrdma_rep *rep,
struct list_head *mrs);
void (*ro_unmap_sync)(struct rpcrdma_xprt *,
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 956e29c1438d..c8902f11efdd 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -527,6 +527,7 @@ static int xs_local_send_request(struct rpc_task *task)
xs_pktdump("packet data:",
req->rq_svec->iov_base, req->rq_svec->iov_len);
+ req->rq_xtime = ktime_get();
status = xs_sendpages(transport->sock, NULL, 0, xdr, req->rq_bytes_sent,
true, &sent);
dprintk("RPC: %s(%u) = %d\n",
@@ -589,6 +590,7 @@ static int xs_udp_send_request(struct rpc_task *task)
if (!xprt_bound(xprt))
return -ENOTCONN;
+ req->rq_xtime = ktime_get();
status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen,
xdr, req->rq_bytes_sent, true, &sent);
@@ -678,6 +680,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
/* Continue transmitting the packet/record. We must be careful
* to cope with writespace callbacks arriving _after_ we have
* called sendmsg(). */
+ req->rq_xtime = ktime_get();
while (1) {
sent = 0;
status = xs_sendpages(transport->sock, NULL, 0, xdr,
@@ -1060,6 +1063,7 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
if (!rovr)
goto out_unlock;
xprt_pin_rqst(rovr);
+ xprt_update_rtt(rovr->rq_task);
spin_unlock(&xprt->recv_lock);
task = rovr->rq_task;
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index cce31ee876b6..50cee534fd64 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -10,6 +10,7 @@ space := $(empty) $(empty)
space_escape := _-_SPACE_-_
right_paren := )
left_paren := (
+pound := \#
###
# Name of target with a '.' as filename prefix. foo/bar.o => foo/.bar.o
@@ -322,11 +323,11 @@ endif
# Replace >$< with >$$< to preserve $ when reloading the .cmd file
# (needed for make)
-# Replace >#< with >\#< to avoid starting a comment in the .cmd file
+# Replace >#< with >$(pound)< to avoid starting a comment in the .cmd file
# (needed for make)
# Replace >'< with >'\''< to be able to enclose the whole string in '...'
# (needed for the shell)
-make-cmd = $(call escsq,$(subst \#,\\\#,$(subst $$,$$$$,$(cmd_$(1)))))
+make-cmd = $(call escsq,$(subst $(pound),$$(pound),$(subst $$,$$$$,$(cmd_$(1)))))
# Find any prerequisites that is newer than target or that does not exist.
# PHONY targets skipped in both cases.
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 77cce68c4d63..8bdb1dc4072c 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -430,9 +430,7 @@ quiet_cmd_asn1_compiler = ASN.1 $@
cmd_asn1_compiler = $(objtree)/scripts/asn1_compiler $< \
$(subst .h,.c,$@) $(subst .c,.h,$@)
-.PRECIOUS: $(objtree)/$(obj)/%-asn1.c $(objtree)/$(obj)/%-asn1.h
-
-$(obj)/%-asn1.c $(obj)/%-asn1.h: $(src)/%.asn1 $(objtree)/scripts/asn1_compiler
+$(obj)/%.asn1.c $(obj)/%.asn1.h: $(src)/%.asn1 $(objtree)/scripts/asn1_compiler
$(call cmd,asn1_compiler)
# Build the compiled-in targets
@@ -538,6 +536,21 @@ $(call multi_depend, $(multi-used-m), .o, -objs -y -m)
targets += $(multi-used-m)
targets := $(filter-out $(PHONY), $(targets))
+# Add intermediate targets:
+# When building objects with specific suffix patterns, add intermediate
+# targets that the final targets are derived from.
+intermediate_targets = $(foreach sfx, $(2), \
+ $(patsubst %$(strip $(1)),%$(sfx), \
+ $(filter %$(strip $(1)), $(targets))))
+# %.asn1.o <- %.asn1.[ch] <- %.asn1
+# %.dtb.o <- %.dtb.S <- %.dtb <- %.dts
+# %.lex.o <- %.lex.c <- %.l
+# %.tab.o <- %.tab.[ch] <- %.y
+targets += $(call intermediate_targets, .asn1.o, .asn1.c .asn1.h) \
+ $(call intermediate_targets, .dtb.o, .dtb.S .dtb) \
+ $(call intermediate_targets, .lex.o, .lex.c) \
+ $(call intermediate_targets, .tab.o, .tab.c .tab.h)
+
# Descending
# ---------------------------------------------------------------------------
@@ -574,6 +587,10 @@ $(shell mkdir -p $(obj-dirs))
endif
endif
+# Some files contained in $(targets) are intermediate artifacts.
+# We never want them to be removed automatically.
+.SECONDARY: $(targets)
+
# Declare the contents of the .PHONY variable as phony. We keep that
# information in a variable so we can use it in if_changed and friends.
diff --git a/scripts/Makefile.host b/scripts/Makefile.host
index e6dc6ae2d7c4..aa971cc3f339 100644
--- a/scripts/Makefile.host
+++ b/scripts/Makefile.host
@@ -84,7 +84,7 @@ hostcxx_flags = -Wp,-MD,$(depfile) $(__hostcxx_flags)
# Create executable from a single .c file
# host-csingle -> Executable
quiet_cmd_host-csingle = HOSTCC $@
- cmd_host-csingle = $(HOSTCC) $(hostc_flags) -o $@ $< \
+ cmd_host-csingle = $(HOSTCC) $(hostc_flags) $(HOSTLDFLAGS) -o $@ $< \
$(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
$(host-csingle): $(obj)/%: $(src)/%.c FORCE
$(call if_changed_dep,host-csingle)
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 45b9aa3ca39e..07d07409f16f 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -184,14 +184,7 @@ endef
quiet_cmd_flex = LEX $@
cmd_flex = $(LEX) -o$@ -L $<
-ifdef REGENERATE_PARSERS
-.PRECIOUS: $(src)/%.lex.c_shipped
-$(src)/%.lex.c_shipped: $(src)/%.l
- $(call cmd,flex)
-endif
-
-.PRECIOUS: $(obj)/%.lex.c
-$(filter %.lex.c,$(targets)): $(obj)/%.lex.c: $(src)/%.l FORCE
+$(obj)/%.lex.c: $(src)/%.l FORCE
$(call if_changed,flex)
# YACC
@@ -199,27 +192,13 @@ $(filter %.lex.c,$(targets)): $(obj)/%.lex.c: $(src)/%.l FORCE
quiet_cmd_bison = YACC $@
cmd_bison = $(YACC) -o$@ -t -l $<
-ifdef REGENERATE_PARSERS
-.PRECIOUS: $(src)/%.tab.c_shipped
-$(src)/%.tab.c_shipped: $(src)/%.y
- $(call cmd,bison)
-endif
-
-.PRECIOUS: $(obj)/%.tab.c
-$(filter %.tab.c,$(targets)): $(obj)/%.tab.c: $(src)/%.y FORCE
+$(obj)/%.tab.c: $(src)/%.y FORCE
$(call if_changed,bison)
quiet_cmd_bison_h = YACC $@
cmd_bison_h = bison -o/dev/null --defines=$@ -t -l $<
-ifdef REGENERATE_PARSERS
-.PRECIOUS: $(src)/%.tab.h_shipped
-$(src)/%.tab.h_shipped: $(src)/%.y
- $(call cmd,bison_h)
-endif
-
-.PRECIOUS: $(obj)/%.tab.h
-$(filter %.tab.h,$(targets)): $(obj)/%.tab.h: $(src)/%.y FORCE
+$(obj)/%.tab.h: $(src)/%.y FORCE
$(call if_changed,bison_h)
# Shipped files
@@ -297,8 +276,8 @@ cmd_dt_S_dtb= \
echo '.balign STRUCT_ALIGNMENT'; \
) > $@
-$(obj)/%.dtb.S: $(obj)/%.dtb
- $(call cmd,dt_S_dtb)
+$(obj)/%.dtb.S: $(obj)/%.dtb FORCE
+ $(call if_changed,dt_S_dtb)
quiet_cmd_dtc = DTC $@
cmd_dtc = mkdir -p $(dir ${dtc-tmp}) ; \
diff --git a/scripts/asn1_compiler.c b/scripts/asn1_compiler.c
index c1b7ef3e24c1..c146020fc783 100644
--- a/scripts/asn1_compiler.c
+++ b/scripts/asn1_compiler.c
@@ -1319,7 +1319,7 @@ static void render(FILE *out, FILE *hdr)
fprintf(out, " * ASN.1 parser for %s\n", grammar_name);
fprintf(out, " */\n");
fprintf(out, "#include <linux/asn1_ber_bytecode.h>\n");
- fprintf(out, "#include \"%s-asn1.h\"\n", grammar_name);
+ fprintf(out, "#include \"%s.asn1.h\"\n", grammar_name);
fprintf(out, "\n");
if (ferror(out)) {
perror(outputname);
diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
index d84a5674e95e..a923f05edb36 100755
--- a/scripts/bloat-o-meter
+++ b/scripts/bloat-o-meter
@@ -30,8 +30,8 @@ def getsizes(file, format):
if type in format:
# strip generated symbols
if name.startswith("__mod_"): continue
- if name.startswith("SyS_"): continue
- if name.startswith("compat_SyS_"): continue
+ if name.startswith("__se_sys"): continue
+ if name.startswith("__se_compat_sys"): continue
if name == "linux_banner": continue
# statics and some other optimizations add random .NUMBER
name = re_NUMBER.sub('', name)
diff --git a/scripts/dtc/.gitignore b/scripts/dtc/.gitignore
index cdabdc95a6e7..2e6e60d64ede 100644
--- a/scripts/dtc/.gitignore
+++ b/scripts/dtc/.gitignore
@@ -1,4 +1 @@
dtc
-dtc-lexer.lex.c
-dtc-parser.tab.c
-dtc-parser.tab.h
diff --git a/scripts/dtc/Makefile b/scripts/dtc/Makefile
index a88b8c9bf46d..9cac65b7419c 100644
--- a/scripts/dtc/Makefile
+++ b/scripts/dtc/Makefile
@@ -27,8 +27,3 @@ HOSTCFLAGS_dtc-parser.tab.o := $(HOSTCFLAGS_DTC)
# dependencies on generated files need to be listed explicitly
$(obj)/dtc-lexer.lex.o: $(obj)/dtc-parser.tab.h
-
-# generated files need to include *.cmd and be cleaned explicitly
-generated-files := dtc-lexer.lex.c dtc-parser.tab.c dtc-parser.tab.h
-targets := $(generated-files)
-clean-files := $(generated-files)
diff --git a/scripts/dtc/include-prefixes/cris b/scripts/dtc/include-prefixes/cris
deleted file mode 120000
index 736d998ba506..000000000000
--- a/scripts/dtc/include-prefixes/cris
+++ /dev/null
@@ -1 +0,0 @@
-../../../arch/cris/boot/dts \ No newline at end of file
diff --git a/scripts/dtc/include-prefixes/metag b/scripts/dtc/include-prefixes/metag
deleted file mode 120000
index 87a3c847db8f..000000000000
--- a/scripts/dtc/include-prefixes/metag
+++ /dev/null
@@ -1 +0,0 @@
-../../../arch/metag/boot/dts \ No newline at end of file
diff --git a/scripts/genksyms/.gitignore b/scripts/genksyms/.gitignore
index e7836b47f060..b119c7da2863 100644
--- a/scripts/genksyms/.gitignore
+++ b/scripts/genksyms/.gitignore
@@ -1,4 +1 @@
-*.lex.c
-*.tab.c
-*.tab.h
genksyms
diff --git a/scripts/genksyms/Makefile b/scripts/genksyms/Makefile
index 34d6ab1811a4..ef0287e42957 100644
--- a/scripts/genksyms/Makefile
+++ b/scripts/genksyms/Makefile
@@ -5,11 +5,34 @@ always := $(hostprogs-y)
genksyms-objs := genksyms.o parse.tab.o lex.lex.o
+# FIXME: fix the ambiguous grammar in parse.y and delete this hack
+#
+# Suppress shift/reduce, reduce/reduce conflicts warnings
+# unless W=1 is specified.
+#
+# Just in case, run "$(YACC) --version" without suppressing stderr
+# so that 'bison: not found' will be displayed if it is missing.
+ifeq ($(findstring 1,$(KBUILD_ENABLE_EXTRA_GCC_CHECKS)),)
+
+quiet_cmd_bison_no_warn = $(quiet_cmd_bison)
+ cmd_bison_no_warn = $(YACC) --version >/dev/null; \
+ $(cmd_bison) 2>/dev/null
+
+$(obj)/parse.tab.c: $(src)/parse.y FORCE
+ $(call if_changed,bison_no_warn)
+
+quiet_cmd_bison_h_no_warn = $(quiet_cmd_bison_h)
+ cmd_bison_h_no_warn = $(YACC) --version >/dev/null; \
+ $(cmd_bison_h) 2>/dev/null
+
+$(obj)/parse.tab.h: $(src)/parse.y FORCE
+ $(call if_changed,bison_h_no_warn)
+
+endif
+
# -I needed for generated C source (shipped source)
HOSTCFLAGS_parse.tab.o := -I$(src)
HOSTCFLAGS_lex.lex.o := -I$(src)
# dependencies on generated files need to be listed explicitly
$(obj)/lex.lex.o: $(obj)/parse.tab.h
-
-clean-files := lex.lex.c parse.tab.c parse.tab.h
diff --git a/scripts/genksyms/lex.lex.c_shipped b/scripts/genksyms/lex.lex.c_shipped
deleted file mode 100644
index ba2fda8dfdb2..000000000000
--- a/scripts/genksyms/lex.lex.c_shipped
+++ /dev/null
@@ -1,2291 +0,0 @@
-
-#line 3 "scripts/genksyms/lex.lex.c_shipped"
-
-#define YY_INT_ALIGNED short int
-
-/* A lexical scanner generated by flex */
-
-#define FLEX_SCANNER
-#define YY_FLEX_MAJOR_VERSION 2
-#define YY_FLEX_MINOR_VERSION 5
-#define YY_FLEX_SUBMINOR_VERSION 35
-#if YY_FLEX_SUBMINOR_VERSION > 0
-#define FLEX_BETA
-#endif
-
-/* First, we deal with platform-specific or compiler-specific issues. */
-
-/* begin standard C headers. */
-#include <stdio.h>
-#include <string.h>
-#include <errno.h>
-#include <stdlib.h>
-
-/* end standard C headers. */
-
-/* flex integer type definitions */
-
-#ifndef FLEXINT_H
-#define FLEXINT_H
-
-/* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */
-
-#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
-
-/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h,
- * if you want the limit (max/min) macros for int types.
- */
-#ifndef __STDC_LIMIT_MACROS
-#define __STDC_LIMIT_MACROS 1
-#endif
-
-#include <inttypes.h>
-typedef int8_t flex_int8_t;
-typedef uint8_t flex_uint8_t;
-typedef int16_t flex_int16_t;
-typedef uint16_t flex_uint16_t;
-typedef int32_t flex_int32_t;
-typedef uint32_t flex_uint32_t;
-#else
-typedef signed char flex_int8_t;
-typedef short int flex_int16_t;
-typedef int flex_int32_t;
-typedef unsigned char flex_uint8_t;
-typedef unsigned short int flex_uint16_t;
-typedef unsigned int flex_uint32_t;
-#endif /* ! C99 */
-
-/* Limits of integral types. */
-#ifndef INT8_MIN
-#define INT8_MIN (-128)
-#endif
-#ifndef INT16_MIN
-#define INT16_MIN (-32767-1)
-#endif
-#ifndef INT32_MIN
-#define INT32_MIN (-2147483647-1)
-#endif
-#ifndef INT8_MAX
-#define INT8_MAX (127)
-#endif
-#ifndef INT16_MAX
-#define INT16_MAX (32767)
-#endif
-#ifndef INT32_MAX
-#define INT32_MAX (2147483647)
-#endif
-#ifndef UINT8_MAX
-#define UINT8_MAX (255U)
-#endif
-#ifndef UINT16_MAX
-#define UINT16_MAX (65535U)
-#endif
-#ifndef UINT32_MAX
-#define UINT32_MAX (4294967295U)
-#endif
-
-#endif /* ! FLEXINT_H */
-
-#ifdef __cplusplus
-
-/* The "const" storage-class-modifier is valid. */
-#define YY_USE_CONST
-
-#else /* ! __cplusplus */
-
-/* C99 requires __STDC__ to be defined as 1. */
-#if defined (__STDC__)
-
-#define YY_USE_CONST
-
-#endif /* defined (__STDC__) */
-#endif /* ! __cplusplus */
-
-#ifdef YY_USE_CONST
-#define yyconst const
-#else
-#define yyconst
-#endif
-
-/* Returned upon end-of-file. */
-#define YY_NULL 0
-
-/* Promotes a possibly negative, possibly signed char to an unsigned
- * integer for use as an array index. If the signed char is negative,
- * we want to instead treat it as an 8-bit unsigned char, hence the
- * double cast.
- */
-#define YY_SC_TO_UI(c) ((unsigned int) (unsigned char) c)
-
-/* Enter a start condition. This macro really ought to take a parameter,
- * but we do it the disgusting crufty way forced on us by the ()-less
- * definition of BEGIN.
- */
-#define BEGIN (yy_start) = 1 + 2 *
-
-/* Translate the current start state into a value that can be later handed
- * to BEGIN to return to the state. The YYSTATE alias is for lex
- * compatibility.
- */
-#define YY_START (((yy_start) - 1) / 2)
-#define YYSTATE YY_START
-
-/* Action number for EOF rule of a given start state. */
-#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1)
-
-/* Special action meaning "start processing a new file". */
-#define YY_NEW_FILE yyrestart(yyin )
-
-#define YY_END_OF_BUFFER_CHAR 0
-
-/* Size of default input buffer. */
-#ifndef YY_BUF_SIZE
-#define YY_BUF_SIZE 16384
-#endif
-
-/* The state buf must be large enough to hold one state per character in the main buffer.
- */
-#define YY_STATE_BUF_SIZE ((YY_BUF_SIZE + 2) * sizeof(yy_state_type))
-
-#ifndef YY_TYPEDEF_YY_BUFFER_STATE
-#define YY_TYPEDEF_YY_BUFFER_STATE
-typedef struct yy_buffer_state *YY_BUFFER_STATE;
-#endif
-
-extern int yyleng;
-
-extern FILE *yyin, *yyout;
-
-#define EOB_ACT_CONTINUE_SCAN 0
-#define EOB_ACT_END_OF_FILE 1
-#define EOB_ACT_LAST_MATCH 2
-
- #define YY_LESS_LINENO(n)
-
-/* Return all but the first "n" matched characters back to the input stream. */
-#define yyless(n) \
- do \
- { \
- /* Undo effects of setting up yytext. */ \
- int yyless_macro_arg = (n); \
- YY_LESS_LINENO(yyless_macro_arg);\
- *yy_cp = (yy_hold_char); \
- YY_RESTORE_YY_MORE_OFFSET \
- (yy_c_buf_p) = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \
- YY_DO_BEFORE_ACTION; /* set up yytext again */ \
- } \
- while ( 0 )
-
-#define unput(c) yyunput( c, (yytext_ptr) )
-
-#ifndef YY_TYPEDEF_YY_SIZE_T
-#define YY_TYPEDEF_YY_SIZE_T
-typedef size_t yy_size_t;
-#endif
-
-#ifndef YY_STRUCT_YY_BUFFER_STATE
-#define YY_STRUCT_YY_BUFFER_STATE
-struct yy_buffer_state
- {
- FILE *yy_input_file;
-
- char *yy_ch_buf; /* input buffer */
- char *yy_buf_pos; /* current position in input buffer */
-
- /* Size of input buffer in bytes, not including room for EOB
- * characters.
- */
- yy_size_t yy_buf_size;
-
- /* Number of characters read into yy_ch_buf, not including EOB
- * characters.
- */
- int yy_n_chars;
-
- /* Whether we "own" the buffer - i.e., we know we created it,
- * and can realloc() it to grow it, and should free() it to
- * delete it.
- */
- int yy_is_our_buffer;
-
- /* Whether this is an "interactive" input source; if so, and
- * if we're using stdio for input, then we want to use getc()
- * instead of fread(), to make sure we stop fetching input after
- * each newline.
- */
- int yy_is_interactive;
-
- /* Whether we're considered to be at the beginning of a line.
- * If so, '^' rules will be active on the next match, otherwise
- * not.
- */
- int yy_at_bol;
-
- int yy_bs_lineno; /**< The line count. */
- int yy_bs_column; /**< The column count. */
-
- /* Whether to try to fill the input buffer when we reach the
- * end of it.
- */
- int yy_fill_buffer;
-
- int yy_buffer_status;
-
-#define YY_BUFFER_NEW 0
-#define YY_BUFFER_NORMAL 1
- /* When an EOF's been seen but there's still some text to process
- * then we mark the buffer as YY_EOF_PENDING, to indicate that we
- * shouldn't try reading from the input source any more. We might
- * still have a bunch of tokens to match, though, because of
- * possible backing-up.
- *
- * When we actually see the EOF, we change the status to "new"
- * (via yyrestart()), so that the user can continue scanning by
- * just pointing yyin at a new input file.
- */
-#define YY_BUFFER_EOF_PENDING 2
-
- };
-#endif /* !YY_STRUCT_YY_BUFFER_STATE */
-
-/* Stack of input buffers. */
-static size_t yy_buffer_stack_top = 0; /**< index of top of stack. */
-static size_t yy_buffer_stack_max = 0; /**< capacity of stack. */
-static YY_BUFFER_STATE * yy_buffer_stack = 0; /**< Stack as an array. */
-
-/* We provide macros for accessing buffer states in case in the
- * future we want to put the buffer states in a more general
- * "scanner state".
- *
- * Returns the top of the stack, or NULL.
- */
-#define YY_CURRENT_BUFFER ( (yy_buffer_stack) \
- ? (yy_buffer_stack)[(yy_buffer_stack_top)] \
- : NULL)
-
-/* Same as previous macro, but useful when we know that the buffer stack is not
- * NULL or when we need an lvalue. For internal use only.
- */
-#define YY_CURRENT_BUFFER_LVALUE (yy_buffer_stack)[(yy_buffer_stack_top)]
-
-/* yy_hold_char holds the character lost when yytext is formed. */
-static char yy_hold_char;
-static int yy_n_chars; /* number of characters read into yy_ch_buf */
-int yyleng;
-
-/* Points to current character in buffer. */
-static char *yy_c_buf_p = (char *) 0;
-static int yy_init = 0; /* whether we need to initialize */
-static int yy_start = 0; /* start state number */
-
-/* Flag which is used to allow yywrap()'s to do buffer switches
- * instead of setting up a fresh yyin. A bit of a hack ...
- */
-static int yy_did_buffer_switch_on_eof;
-
-void yyrestart (FILE *input_file );
-void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer );
-YY_BUFFER_STATE yy_create_buffer (FILE *file,int size );
-void yy_delete_buffer (YY_BUFFER_STATE b );
-void yy_flush_buffer (YY_BUFFER_STATE b );
-void yypush_buffer_state (YY_BUFFER_STATE new_buffer );
-void yypop_buffer_state (void );
-
-static void yyensure_buffer_stack (void );
-static void yy_load_buffer_state (void );
-static void yy_init_buffer (YY_BUFFER_STATE b,FILE *file );
-
-#define YY_FLUSH_BUFFER yy_flush_buffer(YY_CURRENT_BUFFER )
-
-YY_BUFFER_STATE yy_scan_buffer (char *base,yy_size_t size );
-YY_BUFFER_STATE yy_scan_string (yyconst char *yy_str );
-YY_BUFFER_STATE yy_scan_bytes (yyconst char *bytes,int len );
-
-void *yyalloc (yy_size_t );
-void *yyrealloc (void *,yy_size_t );
-void yyfree (void * );
-
-#define yy_new_buffer yy_create_buffer
-
-#define yy_set_interactive(is_interactive) \
- { \
- if ( ! YY_CURRENT_BUFFER ){ \
- yyensure_buffer_stack (); \
- YY_CURRENT_BUFFER_LVALUE = \
- yy_create_buffer(yyin,YY_BUF_SIZE ); \
- } \
- YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \
- }
-
-#define yy_set_bol(at_bol) \
- { \
- if ( ! YY_CURRENT_BUFFER ){\
- yyensure_buffer_stack (); \
- YY_CURRENT_BUFFER_LVALUE = \
- yy_create_buffer(yyin,YY_BUF_SIZE ); \
- } \
- YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \
- }
-
-#define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol)
-
-/* Begin user sect3 */
-
-#define yywrap(n) 1
-#define YY_SKIP_YYWRAP
-
-typedef unsigned char YY_CHAR;
-
-FILE *yyin = (FILE *) 0, *yyout = (FILE *) 0;
-
-typedef int yy_state_type;
-
-extern int yylineno;
-
-int yylineno = 1;
-
-extern char *yytext;
-#define yytext_ptr yytext
-
-static yy_state_type yy_get_previous_state (void );
-static yy_state_type yy_try_NUL_trans (yy_state_type current_state );
-static int yy_get_next_buffer (void );
-static void yy_fatal_error (yyconst char msg[] );
-
-/* Done after the current pattern has been matched and before the
- * corresponding action - sets up yytext.
- */
-#define YY_DO_BEFORE_ACTION \
- (yytext_ptr) = yy_bp; \
- yyleng = (size_t) (yy_cp - yy_bp); \
- (yy_hold_char) = *yy_cp; \
- *yy_cp = '\0'; \
- (yy_c_buf_p) = yy_cp;
-
-#define YY_NUM_RULES 13
-#define YY_END_OF_BUFFER 14
-/* This struct is not used in this scanner,
- but its presence is necessary. */
-struct yy_trans_info
- {
- flex_int32_t yy_verify;
- flex_int32_t yy_nxt;
- };
-static yyconst flex_int16_t yy_accept[73] =
- { 0,
- 0, 0, 14, 12, 4, 3, 12, 7, 12, 12,
- 12, 12, 12, 9, 9, 12, 12, 7, 12, 12,
- 4, 0, 5, 0, 7, 8, 0, 6, 0, 0,
- 10, 10, 9, 0, 0, 9, 9, 0, 9, 0,
- 0, 0, 0, 2, 0, 0, 11, 0, 10, 0,
- 10, 9, 9, 0, 0, 0, 10, 10, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 0
- } ;
-
-static yyconst flex_int32_t yy_ec[256] =
- { 0,
- 1, 1, 1, 1, 1, 1, 1, 1, 2, 3,
- 4, 4, 4, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 2, 1, 5, 6, 7, 8, 9, 10, 1,
- 1, 8, 11, 1, 12, 13, 8, 14, 15, 15,
- 15, 15, 15, 15, 15, 16, 16, 1, 1, 17,
- 18, 19, 1, 1, 20, 20, 20, 20, 21, 22,
- 7, 7, 7, 7, 7, 23, 7, 7, 7, 7,
- 7, 7, 7, 7, 24, 7, 7, 25, 7, 7,
- 1, 26, 1, 8, 7, 1, 20, 20, 20, 20,
-
- 21, 22, 7, 7, 7, 7, 7, 27, 7, 7,
- 7, 7, 7, 7, 7, 7, 24, 7, 7, 25,
- 7, 7, 1, 28, 1, 8, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1
- } ;
-
-static yyconst flex_int32_t yy_meta[29] =
- { 0,
- 1, 1, 2, 1, 1, 1, 3, 1, 1, 1,
- 4, 4, 5, 6, 6, 6, 1, 1, 1, 7,
- 8, 7, 3, 3, 3, 1, 3, 1
- } ;
-
-static yyconst flex_int16_t yy_base[85] =
- { 0,
- 0, 145, 150, 266, 27, 266, 25, 0, 131, 23,
- 23, 16, 23, 39, 31, 25, 39, 60, 22, 65,
- 57, 43, 266, 0, 0, 266, 61, 266, 0, 128,
- 74, 0, 113, 59, 62, 113, 52, 0, 0, 72,
- 66, 110, 100, 266, 73, 74, 266, 70, 266, 90,
- 103, 266, 84, 129, 108, 113, 143, 266, 107, 66,
- 118, 137, 168, 120, 80, 91, 145, 143, 83, 41,
- 266, 266, 190, 196, 204, 212, 220, 228, 232, 237,
- 238, 243, 249, 257
- } ;
-
-static yyconst flex_int16_t yy_def[85] =
- { 0,
- 72, 1, 72, 72, 72, 72, 73, 74, 72, 72,
- 75, 72, 72, 72, 14, 72, 72, 74, 72, 76,
- 72, 73, 72, 77, 74, 72, 75, 72, 78, 72,
- 72, 31, 14, 79, 80, 72, 72, 81, 15, 73,
- 75, 76, 76, 72, 73, 75, 72, 82, 72, 72,
- 72, 72, 81, 76, 54, 72, 72, 72, 76, 54,
- 76, 76, 76, 54, 83, 76, 63, 83, 84, 84,
- 72, 0, 72, 72, 72, 72, 72, 72, 72, 72,
- 72, 72, 72, 72
- } ;
-
-static yyconst flex_int16_t yy_nxt[295] =
- { 0,
- 4, 5, 6, 5, 7, 4, 8, 9, 10, 11,
- 9, 12, 13, 14, 15, 15, 16, 9, 17, 8,
- 8, 8, 18, 8, 8, 4, 8, 19, 21, 23,
- 21, 26, 28, 26, 26, 30, 31, 31, 31, 26,
- 26, 26, 26, 71, 39, 39, 39, 23, 29, 26,
- 24, 32, 33, 33, 34, 72, 26, 26, 21, 35,
- 21, 36, 37, 38, 40, 36, 43, 44, 24, 41,
- 28, 32, 50, 50, 52, 28, 23, 23, 52, 35,
- 56, 56, 44, 28, 42, 71, 29, 31, 31, 31,
- 42, 29, 59, 44, 48, 49, 49, 24, 24, 29,
-
- 49, 43, 44, 51, 51, 51, 36, 37, 59, 44,
- 36, 65, 44, 54, 55, 55, 51, 51, 51, 59,
- 44, 64, 64, 64, 58, 58, 57, 57, 57, 58,
- 59, 44, 42, 64, 64, 64, 52, 72, 59, 44,
- 47, 66, 60, 60, 42, 44, 59, 69, 26, 72,
- 20, 61, 62, 63, 72, 61, 57, 57, 57, 66,
- 72, 72, 72, 66, 49, 49, 72, 61, 62, 49,
- 44, 61, 72, 72, 72, 72, 72, 72, 72, 72,
- 72, 67, 67, 67, 72, 72, 72, 67, 67, 67,
- 22, 22, 22, 22, 22, 22, 22, 22, 25, 72,
-
- 72, 25, 25, 25, 27, 27, 27, 27, 27, 27,
- 27, 27, 42, 42, 42, 42, 42, 42, 42, 42,
- 45, 72, 45, 45, 45, 45, 45, 45, 46, 72,
- 46, 46, 46, 46, 46, 46, 34, 34, 72, 34,
- 51, 72, 51, 53, 53, 53, 57, 72, 57, 68,
- 68, 68, 68, 68, 68, 68, 68, 70, 70, 70,
- 70, 70, 70, 70, 70, 3, 72, 72, 72, 72,
- 72, 72, 72, 72, 72, 72, 72, 72, 72, 72,
- 72, 72, 72, 72, 72, 72, 72, 72, 72, 72,
- 72, 72, 72, 72
-
- } ;
-
-static yyconst flex_int16_t yy_chk[295] =
- { 0,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 5, 7,
- 5, 10, 11, 12, 12, 13, 13, 13, 13, 19,
- 10, 16, 16, 70, 15, 15, 15, 22, 11, 19,
- 7, 14, 14, 14, 14, 15, 17, 17, 21, 14,
- 21, 14, 14, 14, 18, 14, 20, 20, 22, 18,
- 27, 34, 35, 35, 37, 41, 40, 45, 37, 34,
- 48, 48, 65, 46, 65, 69, 27, 31, 31, 31,
- 60, 41, 66, 66, 31, 31, 31, 40, 45, 46,
-
- 31, 43, 43, 50, 50, 50, 53, 53, 59, 59,
- 53, 59, 42, 43, 43, 43, 51, 51, 51, 61,
- 61, 55, 55, 55, 51, 51, 56, 56, 56, 51,
- 54, 54, 55, 64, 64, 64, 36, 33, 62, 62,
- 30, 61, 54, 54, 64, 68, 67, 68, 9, 3,
- 2, 54, 54, 54, 0, 54, 57, 57, 57, 62,
- 0, 0, 0, 62, 57, 57, 0, 67, 67, 57,
- 63, 67, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 63, 63, 63, 0, 0, 0, 63, 63, 63,
- 73, 73, 73, 73, 73, 73, 73, 73, 74, 0,
-
- 0, 74, 74, 74, 75, 75, 75, 75, 75, 75,
- 75, 75, 76, 76, 76, 76, 76, 76, 76, 76,
- 77, 0, 77, 77, 77, 77, 77, 77, 78, 0,
- 78, 78, 78, 78, 78, 78, 79, 79, 0, 79,
- 80, 0, 80, 81, 81, 81, 82, 0, 82, 83,
- 83, 83, 83, 83, 83, 83, 83, 84, 84, 84,
- 84, 84, 84, 84, 84, 72, 72, 72, 72, 72,
- 72, 72, 72, 72, 72, 72, 72, 72, 72, 72,
- 72, 72, 72, 72, 72, 72, 72, 72, 72, 72,
- 72, 72, 72, 72
-
- } ;
-
-static yy_state_type yy_last_accepting_state;
-static char *yy_last_accepting_cpos;
-
-extern int yy_flex_debug;
-int yy_flex_debug = 0;
-
-/* The intent behind this definition is that it'll catch
- * any uses of REJECT which flex missed.
- */
-#define REJECT reject_used_but_not_detected
-#define yymore() yymore_used_but_not_detected
-#define YY_MORE_ADJ 0
-#define YY_RESTORE_YY_MORE_OFFSET
-char *yytext;
-/* Lexical analysis for genksyms.
- Copyright 1996, 1997 Linux International.
-
- New implementation contributed by Richard Henderson <rth@tamu.edu>
- Based on original work by Bjorn Ekwall <bj0rn@blox.se>
-
- Taken from Linux modutils 2.4.22.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the
- Free Software Foundation; either version 2 of the License, or (at your
- option) any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software Foundation,
- Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
-
-#include <limits.h>
-#include <stdlib.h>
-#include <string.h>
-#include <ctype.h>
-
-#include "genksyms.h"
-#include "parse.tab.h"
-
-/* We've got a two-level lexer here. We let flex do basic tokenization
- and then we categorize those basic tokens in the second stage. */
-#define YY_DECL static int yylex1(void)
-
-/* We don't do multiple input files. */
-#define YY_NO_INPUT 1
-
-#define INITIAL 0
-
-#ifndef YY_NO_UNISTD_H
-/* Special case for "unistd.h", since it is non-ANSI. We include it way
- * down here because we want the user's section 1 to have been scanned first.
- * The user has a chance to override it with an option.
- */
-#include <unistd.h>
-#endif
-
-#ifndef YY_EXTRA_TYPE
-#define YY_EXTRA_TYPE void *
-#endif
-
-static int yy_init_globals (void );
-
-/* Accessor methods to globals.
- These are made visible to non-reentrant scanners for convenience. */
-
-int yylex_destroy (void );
-
-int yyget_debug (void );
-
-void yyset_debug (int debug_flag );
-
-YY_EXTRA_TYPE yyget_extra (void );
-
-void yyset_extra (YY_EXTRA_TYPE user_defined );
-
-FILE *yyget_in (void );
-
-void yyset_in (FILE * in_str );
-
-FILE *yyget_out (void );
-
-void yyset_out (FILE * out_str );
-
-int yyget_leng (void );
-
-char *yyget_text (void );
-
-int yyget_lineno (void );
-
-void yyset_lineno (int line_number );
-
-/* Macros after this point can all be overridden by user definitions in
- * section 1.
- */
-
-#ifndef YY_SKIP_YYWRAP
-#ifdef __cplusplus
-extern "C" int yywrap (void );
-#else
-extern int yywrap (void );
-#endif
-#endif
-
- static void yyunput (int c,char *buf_ptr );
-
-#ifndef yytext_ptr
-static void yy_flex_strncpy (char *,yyconst char *,int );
-#endif
-
-#ifdef YY_NEED_STRLEN
-static int yy_flex_strlen (yyconst char * );
-#endif
-
-#ifndef YY_NO_INPUT
-
-#ifdef __cplusplus
-static int yyinput (void );
-#else
-static int input (void );
-#endif
-
-#endif
-
-/* Amount of stuff to slurp up with each read. */
-#ifndef YY_READ_BUF_SIZE
-#define YY_READ_BUF_SIZE 8192
-#endif
-
-/* Copy whatever the last rule matched to the standard output. */
-#ifndef ECHO
-/* This used to be an fputs(), but since the string might contain NUL's,
- * we now use fwrite().
- */
-#define ECHO fwrite( yytext, yyleng, 1, yyout )
-#endif
-
-/* Gets input and stuffs it into "buf". number of characters read, or YY_NULL,
- * is returned in "result".
- */
-#ifndef YY_INPUT
-#define YY_INPUT(buf,result,max_size) \
- if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \
- { \
- int c = '*'; \
- int n; \
- for ( n = 0; n < max_size && \
- (c = getc( yyin )) != EOF && c != '\n'; ++n ) \
- buf[n] = (char) c; \
- if ( c == '\n' ) \
- buf[n++] = (char) c; \
- if ( c == EOF && ferror( yyin ) ) \
- YY_FATAL_ERROR( "input in flex scanner failed" ); \
- result = n; \
- } \
- else \
- { \
- errno=0; \
- while ( (result = fread(buf, 1, max_size, yyin))==0 && ferror(yyin)) \
- { \
- if( errno != EINTR) \
- { \
- YY_FATAL_ERROR( "input in flex scanner failed" ); \
- break; \
- } \
- errno=0; \
- clearerr(yyin); \
- } \
- }\
-\
-
-#endif
-
-/* No semi-colon after return; correct usage is to write "yyterminate();" -
- * we don't want an extra ';' after the "return" because that will cause
- * some compilers to complain about unreachable statements.
- */
-#ifndef yyterminate
-#define yyterminate() return YY_NULL
-#endif
-
-/* Number of entries by which start-condition stack grows. */
-#ifndef YY_START_STACK_INCR
-#define YY_START_STACK_INCR 25
-#endif
-
-/* Report a fatal error. */
-#ifndef YY_FATAL_ERROR
-#define YY_FATAL_ERROR(msg) yy_fatal_error( msg )
-#endif
-
-/* end tables serialization structures and prototypes */
-
-/* Default declaration of generated scanner - a define so the user can
- * easily add parameters.
- */
-#ifndef YY_DECL
-#define YY_DECL_IS_OURS 1
-
-extern int yylex (void);
-
-#define YY_DECL int yylex (void)
-#endif /* !YY_DECL */
-
-/* Code executed at the beginning of each rule, after yytext and yyleng
- * have been set up.
- */
-#ifndef YY_USER_ACTION
-#define YY_USER_ACTION
-#endif
-
-/* Code executed at the end of each rule. */
-#ifndef YY_BREAK
-#define YY_BREAK break;
-#endif
-
-#define YY_RULE_SETUP \
- if ( yyleng > 0 ) \
- YY_CURRENT_BUFFER_LVALUE->yy_at_bol = \
- (yytext[yyleng - 1] == '\n'); \
- YY_USER_ACTION
-
-/** The main scanner function which does all the work.
- */
-YY_DECL
-{
- register yy_state_type yy_current_state;
- register char *yy_cp, *yy_bp;
- register int yy_act;
-
- /* Keep track of our location in the original source files. */
-
- if ( !(yy_init) )
- {
- (yy_init) = 1;
-
-#ifdef YY_USER_INIT
- YY_USER_INIT;
-#endif
-
- if ( ! (yy_start) )
- (yy_start) = 1; /* first start state */
-
- if ( ! yyin )
- yyin = stdin;
-
- if ( ! yyout )
- yyout = stdout;
-
- if ( ! YY_CURRENT_BUFFER ) {
- yyensure_buffer_stack ();
- YY_CURRENT_BUFFER_LVALUE =
- yy_create_buffer(yyin,YY_BUF_SIZE );
- }
-
- yy_load_buffer_state( );
- }
-
- while ( 1 ) /* loops until end-of-file is reached */
- {
- yy_cp = (yy_c_buf_p);
-
- /* Support of yytext. */
- *yy_cp = (yy_hold_char);
-
- /* yy_bp points to the position in yy_ch_buf of the start of
- * the current run.
- */
- yy_bp = yy_cp;
-
- yy_current_state = (yy_start);
- yy_current_state += YY_AT_BOL();
-yy_match:
- do
- {
- register YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)];
- if ( yy_accept[yy_current_state] )
- {
- (yy_last_accepting_state) = yy_current_state;
- (yy_last_accepting_cpos) = yy_cp;
- }
- while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
- {
- yy_current_state = (int) yy_def[yy_current_state];
- if ( yy_current_state >= 73 )
- yy_c = yy_meta[(unsigned int) yy_c];
- }
- yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
- ++yy_cp;
- }
- while ( yy_base[yy_current_state] != 266 );
-
-yy_find_action:
- yy_act = yy_accept[yy_current_state];
- if ( yy_act == 0 )
- { /* have to back up */
- yy_cp = (yy_last_accepting_cpos);
- yy_current_state = (yy_last_accepting_state);
- yy_act = yy_accept[yy_current_state];
- }
-
- YY_DO_BEFORE_ACTION;
-
-do_action: /* This label is used only to access EOF actions. */
-
- switch ( yy_act )
- { /* beginning of action switch */
- case 0: /* must back up */
- /* undo the effects of YY_DO_BEFORE_ACTION */
- *yy_cp = (yy_hold_char);
- yy_cp = (yy_last_accepting_cpos);
- yy_current_state = (yy_last_accepting_state);
- goto yy_find_action;
-
-case 1:
-/* rule 1 can match eol */
-YY_RULE_SETUP
-return FILENAME;
- YY_BREAK
-case 2:
-/* rule 2 can match eol */
-YY_RULE_SETUP
-cur_line++;
- YY_BREAK
-case 3:
-/* rule 3 can match eol */
-YY_RULE_SETUP
-cur_line++;
- YY_BREAK
-/* Ignore all other whitespace. */
-case 4:
-YY_RULE_SETUP
-;
- YY_BREAK
-case 5:
-/* rule 5 can match eol */
-YY_RULE_SETUP
-return STRING;
- YY_BREAK
-case 6:
-/* rule 6 can match eol */
-YY_RULE_SETUP
-return CHAR;
- YY_BREAK
-case 7:
-YY_RULE_SETUP
-return IDENT;
- YY_BREAK
-/* The Pedant requires that the other C multi-character tokens be
- recognized as tokens. We don't actually use them since we don't
- parse expressions, but we do want whitespace to be arranged
- around them properly. */
-case 8:
-YY_RULE_SETUP
-return OTHER;
- YY_BREAK
-case 9:
-YY_RULE_SETUP
-return INT;
- YY_BREAK
-case 10:
-YY_RULE_SETUP
-return REAL;
- YY_BREAK
-case 11:
-YY_RULE_SETUP
-return DOTS;
- YY_BREAK
-/* All other tokens are single characters. */
-case 12:
-YY_RULE_SETUP
-return yytext[0];
- YY_BREAK
-case 13:
-YY_RULE_SETUP
-ECHO;
- YY_BREAK
-case YY_STATE_EOF(INITIAL):
- yyterminate();
-
- case YY_END_OF_BUFFER:
- {
- /* Amount of text matched not including the EOB char. */
- int yy_amount_of_matched_text = (int) (yy_cp - (yytext_ptr)) - 1;
-
- /* Undo the effects of YY_DO_BEFORE_ACTION. */
- *yy_cp = (yy_hold_char);
- YY_RESTORE_YY_MORE_OFFSET
-
- if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW )
- {
- /* We're scanning a new file or input source. It's
- * possible that this happened because the user
- * just pointed yyin at a new source and called
- * yylex(). If so, then we have to assure
- * consistency between YY_CURRENT_BUFFER and our
- * globals. Here is the right place to do so, because
- * this is the first action (other than possibly a
- * back-up) that will match for the new input source.
- */
- (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
- YY_CURRENT_BUFFER_LVALUE->yy_input_file = yyin;
- YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL;
- }
-
- /* Note that here we test for yy_c_buf_p "<=" to the position
- * of the first EOB in the buffer, since yy_c_buf_p will
- * already have been incremented past the NUL character
- * (since all states make transitions on EOB to the
- * end-of-buffer state). Contrast this with the test
- * in input().
- */
- if ( (yy_c_buf_p) <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
- { /* This was really a NUL. */
- yy_state_type yy_next_state;
-
- (yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text;
-
- yy_current_state = yy_get_previous_state( );
-
- /* Okay, we're now positioned to make the NUL
- * transition. We couldn't have
- * yy_get_previous_state() go ahead and do it
- * for us because it doesn't know how to deal
- * with the possibility of jamming (and we don't
- * want to build jamming into it because then it
- * will run more slowly).
- */
-
- yy_next_state = yy_try_NUL_trans( yy_current_state );
-
- yy_bp = (yytext_ptr) + YY_MORE_ADJ;
-
- if ( yy_next_state )
- {
- /* Consume the NUL. */
- yy_cp = ++(yy_c_buf_p);
- yy_current_state = yy_next_state;
- goto yy_match;
- }
-
- else
- {
- yy_cp = (yy_c_buf_p);
- goto yy_find_action;
- }
- }
-
- else switch ( yy_get_next_buffer( ) )
- {
- case EOB_ACT_END_OF_FILE:
- {
- (yy_did_buffer_switch_on_eof) = 0;
-
- if ( yywrap( ) )
- {
- /* Note: because we've taken care in
- * yy_get_next_buffer() to have set up
- * yytext, we can now set up
- * yy_c_buf_p so that if some total
- * hoser (like flex itself) wants to
- * call the scanner after we return the
- * YY_NULL, it'll still work - another
- * YY_NULL will get returned.
- */
- (yy_c_buf_p) = (yytext_ptr) + YY_MORE_ADJ;
-
- yy_act = YY_STATE_EOF(YY_START);
- goto do_action;
- }
-
- else
- {
- if ( ! (yy_did_buffer_switch_on_eof) )
- YY_NEW_FILE;
- }
- break;
- }
-
- case EOB_ACT_CONTINUE_SCAN:
- (yy_c_buf_p) =
- (yytext_ptr) + yy_amount_of_matched_text;
-
- yy_current_state = yy_get_previous_state( );
-
- yy_cp = (yy_c_buf_p);
- yy_bp = (yytext_ptr) + YY_MORE_ADJ;
- goto yy_match;
-
- case EOB_ACT_LAST_MATCH:
- (yy_c_buf_p) =
- &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)];
-
- yy_current_state = yy_get_previous_state( );
-
- yy_cp = (yy_c_buf_p);
- yy_bp = (yytext_ptr) + YY_MORE_ADJ;
- goto yy_find_action;
- }
- break;
- }
-
- default:
- YY_FATAL_ERROR(
- "fatal flex scanner internal error--no action found" );
- } /* end of action switch */
- } /* end of scanning one token */
-} /* end of yylex */
-
-/* yy_get_next_buffer - try to read in a new buffer
- *
- * Returns a code representing an action:
- * EOB_ACT_LAST_MATCH -
- * EOB_ACT_CONTINUE_SCAN - continue scanning from current position
- * EOB_ACT_END_OF_FILE - end of file
- */
-static int yy_get_next_buffer (void)
-{
- register char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf;
- register char *source = (yytext_ptr);
- register int number_to_move, i;
- int ret_val;
-
- if ( (yy_c_buf_p) > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] )
- YY_FATAL_ERROR(
- "fatal flex scanner internal error--end of buffer missed" );
-
- if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 )
- { /* Don't try to fill the buffer, so this is an EOF. */
- if ( (yy_c_buf_p) - (yytext_ptr) - YY_MORE_ADJ == 1 )
- {
- /* We matched a single character, the EOB, so
- * treat this as a final EOF.
- */
- return EOB_ACT_END_OF_FILE;
- }
-
- else
- {
- /* We matched some text prior to the EOB, first
- * process it.
- */
- return EOB_ACT_LAST_MATCH;
- }
- }
-
- /* Try to read more data. */
-
- /* First move last chars to start of buffer. */
- number_to_move = (int) ((yy_c_buf_p) - (yytext_ptr)) - 1;
-
- for ( i = 0; i < number_to_move; ++i )
- *(dest++) = *(source++);
-
- if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING )
- /* don't do the read, it's not guaranteed to return an EOF,
- * just force an EOF
- */
- YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars) = 0;
-
- else
- {
- int num_to_read =
- YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;
-
- while ( num_to_read <= 0 )
- { /* Not enough room in the buffer - grow it. */
-
- /* just a shorter name for the current buffer */
- YY_BUFFER_STATE b = YY_CURRENT_BUFFER;
-
- int yy_c_buf_p_offset =
- (int) ((yy_c_buf_p) - b->yy_ch_buf);
-
- if ( b->yy_is_our_buffer )
- {
- int new_size = b->yy_buf_size * 2;
-
- if ( new_size <= 0 )
- b->yy_buf_size += b->yy_buf_size / 8;
- else
- b->yy_buf_size *= 2;
-
- b->yy_ch_buf = (char *)
- /* Include room in for 2 EOB chars. */
- yyrealloc((void *) b->yy_ch_buf,b->yy_buf_size + 2 );
- }
- else
- /* Can't grow it, we don't own it. */
- b->yy_ch_buf = 0;
-
- if ( ! b->yy_ch_buf )
- YY_FATAL_ERROR(
- "fatal error - scanner input buffer overflow" );
-
- (yy_c_buf_p) = &b->yy_ch_buf[yy_c_buf_p_offset];
-
- num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size -
- number_to_move - 1;
-
- }
-
- if ( num_to_read > YY_READ_BUF_SIZE )
- num_to_read = YY_READ_BUF_SIZE;
-
- /* Read in more data. */
- YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),
- (yy_n_chars), (size_t) num_to_read );
-
- YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
- }
-
- if ( (yy_n_chars) == 0 )
- {
- if ( number_to_move == YY_MORE_ADJ )
- {
- ret_val = EOB_ACT_END_OF_FILE;
- yyrestart(yyin );
- }
-
- else
- {
- ret_val = EOB_ACT_LAST_MATCH;
- YY_CURRENT_BUFFER_LVALUE->yy_buffer_status =
- YY_BUFFER_EOF_PENDING;
- }
- }
-
- else
- ret_val = EOB_ACT_CONTINUE_SCAN;
-
- if ((yy_size_t) ((yy_n_chars) + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) {
- /* Extend the array by 50%, plus the number we really need. */
- yy_size_t new_size = (yy_n_chars) + number_to_move + ((yy_n_chars) >> 1);
- YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) yyrealloc((void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf,new_size );
- if ( ! YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
- YY_FATAL_ERROR( "out of dynamic memory in yy_get_next_buffer()" );
- }
-
- (yy_n_chars) += number_to_move;
- YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] = YY_END_OF_BUFFER_CHAR;
- YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] = YY_END_OF_BUFFER_CHAR;
-
- (yytext_ptr) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0];
-
- return ret_val;
-}
-
-/* yy_get_previous_state - get the state just before the EOB char was reached */
-
- static yy_state_type yy_get_previous_state (void)
-{
- register yy_state_type yy_current_state;
- register char *yy_cp;
-
- yy_current_state = (yy_start);
- yy_current_state += YY_AT_BOL();
-
- for ( yy_cp = (yytext_ptr) + YY_MORE_ADJ; yy_cp < (yy_c_buf_p); ++yy_cp )
- {
- register YY_CHAR yy_c = (*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1);
- if ( yy_accept[yy_current_state] )
- {
- (yy_last_accepting_state) = yy_current_state;
- (yy_last_accepting_cpos) = yy_cp;
- }
- while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
- {
- yy_current_state = (int) yy_def[yy_current_state];
- if ( yy_current_state >= 73 )
- yy_c = yy_meta[(unsigned int) yy_c];
- }
- yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
- }
-
- return yy_current_state;
-}
-
-/* yy_try_NUL_trans - try to make a transition on the NUL character
- *
- * synopsis
- * next_state = yy_try_NUL_trans( current_state );
- */
- static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state )
-{
- register int yy_is_jam;
- register char *yy_cp = (yy_c_buf_p);
-
- register YY_CHAR yy_c = 1;
- if ( yy_accept[yy_current_state] )
- {
- (yy_last_accepting_state) = yy_current_state;
- (yy_last_accepting_cpos) = yy_cp;
- }
- while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
- {
- yy_current_state = (int) yy_def[yy_current_state];
- if ( yy_current_state >= 73 )
- yy_c = yy_meta[(unsigned int) yy_c];
- }
- yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
- yy_is_jam = (yy_current_state == 72);
-
- return yy_is_jam ? 0 : yy_current_state;
-}
-
- static void yyunput (int c, register char * yy_bp )
-{
- register char *yy_cp;
-
- yy_cp = (yy_c_buf_p);
-
- /* undo effects of setting up yytext */
- *yy_cp = (yy_hold_char);
-
- if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )
- { /* need to shift things up to make room */
- /* +2 for EOB chars. */
- register int number_to_move = (yy_n_chars) + 2;
- register char *dest = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[
- YY_CURRENT_BUFFER_LVALUE->yy_buf_size + 2];
- register char *source =
- &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move];
-
- while ( source > YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
- *--dest = *--source;
-
- yy_cp += (int) (dest - source);
- yy_bp += (int) (dest - source);
- YY_CURRENT_BUFFER_LVALUE->yy_n_chars =
- (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_buf_size;
-
- if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )
- YY_FATAL_ERROR( "flex scanner push-back overflow" );
- }
-
- *--yy_cp = (char) c;
-
- (yytext_ptr) = yy_bp;
- (yy_hold_char) = *yy_cp;
- (yy_c_buf_p) = yy_cp;
-}
-
-#ifndef YY_NO_INPUT
-#ifdef __cplusplus
- static int yyinput (void)
-#else
- static int input (void)
-#endif
-
-{
- int c;
-
- *(yy_c_buf_p) = (yy_hold_char);
-
- if ( *(yy_c_buf_p) == YY_END_OF_BUFFER_CHAR )
- {
- /* yy_c_buf_p now points to the character we want to return.
- * If this occurs *before* the EOB characters, then it's a
- * valid NUL; if not, then we've hit the end of the buffer.
- */
- if ( (yy_c_buf_p) < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
- /* This was really a NUL. */
- *(yy_c_buf_p) = '\0';
-
- else
- { /* need more input */
- int offset = (yy_c_buf_p) - (yytext_ptr);
- ++(yy_c_buf_p);
-
- switch ( yy_get_next_buffer( ) )
- {
- case EOB_ACT_LAST_MATCH:
- /* This happens because yy_g_n_b()
- * sees that we've accumulated a
- * token and flags that we need to
- * try matching the token before
- * proceeding. But for input(),
- * there's no matching to consider.
- * So convert the EOB_ACT_LAST_MATCH
- * to EOB_ACT_END_OF_FILE.
- */
-
- /* Reset buffer status. */
- yyrestart(yyin );
-
- /*FALLTHROUGH*/
-
- case EOB_ACT_END_OF_FILE:
- {
- if ( yywrap( ) )
- return EOF;
-
- if ( ! (yy_did_buffer_switch_on_eof) )
- YY_NEW_FILE;
-#ifdef __cplusplus
- return yyinput();
-#else
- return input();
-#endif
- }
-
- case EOB_ACT_CONTINUE_SCAN:
- (yy_c_buf_p) = (yytext_ptr) + offset;
- break;
- }
- }
- }
-
- c = *(unsigned char *) (yy_c_buf_p); /* cast for 8-bit char's */
- *(yy_c_buf_p) = '\0'; /* preserve yytext */
- (yy_hold_char) = *++(yy_c_buf_p);
-
- YY_CURRENT_BUFFER_LVALUE->yy_at_bol = (c == '\n');
-
- return c;
-}
-#endif /* ifndef YY_NO_INPUT */
-
-/** Immediately switch to a different input stream.
- * @param input_file A readable stream.
- *
- * @note This function does not reset the start condition to @c INITIAL .
- */
- void yyrestart (FILE * input_file )
-{
-
- if ( ! YY_CURRENT_BUFFER ){
- yyensure_buffer_stack ();
- YY_CURRENT_BUFFER_LVALUE =
- yy_create_buffer(yyin,YY_BUF_SIZE );
- }
-
- yy_init_buffer(YY_CURRENT_BUFFER,input_file );
- yy_load_buffer_state( );
-}
-
-/** Switch to a different input buffer.
- * @param new_buffer The new input buffer.
- *
- */
- void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer )
-{
-
- /* TODO. We should be able to replace this entire function body
- * with
- * yypop_buffer_state();
- * yypush_buffer_state(new_buffer);
- */
- yyensure_buffer_stack ();
- if ( YY_CURRENT_BUFFER == new_buffer )
- return;
-
- if ( YY_CURRENT_BUFFER )
- {
- /* Flush out information for old buffer. */
- *(yy_c_buf_p) = (yy_hold_char);
- YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
- YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
- }
-
- YY_CURRENT_BUFFER_LVALUE = new_buffer;
- yy_load_buffer_state( );
-
- /* We don't actually know whether we did this switch during
- * EOF (yywrap()) processing, but the only time this flag
- * is looked at is after yywrap() is called, so it's safe
- * to go ahead and always set it.
- */
- (yy_did_buffer_switch_on_eof) = 1;
-}
-
-static void yy_load_buffer_state (void)
-{
- (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
- (yytext_ptr) = (yy_c_buf_p) = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos;
- yyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file;
- (yy_hold_char) = *(yy_c_buf_p);
-}
-
-/** Allocate and initialize an input buffer state.
- * @param file A readable stream.
- * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE.
- *
- * @return the allocated buffer state.
- */
- YY_BUFFER_STATE yy_create_buffer (FILE * file, int size )
-{
- YY_BUFFER_STATE b;
-
- b = (YY_BUFFER_STATE) yyalloc(sizeof( struct yy_buffer_state ) );
- if ( ! b )
- YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );
-
- b->yy_buf_size = size;
-
- /* yy_ch_buf has to be 2 characters longer than the size given because
- * we need to put in 2 end-of-buffer characters.
- */
- b->yy_ch_buf = (char *) yyalloc(b->yy_buf_size + 2 );
- if ( ! b->yy_ch_buf )
- YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );
-
- b->yy_is_our_buffer = 1;
-
- yy_init_buffer(b,file );
-
- return b;
-}
-
-/** Destroy the buffer.
- * @param b a buffer created with yy_create_buffer()
- *
- */
- void yy_delete_buffer (YY_BUFFER_STATE b )
-{
-
- if ( ! b )
- return;
-
- if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */
- YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0;
-
- if ( b->yy_is_our_buffer )
- yyfree((void *) b->yy_ch_buf );
-
- yyfree((void *) b );
-}
-
-#ifndef __cplusplus
-extern int isatty (int );
-#endif /* __cplusplus */
-
-/* Initializes or reinitializes a buffer.
- * This function is sometimes called more than once on the same buffer,
- * such as during a yyrestart() or at EOF.
- */
- static void yy_init_buffer (YY_BUFFER_STATE b, FILE * file )
-
-{
- int oerrno = errno;
-
- yy_flush_buffer(b );
-
- b->yy_input_file = file;
- b->yy_fill_buffer = 1;
-
- /* If b is the current buffer, then yy_init_buffer was _probably_
- * called from yyrestart() or through yy_get_next_buffer.
- * In that case, we don't want to reset the lineno or column.
- */
- if (b != YY_CURRENT_BUFFER){
- b->yy_bs_lineno = 1;
- b->yy_bs_column = 0;
- }
-
- b->yy_is_interactive = file ? (isatty( fileno(file) ) > 0) : 0;
-
- errno = oerrno;
-}
-
-/** Discard all buffered characters. On the next scan, YY_INPUT will be called.
- * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER.
- *
- */
- void yy_flush_buffer (YY_BUFFER_STATE b )
-{
- if ( ! b )
- return;
-
- b->yy_n_chars = 0;
-
- /* We always need two end-of-buffer characters. The first causes
- * a transition to the end-of-buffer state. The second causes
- * a jam in that state.
- */
- b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR;
- b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR;
-
- b->yy_buf_pos = &b->yy_ch_buf[0];
-
- b->yy_at_bol = 1;
- b->yy_buffer_status = YY_BUFFER_NEW;
-
- if ( b == YY_CURRENT_BUFFER )
- yy_load_buffer_state( );
-}
-
-/** Pushes the new state onto the stack. The new state becomes
- * the current state. This function will allocate the stack
- * if necessary.
- * @param new_buffer The new state.
- *
- */
-void yypush_buffer_state (YY_BUFFER_STATE new_buffer )
-{
- if (new_buffer == NULL)
- return;
-
- yyensure_buffer_stack();
-
- /* This block is copied from yy_switch_to_buffer. */
- if ( YY_CURRENT_BUFFER )
- {
- /* Flush out information for old buffer. */
- *(yy_c_buf_p) = (yy_hold_char);
- YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
- YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
- }
-
- /* Only push if top exists. Otherwise, replace top. */
- if (YY_CURRENT_BUFFER)
- (yy_buffer_stack_top)++;
- YY_CURRENT_BUFFER_LVALUE = new_buffer;
-
- /* copied from yy_switch_to_buffer. */
- yy_load_buffer_state( );
- (yy_did_buffer_switch_on_eof) = 1;
-}
-
-/** Removes and deletes the top of the stack, if present.
- * The next element becomes the new top.
- *
- */
-void yypop_buffer_state (void)
-{
- if (!YY_CURRENT_BUFFER)
- return;
-
- yy_delete_buffer(YY_CURRENT_BUFFER );
- YY_CURRENT_BUFFER_LVALUE = NULL;
- if ((yy_buffer_stack_top) > 0)
- --(yy_buffer_stack_top);
-
- if (YY_CURRENT_BUFFER) {
- yy_load_buffer_state( );
- (yy_did_buffer_switch_on_eof) = 1;
- }
-}
-
-/* Allocates the stack if it does not exist.
- * Guarantees space for at least one push.
- */
-static void yyensure_buffer_stack (void)
-{
- int num_to_alloc;
-
- if (!(yy_buffer_stack)) {
-
- /* First allocation is just for 2 elements, since we don't know if this
- * scanner will even need a stack. We use 2 instead of 1 to avoid an
- * immediate realloc on the next call.
- */
- num_to_alloc = 1;
- (yy_buffer_stack) = (struct yy_buffer_state**)yyalloc
- (num_to_alloc * sizeof(struct yy_buffer_state*)
- );
- if ( ! (yy_buffer_stack) )
- YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" );
-
- memset((yy_buffer_stack), 0, num_to_alloc * sizeof(struct yy_buffer_state*));
-
- (yy_buffer_stack_max) = num_to_alloc;
- (yy_buffer_stack_top) = 0;
- return;
- }
-
- if ((yy_buffer_stack_top) >= ((yy_buffer_stack_max)) - 1){
-
- /* Increase the buffer to prepare for a possible push. */
- int grow_size = 8 /* arbitrary grow size */;
-
- num_to_alloc = (yy_buffer_stack_max) + grow_size;
- (yy_buffer_stack) = (struct yy_buffer_state**)yyrealloc
- ((yy_buffer_stack),
- num_to_alloc * sizeof(struct yy_buffer_state*)
- );
- if ( ! (yy_buffer_stack) )
- YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" );
-
- /* zero only the new slots.*/
- memset((yy_buffer_stack) + (yy_buffer_stack_max), 0, grow_size * sizeof(struct yy_buffer_state*));
- (yy_buffer_stack_max) = num_to_alloc;
- }
-}
-
-/** Setup the input buffer state to scan directly from a user-specified character buffer.
- * @param base the character buffer
- * @param size the size in bytes of the character buffer
- *
- * @return the newly allocated buffer state object.
- */
-YY_BUFFER_STATE yy_scan_buffer (char * base, yy_size_t size )
-{
- YY_BUFFER_STATE b;
-
- if ( size < 2 ||
- base[size-2] != YY_END_OF_BUFFER_CHAR ||
- base[size-1] != YY_END_OF_BUFFER_CHAR )
- /* They forgot to leave room for the EOB's. */
- return 0;
-
- b = (YY_BUFFER_STATE) yyalloc(sizeof( struct yy_buffer_state ) );
- if ( ! b )
- YY_FATAL_ERROR( "out of dynamic memory in yy_scan_buffer()" );
-
- b->yy_buf_size = size - 2; /* "- 2" to take care of EOB's */
- b->yy_buf_pos = b->yy_ch_buf = base;
- b->yy_is_our_buffer = 0;
- b->yy_input_file = 0;
- b->yy_n_chars = b->yy_buf_size;
- b->yy_is_interactive = 0;
- b->yy_at_bol = 1;
- b->yy_fill_buffer = 0;
- b->yy_buffer_status = YY_BUFFER_NEW;
-
- yy_switch_to_buffer(b );
-
- return b;
-}
-
-/** Setup the input buffer state to scan a string. The next call to yylex() will
- * scan from a @e copy of @a str.
- * @param yystr a NUL-terminated string to scan
- *
- * @return the newly allocated buffer state object.
- * @note If you want to scan bytes that may contain NUL values, then use
- * yy_scan_bytes() instead.
- */
-YY_BUFFER_STATE yy_scan_string (yyconst char * yystr )
-{
-
- return yy_scan_bytes(yystr,strlen(yystr) );
-}
-
-/** Setup the input buffer state to scan the given bytes. The next call to yylex() will
- * scan from a @e copy of @a bytes.
- * @param bytes the byte buffer to scan
- * @param len the number of bytes in the buffer pointed to by @a bytes.
- *
- * @return the newly allocated buffer state object.
- */
-YY_BUFFER_STATE yy_scan_bytes (yyconst char * yybytes, int _yybytes_len )
-{
- YY_BUFFER_STATE b;
- char *buf;
- yy_size_t n;
- int i;
-
- /* Get memory for full buffer, including space for trailing EOB's. */
- n = _yybytes_len + 2;
- buf = (char *) yyalloc(n );
- if ( ! buf )
- YY_FATAL_ERROR( "out of dynamic memory in yy_scan_bytes()" );
-
- for ( i = 0; i < _yybytes_len; ++i )
- buf[i] = yybytes[i];
-
- buf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR;
-
- b = yy_scan_buffer(buf,n );
- if ( ! b )
- YY_FATAL_ERROR( "bad buffer in yy_scan_bytes()" );
-
- /* It's okay to grow etc. this buffer, and we should throw it
- * away when we're done.
- */
- b->yy_is_our_buffer = 1;
-
- return b;
-}
-
-#ifndef YY_EXIT_FAILURE
-#define YY_EXIT_FAILURE 2
-#endif
-
-static void yy_fatal_error (yyconst char* msg )
-{
- (void) fprintf( stderr, "%s\n", msg );
- exit( YY_EXIT_FAILURE );
-}
-
-/* Redefine yyless() so it works in section 3 code. */
-
-#undef yyless
-#define yyless(n) \
- do \
- { \
- /* Undo effects of setting up yytext. */ \
- int yyless_macro_arg = (n); \
- YY_LESS_LINENO(yyless_macro_arg);\
- yytext[yyleng] = (yy_hold_char); \
- (yy_c_buf_p) = yytext + yyless_macro_arg; \
- (yy_hold_char) = *(yy_c_buf_p); \
- *(yy_c_buf_p) = '\0'; \
- yyleng = yyless_macro_arg; \
- } \
- while ( 0 )
-
-/* Accessor methods (get/set functions) to struct members. */
-
-/** Get the current line number.
- *
- */
-int yyget_lineno (void)
-{
-
- return yylineno;
-}
-
-/** Get the input stream.
- *
- */
-FILE *yyget_in (void)
-{
- return yyin;
-}
-
-/** Get the output stream.
- *
- */
-FILE *yyget_out (void)
-{
- return yyout;
-}
-
-/** Get the length of the current token.
- *
- */
-int yyget_leng (void)
-{
- return yyleng;
-}
-
-/** Get the current token.
- *
- */
-
-char *yyget_text (void)
-{
- return yytext;
-}
-
-/** Set the current line number.
- * @param line_number
- *
- */
-void yyset_lineno (int line_number )
-{
-
- yylineno = line_number;
-}
-
-/** Set the input stream. This does not discard the current
- * input buffer.
- * @param in_str A readable stream.
- *
- * @see yy_switch_to_buffer
- */
-void yyset_in (FILE * in_str )
-{
- yyin = in_str ;
-}
-
-void yyset_out (FILE * out_str )
-{
- yyout = out_str ;
-}
-
-int yyget_debug (void)
-{
- return yy_flex_debug;
-}
-
-void yyset_debug (int bdebug )
-{
- yy_flex_debug = bdebug ;
-}
-
-static int yy_init_globals (void)
-{
- /* Initialization is the same as for the non-reentrant scanner.
- * This function is called from yylex_destroy(), so don't allocate here.
- */
-
- (yy_buffer_stack) = 0;
- (yy_buffer_stack_top) = 0;
- (yy_buffer_stack_max) = 0;
- (yy_c_buf_p) = (char *) 0;
- (yy_init) = 0;
- (yy_start) = 0;
-
-/* Defined in main.c */
-#ifdef YY_STDINIT
- yyin = stdin;
- yyout = stdout;
-#else
- yyin = (FILE *) 0;
- yyout = (FILE *) 0;
-#endif
-
- /* For future reference: Set errno on error, since we are called by
- * yylex_init()
- */
- return 0;
-}
-
-/* yylex_destroy is for both reentrant and non-reentrant scanners. */
-int yylex_destroy (void)
-{
-
- /* Pop the buffer stack, destroying each element. */
- while(YY_CURRENT_BUFFER){
- yy_delete_buffer(YY_CURRENT_BUFFER );
- YY_CURRENT_BUFFER_LVALUE = NULL;
- yypop_buffer_state();
- }
-
- /* Destroy the stack itself. */
- yyfree((yy_buffer_stack) );
- (yy_buffer_stack) = NULL;
-
- /* Reset the globals. This is important in a non-reentrant scanner so the next time
- * yylex() is called, initialization will occur. */
- yy_init_globals( );
-
- return 0;
-}
-
-/*
- * Internal utility routines.
- */
-
-#ifndef yytext_ptr
-static void yy_flex_strncpy (char* s1, yyconst char * s2, int n )
-{
- register int i;
- for ( i = 0; i < n; ++i )
- s1[i] = s2[i];
-}
-#endif
-
-#ifdef YY_NEED_STRLEN
-static int yy_flex_strlen (yyconst char * s )
-{
- register int n;
- for ( n = 0; s[n]; ++n )
- ;
-
- return n;
-}
-#endif
-
-void *yyalloc (yy_size_t size )
-{
- return (void *) malloc( size );
-}
-
-void *yyrealloc (void * ptr, yy_size_t size )
-{
- /* The cast to (char *) in the following accommodates both
- * implementations that use char* generic pointers, and those
- * that use void* generic pointers. It works with the latter
- * because both ANSI C and C++ allow castless assignment from
- * any pointer type to void*, and deal with argument conversions
- * as though doing an assignment.
- */
- return (void *) realloc( (char *) ptr, size );
-}
-
-void yyfree (void * ptr )
-{
- free( (char *) ptr ); /* see yyrealloc() for (char *) cast */
-}
-
-#define YYTABLES_NAME "yytables"
-
-/* Bring in the keyword recognizer. */
-
-#include "keywords.c"
-
-/* Macros to append to our phrase collection list. */
-
-/*
- * We mark any token that equals a known enumerator as SYM_ENUM_CONST.
- * The parser will change this for struct and union tags later; the only
- * problem is struct and union members:
- *		enum e { a, b }; struct s { int a, b; }
- * but in this case the only effect will be that the ABI checksums become
- * more volatile, which is acceptable. Also, such collisions are quite rare;
- * so far they have only been observed in include/linux/telephony.h.
- */
-#define _APP(T,L) do { \
- cur_node = next_node; \
- next_node = xmalloc(sizeof(*next_node)); \
- next_node->next = cur_node; \
- cur_node->string = memcpy(xmalloc(L+1), T, L+1); \
- cur_node->tag = \
- find_symbol(cur_node->string, SYM_ENUM_CONST, 1)?\
- SYM_ENUM_CONST : SYM_NORMAL ; \
- cur_node->in_source_file = in_source_file; \
- } while (0)
-
-#define APP _APP(yytext, yyleng)
-
-/* The second stage lexer. Here we incorporate knowledge of the state
- of the parser to tailor the tokens that are returned. */
-
-int
-yylex(void)
-{
- static enum {
- ST_NOTSTARTED, ST_NORMAL, ST_ATTRIBUTE, ST_ASM, ST_TYPEOF, ST_TYPEOF_1,
- ST_BRACKET, ST_BRACE, ST_EXPRESSION,
- ST_TABLE_1, ST_TABLE_2, ST_TABLE_3, ST_TABLE_4,
- ST_TABLE_5, ST_TABLE_6
- } lexstate = ST_NOTSTARTED;
-
- static int suppress_type_lookup, dont_want_brace_phrase;
- static struct string_list *next_node;
-
- int token, count = 0;
- struct string_list *cur_node;
-
- if (lexstate == ST_NOTSTARTED)
- {
- next_node = xmalloc(sizeof(*next_node));
- next_node->next = NULL;
- lexstate = ST_NORMAL;
- }
-
-repeat:
- token = yylex1();
-
- if (token == 0)
- return 0;
- else if (token == FILENAME)
- {
- char *file, *e;
-
- /* Save the filename and line number for later error messages. */
-
- if (cur_filename)
- free(cur_filename);
-
- file = strchr(yytext, '\"')+1;
- e = strchr(file, '\"');
- *e = '\0';
- cur_filename = memcpy(xmalloc(e-file+1), file, e-file+1);
- cur_line = atoi(yytext+2);
-
- if (!source_file) {
- source_file = xstrdup(cur_filename);
- in_source_file = 1;
- } else {
- in_source_file = (strcmp(cur_filename, source_file) == 0);
- }
-
- goto repeat;
- }
-
- switch (lexstate)
- {
- case ST_NORMAL:
- switch (token)
- {
- case IDENT:
- APP;
- {
- int r = is_reserved_word(yytext, yyleng);
- if (r >= 0)
- {
- switch (token = r)
- {
- case ATTRIBUTE_KEYW:
- lexstate = ST_ATTRIBUTE;
- count = 0;
- goto repeat;
- case ASM_KEYW:
- lexstate = ST_ASM;
- count = 0;
- goto repeat;
- case TYPEOF_KEYW:
- lexstate = ST_TYPEOF;
- count = 0;
- goto repeat;
-
- case STRUCT_KEYW:
- case UNION_KEYW:
- case ENUM_KEYW:
- dont_want_brace_phrase = 3;
- suppress_type_lookup = 2;
- goto fini;
-
- case EXPORT_SYMBOL_KEYW:
- goto fini;
- }
- }
- if (!suppress_type_lookup)
- {
- if (find_symbol(yytext, SYM_TYPEDEF, 1))
- token = TYPE;
- }
- }
- break;
-
- case '[':
- APP;
- lexstate = ST_BRACKET;
- count = 1;
- goto repeat;
-
- case '{':
- APP;
- if (dont_want_brace_phrase)
- break;
- lexstate = ST_BRACE;
- count = 1;
- goto repeat;
-
- case '=': case ':':
- APP;
- lexstate = ST_EXPRESSION;
- break;
-
- case DOTS:
- default:
- APP;
- break;
- }
- break;
-
- case ST_ATTRIBUTE:
- APP;
- switch (token)
- {
- case '(':
- ++count;
- goto repeat;
- case ')':
- if (--count == 0)
- {
- lexstate = ST_NORMAL;
- token = ATTRIBUTE_PHRASE;
- break;
- }
- goto repeat;
- default:
- goto repeat;
- }
- break;
-
- case ST_ASM:
- APP;
- switch (token)
- {
- case '(':
- ++count;
- goto repeat;
- case ')':
- if (--count == 0)
- {
- lexstate = ST_NORMAL;
- token = ASM_PHRASE;
- break;
- }
- goto repeat;
- default:
- goto repeat;
- }
- break;
-
- case ST_TYPEOF_1:
- if (token == IDENT)
- {
- if (is_reserved_word(yytext, yyleng) >= 0
- || find_symbol(yytext, SYM_TYPEDEF, 1))
- {
- yyless(0);
- unput('(');
- lexstate = ST_NORMAL;
- token = TYPEOF_KEYW;
- break;
- }
- _APP("(", 1);
- }
- lexstate = ST_TYPEOF;
- /* FALLTHRU */
-
- case ST_TYPEOF:
- switch (token)
- {
- case '(':
- if ( ++count == 1 )
- lexstate = ST_TYPEOF_1;
- else
- APP;
- goto repeat;
- case ')':
- APP;
- if (--count == 0)
- {
- lexstate = ST_NORMAL;
- token = TYPEOF_PHRASE;
- break;
- }
- goto repeat;
- default:
- APP;
- goto repeat;
- }
- break;
-
- case ST_BRACKET:
- APP;
- switch (token)
- {
- case '[':
- ++count;
- goto repeat;
- case ']':
- if (--count == 0)
- {
- lexstate = ST_NORMAL;
- token = BRACKET_PHRASE;
- break;
- }
- goto repeat;
- default:
- goto repeat;
- }
- break;
-
- case ST_BRACE:
- APP;
- switch (token)
- {
- case '{':
- ++count;
- goto repeat;
- case '}':
- if (--count == 0)
- {
- lexstate = ST_NORMAL;
- token = BRACE_PHRASE;
- break;
- }
- goto repeat;
- default:
- goto repeat;
- }
- break;
-
- case ST_EXPRESSION:
- switch (token)
- {
- case '(': case '[': case '{':
- ++count;
- APP;
- goto repeat;
- case '}':
- /* is this the last line of an enum declaration? */
- if (count == 0)
- {
- /* Put back the token we just read so's we can find it again
- after registering the expression. */
- unput(token);
-
- lexstate = ST_NORMAL;
- token = EXPRESSION_PHRASE;
- break;
- }
- /* FALLTHRU */
- case ')': case ']':
- --count;
- APP;
- goto repeat;
- case ',': case ';':
- if (count == 0)
- {
- /* Put back the token we just read so's we can find it again
- after registering the expression. */
- unput(token);
-
- lexstate = ST_NORMAL;
- token = EXPRESSION_PHRASE;
- break;
- }
- APP;
- goto repeat;
- default:
- APP;
- goto repeat;
- }
- break;
-
- case ST_TABLE_1:
- goto repeat;
-
- case ST_TABLE_2:
- if (token == IDENT && yyleng == 1 && yytext[0] == 'X')
- {
- token = EXPORT_SYMBOL_KEYW;
- lexstate = ST_TABLE_5;
- APP;
- break;
- }
- lexstate = ST_TABLE_6;
- /* FALLTHRU */
-
- case ST_TABLE_6:
- switch (token)
- {
- case '{': case '[': case '(':
- ++count;
- break;
- case '}': case ']': case ')':
- --count;
- break;
- case ',':
- if (count == 0)
- lexstate = ST_TABLE_2;
- break;
- };
- goto repeat;
-
- case ST_TABLE_3:
- goto repeat;
-
- case ST_TABLE_4:
- if (token == ';')
- lexstate = ST_NORMAL;
- goto repeat;
-
- case ST_TABLE_5:
- switch (token)
- {
- case ',':
- token = ';';
- lexstate = ST_TABLE_2;
- APP;
- break;
- default:
- APP;
- break;
- }
- break;
-
- default:
- exit(1);
- }
-fini:
-
- if (suppress_type_lookup > 0)
- --suppress_type_lookup;
- if (dont_want_brace_phrase > 0)
- --dont_want_brace_phrase;
-
- yylval = &next_node->next;
-
- return token;
-}
-
diff --git a/scripts/genksyms/parse.tab.c_shipped b/scripts/genksyms/parse.tab.c_shipped
deleted file mode 100644
index d02258bafe7b..000000000000
--- a/scripts/genksyms/parse.tab.c_shipped
+++ /dev/null
@@ -1,2394 +0,0 @@
-/* A Bison parser, made by GNU Bison 2.7. */
-
-/* Bison implementation for Yacc-like parsers in C
-
- Copyright (C) 1984, 1989-1990, 2000-2012 Free Software Foundation, Inc.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>. */
-
-/* As a special exception, you may create a larger work that contains
- part or all of the Bison parser skeleton and distribute that work
- under terms of your choice, so long as that work isn't itself a
- parser generator using the skeleton or a modified version thereof
- as a parser skeleton. Alternatively, if you modify or redistribute
- the parser skeleton itself, you may (at your option) remove this
- special exception, which will cause the skeleton and the resulting
- Bison output files to be licensed under the GNU General Public
- License without this special exception.
-
- This special exception was added by the Free Software Foundation in
- version 2.2 of Bison. */
-
-/* C LALR(1) parser skeleton written by Richard Stallman, by
- simplifying the original so-called "semantic" parser. */
-
-/* All symbols defined below should begin with yy or YY, to avoid
- infringing on user name space. This should be done even for local
- variables, as they might otherwise be expanded by user macros.
- There are some unavoidable exceptions within include files to
- define necessary library symbols; they are noted "INFRINGES ON
- USER NAME SPACE" below. */
-
-/* Identify Bison output. */
-#define YYBISON 1
-
-/* Bison version. */
-#define YYBISON_VERSION "2.7"
-
-/* Skeleton name. */
-#define YYSKELETON_NAME "yacc.c"
-
-/* Pure parsers. */
-#define YYPURE 0
-
-/* Push parsers. */
-#define YYPUSH 0
-
-/* Pull parsers. */
-#define YYPULL 1
-
-
-
-
-/* Copy the first part of user declarations. */
-
-
-
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
-#include "genksyms.h"
-
-static int is_typedef;
-static int is_extern;
-static char *current_name;
-static struct string_list *decl_spec;
-
-static void yyerror(const char *);
-
-static inline void
-remove_node(struct string_list **p)
-{
- struct string_list *node = *p;
- *p = node->next;
- free_node(node);
-}
-
-static inline void
-remove_list(struct string_list **pb, struct string_list **pe)
-{
- struct string_list *b = *pb, *e = *pe;
- *pb = e;
- free_list(b, e);
-}
-
-/* Record definition of a struct/union/enum */
-static void record_compound(struct string_list **keyw,
- struct string_list **ident,
- struct string_list **body,
- enum symbol_type type)
-{
- struct string_list *b = *body, *i = *ident, *r;
-
- if (i->in_source_file) {
- remove_node(keyw);
- (*ident)->tag = type;
- remove_list(body, ident);
- return;
- }
- r = copy_node(i); r->tag = type;
- r->next = (*keyw)->next; *body = r; (*keyw)->next = NULL;
- add_symbol(i->string, type, b, is_extern);
-}
-
-
-
-
-# ifndef YY_NULL
-# if defined __cplusplus && 201103L <= __cplusplus
-# define YY_NULL nullptr
-# else
-# define YY_NULL 0
-# endif
-# endif
-
-/* Enabling verbose error messages. */
-#ifdef YYERROR_VERBOSE
-# undef YYERROR_VERBOSE
-# define YYERROR_VERBOSE 1
-#else
-# define YYERROR_VERBOSE 0
-#endif
-
-
-/* Enabling traces. */
-#ifndef YYDEBUG
-# define YYDEBUG 1
-#endif
-#if YYDEBUG
-extern int yydebug;
-#endif
-
-/* Tokens. */
-#ifndef YYTOKENTYPE
-# define YYTOKENTYPE
- /* Put the tokens into the symbol table, so that GDB and other debuggers
- know about them. */
- enum yytokentype {
- ASM_KEYW = 258,
- ATTRIBUTE_KEYW = 259,
- AUTO_KEYW = 260,
- BOOL_KEYW = 261,
- CHAR_KEYW = 262,
- CONST_KEYW = 263,
- DOUBLE_KEYW = 264,
- ENUM_KEYW = 265,
- EXTERN_KEYW = 266,
- EXTENSION_KEYW = 267,
- FLOAT_KEYW = 268,
- INLINE_KEYW = 269,
- INT_KEYW = 270,
- LONG_KEYW = 271,
- REGISTER_KEYW = 272,
- RESTRICT_KEYW = 273,
- SHORT_KEYW = 274,
- SIGNED_KEYW = 275,
- STATIC_KEYW = 276,
- STRUCT_KEYW = 277,
- TYPEDEF_KEYW = 278,
- UNION_KEYW = 279,
- UNSIGNED_KEYW = 280,
- VOID_KEYW = 281,
- VOLATILE_KEYW = 282,
- TYPEOF_KEYW = 283,
- VA_LIST_KEYW = 284,
- EXPORT_SYMBOL_KEYW = 285,
- ASM_PHRASE = 286,
- ATTRIBUTE_PHRASE = 287,
- TYPEOF_PHRASE = 288,
- BRACE_PHRASE = 289,
- BRACKET_PHRASE = 290,
- EXPRESSION_PHRASE = 291,
- CHAR = 292,
- DOTS = 293,
- IDENT = 294,
- INT = 295,
- REAL = 296,
- STRING = 297,
- TYPE = 298,
- OTHER = 299,
- FILENAME = 300
- };
-#endif
-
-
-#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
-typedef int YYSTYPE;
-# define YYSTYPE_IS_TRIVIAL 1
-# define yystype YYSTYPE /* obsolescent; will be withdrawn */
-# define YYSTYPE_IS_DECLARED 1
-#endif
-
-extern YYSTYPE yylval;
-
-#ifdef YYPARSE_PARAM
-#if defined __STDC__ || defined __cplusplus
-int yyparse (void *YYPARSE_PARAM);
-#else
-int yyparse ();
-#endif
-#else /* ! YYPARSE_PARAM */
-#if defined __STDC__ || defined __cplusplus
-int yyparse (void);
-#else
-int yyparse ();
-#endif
-#endif /* ! YYPARSE_PARAM */
-
-
-
-/* Copy the second part of user declarations. */
-
-
-
-#ifdef short
-# undef short
-#endif
-
-#ifdef YYTYPE_UINT8
-typedef YYTYPE_UINT8 yytype_uint8;
-#else
-typedef unsigned char yytype_uint8;
-#endif
-
-#ifdef YYTYPE_INT8
-typedef YYTYPE_INT8 yytype_int8;
-#elif (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-typedef signed char yytype_int8;
-#else
-typedef short int yytype_int8;
-#endif
-
-#ifdef YYTYPE_UINT16
-typedef YYTYPE_UINT16 yytype_uint16;
-#else
-typedef unsigned short int yytype_uint16;
-#endif
-
-#ifdef YYTYPE_INT16
-typedef YYTYPE_INT16 yytype_int16;
-#else
-typedef short int yytype_int16;
-#endif
-
-#ifndef YYSIZE_T
-# ifdef __SIZE_TYPE__
-# define YYSIZE_T __SIZE_TYPE__
-# elif defined size_t
-# define YYSIZE_T size_t
-# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
-# define YYSIZE_T size_t
-# else
-# define YYSIZE_T unsigned int
-# endif
-#endif
-
-#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
-
-#ifndef YY_
-# if defined YYENABLE_NLS && YYENABLE_NLS
-# if ENABLE_NLS
-# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
-# define YY_(Msgid) dgettext ("bison-runtime", Msgid)
-# endif
-# endif
-# ifndef YY_
-# define YY_(Msgid) Msgid
-# endif
-#endif
-
-/* Suppress unused-variable warnings by "using" E. */
-#if ! defined lint || defined __GNUC__
-# define YYUSE(E) ((void) (E))
-#else
-# define YYUSE(E) /* empty */
-#endif
-
-/* Identity function, used to suppress warnings about constant conditions. */
-#ifndef lint
-# define YYID(N) (N)
-#else
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-static int
-YYID (int yyi)
-#else
-static int
-YYID (yyi)
- int yyi;
-#endif
-{
- return yyi;
-}
-#endif
-
-#if ! defined yyoverflow || YYERROR_VERBOSE
-
-/* The parser invokes alloca or malloc; define the necessary symbols. */
-
-# ifdef YYSTACK_USE_ALLOCA
-# if YYSTACK_USE_ALLOCA
-# ifdef __GNUC__
-# define YYSTACK_ALLOC __builtin_alloca
-# elif defined __BUILTIN_VA_ARG_INCR
-# include <alloca.h> /* INFRINGES ON USER NAME SPACE */
-# elif defined _AIX
-# define YYSTACK_ALLOC __alloca
-# elif defined _MSC_VER
-# include <malloc.h> /* INFRINGES ON USER NAME SPACE */
-# define alloca _alloca
-# else
-# define YYSTACK_ALLOC alloca
-# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
- /* Use EXIT_SUCCESS as a witness for stdlib.h. */
-# ifndef EXIT_SUCCESS
-# define EXIT_SUCCESS 0
-# endif
-# endif
-# endif
-# endif
-# endif
-
-# ifdef YYSTACK_ALLOC
- /* Pacify GCC's `empty if-body' warning. */
-# define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0))
-# ifndef YYSTACK_ALLOC_MAXIMUM
- /* The OS might guarantee only one guard page at the bottom of the stack,
- and a page size can be as small as 4096 bytes. So we cannot safely
- invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
- to allow for a few compiler-allocated temporary stack slots. */
-# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
-# endif
-# else
-# define YYSTACK_ALLOC YYMALLOC
-# define YYSTACK_FREE YYFREE
-# ifndef YYSTACK_ALLOC_MAXIMUM
-# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
-# endif
-# if (defined __cplusplus && ! defined EXIT_SUCCESS \
- && ! ((defined YYMALLOC || defined malloc) \
- && (defined YYFREE || defined free)))
-# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
-# ifndef EXIT_SUCCESS
-# define EXIT_SUCCESS 0
-# endif
-# endif
-# ifndef YYMALLOC
-# define YYMALLOC malloc
-# if ! defined malloc && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
-# endif
-# endif
-# ifndef YYFREE
-# define YYFREE free
-# if ! defined free && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-void free (void *); /* INFRINGES ON USER NAME SPACE */
-# endif
-# endif
-# endif
-#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
-
-
-#if (! defined yyoverflow \
- && (! defined __cplusplus \
- || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
-
-/* A type that is properly aligned for any stack member. */
-union yyalloc
-{
- yytype_int16 yyss_alloc;
- YYSTYPE yyvs_alloc;
-};
-
-/* The size of the maximum gap between one aligned stack and the next. */
-# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
-
-/* The size of an array large enough to hold all stacks, each with
-   N elements.  */
-# define YYSTACK_BYTES(N) \
- ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
- + YYSTACK_GAP_MAXIMUM)
-
-# define YYCOPY_NEEDED 1
-
-/* Relocate STACK from its old location to the new one. The
- local variables YYSIZE and YYSTACKSIZE give the old and new number of
- elements in the stack, and YYPTR gives the new location of the
- stack. Advance YYPTR to a properly aligned location for the next
- stack. */
-# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
- do \
- { \
- YYSIZE_T yynewbytes; \
- YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
- Stack = &yyptr->Stack_alloc; \
- yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
- yyptr += yynewbytes / sizeof (*yyptr); \
- } \
- while (YYID (0))
-
-#endif
-
-#if defined YYCOPY_NEEDED && YYCOPY_NEEDED
-/* Copy COUNT objects from SRC to DST. The source and destination do
- not overlap. */
-# ifndef YYCOPY
-# if defined __GNUC__ && 1 < __GNUC__
-# define YYCOPY(Dst, Src, Count) \
- __builtin_memcpy (Dst, Src, (Count) * sizeof (*(Src)))
-# else
-# define YYCOPY(Dst, Src, Count) \
- do \
- { \
- YYSIZE_T yyi; \
- for (yyi = 0; yyi < (Count); yyi++) \
- (Dst)[yyi] = (Src)[yyi]; \
- } \
- while (YYID (0))
-# endif
-# endif
-#endif /* !YYCOPY_NEEDED */
-
-/* YYFINAL -- State number of the termination state. */
-#define YYFINAL 4
-/* YYLAST -- Last index in YYTABLE. */
-#define YYLAST 522
-
-/* YYNTOKENS -- Number of terminals. */
-#define YYNTOKENS 55
-/* YYNNTS -- Number of nonterminals. */
-#define YYNNTS 49
-/* YYNRULES -- Number of rules. */
-#define YYNRULES 133
-/* YYNRULES -- Number of states. */
-#define YYNSTATES 187
-
-/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */
-#define YYUNDEFTOK 2
-#define YYMAXUTOK 300
-
-#define YYTRANSLATE(YYX) \
- ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
-
-/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */
-static const yytype_uint8 yytranslate[] =
-{
- 0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 49, 50, 51, 2, 48, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 54, 46,
- 2, 52, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 53, 2, 47, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
- 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
- 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
- 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
- 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
- 45
-};
-
-#if YYDEBUG
-/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in
- YYRHS. */
-static const yytype_uint16 yyprhs[] =
-{
- 0, 0, 3, 5, 8, 9, 12, 13, 18, 19,
- 23, 25, 27, 29, 31, 34, 37, 41, 42, 44,
- 46, 50, 55, 56, 58, 60, 63, 65, 67, 69,
- 71, 73, 75, 77, 79, 81, 86, 88, 91, 94,
- 97, 101, 105, 109, 112, 115, 118, 120, 122, 124,
- 126, 128, 130, 132, 134, 136, 138, 140, 142, 145,
- 146, 148, 150, 153, 155, 157, 159, 161, 164, 166,
- 168, 170, 175, 180, 183, 187, 190, 192, 194, 196,
- 201, 206, 209, 213, 217, 220, 222, 226, 227, 229,
- 231, 235, 238, 241, 243, 244, 246, 248, 253, 258,
- 261, 265, 269, 273, 274, 276, 279, 283, 287, 288,
- 290, 292, 295, 299, 302, 303, 305, 307, 311, 314,
- 317, 319, 322, 323, 326, 330, 335, 337, 341, 343,
- 347, 350, 351, 353
-};
-
-/* YYRHS -- A `-1'-separated list of the rules' RHS. */
-static const yytype_int8 yyrhs[] =
-{
- 56, 0, -1, 57, -1, 56, 57, -1, -1, 58,
- 59, -1, -1, 12, 23, 60, 62, -1, -1, 23,
- 61, 62, -1, 62, -1, 86, -1, 101, -1, 103,
- -1, 1, 46, -1, 1, 47, -1, 66, 63, 46,
- -1, -1, 64, -1, 65, -1, 64, 48, 65, -1,
- 76, 102, 97, 87, -1, -1, 67, -1, 68, -1,
- 67, 68, -1, 69, -1, 70, -1, 5, -1, 17,
- -1, 21, -1, 11, -1, 14, -1, 71, -1, 75,
- -1, 28, 49, 83, 50, -1, 33, -1, 22, 39,
- -1, 24, 39, -1, 10, 39, -1, 22, 39, 89,
- -1, 24, 39, 89, -1, 10, 39, 98, -1, 10,
- 98, -1, 22, 89, -1, 24, 89, -1, 7, -1,
- 19, -1, 15, -1, 16, -1, 20, -1, 25, -1,
- 13, -1, 9, -1, 26, -1, 6, -1, 29, -1,
- 43, -1, 51, 73, -1, -1, 74, -1, 75, -1,
- 74, 75, -1, 8, -1, 27, -1, 32, -1, 18,
- -1, 72, 76, -1, 77, -1, 39, -1, 43, -1,
- 77, 49, 80, 50, -1, 77, 49, 1, 50, -1,
- 77, 35, -1, 49, 76, 50, -1, 72, 78, -1,
- 79, -1, 39, -1, 43, -1, 79, 49, 80, 50,
- -1, 79, 49, 1, 50, -1, 79, 35, -1, 49,
- 78, 50, -1, 49, 1, 50, -1, 81, 38, -1,
- 81, -1, 82, 48, 38, -1, -1, 82, -1, 83,
- -1, 82, 48, 83, -1, 67, 84, -1, 72, 84,
- -1, 85, -1, -1, 39, -1, 43, -1, 85, 49,
- 80, 50, -1, 85, 49, 1, 50, -1, 85, 35,
- -1, 49, 84, 50, -1, 49, 1, 50, -1, 66,
- 76, 34, -1, -1, 88, -1, 52, 36, -1, 53,
- 90, 47, -1, 53, 1, 47, -1, -1, 91, -1,
- 92, -1, 91, 92, -1, 66, 93, 46, -1, 1,
- 46, -1, -1, 94, -1, 95, -1, 94, 48, 95,
- -1, 78, 97, -1, 39, 96, -1, 96, -1, 54,
- 36, -1, -1, 97, 32, -1, 53, 99, 47, -1,
- 53, 99, 48, 47, -1, 100, -1, 99, 48, 100,
- -1, 39, -1, 39, 52, 36, -1, 31, 46, -1,
- -1, 31, -1, 30, 49, 39, 50, 46, -1
-};
-
-/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
-static const yytype_uint16 yyrline[] =
-{
- 0, 125, 125, 126, 130, 130, 136, 136, 138, 138,
- 140, 141, 142, 143, 144, 145, 149, 163, 164, 168,
- 176, 189, 195, 196, 200, 201, 205, 211, 215, 216,
- 217, 218, 219, 223, 224, 225, 226, 230, 232, 234,
- 238, 240, 242, 247, 250, 251, 255, 256, 257, 258,
- 259, 260, 261, 262, 263, 264, 265, 266, 270, 275,
- 276, 280, 281, 285, 285, 285, 286, 294, 295, 299,
- 308, 317, 319, 321, 323, 330, 331, 335, 336, 337,
- 339, 341, 343, 345, 350, 351, 352, 356, 357, 361,
- 362, 367, 372, 374, 378, 379, 387, 391, 393, 395,
- 397, 399, 404, 413, 414, 419, 424, 425, 429, 430,
- 434, 435, 439, 441, 446, 447, 451, 452, 456, 457,
- 458, 462, 466, 467, 471, 472, 476, 477, 480, 485,
- 493, 497, 498, 502
-};
-#endif
-
-#if YYDEBUG || YYERROR_VERBOSE || 0
-/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
- First, the terminals, then, starting at YYNTOKENS, nonterminals. */
-static const char *const yytname[] =
-{
- "$end", "error", "$undefined", "ASM_KEYW", "ATTRIBUTE_KEYW",
- "AUTO_KEYW", "BOOL_KEYW", "CHAR_KEYW", "CONST_KEYW", "DOUBLE_KEYW",
- "ENUM_KEYW", "EXTERN_KEYW", "EXTENSION_KEYW", "FLOAT_KEYW",
- "INLINE_KEYW", "INT_KEYW", "LONG_KEYW", "REGISTER_KEYW", "RESTRICT_KEYW",
- "SHORT_KEYW", "SIGNED_KEYW", "STATIC_KEYW", "STRUCT_KEYW",
- "TYPEDEF_KEYW", "UNION_KEYW", "UNSIGNED_KEYW", "VOID_KEYW",
- "VOLATILE_KEYW", "TYPEOF_KEYW", "VA_LIST_KEYW", "EXPORT_SYMBOL_KEYW",
- "ASM_PHRASE", "ATTRIBUTE_PHRASE", "TYPEOF_PHRASE", "BRACE_PHRASE",
- "BRACKET_PHRASE", "EXPRESSION_PHRASE", "CHAR", "DOTS", "IDENT", "INT",
- "REAL", "STRING", "TYPE", "OTHER", "FILENAME", "';'", "'}'", "','",
- "'('", "')'", "'*'", "'='", "'{'", "':'", "$accept", "declaration_seq",
- "declaration", "$@1", "declaration1", "$@2", "$@3", "simple_declaration",
- "init_declarator_list_opt", "init_declarator_list", "init_declarator",
- "decl_specifier_seq_opt", "decl_specifier_seq", "decl_specifier",
- "storage_class_specifier", "type_specifier", "simple_type_specifier",
- "ptr_operator", "cvar_qualifier_seq_opt", "cvar_qualifier_seq",
- "cvar_qualifier", "declarator", "direct_declarator", "nested_declarator",
- "direct_nested_declarator", "parameter_declaration_clause",
- "parameter_declaration_list_opt", "parameter_declaration_list",
- "parameter_declaration", "m_abstract_declarator",
- "direct_m_abstract_declarator", "function_definition", "initializer_opt",
- "initializer", "class_body", "member_specification_opt",
- "member_specification", "member_declaration",
- "member_declarator_list_opt", "member_declarator_list",
- "member_declarator", "member_bitfield_declarator", "attribute_opt",
- "enum_body", "enumerator_list", "enumerator", "asm_definition",
- "asm_phrase_opt", "export_definition", YY_NULL
-};
-#endif
-
-# ifdef YYPRINT
-/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to
- token YYLEX-NUM. */
-static const yytype_uint16 yytoknum[] =
-{
- 0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
- 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
- 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
- 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
- 295, 296, 297, 298, 299, 300, 59, 125, 44, 40,
- 41, 42, 61, 123, 58
-};
-# endif
-
-/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
-static const yytype_uint8 yyr1[] =
-{
- 0, 55, 56, 56, 58, 57, 60, 59, 61, 59,
- 59, 59, 59, 59, 59, 59, 62, 63, 63, 64,
- 64, 65, 66, 66, 67, 67, 68, 68, 69, 69,
- 69, 69, 69, 70, 70, 70, 70, 70, 70, 70,
- 70, 70, 70, 70, 70, 70, 71, 71, 71, 71,
- 71, 71, 71, 71, 71, 71, 71, 71, 72, 73,
- 73, 74, 74, 75, 75, 75, 75, 76, 76, 77,
- 77, 77, 77, 77, 77, 78, 78, 79, 79, 79,
- 79, 79, 79, 79, 80, 80, 80, 81, 81, 82,
- 82, 83, 84, 84, 85, 85, 85, 85, 85, 85,
- 85, 85, 86, 87, 87, 88, 89, 89, 90, 90,
- 91, 91, 92, 92, 93, 93, 94, 94, 95, 95,
- 95, 96, 97, 97, 98, 98, 99, 99, 100, 100,
- 101, 102, 102, 103
-};
-
-/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */
-static const yytype_uint8 yyr2[] =
-{
- 0, 2, 1, 2, 0, 2, 0, 4, 0, 3,
- 1, 1, 1, 1, 2, 2, 3, 0, 1, 1,
- 3, 4, 0, 1, 1, 2, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 4, 1, 2, 2, 2,
- 3, 3, 3, 2, 2, 2, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 2, 0,
- 1, 1, 2, 1, 1, 1, 1, 2, 1, 1,
- 1, 4, 4, 2, 3, 2, 1, 1, 1, 4,
- 4, 2, 3, 3, 2, 1, 3, 0, 1, 1,
- 3, 2, 2, 1, 0, 1, 1, 4, 4, 2,
- 3, 3, 3, 0, 1, 2, 3, 3, 0, 1,
- 1, 2, 3, 2, 0, 1, 1, 3, 2, 2,
- 1, 2, 0, 2, 3, 4, 1, 3, 1, 3,
- 2, 0, 1, 5
-};
-
-/* YYDEFACT[STATE-NAME] -- Default reduction number in state STATE-NUM.
- Performed when YYTABLE doesn't specify something else to do. Zero
- means the default is an error. */
-static const yytype_uint8 yydefact[] =
-{
- 4, 4, 2, 0, 1, 3, 0, 28, 55, 46,
- 63, 53, 0, 31, 0, 52, 32, 48, 49, 29,
- 66, 47, 50, 30, 0, 8, 0, 51, 54, 64,
- 0, 56, 0, 0, 65, 36, 57, 5, 10, 17,
- 23, 24, 26, 27, 33, 34, 11, 12, 13, 14,
- 15, 39, 0, 43, 6, 37, 0, 44, 22, 38,
- 45, 0, 0, 130, 69, 70, 0, 59, 0, 18,
- 19, 0, 131, 68, 25, 42, 128, 0, 126, 22,
- 40, 0, 114, 0, 0, 110, 9, 17, 41, 94,
- 0, 0, 0, 58, 60, 61, 16, 0, 67, 132,
- 102, 122, 73, 0, 0, 124, 0, 7, 113, 107,
- 77, 78, 0, 0, 0, 122, 76, 0, 115, 116,
- 120, 106, 0, 111, 131, 95, 57, 0, 94, 91,
- 93, 35, 0, 74, 62, 20, 103, 0, 0, 85,
- 88, 89, 129, 125, 127, 119, 0, 77, 0, 121,
- 75, 118, 81, 0, 112, 0, 0, 96, 0, 92,
- 99, 0, 133, 123, 0, 21, 104, 72, 71, 84,
- 0, 83, 82, 0, 0, 117, 101, 100, 0, 0,
- 105, 86, 90, 80, 79, 98, 97
-};
-
-/* YYDEFGOTO[NTERM-NUM]. */
-static const yytype_int16 yydefgoto[] =
-{
- -1, 1, 2, 3, 37, 79, 58, 38, 68, 69,
- 70, 82, 40, 41, 42, 43, 44, 71, 93, 94,
- 45, 124, 73, 115, 116, 138, 139, 140, 141, 129,
- 130, 46, 165, 166, 57, 83, 84, 85, 117, 118,
- 119, 120, 136, 53, 77, 78, 47, 101, 48
-};
-
-/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
- STATE-NUM. */
-#define YYPACT_NINF -94
-static const yytype_int16 yypact[] =
-{
- -94, 15, -94, 208, -94, -94, 34, -94, -94, -94,
- -94, -94, -27, -94, -5, -94, -94, -94, -94, -94,
- -94, -94, -94, -94, -25, -94, -16, -94, -94, -94,
- -4, -94, 19, -24, -94, -94, -94, -94, -94, 24,
- 479, -94, -94, -94, -94, -94, -94, -94, -94, -94,
- -94, 29, 48, -94, -94, 37, 106, -94, 479, 37,
- -94, 479, 54, -94, -94, -94, 24, -2, 49, 53,
- -94, 24, -14, -11, -94, -94, 47, 38, -94, 479,
- -94, 51, 23, 55, 157, -94, -94, 24, -94, 393,
- 56, 58, 68, -94, -2, -94, -94, 24, -94, -94,
- -94, -94, -94, 255, 67, -94, 5, -94, -94, -94,
- 50, -94, 7, 69, 40, -94, -8, 83, 88, -94,
- -94, -94, 91, -94, 109, -94, -94, 4, 45, -94,
- 16, -94, 95, -94, -94, -94, -23, 92, 93, 108,
- 96, -94, -94, -94, -94, -94, 97, -94, 98, -94,
- -94, 118, -94, 301, -94, 23, 101, -94, 104, -94,
- -94, 347, -94, -94, 120, -94, -94, -94, -94, -94,
- 440, -94, -94, 111, 119, -94, -94, -94, 130, 137,
- -94, -94, -94, -94, -94, -94, -94
-};
-
-/* YYPGOTO[NTERM-NUM]. */
-static const yytype_int16 yypgoto[] =
-{
- -94, -94, 158, -94, -94, -94, -94, -45, -94, -94,
- 94, -1, -61, -29, -94, -94, -94, -79, -94, -94,
- -63, -7, -94, -93, -94, -92, -94, -94, -60, -57,
- -94, -94, -94, -94, -19, -94, -94, 110, -94, -94,
- 33, 82, 78, 144, -94, 99, -94, -94, -94
-};
-
-/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If
- positive, shift that token. If negative, reduce the rule which
- number is the opposite. If YYTABLE_NINF, syntax error. */
-#define YYTABLE_NINF -110
-static const yytype_int16 yytable[] =
-{
- 89, 90, 39, 114, 95, 156, 10, 60, 146, 163,
- 128, 74, 51, 86, 55, 4, 20, 99, 54, 148,
- 100, 150, 63, 59, 102, 29, 52, 152, 56, 164,
- 34, 134, 72, 114, 107, 114, 80, 56, 103, -94,
- 88, 153, 89, 125, 76, 61, 147, 157, 128, 128,
- 111, 160, 143, 127, -94, 67, 112, 87, 67, 92,
- 74, 174, 110, 64, 98, 161, 111, 65, 62, 179,
- 158, 159, 112, 66, 67, 67, 114, 113, 87, 147,
- 49, 50, 52, 111, 125, 105, 106, 76, 157, 112,
- 56, 67, 89, 91, 127, 96, 67, 108, 109, 104,
- 89, 97, 121, 142, 113, 149, 131, 81, 132, 89,
- 182, 7, 8, 9, 10, 11, 12, 13, 133, 15,
- 16, 17, 18, 19, 20, 21, 22, 23, 24, 154,
- 26, 27, 28, 29, 30, 31, 155, 108, 34, 35,
- 99, 162, 167, 168, 170, -22, 169, 171, 172, 36,
- 163, 176, -22, -108, 177, -22, 180, -22, 122, 5,
- -22, 183, 7, 8, 9, 10, 11, 12, 13, 184,
- 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
- 185, 26, 27, 28, 29, 30, 31, 186, 175, 34,
- 35, 135, 145, 151, 123, 75, -22, 0, 0, 0,
- 36, 0, 0, -22, -109, 144, -22, 0, -22, 6,
- 0, -22, 0, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
- 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
- 34, 35, 0, 0, 0, 0, 0, -22, 0, 0,
- 0, 36, 0, 0, -22, 0, 137, -22, 0, -22,
- 7, 8, 9, 10, 11, 12, 13, 0, 15, 16,
- 17, 18, 19, 20, 21, 22, 23, 24, 0, 26,
- 27, 28, 29, 30, 31, 0, 0, 34, 35, 0,
- 0, 0, 0, -87, 0, 0, 0, 0, 36, 0,
- 0, 0, 173, 0, 0, -87, 7, 8, 9, 10,
- 11, 12, 13, 0, 15, 16, 17, 18, 19, 20,
- 21, 22, 23, 24, 0, 26, 27, 28, 29, 30,
- 31, 0, 0, 34, 35, 0, 0, 0, 0, -87,
- 0, 0, 0, 0, 36, 0, 0, 0, 178, 0,
- 0, -87, 7, 8, 9, 10, 11, 12, 13, 0,
- 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
- 0, 26, 27, 28, 29, 30, 31, 0, 0, 34,
- 35, 0, 0, 0, 0, -87, 0, 0, 0, 0,
- 36, 0, 0, 0, 0, 0, 0, -87, 7, 8,
- 9, 10, 11, 12, 13, 0, 15, 16, 17, 18,
- 19, 20, 21, 22, 23, 24, 0, 26, 27, 28,
- 29, 30, 31, 0, 0, 34, 35, 0, 0, 0,
- 0, 0, 125, 0, 0, 0, 126, 0, 0, 0,
- 0, 0, 127, 0, 67, 7, 8, 9, 10, 11,
- 12, 13, 0, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 0, 26, 27, 28, 29, 30, 31,
- 0, 0, 34, 35, 0, 0, 0, 0, 181, 0,
- 0, 0, 0, 36, 7, 8, 9, 10, 11, 12,
- 13, 0, 15, 16, 17, 18, 19, 20, 21, 22,
- 23, 24, 0, 26, 27, 28, 29, 30, 31, 0,
- 0, 34, 35, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 36
-};
-
-#define yypact_value_is_default(Yystate) \
- (!!((Yystate) == (-94)))
-
-#define yytable_value_is_error(Yytable_value) \
- YYID (0)
-
-static const yytype_int16 yycheck[] =
-{
- 61, 61, 3, 82, 67, 1, 8, 26, 1, 32,
- 89, 40, 39, 58, 39, 0, 18, 31, 23, 112,
- 34, 114, 46, 39, 35, 27, 53, 35, 53, 52,
- 32, 94, 39, 112, 79, 114, 55, 53, 49, 35,
- 59, 49, 103, 39, 39, 49, 39, 43, 127, 128,
- 43, 35, 47, 49, 50, 51, 49, 58, 51, 66,
- 89, 153, 39, 39, 71, 49, 43, 43, 49, 161,
- 127, 128, 49, 49, 51, 51, 155, 54, 79, 39,
- 46, 47, 53, 43, 39, 47, 48, 39, 43, 49,
- 53, 51, 153, 39, 49, 46, 51, 46, 47, 52,
- 161, 48, 47, 36, 54, 36, 50, 1, 50, 170,
- 170, 5, 6, 7, 8, 9, 10, 11, 50, 13,
- 14, 15, 16, 17, 18, 19, 20, 21, 22, 46,
- 24, 25, 26, 27, 28, 29, 48, 46, 32, 33,
- 31, 46, 50, 50, 48, 39, 38, 50, 50, 43,
- 32, 50, 46, 47, 50, 49, 36, 51, 1, 1,
- 54, 50, 5, 6, 7, 8, 9, 10, 11, 50,
- 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
- 50, 24, 25, 26, 27, 28, 29, 50, 155, 32,
- 33, 97, 110, 115, 84, 51, 39, -1, -1, -1,
- 43, -1, -1, 46, 47, 106, 49, -1, 51, 1,
- -1, 54, -1, 5, 6, 7, 8, 9, 10, 11,
- 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 32, 33, -1, -1, -1, -1, -1, 39, -1, -1,
- -1, 43, -1, -1, 46, -1, 1, 49, -1, 51,
- 5, 6, 7, 8, 9, 10, 11, -1, 13, 14,
- 15, 16, 17, 18, 19, 20, 21, 22, -1, 24,
- 25, 26, 27, 28, 29, -1, -1, 32, 33, -1,
- -1, -1, -1, 38, -1, -1, -1, -1, 43, -1,
- -1, -1, 1, -1, -1, 50, 5, 6, 7, 8,
- 9, 10, 11, -1, 13, 14, 15, 16, 17, 18,
- 19, 20, 21, 22, -1, 24, 25, 26, 27, 28,
- 29, -1, -1, 32, 33, -1, -1, -1, -1, 38,
- -1, -1, -1, -1, 43, -1, -1, -1, 1, -1,
- -1, 50, 5, 6, 7, 8, 9, 10, 11, -1,
- 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
- -1, 24, 25, 26, 27, 28, 29, -1, -1, 32,
- 33, -1, -1, -1, -1, 38, -1, -1, -1, -1,
- 43, -1, -1, -1, -1, -1, -1, 50, 5, 6,
- 7, 8, 9, 10, 11, -1, 13, 14, 15, 16,
- 17, 18, 19, 20, 21, 22, -1, 24, 25, 26,
- 27, 28, 29, -1, -1, 32, 33, -1, -1, -1,
- -1, -1, 39, -1, -1, -1, 43, -1, -1, -1,
- -1, -1, 49, -1, 51, 5, 6, 7, 8, 9,
- 10, 11, -1, 13, 14, 15, 16, 17, 18, 19,
- 20, 21, 22, -1, 24, 25, 26, 27, 28, 29,
- -1, -1, 32, 33, -1, -1, -1, -1, 38, -1,
- -1, -1, -1, 43, 5, 6, 7, 8, 9, 10,
- 11, -1, 13, 14, 15, 16, 17, 18, 19, 20,
- 21, 22, -1, 24, 25, 26, 27, 28, 29, -1,
- -1, 32, 33, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 43
-};
-
-/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
- symbol of state STATE-NUM. */
-static const yytype_uint8 yystos[] =
-{
- 0, 56, 57, 58, 0, 57, 1, 5, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
- 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
- 28, 29, 30, 31, 32, 33, 43, 59, 62, 66,
- 67, 68, 69, 70, 71, 75, 86, 101, 103, 46,
- 47, 39, 53, 98, 23, 39, 53, 89, 61, 39,
- 89, 49, 49, 46, 39, 43, 49, 51, 63, 64,
- 65, 72, 76, 77, 68, 98, 39, 99, 100, 60,
- 89, 1, 66, 90, 91, 92, 62, 66, 89, 67,
- 83, 39, 76, 73, 74, 75, 46, 48, 76, 31,
- 34, 102, 35, 49, 52, 47, 48, 62, 46, 47,
- 39, 43, 49, 54, 72, 78, 79, 93, 94, 95,
- 96, 47, 1, 92, 76, 39, 43, 49, 72, 84,
- 85, 50, 50, 50, 75, 65, 97, 1, 80, 81,
- 82, 83, 36, 47, 100, 96, 1, 39, 78, 36,
- 78, 97, 35, 49, 46, 48, 1, 43, 84, 84,
- 35, 49, 46, 32, 52, 87, 88, 50, 50, 38,
- 48, 50, 50, 1, 80, 95, 50, 50, 1, 80,
- 36, 38, 83, 50, 50, 50, 50
-};
-
-#define yyerrok (yyerrstatus = 0)
-#define yyclearin (yychar = YYEMPTY)
-#define YYEMPTY (-2)
-#define YYEOF 0
-
-#define YYACCEPT goto yyacceptlab
-#define YYABORT goto yyabortlab
-#define YYERROR goto yyerrorlab
-
-
-/* Like YYERROR except do call yyerror. This remains here temporarily
- to ease the transition to the new meaning of YYERROR, for GCC.
- Once GCC version 2 has supplanted version 1, this can go. However,
- YYFAIL appears to be in use. Nevertheless, it is formally deprecated
- in Bison 2.4.2's NEWS entry, where a plan to phase it out is
- discussed. */
-
-#define YYFAIL goto yyerrlab
-#if defined YYFAIL
- /* This is here to suppress warnings from the GCC cpp's
- -Wunused-macros. Normally we don't worry about that warning, but
- some users do, and we want to make it easy for users to remove
- YYFAIL uses, which will produce warnings from Bison 2.5. */
-#endif
-
-#define YYRECOVERING() (!!yyerrstatus)
-
-#define YYBACKUP(Token, Value) \
-do \
- if (yychar == YYEMPTY) \
- { \
- yychar = (Token); \
- yylval = (Value); \
- YYPOPSTACK (yylen); \
- yystate = *yyssp; \
- goto yybackup; \
- } \
- else \
- { \
- yyerror (YY_("syntax error: cannot back up")); \
- YYERROR; \
- } \
-while (YYID (0))
-
-/* Error token number */
-#define YYTERROR 1
-#define YYERRCODE 256
-
-
-/* This macro is provided for backward compatibility. */
-#ifndef YY_LOCATION_PRINT
-# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
-#endif
-
-
-/* YYLEX -- calling `yylex' with the right arguments. */
-#ifdef YYLEX_PARAM
-# define YYLEX yylex (YYLEX_PARAM)
-#else
-# define YYLEX yylex ()
-#endif
-
-/* Enable debugging if requested. */
-#if YYDEBUG
-
-# ifndef YYFPRINTF
-# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
-# define YYFPRINTF fprintf
-# endif
-
-# define YYDPRINTF(Args) \
-do { \
- if (yydebug) \
- YYFPRINTF Args; \
-} while (YYID (0))
-
-# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
-do { \
- if (yydebug) \
- { \
- YYFPRINTF (stderr, "%s ", Title); \
- yy_symbol_print (stderr, \
- Type, Value); \
- YYFPRINTF (stderr, "\n"); \
- } \
-} while (YYID (0))
-
-
-/*--------------------------------.
-| Print this symbol on YYOUTPUT. |
-`--------------------------------*/
-
-/*ARGSUSED*/
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-static void
-yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
-#else
-static void
-yy_symbol_value_print (yyoutput, yytype, yyvaluep)
- FILE *yyoutput;
- int yytype;
- YYSTYPE const * const yyvaluep;
-#endif
-{
- FILE *yyo = yyoutput;
- YYUSE (yyo);
- if (!yyvaluep)
- return;
-# ifdef YYPRINT
- if (yytype < YYNTOKENS)
- YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
-# else
- YYUSE (yyoutput);
-# endif
- switch (yytype)
- {
- default:
- break;
- }
-}
-
-
-/*--------------------------------.
-| Print this symbol on YYOUTPUT. |
-`--------------------------------*/
-
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-static void
-yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
-#else
-static void
-yy_symbol_print (yyoutput, yytype, yyvaluep)
- FILE *yyoutput;
- int yytype;
- YYSTYPE const * const yyvaluep;
-#endif
-{
- if (yytype < YYNTOKENS)
- YYFPRINTF (yyoutput, "token %s (", yytname[yytype]);
- else
- YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);
-
- yy_symbol_value_print (yyoutput, yytype, yyvaluep);
- YYFPRINTF (yyoutput, ")");
-}
-
-/*------------------------------------------------------------------.
-| yy_stack_print -- Print the state stack from its BOTTOM up to its |
-| TOP (included). |
-`------------------------------------------------------------------*/
-
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-static void
-yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
-#else
-static void
-yy_stack_print (yybottom, yytop)
- yytype_int16 *yybottom;
- yytype_int16 *yytop;
-#endif
-{
- YYFPRINTF (stderr, "Stack now");
- for (; yybottom <= yytop; yybottom++)
- {
- int yybot = *yybottom;
- YYFPRINTF (stderr, " %d", yybot);
- }
- YYFPRINTF (stderr, "\n");
-}
-
-# define YY_STACK_PRINT(Bottom, Top) \
-do { \
- if (yydebug) \
- yy_stack_print ((Bottom), (Top)); \
-} while (YYID (0))
-
-
-/*------------------------------------------------.
-| Report that the YYRULE is going to be reduced. |
-`------------------------------------------------*/
-
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-static void
-yy_reduce_print (YYSTYPE *yyvsp, int yyrule)
-#else
-static void
-yy_reduce_print (yyvsp, yyrule)
- YYSTYPE *yyvsp;
- int yyrule;
-#endif
-{
- int yynrhs = yyr2[yyrule];
- int yyi;
- unsigned long int yylno = yyrline[yyrule];
- YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
- yyrule - 1, yylno);
- /* The symbols being reduced. */
- for (yyi = 0; yyi < yynrhs; yyi++)
- {
- YYFPRINTF (stderr, " $%d = ", yyi + 1);
- yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi],
- &(yyvsp[(yyi + 1) - (yynrhs)])
- );
- YYFPRINTF (stderr, "\n");
- }
-}
-
-# define YY_REDUCE_PRINT(Rule) \
-do { \
- if (yydebug) \
- yy_reduce_print (yyvsp, Rule); \
-} while (YYID (0))
-
-/* Nonzero means print parse trace. It is left uninitialized so that
- multiple parsers can coexist. */
-int yydebug;
-#else /* !YYDEBUG */
-# define YYDPRINTF(Args)
-# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
-# define YY_STACK_PRINT(Bottom, Top)
-# define YY_REDUCE_PRINT(Rule)
-#endif /* !YYDEBUG */
-
-
-/* YYINITDEPTH -- initial size of the parser's stacks. */
-#ifndef YYINITDEPTH
-# define YYINITDEPTH 200
-#endif
-
-/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
- if the built-in stack extension method is used).
-
- Do not make this value too large; the results are undefined if
- YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
- evaluated with infinite-precision integer arithmetic. */
-
-#ifndef YYMAXDEPTH
-# define YYMAXDEPTH 10000
-#endif
-
-
-#if YYERROR_VERBOSE
-
-# ifndef yystrlen
-# if defined __GLIBC__ && defined _STRING_H
-# define yystrlen strlen
-# else
-/* Return the length of YYSTR. */
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-static YYSIZE_T
-yystrlen (const char *yystr)
-#else
-static YYSIZE_T
-yystrlen (yystr)
- const char *yystr;
-#endif
-{
- YYSIZE_T yylen;
- for (yylen = 0; yystr[yylen]; yylen++)
- continue;
- return yylen;
-}
-# endif
-# endif
-
-# ifndef yystpcpy
-# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
-# define yystpcpy stpcpy
-# else
-/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
- YYDEST. */
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-static char *
-yystpcpy (char *yydest, const char *yysrc)
-#else
-static char *
-yystpcpy (yydest, yysrc)
- char *yydest;
- const char *yysrc;
-#endif
-{
- char *yyd = yydest;
- const char *yys = yysrc;
-
- while ((*yyd++ = *yys++) != '\0')
- continue;
-
- return yyd - 1;
-}
-# endif
-# endif
-
-# ifndef yytnamerr
-/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
- quotes and backslashes, so that it's suitable for yyerror. The
- heuristic is that double-quoting is unnecessary unless the string
- contains an apostrophe, a comma, or backslash (other than
- backslash-backslash). YYSTR is taken from yytname. If YYRES is
- null, do not copy; instead, return the length of what the result
- would have been. */
-static YYSIZE_T
-yytnamerr (char *yyres, const char *yystr)
-{
- if (*yystr == '"')
- {
- YYSIZE_T yyn = 0;
- char const *yyp = yystr;
-
- for (;;)
- switch (*++yyp)
- {
- case '\'':
- case ',':
- goto do_not_strip_quotes;
-
- case '\\':
- if (*++yyp != '\\')
- goto do_not_strip_quotes;
- /* Fall through. */
- default:
- if (yyres)
- yyres[yyn] = *yyp;
- yyn++;
- break;
-
- case '"':
- if (yyres)
- yyres[yyn] = '\0';
- return yyn;
- }
- do_not_strip_quotes: ;
- }
-
- if (! yyres)
- return yystrlen (yystr);
-
- return yystpcpy (yyres, yystr) - yyres;
-}
-# endif
-
-/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message
- about the unexpected token YYTOKEN for the state stack whose top is
- YYSSP.
-
- Return 0 if *YYMSG was successfully written. Return 1 if *YYMSG is
- not large enough to hold the message. In that case, also set
- *YYMSG_ALLOC to the required number of bytes. Return 2 if the
- required number of bytes is too large to store. */
-static int
-yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
- yytype_int16 *yyssp, int yytoken)
-{
- YYSIZE_T yysize0 = yytnamerr (YY_NULL, yytname[yytoken]);
- YYSIZE_T yysize = yysize0;
- enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
- /* Internationalized format string. */
- const char *yyformat = YY_NULL;
- /* Arguments of yyformat. */
- char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
- /* Number of reported tokens (one for the "unexpected", one per
- "expected"). */
- int yycount = 0;
-
- /* There are many possibilities here to consider:
- - Assume YYFAIL is not used. It's too flawed to consider. See
- <http://lists.gnu.org/archive/html/bison-patches/2009-12/msg00024.html>
- for details. YYERROR is fine as it does not invoke this
- function.
- - If this state is a consistent state with a default action, then
- the only way this function was invoked is if the default action
- is an error action. In that case, don't check for expected
- tokens because there are none.
- - The only way there can be no lookahead present (in yychar) is if
- this state is a consistent state with a default action. Thus,
- detecting the absence of a lookahead is sufficient to determine
- that there is no unexpected or expected token to report. In that
- case, just report a simple "syntax error".
- - Don't assume there isn't a lookahead just because this state is a
- consistent state with a default action. There might have been a
- previous inconsistent state, consistent state with a non-default
- action, or user semantic action that manipulated yychar.
- - Of course, the expected token list depends on states to have
- correct lookahead information, and it depends on the parser not
- to perform extra reductions after fetching a lookahead from the
- scanner and before detecting a syntax error. Thus, state merging
- (from LALR or IELR) and default reductions corrupt the expected
- token list. However, the list is correct for canonical LR with
- one exception: it will still contain any token that will not be
- accepted due to an error action in a later state.
- */
- if (yytoken != YYEMPTY)
- {
- int yyn = yypact[*yyssp];
- yyarg[yycount++] = yytname[yytoken];
- if (!yypact_value_is_default (yyn))
- {
- /* Start YYX at -YYN if negative to avoid negative indexes in
- YYCHECK. In other words, skip the first -YYN actions for
- this state because they are default actions. */
- int yyxbegin = yyn < 0 ? -yyn : 0;
- /* Stay within bounds of both yycheck and yytname. */
- int yychecklim = YYLAST - yyn + 1;
- int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
- int yyx;
-
- for (yyx = yyxbegin; yyx < yyxend; ++yyx)
- if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR
- && !yytable_value_is_error (yytable[yyx + yyn]))
- {
- if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
- {
- yycount = 1;
- yysize = yysize0;
- break;
- }
- yyarg[yycount++] = yytname[yyx];
- {
- YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULL, yytname[yyx]);
- if (! (yysize <= yysize1
- && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
- return 2;
- yysize = yysize1;
- }
- }
- }
- }
-
- switch (yycount)
- {
-# define YYCASE_(N, S) \
- case N: \
- yyformat = S; \
- break
- YYCASE_(0, YY_("syntax error"));
- YYCASE_(1, YY_("syntax error, unexpected %s"));
- YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s"));
- YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s"));
- YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s"));
- YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"));
-# undef YYCASE_
- }
-
- {
- YYSIZE_T yysize1 = yysize + yystrlen (yyformat);
- if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
- return 2;
- yysize = yysize1;
- }
-
- if (*yymsg_alloc < yysize)
- {
- *yymsg_alloc = 2 * yysize;
- if (! (yysize <= *yymsg_alloc
- && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM))
- *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM;
- return 1;
- }
-
- /* Avoid sprintf, as that infringes on the user's name space.
- Don't have undefined behavior even if the translation
- produced a string with the wrong number of "%s"s. */
- {
- char *yyp = *yymsg;
- int yyi = 0;
- while ((*yyp = *yyformat) != '\0')
- if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount)
- {
- yyp += yytnamerr (yyp, yyarg[yyi++]);
- yyformat += 2;
- }
- else
- {
- yyp++;
- yyformat++;
- }
- }
- return 0;
-}
-#endif /* YYERROR_VERBOSE */
-
-/*-----------------------------------------------.
-| Release the memory associated to this symbol. |
-`-----------------------------------------------*/
-
-/*ARGSUSED*/
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-static void
-yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
-#else
-static void
-yydestruct (yymsg, yytype, yyvaluep)
- const char *yymsg;
- int yytype;
- YYSTYPE *yyvaluep;
-#endif
-{
- YYUSE (yyvaluep);
-
- if (!yymsg)
- yymsg = "Deleting";
- YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
-
- switch (yytype)
- {
-
- default:
- break;
- }
-}
-
-
-
-
-/* The lookahead symbol. */
-int yychar;
-
-
-#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
-# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
-# define YY_IGNORE_MAYBE_UNINITIALIZED_END
-#endif
-#ifndef YY_INITIAL_VALUE
-# define YY_INITIAL_VALUE(Value) /* Nothing. */
-#endif
-
-/* The semantic value of the lookahead symbol. */
-YYSTYPE yylval YY_INITIAL_VALUE(yyval_default);
-
-/* Number of syntax errors so far. */
-int yynerrs;
-
-
-/*----------.
-| yyparse. |
-`----------*/
-
-#ifdef YYPARSE_PARAM
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-int
-yyparse (void *YYPARSE_PARAM)
-#else
-int
-yyparse (YYPARSE_PARAM)
- void *YYPARSE_PARAM;
-#endif
-#else /* ! YYPARSE_PARAM */
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-int
-yyparse (void)
-#else
-int
-yyparse ()
-
-#endif
-#endif
-{
- int yystate;
- /* Number of tokens to shift before error messages enabled. */
- int yyerrstatus;
-
- /* The stacks and their tools:
- `yyss': related to states.
- `yyvs': related to semantic values.
-
- Refer to the stacks through separate pointers, to allow yyoverflow
- to reallocate them elsewhere. */
-
- /* The state stack. */
- yytype_int16 yyssa[YYINITDEPTH];
- yytype_int16 *yyss;
- yytype_int16 *yyssp;
-
- /* The semantic value stack. */
- YYSTYPE yyvsa[YYINITDEPTH];
- YYSTYPE *yyvs;
- YYSTYPE *yyvsp;
-
- YYSIZE_T yystacksize;
-
- int yyn;
- int yyresult;
- /* Lookahead token as an internal (translated) token number. */
- int yytoken = 0;
- /* The variables used to return semantic value and location from the
- action routines. */
- YYSTYPE yyval;
-
-#if YYERROR_VERBOSE
- /* Buffer for error messages, and its allocated size. */
- char yymsgbuf[128];
- char *yymsg = yymsgbuf;
- YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
-#endif
-
-#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
-
- /* The number of symbols on the RHS of the reduced rule.
- Keep to zero when no symbol should be popped. */
- int yylen = 0;
-
- yyssp = yyss = yyssa;
- yyvsp = yyvs = yyvsa;
- yystacksize = YYINITDEPTH;
-
- YYDPRINTF ((stderr, "Starting parse\n"));
-
- yystate = 0;
- yyerrstatus = 0;
- yynerrs = 0;
- yychar = YYEMPTY; /* Cause a token to be read. */
- goto yysetstate;
-
-/*------------------------------------------------------------.
-| yynewstate -- Push a new state, which is found in yystate. |
-`------------------------------------------------------------*/
- yynewstate:
- /* In all cases, when you get here, the value and location stacks
- have just been pushed. So pushing a state here evens the stacks. */
- yyssp++;
-
- yysetstate:
- *yyssp = yystate;
-
- if (yyss + yystacksize - 1 <= yyssp)
- {
- /* Get the current used size of the three stacks, in elements. */
- YYSIZE_T yysize = yyssp - yyss + 1;
-
-#ifdef yyoverflow
- {
- /* Give user a chance to reallocate the stack. Use copies of
- these so that the &'s don't force the real ones into
- memory. */
- YYSTYPE *yyvs1 = yyvs;
- yytype_int16 *yyss1 = yyss;
-
- /* Each stack pointer address is followed by the size of the
- data in use in that stack, in bytes. This used to be a
- conditional around just the two extra args, but that might
- be undefined if yyoverflow is a macro. */
- yyoverflow (YY_("memory exhausted"),
- &yyss1, yysize * sizeof (*yyssp),
- &yyvs1, yysize * sizeof (*yyvsp),
- &yystacksize);
-
- yyss = yyss1;
- yyvs = yyvs1;
- }
-#else /* no yyoverflow */
-# ifndef YYSTACK_RELOCATE
- goto yyexhaustedlab;
-# else
- /* Extend the stack our own way. */
- if (YYMAXDEPTH <= yystacksize)
- goto yyexhaustedlab;
- yystacksize *= 2;
- if (YYMAXDEPTH < yystacksize)
- yystacksize = YYMAXDEPTH;
-
- {
- yytype_int16 *yyss1 = yyss;
- union yyalloc *yyptr =
- (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
- if (! yyptr)
- goto yyexhaustedlab;
- YYSTACK_RELOCATE (yyss_alloc, yyss);
- YYSTACK_RELOCATE (yyvs_alloc, yyvs);
-# undef YYSTACK_RELOCATE
- if (yyss1 != yyssa)
- YYSTACK_FREE (yyss1);
- }
-# endif
-#endif /* no yyoverflow */
-
- yyssp = yyss + yysize - 1;
- yyvsp = yyvs + yysize - 1;
-
- YYDPRINTF ((stderr, "Stack size increased to %lu\n",
- (unsigned long int) yystacksize));
-
- if (yyss + yystacksize - 1 <= yyssp)
- YYABORT;
- }
-
- YYDPRINTF ((stderr, "Entering state %d\n", yystate));
-
- if (yystate == YYFINAL)
- YYACCEPT;
-
- goto yybackup;
-
-/*-----------.
-| yybackup. |
-`-----------*/
-yybackup:
-
- /* Do appropriate processing given the current state. Read a
- lookahead token if we need one and don't already have one. */
-
- /* First try to decide what to do without reference to lookahead token. */
- yyn = yypact[yystate];
- if (yypact_value_is_default (yyn))
- goto yydefault;
-
- /* Not known => get a lookahead token if don't already have one. */
-
- /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
- if (yychar == YYEMPTY)
- {
- YYDPRINTF ((stderr, "Reading a token: "));
- yychar = YYLEX;
- }
-
- if (yychar <= YYEOF)
- {
- yychar = yytoken = YYEOF;
- YYDPRINTF ((stderr, "Now at end of input.\n"));
- }
- else
- {
- yytoken = YYTRANSLATE (yychar);
- YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
- }
-
- /* If the proper action on seeing token YYTOKEN is to reduce or to
- detect an error, take that action. */
- yyn += yytoken;
- if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
- goto yydefault;
- yyn = yytable[yyn];
- if (yyn <= 0)
- {
- if (yytable_value_is_error (yyn))
- goto yyerrlab;
- yyn = -yyn;
- goto yyreduce;
- }
-
- /* Count tokens shifted since error; after three, turn off error
- status. */
- if (yyerrstatus)
- yyerrstatus--;
-
- /* Shift the lookahead token. */
- YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
-
- /* Discard the shifted token. */
- yychar = YYEMPTY;
-
- yystate = yyn;
- YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
- *++yyvsp = yylval;
- YY_IGNORE_MAYBE_UNINITIALIZED_END
-
- goto yynewstate;
-
-
-/*-----------------------------------------------------------.
-| yydefault -- do the default action for the current state. |
-`-----------------------------------------------------------*/
-yydefault:
- yyn = yydefact[yystate];
- if (yyn == 0)
- goto yyerrlab;
- goto yyreduce;
-
-
-/*-----------------------------.
-| yyreduce -- Do a reduction. |
-`-----------------------------*/
-yyreduce:
- /* yyn is the number of a rule to reduce with. */
- yylen = yyr2[yyn];
-
- /* If YYLEN is nonzero, implement the default value of the action:
- `$$ = $1'.
-
- Otherwise, the following line sets YYVAL to garbage.
- This behavior is undocumented and Bison
- users should not rely upon it. Assigning to YYVAL
- unconditionally makes the parser a bit smaller, and it avoids a
- GCC warning that YYVAL may be used uninitialized. */
- yyval = yyvsp[1-yylen];
-
-
- YY_REDUCE_PRINT (yyn);
- switch (yyn)
- {
- case 4:
-
- { is_typedef = 0; is_extern = 0; current_name = NULL; decl_spec = NULL; }
- break;
-
- case 5:
-
- { free_list(*(yyvsp[(2) - (2)]), NULL); *(yyvsp[(2) - (2)]) = NULL; }
- break;
-
- case 6:
-
- { is_typedef = 1; }
- break;
-
- case 7:
-
- { (yyval) = (yyvsp[(4) - (4)]); }
- break;
-
- case 8:
-
- { is_typedef = 1; }
- break;
-
- case 9:
-
- { (yyval) = (yyvsp[(3) - (3)]); }
- break;
-
- case 14:
-
- { (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 15:
-
- { (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 16:
-
- { if (current_name) {
- struct string_list *decl = (*(yyvsp[(3) - (3)]))->next;
- (*(yyvsp[(3) - (3)]))->next = NULL;
- add_symbol(current_name,
- is_typedef ? SYM_TYPEDEF : SYM_NORMAL,
- decl, is_extern);
- current_name = NULL;
- }
- (yyval) = (yyvsp[(3) - (3)]);
- }
- break;
-
- case 17:
-
- { (yyval) = NULL; }
- break;
-
- case 19:
-
- { struct string_list *decl = *(yyvsp[(1) - (1)]);
- *(yyvsp[(1) - (1)]) = NULL;
- add_symbol(current_name,
- is_typedef ? SYM_TYPEDEF : SYM_NORMAL, decl, is_extern);
- current_name = NULL;
- (yyval) = (yyvsp[(1) - (1)]);
- }
- break;
-
- case 20:
-
- { struct string_list *decl = *(yyvsp[(3) - (3)]);
- *(yyvsp[(3) - (3)]) = NULL;
- free_list(*(yyvsp[(2) - (3)]), NULL);
- *(yyvsp[(2) - (3)]) = decl_spec;
- add_symbol(current_name,
- is_typedef ? SYM_TYPEDEF : SYM_NORMAL, decl, is_extern);
- current_name = NULL;
- (yyval) = (yyvsp[(3) - (3)]);
- }
- break;
-
- case 21:
-
- { (yyval) = (yyvsp[(4) - (4)]) ? (yyvsp[(4) - (4)]) : (yyvsp[(3) - (4)]) ? (yyvsp[(3) - (4)]) : (yyvsp[(2) - (4)]) ? (yyvsp[(2) - (4)]) : (yyvsp[(1) - (4)]); }
- break;
-
- case 22:
-
- { decl_spec = NULL; }
- break;
-
- case 24:
-
- { decl_spec = *(yyvsp[(1) - (1)]); }
- break;
-
- case 25:
-
- { decl_spec = *(yyvsp[(2) - (2)]); }
- break;
-
- case 26:
-
- { /* Version 2 checksumming ignores storage class, as that
- is really irrelevant to the linkage. */
- remove_node((yyvsp[(1) - (1)]));
- (yyval) = (yyvsp[(1) - (1)]);
- }
- break;
-
- case 31:
-
- { is_extern = 1; (yyval) = (yyvsp[(1) - (1)]); }
- break;
-
- case 32:
-
- { is_extern = 0; (yyval) = (yyvsp[(1) - (1)]); }
- break;
-
- case 37:
-
- { remove_node((yyvsp[(1) - (2)])); (*(yyvsp[(2) - (2)]))->tag = SYM_STRUCT; (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 38:
-
- { remove_node((yyvsp[(1) - (2)])); (*(yyvsp[(2) - (2)]))->tag = SYM_UNION; (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 39:
-
- { remove_node((yyvsp[(1) - (2)])); (*(yyvsp[(2) - (2)]))->tag = SYM_ENUM; (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 40:
-
- { record_compound((yyvsp[(1) - (3)]), (yyvsp[(2) - (3)]), (yyvsp[(3) - (3)]), SYM_STRUCT); (yyval) = (yyvsp[(3) - (3)]); }
- break;
-
- case 41:
-
- { record_compound((yyvsp[(1) - (3)]), (yyvsp[(2) - (3)]), (yyvsp[(3) - (3)]), SYM_UNION); (yyval) = (yyvsp[(3) - (3)]); }
- break;
-
- case 42:
-
- { record_compound((yyvsp[(1) - (3)]), (yyvsp[(2) - (3)]), (yyvsp[(3) - (3)]), SYM_ENUM); (yyval) = (yyvsp[(3) - (3)]); }
- break;
-
- case 43:
-
- { add_symbol(NULL, SYM_ENUM, NULL, 0); (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 44:
-
- { (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 45:
-
- { (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 57:
-
- { (*(yyvsp[(1) - (1)]))->tag = SYM_TYPEDEF; (yyval) = (yyvsp[(1) - (1)]); }
- break;
-
- case 58:
-
- { (yyval) = (yyvsp[(2) - (2)]) ? (yyvsp[(2) - (2)]) : (yyvsp[(1) - (2)]); }
- break;
-
- case 59:
-
- { (yyval) = NULL; }
- break;
-
- case 62:
-
- { (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 66:
-
- { /* restrict has no effect in prototypes so ignore it */
- remove_node((yyvsp[(1) - (1)]));
- (yyval) = (yyvsp[(1) - (1)]);
- }
- break;
-
- case 67:
-
- { (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 69:
-
- { if (current_name != NULL) {
- error_with_pos("unexpected second declaration name");
- YYERROR;
- } else {
- current_name = (*(yyvsp[(1) - (1)]))->string;
- (yyval) = (yyvsp[(1) - (1)]);
- }
- }
- break;
-
- case 70:
-
- { if (current_name != NULL) {
- error_with_pos("unexpected second declaration name");
- YYERROR;
- } else {
- current_name = (*(yyvsp[(1) - (1)]))->string;
- (yyval) = (yyvsp[(1) - (1)]);
- }
- }
- break;
-
- case 71:
-
- { (yyval) = (yyvsp[(4) - (4)]); }
- break;
-
- case 72:
-
- { (yyval) = (yyvsp[(4) - (4)]); }
- break;
-
- case 73:
-
- { (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 74:
-
- { (yyval) = (yyvsp[(3) - (3)]); }
- break;
-
- case 75:
-
- { (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 79:
-
- { (yyval) = (yyvsp[(4) - (4)]); }
- break;
-
- case 80:
-
- { (yyval) = (yyvsp[(4) - (4)]); }
- break;
-
- case 81:
-
- { (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 82:
-
- { (yyval) = (yyvsp[(3) - (3)]); }
- break;
-
- case 83:
-
- { (yyval) = (yyvsp[(3) - (3)]); }
- break;
-
- case 84:
-
- { (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 86:
-
- { (yyval) = (yyvsp[(3) - (3)]); }
- break;
-
- case 87:
-
- { (yyval) = NULL; }
- break;
-
- case 90:
-
- { (yyval) = (yyvsp[(3) - (3)]); }
- break;
-
- case 91:
-
- { (yyval) = (yyvsp[(2) - (2)]) ? (yyvsp[(2) - (2)]) : (yyvsp[(1) - (2)]); }
- break;
-
- case 92:
-
- { (yyval) = (yyvsp[(2) - (2)]) ? (yyvsp[(2) - (2)]) : (yyvsp[(1) - (2)]); }
- break;
-
- case 94:
-
- { (yyval) = NULL; }
- break;
-
- case 95:
-
- { /* For version 2 checksums, we don't want to remember
- private parameter names. */
- remove_node((yyvsp[(1) - (1)]));
- (yyval) = (yyvsp[(1) - (1)]);
- }
- break;
-
- case 96:
-
- { remove_node((yyvsp[(1) - (1)]));
- (yyval) = (yyvsp[(1) - (1)]);
- }
- break;
-
- case 97:
-
- { (yyval) = (yyvsp[(4) - (4)]); }
- break;
-
- case 98:
-
- { (yyval) = (yyvsp[(4) - (4)]); }
- break;
-
- case 99:
-
- { (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 100:
-
- { (yyval) = (yyvsp[(3) - (3)]); }
- break;
-
- case 101:
-
- { (yyval) = (yyvsp[(3) - (3)]); }
- break;
-
- case 102:
-
- { struct string_list *decl = *(yyvsp[(2) - (3)]);
- *(yyvsp[(2) - (3)]) = NULL;
- add_symbol(current_name, SYM_NORMAL, decl, is_extern);
- (yyval) = (yyvsp[(3) - (3)]);
- }
- break;
-
- case 103:
-
- { (yyval) = NULL; }
- break;
-
- case 105:
-
- { remove_list((yyvsp[(2) - (2)]), &(*(yyvsp[(1) - (2)]))->next); (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 106:
-
- { (yyval) = (yyvsp[(3) - (3)]); }
- break;
-
- case 107:
-
- { (yyval) = (yyvsp[(3) - (3)]); }
- break;
-
- case 108:
-
- { (yyval) = NULL; }
- break;
-
- case 111:
-
- { (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 112:
-
- { (yyval) = (yyvsp[(3) - (3)]); }
- break;
-
- case 113:
-
- { (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 114:
-
- { (yyval) = NULL; }
- break;
-
- case 117:
-
- { (yyval) = (yyvsp[(3) - (3)]); }
- break;
-
- case 118:
-
- { (yyval) = (yyvsp[(2) - (2)]) ? (yyvsp[(2) - (2)]) : (yyvsp[(1) - (2)]); }
- break;
-
- case 119:
-
- { (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 121:
-
- { (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 122:
-
- { (yyval) = NULL; }
- break;
-
- case 124:
-
- { (yyval) = (yyvsp[(3) - (3)]); }
- break;
-
- case 125:
-
- { (yyval) = (yyvsp[(4) - (4)]); }
- break;
-
- case 128:
-
- {
- const char *name = strdup((*(yyvsp[(1) - (1)]))->string);
- add_symbol(name, SYM_ENUM_CONST, NULL, 0);
- }
- break;
-
- case 129:
-
- {
- const char *name = strdup((*(yyvsp[(1) - (3)]))->string);
- struct string_list *expr = copy_list_range(*(yyvsp[(3) - (3)]), *(yyvsp[(2) - (3)]));
- add_symbol(name, SYM_ENUM_CONST, expr, 0);
- }
- break;
-
- case 130:
-
- { (yyval) = (yyvsp[(2) - (2)]); }
- break;
-
- case 131:
-
- { (yyval) = NULL; }
- break;
-
- case 133:
-
- { export_symbol((*(yyvsp[(3) - (5)]))->string); (yyval) = (yyvsp[(5) - (5)]); }
- break;
-
-
-
- default: break;
- }
- /* User semantic actions sometimes alter yychar, and that requires
- that yytoken be updated with the new translation. We take the
- approach of translating immediately before every use of yytoken.
- One alternative is translating here after every semantic action,
- but that translation would be missed if the semantic action invokes
- YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or
- if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an
- incorrect destructor might then be invoked immediately. In the
- case of YYERROR or YYBACKUP, subsequent parser actions might lead
- to an incorrect destructor call or verbose syntax error message
- before the lookahead is translated. */
- YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
-
- YYPOPSTACK (yylen);
- yylen = 0;
- YY_STACK_PRINT (yyss, yyssp);
-
- *++yyvsp = yyval;
-
- /* Now `shift' the result of the reduction. Determine what state
- that goes to, based on the state we popped back to and the rule
- number reduced by. */
-
- yyn = yyr1[yyn];
-
- yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
- if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
- yystate = yytable[yystate];
- else
- yystate = yydefgoto[yyn - YYNTOKENS];
-
- goto yynewstate;
-
-
-/*------------------------------------.
-| yyerrlab -- here on detecting error |
-`------------------------------------*/
-yyerrlab:
- /* Make sure we have latest lookahead translation. See comments at
- user semantic actions for why this is necessary. */
- yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar);
-
- /* If not already recovering from an error, report this error. */
- if (!yyerrstatus)
- {
- ++yynerrs;
-#if ! YYERROR_VERBOSE
- yyerror (YY_("syntax error"));
-#else
-# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \
- yyssp, yytoken)
- {
- char const *yymsgp = YY_("syntax error");
- int yysyntax_error_status;
- yysyntax_error_status = YYSYNTAX_ERROR;
- if (yysyntax_error_status == 0)
- yymsgp = yymsg;
- else if (yysyntax_error_status == 1)
- {
- if (yymsg != yymsgbuf)
- YYSTACK_FREE (yymsg);
- yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc);
- if (!yymsg)
- {
- yymsg = yymsgbuf;
- yymsg_alloc = sizeof yymsgbuf;
- yysyntax_error_status = 2;
- }
- else
- {
- yysyntax_error_status = YYSYNTAX_ERROR;
- yymsgp = yymsg;
- }
- }
- yyerror (yymsgp);
- if (yysyntax_error_status == 2)
- goto yyexhaustedlab;
- }
-# undef YYSYNTAX_ERROR
-#endif
- }
-
-
-
- if (yyerrstatus == 3)
- {
- /* If just tried and failed to reuse lookahead token after an
- error, discard it. */
-
- if (yychar <= YYEOF)
- {
- /* Return failure if at end of input. */
- if (yychar == YYEOF)
- YYABORT;
- }
- else
- {
- yydestruct ("Error: discarding",
- yytoken, &yylval);
- yychar = YYEMPTY;
- }
- }
-
- /* Else will try to reuse lookahead token after shifting the error
- token. */
- goto yyerrlab1;
-
-
-/*---------------------------------------------------.
-| yyerrorlab -- error raised explicitly by YYERROR. |
-`---------------------------------------------------*/
-yyerrorlab:
-
- /* Pacify compilers like GCC when the user code never invokes
- YYERROR and the label yyerrorlab therefore never appears in user
- code. */
- if (/*CONSTCOND*/ 0)
- goto yyerrorlab;
-
- /* Do not reclaim the symbols of the rule which action triggered
- this YYERROR. */
- YYPOPSTACK (yylen);
- yylen = 0;
- YY_STACK_PRINT (yyss, yyssp);
- yystate = *yyssp;
- goto yyerrlab1;
-
-
-/*-------------------------------------------------------------.
-| yyerrlab1 -- common code for both syntax error and YYERROR. |
-`-------------------------------------------------------------*/
-yyerrlab1:
- yyerrstatus = 3; /* Each real token shifted decrements this. */
-
- for (;;)
- {
- yyn = yypact[yystate];
- if (!yypact_value_is_default (yyn))
- {
- yyn += YYTERROR;
- if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
- {
- yyn = yytable[yyn];
- if (0 < yyn)
- break;
- }
- }
-
- /* Pop the current state because it cannot handle the error token. */
- if (yyssp == yyss)
- YYABORT;
-
-
- yydestruct ("Error: popping",
- yystos[yystate], yyvsp);
- YYPOPSTACK (1);
- yystate = *yyssp;
- YY_STACK_PRINT (yyss, yyssp);
- }
-
- YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
- *++yyvsp = yylval;
- YY_IGNORE_MAYBE_UNINITIALIZED_END
-
-
- /* Shift the error token. */
- YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
-
- yystate = yyn;
- goto yynewstate;
-
-
-/*-------------------------------------.
-| yyacceptlab -- YYACCEPT comes here. |
-`-------------------------------------*/
-yyacceptlab:
- yyresult = 0;
- goto yyreturn;
-
-/*-----------------------------------.
-| yyabortlab -- YYABORT comes here. |
-`-----------------------------------*/
-yyabortlab:
- yyresult = 1;
- goto yyreturn;
-
-#if !defined yyoverflow || YYERROR_VERBOSE
-/*-------------------------------------------------.
-| yyexhaustedlab -- memory exhaustion comes here. |
-`-------------------------------------------------*/
-yyexhaustedlab:
- yyerror (YY_("memory exhausted"));
- yyresult = 2;
- /* Fall through. */
-#endif
-
-yyreturn:
- if (yychar != YYEMPTY)
- {
- /* Make sure we have latest lookahead translation. See comments at
- user semantic actions for why this is necessary. */
- yytoken = YYTRANSLATE (yychar);
- yydestruct ("Cleanup: discarding lookahead",
- yytoken, &yylval);
- }
- /* Do not reclaim the symbols of the rule which action triggered
- this YYABORT or YYACCEPT. */
- YYPOPSTACK (yylen);
- YY_STACK_PRINT (yyss, yyssp);
- while (yyssp != yyss)
- {
- yydestruct ("Cleanup: popping",
- yystos[*yyssp], yyvsp);
- YYPOPSTACK (1);
- }
-#ifndef yyoverflow
- if (yyss != yyssa)
- YYSTACK_FREE (yyss);
-#endif
-#if YYERROR_VERBOSE
- if (yymsg != yymsgbuf)
- YYSTACK_FREE (yymsg);
-#endif
- /* Make sure YYID is used. */
- return YYID (yyresult);
-}
-
-
-
-
-
-static void
-yyerror(const char *e)
-{
- error_with_pos("%s", e);
-}
diff --git a/scripts/genksyms/parse.tab.h_shipped b/scripts/genksyms/parse.tab.h_shipped
deleted file mode 100644
index 46a5e124eda1..000000000000
--- a/scripts/genksyms/parse.tab.h_shipped
+++ /dev/null
@@ -1,119 +0,0 @@
-/* A Bison parser, made by GNU Bison 2.7. */
-
-/* Bison interface for Yacc-like parsers in C
-
- Copyright (C) 1984, 1989-1990, 2000-2012 Free Software Foundation, Inc.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>. */
-
-/* As a special exception, you may create a larger work that contains
- part or all of the Bison parser skeleton and distribute that work
- under terms of your choice, so long as that work isn't itself a
- parser generator using the skeleton or a modified version thereof
- as a parser skeleton. Alternatively, if you modify or redistribute
- the parser skeleton itself, you may (at your option) remove this
- special exception, which will cause the skeleton and the resulting
- Bison output files to be licensed under the GNU General Public
- License without this special exception.
-
- This special exception was added by the Free Software Foundation in
- version 2.2 of Bison. */
-
-#ifndef YY_YY_SCRIPTS_GENKSYMS_PARSE_TAB_H_SHIPPED_INCLUDED
-# define YY_YY_SCRIPTS_GENKSYMS_PARSE_TAB_H_SHIPPED_INCLUDED
-/* Enabling traces. */
-#ifndef YYDEBUG
-# define YYDEBUG 1
-#endif
-#if YYDEBUG
-extern int yydebug;
-#endif
-
-/* Tokens. */
-#ifndef YYTOKENTYPE
-# define YYTOKENTYPE
- /* Put the tokens into the symbol table, so that GDB and other debuggers
- know about them. */
- enum yytokentype {
- ASM_KEYW = 258,
- ATTRIBUTE_KEYW = 259,
- AUTO_KEYW = 260,
- BOOL_KEYW = 261,
- CHAR_KEYW = 262,
- CONST_KEYW = 263,
- DOUBLE_KEYW = 264,
- ENUM_KEYW = 265,
- EXTERN_KEYW = 266,
- EXTENSION_KEYW = 267,
- FLOAT_KEYW = 268,
- INLINE_KEYW = 269,
- INT_KEYW = 270,
- LONG_KEYW = 271,
- REGISTER_KEYW = 272,
- RESTRICT_KEYW = 273,
- SHORT_KEYW = 274,
- SIGNED_KEYW = 275,
- STATIC_KEYW = 276,
- STRUCT_KEYW = 277,
- TYPEDEF_KEYW = 278,
- UNION_KEYW = 279,
- UNSIGNED_KEYW = 280,
- VOID_KEYW = 281,
- VOLATILE_KEYW = 282,
- TYPEOF_KEYW = 283,
- VA_LIST_KEYW = 284,
- EXPORT_SYMBOL_KEYW = 285,
- ASM_PHRASE = 286,
- ATTRIBUTE_PHRASE = 287,
- TYPEOF_PHRASE = 288,
- BRACE_PHRASE = 289,
- BRACKET_PHRASE = 290,
- EXPRESSION_PHRASE = 291,
- CHAR = 292,
- DOTS = 293,
- IDENT = 294,
- INT = 295,
- REAL = 296,
- STRING = 297,
- TYPE = 298,
- OTHER = 299,
- FILENAME = 300
- };
-#endif
-
-
-#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
-typedef int YYSTYPE;
-# define YYSTYPE_IS_TRIVIAL 1
-# define yystype YYSTYPE /* obsolescent; will be withdrawn */
-# define YYSTYPE_IS_DECLARED 1
-#endif
-
-extern YYSTYPE yylval;
-
-#ifdef YYPARSE_PARAM
-#if defined __STDC__ || defined __cplusplus
-int yyparse (void *YYPARSE_PARAM);
-#else
-int yyparse ();
-#endif
-#else /* ! YYPARSE_PARAM */
-#if defined __STDC__ || defined __cplusplus
-int yyparse (void);
-#else
-int yyparse ();
-#endif
-#endif /* ! YYPARSE_PARAM */
-
-#endif /* !YY_YY_SCRIPTS_GENKSYMS_PARSE_TAB_H_SHIPPED_INCLUDED */
diff --git a/scripts/kconfig/.gitignore b/scripts/kconfig/.gitignore
index a76856e559c0..2da579edcbaf 100644
--- a/scripts/kconfig/.gitignore
+++ b/scripts/kconfig/.gitignore
@@ -1,9 +1,6 @@
#
# Generated files
#
-*.lex.c
-*.tab.c
-*.tab.h
*.moc
gconf.glade.h
*.pot
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index f9bdd02c06a2..5def8779d7d8 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -207,9 +207,9 @@ gconf-objs := gconf.o zconf.tab.o
hostprogs-y := conf nconf mconf kxgettext qconf gconf
-targets += zconf.tab.c zconf.lex.c
+targets += zconf.lex.c
clean-files := qconf.moc .tmp_qtcheck .tmp_gtkcheck
-clean-files += zconf.tab.c zconf.lex.c gconf.glade.h
+clean-files += gconf.glade.h
clean-files += config.pot linux.pot
# Check that we have the required ncurses stuff installed for lxdialog (menuconfig)
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 4e08121a35fb..283eeedaa4fa 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -422,8 +422,18 @@ static void check_conf(struct menu *menu)
if (sym_is_changable(sym) ||
(sym_is_choice(sym) && sym_get_tristate_value(sym) == yes)) {
if (input_mode == listnewconfig) {
- if (sym->name && !sym_is_choice_value(sym)) {
- printf("%s%s\n", CONFIG_, sym->name);
+ if (sym->name) {
+ const char *str;
+
+ if (sym->type == S_STRING) {
+ str = sym_get_string_value(sym);
+ str = sym_escape_string_value(str);
+ printf("%s%s=%s\n", CONFIG_, sym->name, str);
+ free((void *)str);
+ } else {
+ str = sym_get_string_value(sym);
+ printf("%s%s=%s\n", CONFIG_, sym->name, str);
+ }
}
} else {
if (!conf_cnt++)
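
The conf.c hunk above makes `make listnewconfig` print each new symbol together with its value instead of just its name, routing string symbols through sym_escape_string_value() before printing. As a rough illustration of that quoting idea, here is a minimal standalone sketch (not part of this patch; the helper name escape_string_value() and its exact behaviour are assumptions standing in for the real kconfig helper):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical escaper: wrap the value in double quotes and
 * backslash-escape embedded '"' and '\', so the printed line could be
 * pasted back into a .config file. */
static char *escape_string_value(const char *in)
{
	size_t len = strlen(in);
	char *out = malloc(2 * len + 3);	/* worst case + two quotes + NUL */
	char *p = out;

	if (!out)
		return NULL;
	*p++ = '"';
	for (; *in; in++) {
		if (*in == '"' || *in == '\\')
			*p++ = '\\';
		*p++ = *in;
	}
	*p++ = '"';
	*p = '\0';
	return out;
}

int main(void)
{
	char *str = escape_string_value("say \"hi\"");

	if (str) {
		/* prints: CONFIG_GREETING="say \"hi\"" */
		printf("%s%s=%s\n", "CONFIG_", "GREETING", str);
		free(str);
	}
	return 0;
}
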
diff --git a/scripts/package/Makefile b/scripts/package/Makefile
index 9fbcf5ed0ca7..73503ebce632 100644
--- a/scripts/package/Makefile
+++ b/scripts/package/Makefile
@@ -24,6 +24,7 @@
# Remove hyphens since they have special meaning in RPM filenames
KERNELPATH := kernel-$(subst -,_,$(KERNELRELEASE))
KDEB_SOURCENAME ?= linux-$(KERNELRELEASE)
+KBUILD_PKG_ROOTCMD ?="fakeroot -u"
export KDEB_SOURCENAME
# Include only those top-level files that are needed by make, plus the GPL copy
TAR_CONTENT := $(KBUILD_ALLDIRS) .config .scmversion Makefile \
@@ -66,35 +67,20 @@ binrpm-pkg: FORCE
clean-files += $(objtree)/*.spec
-# Deb target
-# ---------------------------------------------------------------------------
-quiet_cmd_builddeb = BUILDDEB
- cmd_builddeb = set -e; \
- test `id -u` = 0 || \
- test -n "$(KBUILD_PKG_ROOTCMD)" || { \
- which fakeroot >/dev/null 2>&1 && \
- KBUILD_PKG_ROOTCMD="fakeroot -u"; \
- } || { \
- echo; \
- echo "builddeb must be run as root (or using fakeroot)."; \
- echo "KBUILD_PKG_ROOTCMD is unset and fakeroot not found."; \
- echo "Try setting KBUILD_PKG_ROOTCMD to a command to acquire"; \
- echo "root privileges (e.g., 'fakeroot -u' or 'sudo')."; \
- false; \
- } && \
- \
- $$KBUILD_PKG_ROOTCMD $(CONFIG_SHELL) \
- $(srctree)/scripts/package/builddeb $@
-
deb-pkg: FORCE
$(MAKE) clean
+ $(CONFIG_SHELL) $(srctree)/scripts/package/mkdebian
$(call cmd,src_tar,$(KDEB_SOURCENAME))
- $(MAKE) KBUILD_SRC=
- +$(call cmd,builddeb)
+ origversion=$$(dpkg-parsechangelog -SVersion |sed 's/-[^-]*$$//');\
+ mv $(KDEB_SOURCENAME).tar.gz ../$(KDEB_SOURCENAME)_$${origversion}.orig.tar.gz
+ +dpkg-buildpackage -r$(KBUILD_PKG_ROOTCMD) -a$$(cat debian/arch) -i.git -us -uc
bindeb-pkg: FORCE
- $(MAKE) KBUILD_SRC=
- +$(call cmd,builddeb)
+ $(CONFIG_SHELL) $(srctree)/scripts/package/mkdebian
+ +dpkg-buildpackage -r$(KBUILD_PKG_ROOTCMD) -a$$(cat debian/arch) -b -nc -uc
+
+intdeb-pkg: FORCE
+ +$(CONFIG_SHELL) $(srctree)/scripts/package/builddeb
clean-dirs += $(objtree)/debian/
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 13fabb1f81db..90c9a8ac7adb 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -30,67 +30,11 @@ create_package() {
chmod -R a+rX "$pdir"
# Create the package
- dpkg-gencontrol $forcearch -Vkernel:debarch="${debarch}" -p$pname -P"$pdir"
+ dpkg-gencontrol -p$pname -P"$pdir"
dpkg --build "$pdir" ..
}
-set_debarch() {
- # Attempt to find the correct Debian architecture
- case "$UTS_MACHINE" in
- i386|ia64|alpha)
- debarch="$UTS_MACHINE" ;;
- x86_64)
- debarch=amd64 ;;
- sparc*)
- debarch=sparc ;;
- s390*)
- debarch=s390$(grep -q CONFIG_64BIT=y $KCONFIG_CONFIG && echo x || true) ;;
- ppc*)
- debarch=$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo ppc64el || echo powerpc) ;;
- parisc*)
- debarch=hppa ;;
- mips*)
- debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el || true) ;;
- aarch64|arm64)
- debarch=arm64 ;;
- arm*)
- if grep -q CONFIG_AEABI=y $KCONFIG_CONFIG; then
- if grep -q CONFIG_VFP=y $KCONFIG_CONFIG; then
- debarch=armhf
- else
- debarch=armel
- fi
- else
- debarch=arm
- fi
- ;;
- *)
- debarch=$(dpkg --print-architecture)
- echo "" >&2
- echo "** ** ** WARNING ** ** **" >&2
- echo "" >&2
- echo "Your architecture doesn't have its equivalent" >&2
- echo "Debian userspace architecture defined!" >&2
- echo "Falling back to using your current userspace instead!" >&2
- echo "Please add support for $UTS_MACHINE to ${0} ..." >&2
- echo "" >&2
- esac
- if [ -n "$KBUILD_DEBARCH" ] ; then
- debarch="$KBUILD_DEBARCH"
- fi
- forcearch="-DArchitecture=$debarch"
-
-}
-
-# Some variables and settings used throughout the script
version=$KERNELRELEASE
-revision=$(cat .version)
-if [ -n "$KDEB_PKGVERSION" ]; then
- packageversion=$KDEB_PKGVERSION
-else
- packageversion=$version-$revision
-fi
-sourcename=$KDEB_SOURCENAME
tmpdir="$objtree/debian/tmp"
kernel_headers_dir="$objtree/debian/hdrtmp"
libc_headers_dir="$objtree/debian/headertmp"
@@ -99,9 +43,6 @@ packagename=linux-image-$version
kernel_headers_packagename=linux-headers-$version
libc_headers_packagename=linux-libc-dev
dbg_packagename=$packagename-dbg
-debarch=
-forcearch=
-set_debarch
if [ "$ARCH" = "um" ] ; then
packagename=user-mode-linux-$version
@@ -212,105 +153,6 @@ EOF
chmod 755 "$tmpdir/DEBIAN/$script"
done
-# Try to determine maintainer and email values
-if [ -n "$DEBEMAIL" ]; then
- email=$DEBEMAIL
-elif [ -n "$EMAIL" ]; then
- email=$EMAIL
-else
- email=$(id -nu)@$(hostname -f 2>/dev/null || hostname)
-fi
-if [ -n "$DEBFULLNAME" ]; then
- name=$DEBFULLNAME
-elif [ -n "$NAME" ]; then
- name=$NAME
-else
- name="Anonymous"
-fi
-maintainer="$name <$email>"
-
-# Try to determine distribution
-if [ -n "$KDEB_CHANGELOG_DIST" ]; then
- distribution=$KDEB_CHANGELOG_DIST
-# In some cases lsb_release returns the codename as n/a, which breaks dpkg-parsechangelog
-elif distribution=$(lsb_release -cs 2>/dev/null) && [ -n "$distribution" ] && [ "$distribution" != "n/a" ]; then
- : # nothing to do in this case
-else
- distribution="unstable"
- echo >&2 "Using default distribution of 'unstable' in the changelog"
- echo >&2 "Install lsb-release or set \$KDEB_CHANGELOG_DIST explicitly"
-fi
-
-# Generate a simple changelog template
-cat <<EOF > debian/changelog
-$sourcename ($packageversion) $distribution; urgency=low
-
- * Custom built Linux kernel.
-
- -- $maintainer $(date -R)
-EOF
-
-# Generate copyright file
-cat <<EOF > debian/copyright
-This is a packacked upstream version of the Linux kernel.
-
-The sources may be found at most Linux archive sites, including:
-https://www.kernel.org/pub/linux/kernel
-
-Copyright: 1991 - 2017 Linus Torvalds and others.
-
-The git repository for mainline kernel development is at:
-git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 dated June, 1991.
-
-On Debian GNU/Linux systems, the complete text of the GNU General Public
-License version 2 can be found in \`/usr/share/common-licenses/GPL-2'.
-EOF
-
-
-build_depends="bc, kmod, cpio "
-
-# Generate a control file
-cat <<EOF > debian/control
-Source: $sourcename
-Section: kernel
-Priority: optional
-Maintainer: $maintainer
-Build-Depends: $build_depends
-Homepage: http://www.kernel.org/
-EOF
-
-if [ "$ARCH" = "um" ]; then
- cat <<EOF >> debian/control
-
-Package: $packagename
-Architecture: any
-Description: User Mode Linux kernel, version $version
- User-mode Linux is a port of the Linux kernel to its own system call
- interface. It provides a kind of virtual machine, which runs Linux
- as a user process under another Linux kernel. This is useful for
- kernel development, sandboxes, jails, experimentation, and
- many other things.
- .
- This package contains the Linux kernel, modules and corresponding other
- files, version: $version.
-EOF
-
-else
- cat <<EOF >> debian/control
-
-Package: $packagename
-Architecture: any
-Description: Linux kernel, version $version
- This package contains the Linux kernel, modules and corresponding other
- files, version: $version.
-EOF
-
-fi
-
# Build kernel header package
(cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl) > "$objtree/debian/hdrsrcfiles"
(cd $srctree; find arch/*/include include scripts -type f -o -type l) >> "$objtree/debian/hdrsrcfiles"
@@ -331,27 +173,6 @@ mkdir -p "$destdir"
ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build"
rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
-cat <<EOF >> debian/control
-
-Package: $kernel_headers_packagename
-Architecture: any
-Description: Linux kernel headers for $KERNELRELEASE on \${kernel:debarch}
- This package provides kernel header files for $KERNELRELEASE on \${kernel:debarch}
- .
- This is useful for people who need to build external modules
-EOF
-
-cat <<EOF >> debian/control
-
-Package: $libc_headers_packagename
-Section: devel
-Provides: linux-kernel-headers
-Architecture: any
-Description: Linux support headers for userspace development
- This package provides userspaces headers from the Linux kernel. These headers
- are used by the installed headers for GNU glibc and other system libraries.
-EOF
-
if [ "$ARCH" != "um" ]; then
create_package "$kernel_headers_packagename" "$kernel_headers_dir"
create_package "$libc_headers_packagename" "$libc_headers_dir"
@@ -370,47 +191,7 @@ if [ -n "$BUILD_DEBUG" ] ; then
ln -s ../lib/modules/$version/vmlinux $dbg_dir/usr/lib/debug/boot/vmlinux-$version
# kdump-tools
ln -s lib/modules/$version/vmlinux $dbg_dir/usr/lib/debug/vmlinux-$version
-
- cat <<EOF >> debian/control
-
-Package: $dbg_packagename
-Section: debug
-Architecture: any
-Description: Linux kernel debugging symbols for $version
- This package will come in handy if you need to debug the kernel. It provides
- all the necessary debug symbols for the kernel and its modules.
-EOF
-
create_package "$dbg_packagename" "$dbg_dir"
fi
-if [ "x$1" = "xdeb-pkg" ]
-then
- cat <<EOF > debian/rules
-#!/usr/bin/make -f
-
-build:
- \$(MAKE)
-
-binary-arch:
- \$(MAKE) KDEB_SOURCENAME=${sourcename} KDEB_PKGVERSION=${packageversion} bindeb-pkg
-
-clean:
- rm -rf debian/*tmp debian/files
- mv debian/ debian.backup # debian/ might be cleaned away
- \$(MAKE) clean
- mv debian.backup debian
-
-binary: binary-arch
-EOF
- mv ${sourcename}.tar.gz ../${sourcename}_${version}.orig.tar.gz
- tar caf ../${sourcename}_${packageversion}.debian.tar.gz debian/{copyright,rules,changelog,control}
- dpkg-source -cdebian/control -ldebian/changelog --format="3.0 (custom)" --target-format="3.0 (quilt)" \
- -b / ../${sourcename}_${version}.orig.tar.gz ../${sourcename}_${packageversion}.debian.tar.gz
- mv ${sourcename}_${packageversion}*dsc ..
- dpkg-genchanges -Vkernel:debarch="${debarch}" > ../${sourcename}_${packageversion}_${debarch}.changes
-else
- dpkg-genchanges -b -Vkernel:debarch="${debarch}" > ../${sourcename}_${packageversion}_${debarch}.changes
-fi
-
exit 0
diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
new file mode 100755
index 000000000000..6adb3a16ba3b
--- /dev/null
+++ b/scripts/package/mkdebian
@@ -0,0 +1,189 @@
+#!/bin/sh
+#
+# Copyright 2003 Wichert Akkerman <wichert@wiggy.net>
+#
+# Simple script to generate a debian/ directory for a Linux kernel.
+
+set -e
+
+set_debarch() {
+ # Attempt to find the correct Debian architecture
+ case "$UTS_MACHINE" in
+ i386|ia64|alpha)
+ debarch="$UTS_MACHINE" ;;
+ x86_64)
+ debarch=amd64 ;;
+ sparc*)
+ debarch=sparc ;;
+ s390*)
+ debarch=s390$(grep -q CONFIG_64BIT=y $KCONFIG_CONFIG && echo x || true) ;;
+ ppc*)
+ debarch=$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo ppc64el || echo powerpc) ;;
+ parisc*)
+ debarch=hppa ;;
+ mips*)
+ debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el || true) ;;
+ aarch64|arm64)
+ debarch=arm64 ;;
+ arm*)
+ if grep -q CONFIG_AEABI=y $KCONFIG_CONFIG; then
+ if grep -q CONFIG_VFP=y $KCONFIG_CONFIG; then
+ debarch=armhf
+ else
+ debarch=armel
+ fi
+ else
+ debarch=arm
+ fi
+ ;;
+ *)
+ debarch=$(dpkg --print-architecture)
+ echo "" >&2
+ echo "** ** ** WARNING ** ** **" >&2
+ echo "" >&2
+ echo "Your architecture doesn't have its equivalent" >&2
+ echo "Debian userspace architecture defined!" >&2
+ echo "Falling back to using your current userspace instead!" >&2
+ echo "Please add support for $UTS_MACHINE to ${0} ..." >&2
+ echo "" >&2
+ esac
+ if [ -n "$KBUILD_DEBARCH" ] ; then
+ debarch="$KBUILD_DEBARCH"
+ fi
+}
+
+# Some variables and settings used throughout the script
+version=$KERNELRELEASE
+if [ -n "$KDEB_PKGVERSION" ]; then
+ packageversion=$KDEB_PKGVERSION
+else
+ revision=$(cat .version 2>/dev/null||echo 1)
+ packageversion=$version-$revision
+fi
+sourcename=$KDEB_SOURCENAME
+packagename=linux-image-$version
+kernel_headers_packagename=linux-headers-$version
+dbg_packagename=$packagename-dbg
+debarch=
+set_debarch
+
+if [ "$ARCH" = "um" ] ; then
+ packagename=user-mode-linux-$version
+fi
+
+# Try to determine maintainer and email values
+if [ -n "$DEBEMAIL" ]; then
+ email=$DEBEMAIL
+elif [ -n "$EMAIL" ]; then
+ email=$EMAIL
+else
+ email=$(id -nu)@$(hostname -f 2>/dev/null || hostname)
+fi
+if [ -n "$DEBFULLNAME" ]; then
+ name=$DEBFULLNAME
+elif [ -n "$NAME" ]; then
+ name=$NAME
+else
+ name="Anonymous"
+fi
+maintainer="$name <$email>"
+
+# Try to determine distribution
+if [ -n "$KDEB_CHANGELOG_DIST" ]; then
+ distribution=$KDEB_CHANGELOG_DIST
+# In some cases lsb_release returns the codename as n/a, which breaks dpkg-parsechangelog
+elif distribution=$(lsb_release -cs 2>/dev/null) && [ -n "$distribution" ] && [ "$distribution" != "n/a" ]; then
+ : # nothing to do in this case
+else
+ distribution="unstable"
+ echo >&2 "Using default distribution of 'unstable' in the changelog"
+ echo >&2 "Install lsb-release or set \$KDEB_CHANGELOG_DIST explicitly"
+fi
+
+mkdir -p debian/
+echo $debarch > debian/arch
+
+# Generate a simple changelog template
+cat <<EOF > debian/changelog
+$sourcename ($packageversion) $distribution; urgency=low
+
+ * Custom built Linux kernel.
+
+ -- $maintainer $(date -R)
+EOF
+
+# Generate copyright file
+cat <<EOF > debian/copyright
+This is a packacked upstream version of the Linux kernel.
+
+The sources may be found at most Linux archive sites, including:
+https://www.kernel.org/pub/linux/kernel
+
+Copyright: 1991 - 2018 Linus Torvalds and others.
+
+The git repository for mainline kernel development is at:
+git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 dated June, 1991.
+
+On Debian GNU/Linux systems, the complete text of the GNU General Public
+License version 2 can be found in \`/usr/share/common-licenses/GPL-2'.
+EOF
+
+# Generate a control file
+cat <<EOF > debian/control
+Source: $sourcename
+Section: kernel
+Priority: optional
+Maintainer: $maintainer
+Build-Depends: bc, kmod, cpio
+Homepage: http://www.kernel.org/
+
+Package: $packagename
+Architecture: $debarch
+Description: Linux kernel, version $version
+ This package contains the Linux kernel, modules and corresponding other
+ files, version: $version.
+
+Package: $kernel_headers_packagename
+Architecture: $debarch
+Description: Linux kernel headers for $version on $debarch
+ This package provides kernel header files for $version on $debarch
+ .
+ This is useful for people who need to build external modules
+
+Package: linux-libc-dev
+Section: devel
+Provides: linux-kernel-headers
+Architecture: $debarch
+Description: Linux support headers for userspace development
+ This package provides userspaces headers from the Linux kernel. These headers
+ are used by the installed headers for GNU glibc and other system libraries.
+
+Package: $dbg_packagename
+Section: debug
+Architecture: $debarch
+Description: Linux kernel debugging symbols for $version
+ This package will come in handy if you need to debug the kernel. It provides
+ all the necessary debug symbols for the kernel and its modules.
+EOF
+
+cat <<EOF > debian/rules
+#!$(command -v $MAKE) -f
+
+build:
+ \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} KBUILD_SRC=
+
+binary-arch:
+ \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} KBUILD_SRC= intdeb-pkg
+
+clean:
+ rm -rf debian/*tmp debian/files
+ \$(MAKE) clean
+
+binary: binary-arch
+EOF
+
+exit 0
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index 61427c6f2209..e05646dc24dc 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -118,6 +118,8 @@ $S$M ln -sf /usr/src/kernels/$KERNELRELEASE source
%preun
if [ -x /sbin/new-kernel-pkg ]; then
new-kernel-pkg --remove $KERNELRELEASE --rminitrd --initrdfile=/boot/initramfs-$KERNELRELEASE.img
+ elif [ -x /usr/bin/kernel-install ]; then
+ kernel-install remove $KERNELRELEASE
fi
%postun
diff --git a/security/apparmor/.gitignore b/security/apparmor/.gitignore
index 9cdec70d72b8..d5b291e94264 100644
--- a/security/apparmor/.gitignore
+++ b/security/apparmor/.gitignore
@@ -1,5 +1,6 @@
#
# Generated include files
#
+net_names.h
capability_names.h
rlim_names.h
diff --git a/security/apparmor/Makefile b/security/apparmor/Makefile
index 9a6b4033d52b..ff23fcfefe19 100644
--- a/security/apparmor/Makefile
+++ b/security/apparmor/Makefile
@@ -3,13 +3,46 @@
#
obj-$(CONFIG_SECURITY_APPARMOR) += apparmor.o
-apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \
+apparmor-y := apparmorfs.o audit.o capability.o task.o ipc.o lib.o match.o \
path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \
- resource.o secid.o file.o policy_ns.o label.o mount.o
+ resource.o secid.o file.o policy_ns.o label.o mount.o net.o
apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o
-clean-files := capability_names.h rlim_names.h
+clean-files := capability_names.h rlim_names.h net_names.h
+# Build a lower case string table of address family names
+# Transform lines from
+# #define AF_LOCAL 1 /* POSIX name for AF_UNIX */
+# #define AF_INET 2 /* Internet IP Protocol */
+# to
+# [1] = "local",
+# [2] = "inet",
+#
+# and build the securityfs entries for the mapping.
+# Transforms lines from
+# #define AF_INET 2 /* Internet IP Protocol */
+# to
+# #define AA_SFS_AF_MASK "local inet"
+quiet_cmd_make-af = GEN $@
+cmd_make-af = echo "static const char *address_family_names[] = {" > $@ ;\
+ sed $< >>$@ -r -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "/AF_ROUTE/d" -e \
+ 's/^\#define[ \t]+AF_([A-Z0-9_]+)[ \t]+([0-9]+)(.*)/[\2] = "\L\1",/p';\
+ echo "};" >> $@ ;\
+ printf '%s' '\#define AA_SFS_AF_MASK "' >> $@ ;\
+ sed -r -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "/AF_ROUTE/d" -e \
+ 's/^\#define[ \t]+AF_([A-Z0-9_]+)[ \t]+([0-9]+)(.*)/\L\1/p'\
+ $< | tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
+
+# Build a lower case string table of sock type names
+# Transform lines from
+# SOCK_STREAM = 1,
+# to
+# [1] = "stream",
+quiet_cmd_make-sock = GEN $@
+cmd_make-sock = echo "static const char *sock_type_names[] = {" >> $@ ;\
+ sed $^ >>$@ -r -n \
+ -e 's/^\tSOCK_([A-Z0-9_]+)[\t]+=[ \t]+([0-9]+)(.*)/[\2] = "\L\1",/p';\
+ echo "};" >> $@
# Build a lower case string table of capability names
# Transforms lines from
@@ -62,6 +95,7 @@ cmd_make-rlim = echo "static const char *const rlim_names[RLIM_NLIMITS] = {" \
tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
$(obj)/capability.o : $(obj)/capability_names.h
+$(obj)/net.o : $(obj)/net_names.h
$(obj)/resource.o : $(obj)/rlim_names.h
$(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \
$(src)/Makefile
@@ -69,3 +103,8 @@ $(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \
$(obj)/rlim_names.h : $(srctree)/include/uapi/asm-generic/resource.h \
$(src)/Makefile
$(call cmd,make-rlim)
+$(obj)/net_names.h : $(srctree)/include/linux/socket.h \
+ $(srctree)/include/linux/net.h \
+ $(src)/Makefile
+ $(call cmd,make-af)
+ $(call cmd,make-sock)
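
Putting the two generators above together, the net_names.h they emit would plausibly have the shape sketched below (reconstructed from the sed transforms described in the Makefile comments; the concrete entries and the full AA_SFS_AF_MASK string depend on the <linux/socket.h> and <linux/net.h> being built against, so treat them as placeholders rather than the actual generated file):

/* Hypothetical shape of the generated $(obj)/net_names.h */
static const char *address_family_names[] = {
	[1] = "unix",
	[2] = "inet",
	/* ... one lower-cased entry per AF_* value, minus AF_LOCAL,
	 * AF_ROUTE and AF_MAX, which the sed expressions filter out ... */
};
#define AA_SFS_AF_MASK "unix inet ..."

static const char *sock_type_names[] = {
	[1] = "stream",
	[2] = "dgram",
	/* ... one lower-cased entry per SOCK_* enumerator ... */
};
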
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index a9428daa69f3..949dd8a48164 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -30,10 +30,9 @@
#include "include/apparmor.h"
#include "include/apparmorfs.h"
#include "include/audit.h"
-#include "include/context.h"
+#include "include/cred.h"
#include "include/crypto.h"
#include "include/ipc.h"
-#include "include/policy_ns.h"
#include "include/label.h"
#include "include/policy.h"
#include "include/policy_ns.h"
@@ -120,9 +119,7 @@ static int aafs_count;
static int aafs_show_path(struct seq_file *seq, struct dentry *dentry)
{
- struct inode *inode = d_inode(dentry);
-
- seq_printf(seq, "%s:[%lu]", AAFS_NAME, inode->i_ino);
+ seq_printf(seq, "%s:[%lu]", AAFS_NAME, d_inode(dentry)->i_ino);
return 0;
}
@@ -313,6 +310,7 @@ static struct dentry *aafs_create_dir(const char *name, struct dentry *parent)
* @name: name of dentry to create
* @parent: parent directory for this dentry
* @target: if symlink, symlink target string
+ * @private: private data
* @iops: struct of inode_operations that should be used
*
* If @target parameter is %NULL, then the @iops parameter needs to be
@@ -321,17 +319,17 @@ static struct dentry *aafs_create_dir(const char *name, struct dentry *parent)
static struct dentry *aafs_create_symlink(const char *name,
struct dentry *parent,
const char *target,
+ void *private,
const struct inode_operations *iops)
{
struct dentry *dent;
char *link = NULL;
if (target) {
- link = kstrdup(target, GFP_KERNEL);
if (!link)
return ERR_PTR(-ENOMEM);
}
- dent = aafs_create(name, S_IFLNK | 0444, parent, NULL, link, NULL,
+ dent = aafs_create(name, S_IFLNK | 0444, parent, private, link, NULL,
iops);
if (IS_ERR(dent))
kfree(link);
@@ -622,7 +620,7 @@ static void profile_query_cb(struct aa_profile *profile, struct aa_perms *perms,
tmp = aa_compute_fperms(dfa, state, &cond);
}
} else if (profile->policy.dfa) {
- if (!PROFILE_MEDIATES_SAFE(profile, *match_str))
+ if (!PROFILE_MEDIATES(profile, *match_str))
return; /* no change to current perms */
dfa = profile->policy.dfa;
state = aa_dfa_match_len(dfa, profile->policy.start[0],
@@ -1189,9 +1187,7 @@ static int seq_ns_level_show(struct seq_file *seq, void *v)
static int seq_ns_name_show(struct seq_file *seq, void *v)
{
struct aa_label *label = begin_current_label_crit_section();
-
- seq_printf(seq, "%s\n", aa_ns_name(labels_ns(label),
- labels_ns(label), true));
+ seq_printf(seq, "%s\n", labels_ns(label)->base.name);
end_current_label_crit_section(label);
return 0;
@@ -1484,26 +1480,97 @@ static int profile_depth(struct aa_profile *profile)
return depth;
}
-static int gen_symlink_name(char *buffer, size_t bsize, int depth,
- const char *dirname, const char *fname)
+static char *gen_symlink_name(int depth, const char *dirname, const char *fname)
{
+ char *buffer, *s;
int error;
+ int size = depth * 6 + strlen(dirname) + strlen(fname) + 11;
+
+ s = buffer = kmalloc(size, GFP_KERNEL);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
for (; depth > 0; depth--) {
- if (bsize < 7)
- return -ENAMETOOLONG;
- strcpy(buffer, "../../");
- buffer += 6;
- bsize -= 6;
+ strcpy(s, "../../");
+ s += 6;
+ size -= 6;
}
- error = snprintf(buffer, bsize, "raw_data/%s/%s", dirname, fname);
- if (error >= bsize || error < 0)
- return -ENAMETOOLONG;
+ error = snprintf(s, size, "raw_data/%s/%s", dirname, fname);
+ if (error >= size || error < 0) {
+ kfree(buffer);
+ return ERR_PTR(-ENAMETOOLONG);
+ }
- return 0;
+ return buffer;
+}
+
+static void rawdata_link_cb(void *arg)
+{
+ kfree(arg);
+}
+
+static const char *rawdata_get_link_base(struct dentry *dentry,
+ struct inode *inode,
+ struct delayed_call *done,
+ const char *name)
+{
+ struct aa_proxy *proxy = inode->i_private;
+ struct aa_label *label;
+ struct aa_profile *profile;
+ char *target;
+ int depth;
+
+ if (!dentry)
+ return ERR_PTR(-ECHILD);
+
+ label = aa_get_label_rcu(&proxy->label);
+ profile = labels_profile(label);
+ depth = profile_depth(profile);
+ target = gen_symlink_name(depth, profile->rawdata->name, name);
+ aa_put_label(label);
+
+ if (IS_ERR(target))
+ return target;
+
+ set_delayed_call(done, rawdata_link_cb, target);
+
+ return target;
+}
+
+static const char *rawdata_get_link_sha1(struct dentry *dentry,
+ struct inode *inode,
+ struct delayed_call *done)
+{
+ return rawdata_get_link_base(dentry, inode, done, "sha1");
+}
+
+static const char *rawdata_get_link_abi(struct dentry *dentry,
+ struct inode *inode,
+ struct delayed_call *done)
+{
+ return rawdata_get_link_base(dentry, inode, done, "abi");
+}
+
+static const char *rawdata_get_link_data(struct dentry *dentry,
+ struct inode *inode,
+ struct delayed_call *done)
+{
+ return rawdata_get_link_base(dentry, inode, done, "raw_data");
}
+static const struct inode_operations rawdata_link_sha1_iops = {
+ .get_link = rawdata_get_link_sha1,
+};
+
+static const struct inode_operations rawdata_link_abi_iops = {
+ .get_link = rawdata_get_link_abi,
+};
+static const struct inode_operations rawdata_link_data_iops = {
+ .get_link = rawdata_get_link_data,
+};
+
+
/*
* Requires: @profile->ns->lock held
*/
@@ -1574,34 +1641,28 @@ int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
}
if (profile->rawdata) {
- char target[64];
- int depth = profile_depth(profile);
-
- error = gen_symlink_name(target, sizeof(target), depth,
- profile->rawdata->name, "sha1");
- if (error < 0)
- goto fail2;
- dent = aafs_create_symlink("raw_sha1", dir, target, NULL);
+ dent = aafs_create_symlink("raw_sha1", dir, NULL,
+ profile->label.proxy,
+ &rawdata_link_sha1_iops);
if (IS_ERR(dent))
goto fail;
+ aa_get_proxy(profile->label.proxy);
profile->dents[AAFS_PROF_RAW_HASH] = dent;
- error = gen_symlink_name(target, sizeof(target), depth,
- profile->rawdata->name, "abi");
- if (error < 0)
- goto fail2;
- dent = aafs_create_symlink("raw_abi", dir, target, NULL);
+ dent = aafs_create_symlink("raw_abi", dir, NULL,
+ profile->label.proxy,
+ &rawdata_link_abi_iops);
if (IS_ERR(dent))
goto fail;
+ aa_get_proxy(profile->label.proxy);
profile->dents[AAFS_PROF_RAW_ABI] = dent;
- error = gen_symlink_name(target, sizeof(target), depth,
- profile->rawdata->name, "raw_data");
- if (error < 0)
- goto fail2;
- dent = aafs_create_symlink("raw_data", dir, target, NULL);
+ dent = aafs_create_symlink("raw_data", dir, NULL,
+ profile->label.proxy,
+ &rawdata_link_data_iops);
if (IS_ERR(dent))
goto fail;
+ aa_get_proxy(profile->label.proxy);
profile->dents[AAFS_PROF_RAW_DATA] = dent;
}
@@ -2152,6 +2213,10 @@ static struct aa_sfs_entry aa_sfs_entry_signal[] = {
{ }
};
+static struct aa_sfs_entry aa_sfs_entry_attach[] = {
+ AA_SFS_FILE_BOOLEAN("xattr", 1),
+ { }
+};
static struct aa_sfs_entry aa_sfs_entry_domain[] = {
AA_SFS_FILE_BOOLEAN("change_hat", 1),
AA_SFS_FILE_BOOLEAN("change_hatv", 1),
@@ -2159,6 +2224,9 @@ static struct aa_sfs_entry aa_sfs_entry_domain[] = {
AA_SFS_FILE_BOOLEAN("change_profile", 1),
AA_SFS_FILE_BOOLEAN("stack", 1),
AA_SFS_FILE_BOOLEAN("fix_binfmt_elf_mmap", 1),
+ AA_SFS_FILE_BOOLEAN("post_nnp_subset", 1),
+ AA_SFS_FILE_BOOLEAN("computed_longest_left", 1),
+ AA_SFS_DIR("attach_conditions", aa_sfs_entry_attach),
AA_SFS_FILE_STRING("version", "1.2"),
{ }
};
@@ -2167,6 +2235,7 @@ static struct aa_sfs_entry aa_sfs_entry_versions[] = {
AA_SFS_FILE_BOOLEAN("v5", 1),
AA_SFS_FILE_BOOLEAN("v6", 1),
AA_SFS_FILE_BOOLEAN("v7", 1),
+ AA_SFS_FILE_BOOLEAN("v8", 1),
{ }
};
@@ -2202,6 +2271,7 @@ static struct aa_sfs_entry aa_sfs_entry_features[] = {
AA_SFS_DIR("policy", aa_sfs_entry_policy),
AA_SFS_DIR("domain", aa_sfs_entry_domain),
AA_SFS_DIR("file", aa_sfs_entry_file),
+ AA_SFS_DIR("network_v8", aa_sfs_entry_network),
AA_SFS_DIR("mount", aa_sfs_entry_mount),
AA_SFS_DIR("namespaces", aa_sfs_entry_ns),
AA_SFS_FILE_U64("capability", VFS_CAP_FLAGS_MASK),
@@ -2394,29 +2464,18 @@ static const char *policy_get_link(struct dentry *dentry,
return NULL;
}
-static int ns_get_name(char *buf, size_t size, struct aa_ns *ns,
- struct inode *inode)
-{
- int res = snprintf(buf, size, "%s:[%lu]", AAFS_NAME, inode->i_ino);
-
- if (res < 0 || res >= size)
- res = -ENOENT;
-
- return res;
-}
-
static int policy_readlink(struct dentry *dentry, char __user *buffer,
int buflen)
{
- struct aa_ns *ns;
char name[32];
int res;
- ns = aa_get_current_ns();
- res = ns_get_name(name, sizeof(name), ns, d_inode(dentry));
- if (res >= 0)
+ res = snprintf(name, sizeof(name), "%s:[%lu]", AAFS_NAME,
+ d_inode(dentry)->i_ino);
+ if (res > 0 && res < sizeof(name))
res = readlink_copy(buffer, buflen, name);
- aa_put_ns(ns);
+ else
+ res = -ENOENT;
return res;
}
@@ -2460,34 +2519,26 @@ static int __init aa_create_aafs(void)
dent = securityfs_create_file(".load", 0666, aa_sfs_entry.dentry,
NULL, &aa_fs_profile_load);
- if (IS_ERR(dent)) {
- error = PTR_ERR(dent);
- goto error;
- }
+ if (IS_ERR(dent))
+ goto dent_error;
ns_subload(root_ns) = dent;
dent = securityfs_create_file(".replace", 0666, aa_sfs_entry.dentry,
NULL, &aa_fs_profile_replace);
- if (IS_ERR(dent)) {
- error = PTR_ERR(dent);
- goto error;
- }
+ if (IS_ERR(dent))
+ goto dent_error;
ns_subreplace(root_ns) = dent;
dent = securityfs_create_file(".remove", 0666, aa_sfs_entry.dentry,
NULL, &aa_fs_profile_remove);
- if (IS_ERR(dent)) {
- error = PTR_ERR(dent);
- goto error;
- }
+ if (IS_ERR(dent))
+ goto dent_error;
ns_subremove(root_ns) = dent;
dent = securityfs_create_file("revision", 0444, aa_sfs_entry.dentry,
NULL, &aa_fs_ns_revision_fops);
- if (IS_ERR(dent)) {
- error = PTR_ERR(dent);
- goto error;
- }
+ if (IS_ERR(dent))
+ goto dent_error;
ns_subrevision(root_ns) = dent;
/* policy tree referenced by magic policy symlink */
@@ -2501,10 +2552,8 @@ static int __init aa_create_aafs(void)
/* magic symlink similar to nsfs redirects based on task policy */
dent = securityfs_create_symlink("policy", aa_sfs_entry.dentry,
NULL, &policy_link_iops);
- if (IS_ERR(dent)) {
- error = PTR_ERR(dent);
- goto error;
- }
+ if (IS_ERR(dent))
+ goto dent_error;
error = aa_mk_null_file(aa_sfs_entry.dentry);
if (error)
@@ -2516,6 +2565,8 @@ static int __init aa_create_aafs(void)
aa_info_message("AppArmor Filesystem Enabled");
return 0;
+dent_error:
+ error = PTR_ERR(dent);
error:
aa_destroy_aafs();
AA_ERROR("Error creating AppArmor securityfs\n");
diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c
index 67e347192a55..253ef6e9d445 100644
--- a/security/apparmor/capability.c
+++ b/security/apparmor/capability.c
@@ -19,7 +19,7 @@
#include "include/apparmor.h"
#include "include/capability.h"
-#include "include/context.h"
+#include "include/cred.h"
#include "include/policy.h"
#include "include/audit.h"
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 6a54d2ffa840..590b7e8cd21c 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -19,10 +19,11 @@
#include <linux/syscalls.h>
#include <linux/tracehook.h>
#include <linux/personality.h>
+#include <linux/xattr.h>
#include "include/audit.h"
#include "include/apparmorfs.h"
-#include "include/context.h"
+#include "include/cred.h"
#include "include/domain.h"
#include "include/file.h"
#include "include/ipc.h"
@@ -302,7 +303,70 @@ static int change_profile_perms(struct aa_profile *profile,
}
/**
+ * aa_xattrs_match - check whether a file matches the xattrs defined in profile
+ * @bprm: binprm struct for the process to validate
+ * @profile: profile to match against (NOT NULL)
+ * @state: state to start match in
+ *
+ * Returns: number of extended attributes that matched, or < 0 on error
+ */
+static int aa_xattrs_match(const struct linux_binprm *bprm,
+ struct aa_profile *profile, unsigned int state)
+{
+ int i;
+ ssize_t size;
+ struct dentry *d;
+ char *value = NULL;
+ int value_size = 0, ret = profile->xattr_count;
+
+ if (!bprm || !profile->xattr_count)
+ return 0;
+
+ /* transition from exec match to xattr set */
+ state = aa_dfa_null_transition(profile->xmatch, state);
+
+ d = bprm->file->f_path.dentry;
+
+ for (i = 0; i < profile->xattr_count; i++) {
+ size = vfs_getxattr_alloc(d, profile->xattrs[i], &value,
+ value_size, GFP_KERNEL);
+ if (size >= 0) {
+ u32 perm;
+
+ /* Check the xattr value, not just presence */
+ state = aa_dfa_match_len(profile->xmatch, state, value,
+ size);
+ perm = dfa_user_allow(profile->xmatch, state);
+ if (!(perm & MAY_EXEC)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+ /* transition to next element */
+ state = aa_dfa_null_transition(profile->xmatch, state);
+ if (size < 0) {
+ /*
+ * No xattr match, so verify if transition to
+ * next element was valid. IFF so the xattr
+ * was optional.
+ */
+ if (!state) {
+ ret = -EINVAL;
+ goto out;
+ }
+ /* don't count missing optional xattr as matched */
+ ret--;
+ }
+ }
+
+out:
+ kfree(value);
+ return ret;
+}
+
+/**
* __attach_match_ - find an attachment match
+ * @bprm - binprm structure of transitioning task
* @name - to match against (NOT NULL)
* @head - profile list to walk (NOT NULL)
* @info - info message if there was an error (NOT NULL)
@@ -316,40 +380,80 @@ static int change_profile_perms(struct aa_profile *profile,
*
* Returns: profile or NULL if no match found
*/
-static struct aa_profile *__attach_match(const char *name,
+static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
+ const char *name,
struct list_head *head,
const char **info)
{
- int len = 0;
+ int candidate_len = 0, candidate_xattrs = 0;
bool conflict = false;
struct aa_profile *profile, *candidate = NULL;
+ AA_BUG(!name);
+ AA_BUG(!head);
+
list_for_each_entry_rcu(profile, head, base.list) {
if (profile->label.flags & FLAG_NULL &&
&profile->label == ns_unconfined(profile->ns))
continue;
+ /* Find the "best" matching profile. Profiles must
+ * match the path and extended attributes (if any)
+ * associated with the file. A more specific path
+ * match will be preferred over a less specific one,
+ * and a match with more matching extended attributes
+ * will be preferred over one with fewer. If the best
+ * match has both the same level of path specificity
+ * and the same number of matching extended attributes
+ * as another profile, signal a conflict and refuse to
+ * match.
+ */
if (profile->xmatch) {
- if (profile->xmatch_len >= len) {
- unsigned int state;
- u32 perm;
-
- state = aa_dfa_match(profile->xmatch,
- DFA_START, name);
- perm = dfa_user_allow(profile->xmatch, state);
- /* any accepting state means a valid match. */
- if (perm & MAY_EXEC) {
- if (profile->xmatch_len == len) {
+ unsigned int state, count;
+ u32 perm;
+
+ state = aa_dfa_leftmatch(profile->xmatch, DFA_START,
+ name, &count);
+ perm = dfa_user_allow(profile->xmatch, state);
+ /* any accepting state means a valid match. */
+ if (perm & MAY_EXEC) {
+ int ret;
+
+ if (count < candidate_len)
+ continue;
+
+ ret = aa_xattrs_match(bprm, profile, state);
+ /* Fail matching if the xattrs don't match */
+ if (ret < 0)
+ continue;
+
+ /*
+ * TODO: allow for more flexible best match
+ *
+ * The new match isn't more specific
+ * than the current best match
+ */
+ if (count == candidate_len &&
+ ret <= candidate_xattrs) {
+ /* Match is equivalent, so conflict */
+ if (ret == candidate_xattrs)
conflict = true;
- continue;
- }
- candidate = profile;
- len = profile->xmatch_len;
- conflict = false;
+ continue;
}
+
+ /* Either the same length with more matching
+ * xattrs, or a longer match
+ */
+ candidate = profile;
+ candidate_len = profile->xmatch_len;
+ candidate_xattrs = ret;
+ conflict = false;
}
} else if (!strcmp(profile->base.name, name))
- /* exact non-re match, no more searching required */
+ /*
+ * old exact non-re match, without conditionals such
+ * as xattrs. no more searching required
+ */
return profile;
}
@@ -363,6 +467,7 @@ static struct aa_profile *__attach_match(const char *name,
/**
* find_attach - do attachment search for unconfined processes
+ * @bprm - binprm structure of transitioning task
* @ns: the current namespace (NOT NULL)
* @list: list to search (NOT NULL)
* @name: the executable name to match against (NOT NULL)
@@ -370,13 +475,14 @@ static struct aa_profile *__attach_match(const char *name,
*
* Returns: label or NULL if no match found
*/
-static struct aa_label *find_attach(struct aa_ns *ns, struct list_head *list,
+static struct aa_label *find_attach(const struct linux_binprm *bprm,
+ struct aa_ns *ns, struct list_head *list,
const char *name, const char **info)
{
struct aa_profile *profile;
rcu_read_lock();
- profile = aa_get_profile(__attach_match(name, list, info));
+ profile = aa_get_profile(__attach_match(bprm, name, list, info));
rcu_read_unlock();
return profile ? &profile->label : NULL;
@@ -432,6 +538,7 @@ struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex,
/**
* x_to_label - get target label for a given xindex
* @profile: current profile (NOT NULL)
+ * @bprm: binprm structure of transitioning task
* @name: name to lookup (NOT NULL)
* @xindex: index into x transition table
* @lookupname: returns: name used in lookup if one was specified (NOT NULL)
@@ -441,6 +548,7 @@ struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex,
* Returns: refcounted label or NULL if not found available
*/
static struct aa_label *x_to_label(struct aa_profile *profile,
+ const struct linux_binprm *bprm,
const char *name, u32 xindex,
const char **lookupname,
const char **info)
@@ -468,11 +576,11 @@ static struct aa_label *x_to_label(struct aa_profile *profile,
case AA_X_NAME:
if (xindex & AA_X_CHILD)
/* released by caller */
- new = find_attach(ns, &profile->base.profiles,
+ new = find_attach(bprm, ns, &profile->base.profiles,
name, info);
else
/* released by caller */
- new = find_attach(ns, &ns->base.profiles,
+ new = find_attach(bprm, ns, &ns->base.profiles,
name, info);
*lookupname = name;
break;
@@ -512,6 +620,8 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
bool *secure_exec)
{
struct aa_label *new = NULL;
+ struct aa_profile *component;
+ struct label_it i;
const char *info = NULL, *name = NULL, *target = NULL;
unsigned int state = profile->file.start;
struct aa_perms perms = {};
@@ -536,8 +646,8 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
}
if (profile_unconfined(profile)) {
- new = find_attach(profile->ns, &profile->ns->base.profiles,
- name, &info);
+ new = find_attach(bprm, profile->ns,
+ &profile->ns->base.profiles, name, &info);
if (new) {
AA_DEBUG("unconfined attached to new label");
return new;
@@ -550,7 +660,8 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
state = aa_str_perms(profile->file.dfa, state, name, cond, &perms);
if (perms.allow & MAY_EXEC) {
/* exec permission determines how to transition */
- new = x_to_label(profile, name, perms.xindex, &target, &info);
+ new = x_to_label(profile, bprm, name, perms.xindex, &target,
+ &info);
if (new && new->proxy == profile->label.proxy && info) {
/* hack ix fallback - improve how this is detected */
goto audit;
@@ -559,6 +670,21 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
info = "profile transition not found";
/* remove MAY_EXEC to audit as failure */
perms.allow &= ~MAY_EXEC;
+ } else {
+ /* verify that each component's xattr requirements are
+ * met, and fail execution otherwise
+ */
+ label_for_each(i, new, component) {
+ if (aa_xattrs_match(bprm, component, state) <
+ 0) {
+ error = -EACCES;
+ info = "required xattrs not present";
+ perms.allow &= ~MAY_EXEC;
+ aa_put_label(new);
+ new = NULL;
+ goto audit;
+ }
+ }
}
} else if (COMPLAIN_MODE(profile)) {
/* no exec permission - learning mode */
@@ -592,22 +718,6 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
if (!new)
goto audit;
- /* Policy has specified a domain transitions. if no_new_privs and
- * confined and not transitioning to the current domain fail.
- *
- * NOTE: Domain transitions from unconfined and to stritly stacked
- * subsets are allowed even when no_new_privs is set because this
- * aways results in a further reduction of permissions.
- */
- if ((bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) &&
- !profile_unconfined(profile) &&
- !aa_label_is_subset(new, &profile->label)) {
- error = -EPERM;
- info = "no new privs";
- nonewprivs = true;
- perms.allow &= ~MAY_EXEC;
- goto audit;
- }
if (!(perms.xindex & AA_X_UNSAFE)) {
if (DEBUG_ON) {
@@ -684,21 +794,6 @@ static int profile_onexec(struct aa_profile *profile, struct aa_label *onexec,
perms.allow &= ~AA_MAY_ONEXEC;
goto audit;
}
- /* Policy has specified a domain transitions. if no_new_privs and
- * confined and not transitioning to the current domain fail.
- *
- * NOTE: Domain transitions from unconfined and to stritly stacked
- * subsets are allowed even when no_new_privs is set because this
- * aways results in a further reduction of permissions.
- */
- if ((bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) &&
- !profile_unconfined(profile) &&
- !aa_label_is_subset(onexec, &profile->label)) {
- error = -EPERM;
- info = "no new privs";
- perms.allow &= ~AA_MAY_ONEXEC;
- goto audit;
- }
if (!(perms.xindex & AA_X_UNSAFE)) {
if (DEBUG_ON) {
@@ -794,10 +889,22 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
if (bprm->called_set_creds)
return 0;
- ctx = cred_ctx(bprm->cred);
+ ctx = task_ctx(current);
+ AA_BUG(!cred_label(bprm->cred));
AA_BUG(!ctx);
- label = aa_get_newest_label(ctx->label);
+ label = aa_get_newest_label(cred_label(bprm->cred));
+
+ /*
+ * Detect no new privs being set, and store the label it
+ * occurred under. Ideally this would happen when nnp
+ * is set but there isn't a good way to do that yet.
+ *
+ * Testing for unconfined must be done before the subset test
+ */
+ if ((bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) && !unconfined(label) &&
+ !ctx->nnp)
+ ctx->nnp = aa_get_label(label);
/* buffer freed below, name is pointer into buffer */
get_buffers(buffer);
@@ -819,7 +926,20 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
goto done;
}
- /* TODO: Add ns level no_new_privs subset test */
+ /* Policy has specified a domain transition. If no_new_privs and
+ * confined, ensure the transition is to confinement that is a subset
+ * of the confinement in effect when the task entered no new privs.
+ *
+ * NOTE: Domain transitions from unconfined and to stacked
+ * subsets are allowed even when no_new_privs is set because this
+ * always results in a further reduction of permissions.
+ */
+ if ((bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) &&
+ !unconfined(label) && !aa_label_is_subset(new, ctx->nnp)) {
+ error = -EPERM;
+ info = "no new privs";
+ goto audit;
+ }
if (bprm->unsafe & LSM_UNSAFE_SHARE) {
/* FIXME: currently don't mediate shared state */
@@ -853,14 +973,11 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
}
bprm->per_clear |= PER_CLEAR_ON_SETID;
}
- aa_put_label(ctx->label);
- /* transfer reference, released when ctx is freed */
- ctx->label = new;
+ aa_put_label(cred_label(bprm->cred));
+ /* transfer reference, released when cred is freed */
+ cred_label(bprm->cred) = new;
done:
- /* clear out temporary/transitional state from the context */
- aa_clear_task_ctx_trans(ctx);
-
aa_put_label(label);
put_buffers(buffer);
@@ -1049,30 +1166,28 @@ build:
int aa_change_hat(const char *hats[], int count, u64 token, int flags)
{
const struct cred *cred;
- struct aa_task_ctx *ctx;
+ struct aa_task_ctx *ctx = task_ctx(current);
struct aa_label *label, *previous, *new = NULL, *target = NULL;
struct aa_profile *profile;
struct aa_perms perms = {};
const char *info = NULL;
int error = 0;
- /*
- * Fail explicitly requested domain transitions if no_new_privs.
- * There is no exception for unconfined as change_hat is not
- * available.
- */
- if (task_no_new_privs(current)) {
- /* not an apparmor denial per se, so don't log it */
- AA_DEBUG("no_new_privs - change_hat denied");
- return -EPERM;
- }
-
/* released below */
cred = get_current_cred();
- ctx = cred_ctx(cred);
label = aa_get_newest_cred_label(cred);
previous = aa_get_newest_label(ctx->previous);
+ /*
+ * Detect no new privs being set, and store the label it
+ * occurred under. Ideally this would happen when nnp
+ * is set but there isn't a good way to do that yet.
+ *
+ * Testing for unconfined must be done before the subset test
+ */
+ if (task_no_new_privs(current) && !unconfined(label) && !ctx->nnp)
+ ctx->nnp = aa_get_label(label);
+
if (unconfined(label)) {
info = "unconfined can not change_hat";
error = -EPERM;
@@ -1093,6 +1208,18 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
if (error)
goto fail;
+ /*
+ * no new privs prevents domain transitions that would
+ * reduce restrictions.
+ */
+ if (task_no_new_privs(current) && !unconfined(label) &&
+ !aa_label_is_subset(new, ctx->nnp)) {
+ /* not an apparmor denial per se, so don't log it */
+ AA_DEBUG("no_new_privs - change_hat denied");
+ error = -EPERM;
+ goto out;
+ }
+
if (flags & AA_CHANGE_TEST)
goto out;
@@ -1102,6 +1229,18 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
/* kill task in case of brute force attacks */
goto kill;
} else if (previous && !(flags & AA_CHANGE_TEST)) {
+ /*
+ * no new privs prevents domain transitions that would
+ * reduce restrictions.
+ */
+ if (task_no_new_privs(current) && !unconfined(label) &&
+ !aa_label_is_subset(previous, ctx->nnp)) {
+ /* not an apparmor denial per se, so don't log it */
+ AA_DEBUG("no_new_privs - change_hat denied");
+ error = -EPERM;
+ goto out;
+ }
+
/* Return to saved label. Kill task if restore fails
* to avoid brute force attacks
*/
@@ -1144,21 +1283,6 @@ static int change_profile_perms_wrapper(const char *op, const char *name,
const char *info = NULL;
int error = 0;
- /*
- * Fail explicitly requested domain transitions when no_new_privs
- * and not unconfined OR the transition results in a stack on
- * the current label.
- * Stacking domain transitions and transitions from unconfined are
- * allowed even when no_new_privs is set because this aways results
- * in a reduction of permissions.
- */
- if (task_no_new_privs(current) && !stack &&
- !profile_unconfined(profile) &&
- !aa_label_is_subset(target, &profile->label)) {
- info = "no new privs";
- error = -EPERM;
- }
-
if (!error)
error = change_profile_perms(profile, target, stack, request,
profile->file.start, perms);
@@ -1192,10 +1316,23 @@ int aa_change_profile(const char *fqname, int flags)
const char *info = NULL;
const char *auditname = fqname; /* retain leading & if stack */
bool stack = flags & AA_CHANGE_STACK;
+ struct aa_task_ctx *ctx = task_ctx(current);
int error = 0;
char *op;
u32 request;
+ label = aa_get_current_label();
+
+ /*
+ * Detect no new privs being set, and store the label it
+ * occurred under. Ideally this would happen when nnp
+ * is set but there isn't a good way to do that yet.
+ *
+ * Testing for unconfined must be done before the subset test
+ */
+ if (task_no_new_privs(current) && !unconfined(label) && !ctx->nnp)
+ ctx->nnp = aa_get_label(label);
+
if (!fqname || !*fqname) {
AA_DEBUG("no profile name");
return -EINVAL;
@@ -1283,14 +1420,28 @@ check:
if (flags & AA_CHANGE_TEST)
goto out;
+ /* stacking is always a subset, so only check the nonstack case */
+ if (!stack) {
+ new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
+ aa_get_label(target),
+ aa_get_label(&profile->label));
+ /*
+ * no new privs prevents domain transitions that would
+ * reduce restrictions.
+ */
+ if (task_no_new_privs(current) && !unconfined(label) &&
+ !aa_label_is_subset(new, ctx->nnp)) {
+ /* not an apparmor denial per se, so don't log it */
+ AA_DEBUG("no_new_privs - change_profile denied");
+ error = -EPERM;
+ goto out;
+ }
+ }
+
if (!(flags & AA_CHANGE_ONEXEC)) {
/* only transition profiles in the current ns */
if (stack)
new = aa_label_merge(label, target, GFP_KERNEL);
- else
- new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
- aa_get_label(target),
- aa_get_label(&profile->label));
if (IS_ERR_OR_NULL(new)) {
info = "failed to build target label";
error = PTR_ERR(new);
@@ -1299,9 +1450,15 @@ check:
goto audit;
}
error = aa_replace_current_label(new);
- } else
+ } else {
+ if (new) {
+ aa_put_label(new);
+ new = NULL;
+ }
+
/* full transition will be built in exec path */
error = aa_set_current_onexec(target, stack);
+ }
audit:
error = fn_for_each_in_ns(label, profile,
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index e79bf44396a3..224b2fef93ca 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -18,9 +18,10 @@
#include "include/apparmor.h"
#include "include/audit.h"
-#include "include/context.h"
+#include "include/cred.h"
#include "include/file.h"
#include "include/match.h"
+#include "include/net.h"
#include "include/path.h"
#include "include/policy.h"
#include "include/label.h"
@@ -560,6 +561,32 @@ static int __file_path_perm(const char *op, struct aa_label *label,
return error;
}
+static int __file_sock_perm(const char *op, struct aa_label *label,
+ struct aa_label *flabel, struct file *file,
+ u32 request, u32 denied)
+{
+ struct socket *sock = (struct socket *) file->private_data;
+ int error;
+
+ AA_BUG(!sock);
+
+ /* revalidation due to label out of date. No revocation at this time */
+ if (!denied && aa_label_is_subset(flabel, label))
+ return 0;
+
+ /* TODO: improve to skip profiles cached in flabel */
+ error = aa_sock_file_perm(label, op, request, sock);
+ if (denied) {
+ /* TODO: improve to skip profiles checked above */
+ /* check that every profile in the file label is cached */
+ last_error(error, aa_sock_file_perm(flabel, op, request, sock));
+ }
+ if (!error)
+ update_file_ctx(file_ctx(file), label, request);
+
+ return error;
+}
+
/**
* aa_file_perm - do permission revalidation check & audit for @file
* @op: operation being checked
@@ -604,6 +631,9 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
error = __file_path_perm(op, label, flabel, file, request,
denied);
+ else if (S_ISSOCK(file_inode(file)->i_mode))
+ error = __file_sock_perm(op, label, flabel, file, request,
+ denied);
done:
rcu_read_unlock();
diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h
index 829082c35faa..73d63b58d875 100644
--- a/security/apparmor/include/apparmor.h
+++ b/security/apparmor/include/apparmor.h
@@ -24,12 +24,13 @@
#define AA_CLASS_UNKNOWN 1
#define AA_CLASS_FILE 2
#define AA_CLASS_CAP 3
-#define AA_CLASS_NET 4
+#define AA_CLASS_DEPRECATED 4
#define AA_CLASS_RLIMITS 5
#define AA_CLASS_DOMAIN 6
#define AA_CLASS_MOUNT 7
#define AA_CLASS_PTRACE 9
#define AA_CLASS_SIGNAL 10
+#define AA_CLASS_NET 14
#define AA_CLASS_LABEL 16
#define AA_CLASS_LAST AA_CLASS_LABEL
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
index 4ac095118717..9c9be9c98c15 100644
--- a/security/apparmor/include/audit.h
+++ b/security/apparmor/include/audit.h
@@ -126,7 +126,20 @@ struct apparmor_audit_data {
const char *target;
kuid_t ouid;
} fs;
- int signal;
+ struct {
+ int rlim;
+ unsigned long max;
+ } rlim;
+ struct {
+ int signal;
+ int unmappedsig;
+ };
+ struct {
+ int type, protocol;
+ struct sock *peer_sk;
+ void *addr;
+ int addrlen;
+ } net;
};
};
struct {
@@ -135,10 +148,6 @@ struct apparmor_audit_data {
long pos;
} iface;
struct {
- int rlim;
- unsigned long max;
- } rlim;
- struct {
const char *src_name;
const char *type;
const char *trans;
diff --git a/security/apparmor/include/context.h b/security/apparmor/include/cred.h
index 6ae07e9aaa17..e287b7d0d4be 100644
--- a/security/apparmor/include/context.h
+++ b/security/apparmor/include/cred.h
@@ -21,38 +21,9 @@
#include "label.h"
#include "policy_ns.h"
+#include "task.h"
-#define cred_ctx(X) ((X)->security)
-#define current_ctx() cred_ctx(current_cred())
-
-/**
- * struct aa_task_ctx - primary label for confined tasks
- * @label: the current label (NOT NULL)
- * @exec: label to transition to on next exec (MAYBE NULL)
- * @previous: label the task may return to (MAYBE NULL)
- * @token: magic value the task must know for returning to @previous
- *
- * Contains the task's current label (which could change due to
- * change_hat). Plus the hat_magic needed during change_hat.
- *
- * TODO: make so a task can be confined by a stack of contexts
- */
-struct aa_task_ctx {
- struct aa_label *label;
- struct aa_label *onexec;
- struct aa_label *previous;
- u64 token;
-};
-
-struct aa_task_ctx *aa_alloc_task_context(gfp_t flags);
-void aa_free_task_context(struct aa_task_ctx *ctx);
-void aa_dup_task_context(struct aa_task_ctx *new,
- const struct aa_task_ctx *old);
-int aa_replace_current_label(struct aa_label *label);
-int aa_set_current_onexec(struct aa_label *label, bool stack);
-int aa_set_current_hat(struct aa_label *label, u64 token);
-int aa_restore_previous_label(u64 cookie);
-struct aa_label *aa_get_task_label(struct task_struct *task);
+#define cred_label(X) ((X)->security)
/**
@@ -65,10 +36,10 @@ struct aa_label *aa_get_task_label(struct task_struct *task);
*/
static inline struct aa_label *aa_cred_raw_label(const struct cred *cred)
{
- struct aa_task_ctx *ctx = cred_ctx(cred);
+ struct aa_label *label = cred_label(cred);
- AA_BUG(!ctx || !ctx->label);
- return ctx->label;
+ AA_BUG(!label);
+ return label;
}
/**
@@ -96,17 +67,6 @@ static inline struct aa_label *__aa_task_raw_label(struct task_struct *task)
}
/**
- * __aa_task_is_confined - determine if @task has any confinement
- * @task: task to check confinement of (NOT NULL)
- *
- * If @task != current needs to be called in RCU safe critical section
- */
-static inline bool __aa_task_is_confined(struct task_struct *task)
-{
- return !unconfined(__aa_task_raw_label(task));
-}
-
-/**
* aa_current_raw_label - find the current tasks confining label
*
* Returns: up to date confining label or the ns unconfined label (NOT NULL)
@@ -213,17 +173,4 @@ static inline struct aa_ns *aa_get_current_ns(void)
return ns;
}
-/**
- * aa_clear_task_ctx_trans - clear transition tracking info from the ctx
- * @ctx: task context to clear (NOT NULL)
- */
-static inline void aa_clear_task_ctx_trans(struct aa_task_ctx *ctx)
-{
- aa_put_label(ctx->previous);
- aa_put_label(ctx->onexec);
- ctx->previous = NULL;
- ctx->onexec = NULL;
- ctx->token = 0;
-}
-
#endif /* __AA_CONTEXT_H */
diff --git a/security/apparmor/include/label.h b/security/apparmor/include/label.h
index af22dcbbcb8a..d871e7ff0952 100644
--- a/security/apparmor/include/label.h
+++ b/security/apparmor/include/label.h
@@ -327,9 +327,37 @@ void aa_label_audit(struct audit_buffer *ab, struct aa_label *label, gfp_t gfp);
void aa_label_seq_print(struct seq_file *f, struct aa_label *label, gfp_t gfp);
void aa_label_printk(struct aa_label *label, gfp_t gfp);
+struct aa_label *aa_label_strn_parse(struct aa_label *base, const char *str,
+ size_t n, gfp_t gfp, bool create,
+ bool force_stack);
struct aa_label *aa_label_parse(struct aa_label *base, const char *str,
gfp_t gfp, bool create, bool force_stack);
+static inline const char *aa_label_strn_split(const char *str, int n)
+{
+ const char *pos;
+ unsigned int state;
+
+ state = aa_dfa_matchn_until(stacksplitdfa, DFA_START, str, n, &pos);
+ if (!ACCEPT_TABLE(stacksplitdfa)[state])
+ return NULL;
+
+ return pos - 3;
+}
+
+static inline const char *aa_label_str_split(const char *str)
+{
+ const char *pos;
+ unsigned int state;
+
+ state = aa_dfa_match_until(stacksplitdfa, DFA_START, str, &pos);
+ if (!ACCEPT_TABLE(stacksplitdfa)[state])
+ return NULL;
+
+ return pos - 3;
+}
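As a hedged illustration of what these helpers return: stacked labels use "//&" as the separator, and the returned position points at that separator (hence the pos - 3 above). A standalone userspace sketch using strstr(), the approach the DFA-based helpers replace elsewhere in this patch:

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* example stacked label string; profile names are made up */
        const char *str = "profile_a//&profile_b//&profile_c";
        const char *split;

        while ((split = strstr(str, "//&"))) {
                printf("%.*s\n", (int)(split - str), str);
                str = split + 3;        /* skip the "//&" separator */
        }
        printf("%s\n", str);
        return 0;
}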
+
+
struct aa_perms;
int aa_label_match(struct aa_profile *profile, struct aa_label *label,
diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h
index add4c6726558..958d2b52a7b7 100644
--- a/security/apparmor/include/match.h
+++ b/security/apparmor/include/match.h
@@ -40,6 +40,7 @@
*/
#define YYTH_MAGIC 0x1B5E783D
+#define YYTH_FLAG_DIFF_ENCODE 1
struct table_set_header {
u32 th_magic; /* YYTH_MAGIC */
@@ -101,6 +102,7 @@ struct aa_dfa {
};
extern struct aa_dfa *nulldfa;
+extern struct aa_dfa *stacksplitdfa;
#define byte_to_byte(X) (X)
@@ -129,9 +131,32 @@ unsigned int aa_dfa_match(struct aa_dfa *dfa, unsigned int start,
const char *str);
unsigned int aa_dfa_next(struct aa_dfa *dfa, unsigned int state,
const char c);
+unsigned int aa_dfa_match_until(struct aa_dfa *dfa, unsigned int start,
+ const char *str, const char **retpos);
+unsigned int aa_dfa_matchn_until(struct aa_dfa *dfa, unsigned int start,
+ const char *str, int n, const char **retpos);
void aa_dfa_free_kref(struct kref *kref);
+#define WB_HISTORY_SIZE 8
+struct match_workbuf {
+ unsigned int count;
+ unsigned int pos;
+ unsigned int len;
+ unsigned int size; /* power of 2, same as history size */
+ unsigned int history[WB_HISTORY_SIZE];
+};
+#define DEFINE_MATCH_WB(N) \
+struct match_workbuf N = { \
+ .count = 0, \
+ .pos = 0, \
+ .len = 0, \
+ .size = WB_HISTORY_SIZE, \
+}
+
+unsigned int aa_dfa_leftmatch(struct aa_dfa *dfa, unsigned int start,
+ const char *str, unsigned int *count);
+
/**
* aa_get_dfa - increment refcount on dfa @p
* @dfa: dfa (MAYBE NULL)
@@ -159,4 +184,7 @@ static inline void aa_put_dfa(struct aa_dfa *dfa)
kref_put(&dfa->count, aa_dfa_free_kref);
}
+#define MATCH_FLAG_DIFF_ENCODE 0x80000000
+#define MARK_DIFF_ENCODE 0x40000000
+
#endif /* __AA_MATCH_H */
diff --git a/security/apparmor/include/net.h b/security/apparmor/include/net.h
new file mode 100644
index 000000000000..ec7228e857a9
--- /dev/null
+++ b/security/apparmor/include/net.h
@@ -0,0 +1,106 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor network mediation definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2017 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_NET_H
+#define __AA_NET_H
+
+#include <net/sock.h>
+#include <linux/path.h>
+
+#include "apparmorfs.h"
+#include "label.h"
+#include "perms.h"
+#include "policy.h"
+
+#define AA_MAY_SEND AA_MAY_WRITE
+#define AA_MAY_RECEIVE AA_MAY_READ
+
+#define AA_MAY_SHUTDOWN AA_MAY_DELETE
+
+#define AA_MAY_CONNECT AA_MAY_OPEN
+#define AA_MAY_ACCEPT 0x00100000
+
+#define AA_MAY_BIND 0x00200000
+#define AA_MAY_LISTEN 0x00400000
+
+#define AA_MAY_SETOPT 0x01000000
+#define AA_MAY_GETOPT 0x02000000
+
+#define NET_PERMS_MASK (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CREATE | \
+ AA_MAY_SHUTDOWN | AA_MAY_BIND | AA_MAY_LISTEN | \
+ AA_MAY_CONNECT | AA_MAY_ACCEPT | AA_MAY_SETATTR | \
+ AA_MAY_GETATTR | AA_MAY_SETOPT | AA_MAY_GETOPT)
+
+#define NET_FS_PERMS (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CREATE | \
+ AA_MAY_SHUTDOWN | AA_MAY_CONNECT | AA_MAY_RENAME |\
+ AA_MAY_SETATTR | AA_MAY_GETATTR | AA_MAY_CHMOD | \
+ AA_MAY_CHOWN | AA_MAY_CHGRP | AA_MAY_LOCK | \
+ AA_MAY_MPROT)
+
+#define NET_PEER_MASK (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CONNECT | \
+ AA_MAY_ACCEPT)
+struct aa_sk_ctx {
+ struct aa_label *label;
+ struct aa_label *peer;
+};
+
+#define SK_CTX(X) ((X)->sk_security)
+#define SOCK_ctx(X) SOCK_INODE(X)->i_security
+#define DEFINE_AUDIT_NET(NAME, OP, SK, F, T, P) \
+ struct lsm_network_audit NAME ## _net = { .sk = (SK), \
+ .family = (F)}; \
+ DEFINE_AUDIT_DATA(NAME, \
+ ((SK) && (F) != AF_UNIX) ? LSM_AUDIT_DATA_NET : \
+ LSM_AUDIT_DATA_NONE, \
+ OP); \
+ NAME.u.net = &(NAME ## _net); \
+ aad(&NAME)->net.type = (T); \
+ aad(&NAME)->net.protocol = (P)
+
+#define DEFINE_AUDIT_SK(NAME, OP, SK) \
+ DEFINE_AUDIT_NET(NAME, OP, SK, (SK)->sk_family, (SK)->sk_type, \
+ (SK)->sk_protocol)
+
+
+#define af_select(FAMILY, FN, DEF_FN) \
+({ \
+ int __e; \
+ switch ((FAMILY)) { \
+ default: \
+ __e = DEF_FN; \
+ } \
+ __e; \
+})
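A hedged, standalone illustration of the statement-expression pattern af_select() relies on (a GCC/Clang extension): the switch selects a per-family handler and the default arm supplies the fallback. The demo macro and its arguments below are invented for the example.

#include <stdio.h>
#include <sys/socket.h>

#define af_select_demo(FAMILY, DEF_FN)  \
({                                      \
        int __e;                        \
        switch ((FAMILY)) {             \
        default:                        \
                __e = (DEF_FN);         \
        }                               \
        __e;                            \
})

int main(void)
{
        /* only a default handler exists, as in the patch's af_select() */
        int error = af_select_demo(AF_UNIX, 0 /* hypothetical default result */);

        printf("error = %d\n", error);
        return 0;
}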
+
+extern struct aa_sfs_entry aa_sfs_entry_network[];
+
+void audit_net_cb(struct audit_buffer *ab, void *va);
+int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
+ u32 request, u16 family, int type);
+int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
+ int type, int protocol);
+static inline int aa_profile_af_sk_perm(struct aa_profile *profile,
+ struct common_audit_data *sa,
+ u32 request,
+ struct sock *sk)
+{
+ return aa_profile_af_perm(profile, sa, request, sk->sk_family,
+ sk->sk_type);
+}
+int aa_sk_perm(const char *op, u32 request, struct sock *sk);
+
+int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
+ struct socket *sock);
+
+#endif /* __AA_NET_H */
diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h
index d7b7e7115160..38aa6247d00f 100644
--- a/security/apparmor/include/perms.h
+++ b/security/apparmor/include/perms.h
@@ -138,9 +138,10 @@ extern struct aa_perms allperms;
void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask);
-void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask);
+void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names,
+ u32 mask);
void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs,
- u32 chrsmask, const char **names, u32 namesmask);
+ u32 chrsmask, const char * const *names, u32 namesmask);
void aa_apply_modes_to_perms(struct aa_profile *profile,
struct aa_perms *perms);
void aa_compute_perms(struct aa_dfa *dfa, unsigned int state,
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
index 17fe41a9cac3..ab64c6b5db5a 100644
--- a/security/apparmor/include/policy.h
+++ b/security/apparmor/include/policy.h
@@ -30,6 +30,7 @@
#include "file.h"
#include "lib.h"
#include "label.h"
+#include "net.h"
#include "perms.h"
#include "resource.h"
@@ -148,6 +149,10 @@ struct aa_profile {
struct aa_policydb policy;
struct aa_file_rules file;
struct aa_caps caps;
+
+ int xattr_count;
+ char **xattrs;
+
struct aa_rlimit rlimits;
struct aa_loaddata *rawdata;
@@ -209,15 +214,15 @@ static inline struct aa_profile *aa_get_newest_profile(struct aa_profile *p)
return labels_profile(aa_get_newest_label(&p->label));
}
-#define PROFILE_MEDIATES(P, T) ((P)->policy.start[(T)])
-/* safe version of POLICY_MEDIATES for full range input */
-static inline unsigned int PROFILE_MEDIATES_SAFE(struct aa_profile *profile,
- unsigned char class)
-{
- if (profile->policy.dfa)
- return aa_dfa_match_len(profile->policy.dfa,
- profile->policy.start[0], &class, 1);
- return 0;
+#define PROFILE_MEDIATES(P, T) ((P)->policy.start[(unsigned char) (T)])
+static inline unsigned int PROFILE_MEDIATES_AF(struct aa_profile *profile,
+ u16 AF) {
+ unsigned int state = PROFILE_MEDIATES(profile, AA_CLASS_NET);
+ __be16 be_af = cpu_to_be16(AF);
+
+ if (!state)
+ return 0;
+ return aa_dfa_match_len(profile->policy.dfa, state, (char *) &be_af, 2);
}
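Illustrative only: PROFILE_MEDIATES_AF() feeds the address family to the policy DFA as two big-endian bytes (the cpu_to_be16() above); a userspace sketch of the same encoding using htons():

#include <arpa/inet.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
        unsigned short be_af = htons(AF_INET);
        unsigned char *b = (unsigned char *)&be_af;

        /* AF_INET is 2, so the DFA would see the byte sequence 00 02 */
        printf("AF_INET encodes as %02x %02x\n", b[0], b[1]);
        return 0;
}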
/**
diff --git a/security/apparmor/include/policy_unpack.h b/security/apparmor/include/policy_unpack.h
index be6cd69ac319..8db4ab759e80 100644
--- a/security/apparmor/include/policy_unpack.h
+++ b/security/apparmor/include/policy_unpack.h
@@ -70,7 +70,7 @@ struct aa_loaddata {
int abi;
unsigned char *hash;
- char data[];
+ char *data;
};
int aa_unpack(struct aa_loaddata *udata, struct list_head *lh, const char **ns);
diff --git a/security/apparmor/include/sig_names.h b/security/apparmor/include/sig_names.h
index 92e62fe95292..cbf7a997ed84 100644
--- a/security/apparmor/include/sig_names.h
+++ b/security/apparmor/include/sig_names.h
@@ -2,6 +2,9 @@
#define SIGUNKNOWN 0
#define MAXMAPPED_SIG 35
+#define MAXMAPPED_SIGNAME (MAXMAPPED_SIG + 1)
+#define SIGRT_BASE 128
+
/* provide a mapping of arch signal to internal signal # for mediation
* those that are always an alias SIGCLD for SIGCHLD and SIGPOLL for SIGIO
* map to the same entry those that may/or may not get a separate entry
@@ -56,7 +59,7 @@ static const int sig_map[MAXMAPPED_SIG] = {
};
/* this table is ordered post sig_map[sig] mapping */
-static const char *const sig_names[MAXMAPPED_SIG + 1] = {
+static const char *const sig_names[MAXMAPPED_SIGNAME] = {
"unknown",
"hup",
"int",
diff --git a/security/apparmor/include/task.h b/security/apparmor/include/task.h
new file mode 100644
index 000000000000..55edaa1d83f8
--- /dev/null
+++ b/security/apparmor/include/task.h
@@ -0,0 +1,94 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor task related definitions and mediation
+ *
+ * Copyright 2017 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_TASK_H
+#define __AA_TASK_H
+
+#define task_ctx(X) ((X)->security)
+
+/*
+ * struct aa_task_ctx - information for current task label change
+ * @nnp: snapshot of label at time of no_new_privs
+ * @onexec: profile to transition to on next exec (MAY BE NULL)
+ * @previous: profile the task may return to (MAY BE NULL)
+ * @token: magic value the task must know for returning to @previous
+ */
+struct aa_task_ctx {
+ struct aa_label *nnp;
+ struct aa_label *onexec;
+ struct aa_label *previous;
+ u64 token;
+};
+
+int aa_replace_current_label(struct aa_label *label);
+int aa_set_current_onexec(struct aa_label *label, bool stack);
+int aa_set_current_hat(struct aa_label *label, u64 token);
+int aa_restore_previous_label(u64 cookie);
+struct aa_label *aa_get_task_label(struct task_struct *task);
+
+/**
+ * aa_alloc_task_ctx - allocate a new task_ctx
+ * @flags: gfp flags for allocation
+ *
+ * Returns: allocated buffer or NULL on failure
+ */
+static inline struct aa_task_ctx *aa_alloc_task_ctx(gfp_t flags)
+{
+ return kzalloc(sizeof(struct aa_task_ctx), flags);
+}
+
+/**
+ * aa_free_task_ctx - free a task_ctx
+ * @ctx: task_ctx to free (MAYBE NULL)
+ */
+static inline void aa_free_task_ctx(struct aa_task_ctx *ctx)
+{
+ if (ctx) {
+ aa_put_label(ctx->nnp);
+ aa_put_label(ctx->previous);
+ aa_put_label(ctx->onexec);
+
+ kzfree(ctx);
+ }
+}
+
+/**
+ * aa_dup_task_ctx - duplicate a task context, incrementing reference counts
+ * @new: a blank task context (NOT NULL)
+ * @old: the task context to copy (NOT NULL)
+ */
+static inline void aa_dup_task_ctx(struct aa_task_ctx *new,
+ const struct aa_task_ctx *old)
+{
+ *new = *old;
+ aa_get_label(new->nnp);
+ aa_get_label(new->previous);
+ aa_get_label(new->onexec);
+}
+
+/**
+ * aa_clear_task_ctx_trans - clear transition tracking info from the ctx
+ * @ctx: task context to clear (NOT NULL)
+ */
+static inline void aa_clear_task_ctx_trans(struct aa_task_ctx *ctx)
+{
+ AA_BUG(!ctx);
+
+ aa_put_label(ctx->previous);
+ aa_put_label(ctx->onexec);
+ ctx->previous = NULL;
+ ctx->onexec = NULL;
+ ctx->token = 0;
+}
+
+#endif /* __AA_TASK_H */
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index b40678f3c1d5..527ea1557120 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -17,7 +17,7 @@
#include "include/audit.h"
#include "include/capability.h"
-#include "include/context.h"
+#include "include/cred.h"
#include "include/policy.h"
#include "include/ipc.h"
#include "include/sig_names.h"
@@ -138,7 +138,7 @@ static inline int map_signal_num(int sig)
if (sig > SIGRTMAX)
return SIGUNKNOWN;
else if (sig >= SIGRTMIN)
- return sig - SIGRTMIN + 128; /* rt sigs mapped to 128 */
+ return sig - SIGRTMIN + SIGRT_BASE;
else if (sig < MAXMAPPED_SIG)
return sig_map[sig];
return SIGUNKNOWN;
@@ -174,60 +174,48 @@ static void audit_signal_cb(struct audit_buffer *ab, void *va)
audit_signal_mask(ab, aad(sa)->denied);
}
}
- if (aad(sa)->signal < MAXMAPPED_SIG)
+ if (aad(sa)->signal == SIGUNKNOWN)
+ audit_log_format(ab, "signal=unknown(%d)",
+ aad(sa)->unmappedsig);
+ else if (aad(sa)->signal < MAXMAPPED_SIGNAME)
audit_log_format(ab, " signal=%s", sig_names[aad(sa)->signal]);
else
audit_log_format(ab, " signal=rtmin+%d",
- aad(sa)->signal - 128);
+ aad(sa)->signal - SIGRT_BASE);
audit_log_format(ab, " peer=");
aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
FLAGS_NONE, GFP_ATOMIC);
}
-/* TODO: update to handle compound name&name2, conditionals */
-static void profile_match_signal(struct aa_profile *profile, const char *label,
- int signal, struct aa_perms *perms)
-{
- unsigned int state;
-
- /* TODO: secondary cache check <profile, profile, perm> */
- state = aa_dfa_next(profile->policy.dfa,
- profile->policy.start[AA_CLASS_SIGNAL],
- signal);
- state = aa_dfa_match(profile->policy.dfa, state, label);
- aa_compute_perms(profile->policy.dfa, state, perms);
-}
-
static int profile_signal_perm(struct aa_profile *profile,
- struct aa_profile *peer, u32 request,
+ struct aa_label *peer, u32 request,
struct common_audit_data *sa)
{
struct aa_perms perms;
+ unsigned int state;
if (profile_unconfined(profile) ||
!PROFILE_MEDIATES(profile, AA_CLASS_SIGNAL))
return 0;
- aad(sa)->peer = &peer->label;
- profile_match_signal(profile, peer->base.hname, aad(sa)->signal,
- &perms);
+ aad(sa)->peer = peer;
+ /* TODO: secondary cache check <profile, profile, perm> */
+ state = aa_dfa_next(profile->policy.dfa,
+ profile->policy.start[AA_CLASS_SIGNAL],
+ aad(sa)->signal);
+ aa_label_match(profile, peer, state, false, request, &perms);
aa_apply_modes_to_perms(profile, &perms);
return aa_check_perms(profile, &perms, request, sa, audit_signal_cb);
}
-static int aa_signal_cross_perm(struct aa_profile *sender,
- struct aa_profile *target,
- struct common_audit_data *sa)
-{
- return xcheck(profile_signal_perm(sender, target, MAY_WRITE, sa),
- profile_signal_perm(target, sender, MAY_READ, sa));
-}
-
int aa_may_signal(struct aa_label *sender, struct aa_label *target, int sig)
{
+ struct aa_profile *profile;
DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_SIGNAL);
aad(&sa)->signal = map_signal_num(sig);
- return xcheck_labels_profiles(sender, target, aa_signal_cross_perm,
- &sa);
+ aad(&sa)->unmappedsig = sig;
+ return xcheck_labels(sender, target, profile,
+ profile_signal_perm(profile, target, MAY_WRITE, &sa),
+ profile_signal_perm(profile, sender, MAY_READ, &sa));
}
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
index 324fe5c60f87..523250e34837 100644
--- a/security/apparmor/label.c
+++ b/security/apparmor/label.c
@@ -16,7 +16,7 @@
#include <linux/sort.h>
#include "include/apparmor.h"
-#include "include/context.h"
+#include "include/cred.h"
#include "include/label.h"
#include "include/policy.h"
#include "include/secid.h"
@@ -1808,14 +1808,17 @@ void aa_label_printk(struct aa_label *label, gfp_t gfp)
aa_put_ns(ns);
}
-static int label_count_str_entries(const char *str)
+static int label_count_strn_entries(const char *str, size_t n)
{
+ const char *end = str + n;
const char *split;
int count = 1;
AA_BUG(!str);
- for (split = strstr(str, "//&"); split; split = strstr(str, "//&")) {
+ for (split = aa_label_strn_split(str, end - str);
+ split;
+ split = aa_label_strn_split(str, end - str)) {
count++;
str = split + 3;
}
@@ -1843,9 +1846,10 @@ static struct aa_profile *fqlookupn_profile(struct aa_label *base,
}
/**
- * aa_label_parse - parse, validate and convert a text string to a label
+ * aa_label_strn_parse - parse, validate and convert a text string to a label
* @base: base label to use for lookups (NOT NULL)
* @str: null terminated text string (NOT NULL)
+ * @n: length of str to parse, will stop at \0 if encountered before n
* @gfp: allocation type
* @create: true if should create compound labels if they don't exist
* @force_stack: true if should stack even if no leading &
@@ -1853,19 +1857,24 @@ static struct aa_profile *fqlookupn_profile(struct aa_label *base,
* Returns: the matching refcounted label if present
* else ERRPTR
*/
-struct aa_label *aa_label_parse(struct aa_label *base, const char *str,
- gfp_t gfp, bool create, bool force_stack)
+struct aa_label *aa_label_strn_parse(struct aa_label *base, const char *str,
+ size_t n, gfp_t gfp, bool create,
+ bool force_stack)
{
DEFINE_VEC(profile, vec);
struct aa_label *label, *currbase = base;
int i, len, stack = 0, error;
- char *split;
+ const char *end = str + n;
+ const char *split;
AA_BUG(!base);
AA_BUG(!str);
- str = skip_spaces(str);
- len = label_count_str_entries(str);
+ str = skipn_spaces(str, n);
+ if (str == NULL || (*str == '=' && base != &root_ns->unconfined->label))
+ return ERR_PTR(-EINVAL);
+
+ len = label_count_strn_entries(str, end - str);
if (*str == '&' || force_stack) {
/* stack on top of base */
stack = base->size;
@@ -1873,8 +1882,6 @@ struct aa_label *aa_label_parse(struct aa_label *base, const char *str,
if (*str == '&')
str++;
}
- if (*str == '=')
- base = &root_ns->unconfined->label;
error = vec_setup(profile, vec, len, gfp);
if (error)
@@ -1883,7 +1890,8 @@ struct aa_label *aa_label_parse(struct aa_label *base, const char *str,
for (i = 0; i < stack; i++)
vec[i] = aa_get_profile(base->vec[i]);
- for (split = strstr(str, "//&"), i = stack; split && i < len; i++) {
+ for (split = aa_label_strn_split(str, end - str), i = stack;
+ split && i < len; i++) {
vec[i] = fqlookupn_profile(base, currbase, str, split - str);
if (!vec[i])
goto fail;
@@ -1894,11 +1902,11 @@ struct aa_label *aa_label_parse(struct aa_label *base, const char *str,
if (vec[i]->ns != labels_ns(currbase))
currbase = &vec[i]->label;
str = split + 3;
- split = strstr(str, "//&");
+ split = aa_label_strn_split(str, end - str);
}
/* last element doesn't have a split */
if (i < len) {
- vec[i] = fqlookupn_profile(base, currbase, str, strlen(str));
+ vec[i] = fqlookupn_profile(base, currbase, str, end - str);
if (!vec[i])
goto fail;
}
@@ -1930,6 +1938,12 @@ fail:
goto out;
}
+struct aa_label *aa_label_parse(struct aa_label *base, const char *str,
+ gfp_t gfp, bool create, bool force_stack)
+{
+ return aa_label_strn_parse(base, str, strlen(str), gfp, create,
+ force_stack);
+}
/**
* aa_labelset_destroy - remove all labels from the label set
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 4d5e98e49d5e..068a9f471f77 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -211,7 +211,8 @@ void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask)
*str = '\0';
}
-void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask)
+void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names,
+ u32 mask)
{
const char *fmt = "%s";
unsigned int i, perm = 1;
@@ -229,7 +230,7 @@ void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask)
}
void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs,
- u32 chrsmask, const char **names, u32 namesmask)
+ u32 chrsmask, const char * const *names, u32 namesmask)
{
char str[33];
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 528f59b580a8..ce2b89e9ad94 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -29,9 +29,10 @@
#include "include/apparmorfs.h"
#include "include/audit.h"
#include "include/capability.h"
-#include "include/context.h"
+#include "include/cred.h"
#include "include/file.h"
#include "include/ipc.h"
+#include "include/net.h"
#include "include/path.h"
#include "include/label.h"
#include "include/policy.h"
@@ -50,12 +51,12 @@ DEFINE_PER_CPU(struct aa_buffers, aa_buffers);
*/
/*
- * free the associated aa_task_ctx and put its labels
+ * put the associated labels
*/
static void apparmor_cred_free(struct cred *cred)
{
- aa_free_task_context(cred_ctx(cred));
- cred_ctx(cred) = NULL;
+ aa_put_label(cred_label(cred));
+ cred_label(cred) = NULL;
}
/*
@@ -63,30 +64,17 @@ static void apparmor_cred_free(struct cred *cred)
*/
static int apparmor_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
- /* freed by apparmor_cred_free */
- struct aa_task_ctx *ctx = aa_alloc_task_context(gfp);
-
- if (!ctx)
- return -ENOMEM;
-
- cred_ctx(cred) = ctx;
+ cred_label(cred) = NULL;
return 0;
}
/*
- * prepare new aa_task_ctx for modification by prepare_cred block
+ * prepare new cred label for modification by prepare_cred block
*/
static int apparmor_cred_prepare(struct cred *new, const struct cred *old,
gfp_t gfp)
{
- /* freed by apparmor_cred_free */
- struct aa_task_ctx *ctx = aa_alloc_task_context(gfp);
-
- if (!ctx)
- return -ENOMEM;
-
- aa_dup_task_context(ctx, cred_ctx(old));
- cred_ctx(new) = ctx;
+ cred_label(new) = aa_get_newest_label(cred_label(old));
return 0;
}
@@ -95,10 +83,28 @@ static int apparmor_cred_prepare(struct cred *new, const struct cred *old,
*/
static void apparmor_cred_transfer(struct cred *new, const struct cred *old)
{
- const struct aa_task_ctx *old_ctx = cred_ctx(old);
- struct aa_task_ctx *new_ctx = cred_ctx(new);
+ cred_label(new) = aa_get_newest_label(cred_label(old));
+}
+
+static void apparmor_task_free(struct task_struct *task)
+{
+
+ aa_free_task_ctx(task_ctx(task));
+ task_ctx(task) = NULL;
+}
+
+static int apparmor_task_alloc(struct task_struct *task,
+ unsigned long clone_flags)
+{
+ struct aa_task_ctx *new = aa_alloc_task_ctx(GFP_KERNEL);
+
+ if (!new)
+ return -ENOMEM;
+
+ aa_dup_task_ctx(new, task_ctx(current));
+ task_ctx(task) = new;
- aa_dup_task_context(new_ctx, old_ctx);
+ return 0;
}
static int apparmor_ptrace_access_check(struct task_struct *child,
@@ -576,11 +582,11 @@ static int apparmor_getprocattr(struct task_struct *task, char *name,
int error = -ENOENT;
/* released below */
const struct cred *cred = get_task_cred(task);
- struct aa_task_ctx *ctx = cred_ctx(cred);
+ struct aa_task_ctx *ctx = task_ctx(current);
struct aa_label *label = NULL;
if (strcmp(name, "current") == 0)
- label = aa_get_newest_label(ctx->label);
+ label = aa_get_newest_label(cred_label(cred));
else if (strcmp(name, "prev") == 0 && ctx->previous)
label = aa_get_newest_label(ctx->previous);
else if (strcmp(name, "exec") == 0 && ctx->onexec)
@@ -677,11 +683,11 @@ fail:
static void apparmor_bprm_committing_creds(struct linux_binprm *bprm)
{
struct aa_label *label = aa_current_raw_label();
- struct aa_task_ctx *new_ctx = cred_ctx(bprm->cred);
+ struct aa_label *new_label = cred_label(bprm->cred);
/* bail out if unconfined or not changing profile */
- if ((new_ctx->label->proxy == label->proxy) ||
- (unconfined(new_ctx->label)))
+ if ((new_label->proxy == label->proxy) ||
+ (unconfined(new_label)))
return;
aa_inherit_files(bprm->cred, current->files);
@@ -689,7 +695,7 @@ static void apparmor_bprm_committing_creds(struct linux_binprm *bprm)
current->pdeath_signal = 0;
/* reset soft limits and set hard limits for the new label */
- __aa_transition_rlimits(label, new_ctx->label);
+ __aa_transition_rlimits(label, new_label);
}
/**
@@ -698,7 +704,9 @@ static void apparmor_bprm_committing_creds(struct linux_binprm *bprm)
*/
static void apparmor_bprm_committed_creds(struct linux_binprm *bprm)
{
- /* TODO: cleanup signals - ipc mediation */
+ /* clear out temporary/transitional state from the context */
+ aa_clear_task_ctx_trans(task_ctx(current));
+
return;
}
@@ -742,6 +750,373 @@ static int apparmor_task_kill(struct task_struct *target, struct siginfo *info,
return error;
}
+/**
+ * apparmor_sk_alloc_security - allocate and attach the sk_security field
+ */
+static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t flags)
+{
+ struct aa_sk_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), flags);
+ if (!ctx)
+ return -ENOMEM;
+
+ SK_CTX(sk) = ctx;
+
+ return 0;
+}
+
+/**
+ * apparmor_sk_free_security - free the sk_security field
+ */
+static void apparmor_sk_free_security(struct sock *sk)
+{
+ struct aa_sk_ctx *ctx = SK_CTX(sk);
+
+ SK_CTX(sk) = NULL;
+ aa_put_label(ctx->label);
+ aa_put_label(ctx->peer);
+ kfree(ctx);
+}
+
+/**
+ * apparmor_sk_clone_security - clone the sk_security field
+ */
+static void apparmor_sk_clone_security(const struct sock *sk,
+ struct sock *newsk)
+{
+ struct aa_sk_ctx *ctx = SK_CTX(sk);
+ struct aa_sk_ctx *new = SK_CTX(newsk);
+
+ new->label = aa_get_label(ctx->label);
+ new->peer = aa_get_label(ctx->peer);
+}
+
+/**
+ * apparmor_socket_create - check perms before creating a new socket
+ */
+static int apparmor_socket_create(int family, int type, int protocol, int kern)
+{
+ struct aa_label *label;
+ int error = 0;
+
+ AA_BUG(in_interrupt());
+
+ label = begin_current_label_crit_section();
+ if (!(kern || unconfined(label)))
+ error = af_select(family,
+ create_perm(label, family, type, protocol),
+ aa_af_perm(label, OP_CREATE, AA_MAY_CREATE,
+ family, type, protocol));
+ end_current_label_crit_section(label);
+
+ return error;
+}
+
+/**
+ * apparmor_socket_post_create - setup the per-socket security struct
+ *
+ * Note:
+ * - kernel sockets currently labeled unconfined but we may want to
+ * move to a special kernel label
+ * - socket may not have sk here if created with sock_create_lite or
+ * sock_alloc. These should be accept cases which will be handled in
+ * sock_graft.
+ */
+static int apparmor_socket_post_create(struct socket *sock, int family,
+ int type, int protocol, int kern)
+{
+ struct aa_label *label;
+
+ if (kern) {
+ struct aa_ns *ns = aa_get_current_ns();
+
+ label = aa_get_label(ns_unconfined(ns));
+ aa_put_ns(ns);
+ } else
+ label = aa_get_current_label();
+
+ if (sock->sk) {
+ struct aa_sk_ctx *ctx = SK_CTX(sock->sk);
+
+ aa_put_label(ctx->label);
+ ctx->label = aa_get_label(label);
+ }
+ aa_put_label(label);
+
+ return 0;
+}
+
+/**
+ * apparmor_socket_bind - check perms before bind addr to socket
+ */
+static int apparmor_socket_bind(struct socket *sock,
+ struct sockaddr *address, int addrlen)
+{
+ AA_BUG(!sock);
+ AA_BUG(!sock->sk);
+ AA_BUG(!address);
+ AA_BUG(in_interrupt());
+
+ return af_select(sock->sk->sk_family,
+ bind_perm(sock, address, addrlen),
+ aa_sk_perm(OP_BIND, AA_MAY_BIND, sock->sk));
+}
+
+/**
+ * apparmor_socket_connect - check perms before connecting @sock to @address
+ */
+static int apparmor_socket_connect(struct socket *sock,
+ struct sockaddr *address, int addrlen)
+{
+ AA_BUG(!sock);
+ AA_BUG(!sock->sk);
+ AA_BUG(!address);
+ AA_BUG(in_interrupt());
+
+ return af_select(sock->sk->sk_family,
+ connect_perm(sock, address, addrlen),
+ aa_sk_perm(OP_CONNECT, AA_MAY_CONNECT, sock->sk));
+}
+
+/**
+ * apparmor_socket_listen - check perms before allowing listen
+ */
+static int apparmor_socket_listen(struct socket *sock, int backlog)
+{
+ AA_BUG(!sock);
+ AA_BUG(!sock->sk);
+ AA_BUG(in_interrupt());
+
+ return af_select(sock->sk->sk_family,
+ listen_perm(sock, backlog),
+ aa_sk_perm(OP_LISTEN, AA_MAY_LISTEN, sock->sk));
+}
+
+/**
+ * apparmor_socket_accept - check perms before accepting a new connection.
+ *
+ * Note: while @newsock is created and has some information, the accept
+ * has not been done.
+ */
+static int apparmor_socket_accept(struct socket *sock, struct socket *newsock)
+{
+ AA_BUG(!sock);
+ AA_BUG(!sock->sk);
+ AA_BUG(!newsock);
+ AA_BUG(in_interrupt());
+
+ return af_select(sock->sk->sk_family,
+ accept_perm(sock, newsock),
+ aa_sk_perm(OP_ACCEPT, AA_MAY_ACCEPT, sock->sk));
+}
+
+static int aa_sock_msg_perm(const char *op, u32 request, struct socket *sock,
+ struct msghdr *msg, int size)
+{
+ AA_BUG(!sock);
+ AA_BUG(!sock->sk);
+ AA_BUG(!msg);
+ AA_BUG(in_interrupt());
+
+ return af_select(sock->sk->sk_family,
+ msg_perm(op, request, sock, msg, size),
+ aa_sk_perm(op, request, sock->sk));
+}
+
+/**
+ * apparmor_socket_sendmsg - check perms before sending msg to another socket
+ */
+static int apparmor_socket_sendmsg(struct socket *sock,
+ struct msghdr *msg, int size)
+{
+ return aa_sock_msg_perm(OP_SENDMSG, AA_MAY_SEND, sock, msg, size);
+}
+
+/**
+ * apparmor_socket_recvmsg - check perms before receiving a message
+ */
+static int apparmor_socket_recvmsg(struct socket *sock,
+ struct msghdr *msg, int size, int flags)
+{
+ return aa_sock_msg_perm(OP_RECVMSG, AA_MAY_RECEIVE, sock, msg, size);
+}
+
+/* revalidation, get/set attr, shutdown */
+static int aa_sock_perm(const char *op, u32 request, struct socket *sock)
+{
+ AA_BUG(!sock);
+ AA_BUG(!sock->sk);
+ AA_BUG(in_interrupt());
+
+ return af_select(sock->sk->sk_family,
+ sock_perm(op, request, sock),
+ aa_sk_perm(op, request, sock->sk));
+}
+
+/**
+ * apparmor_socket_getsockname - check perms before getting the local address
+ */
+static int apparmor_socket_getsockname(struct socket *sock)
+{
+ return aa_sock_perm(OP_GETSOCKNAME, AA_MAY_GETATTR, sock);
+}
+
+/**
+ * apparmor_socket_getpeername - check perms before getting remote address
+ */
+static int apparmor_socket_getpeername(struct socket *sock)
+{
+ return aa_sock_perm(OP_GETPEERNAME, AA_MAY_GETATTR, sock);
+}
+
+/* revalidation, get/set attr, opt */
+static int aa_sock_opt_perm(const char *op, u32 request, struct socket *sock,
+ int level, int optname)
+{
+ AA_BUG(!sock);
+ AA_BUG(!sock->sk);
+ AA_BUG(in_interrupt());
+
+ return af_select(sock->sk->sk_family,
+ opt_perm(op, request, sock, level, optname),
+ aa_sk_perm(op, request, sock->sk));
+}
+
+/**
+ * apparmor_socket_getsockopt - check perms before getting socket options
+ */
+static int apparmor_socket_getsockopt(struct socket *sock, int level,
+ int optname)
+{
+ return aa_sock_opt_perm(OP_GETSOCKOPT, AA_MAY_GETOPT, sock,
+ level, optname);
+}
+
+/**
+ * apparmor_socket_setsockopt - check perms before setting socket options
+ */
+static int apparmor_socket_setsockopt(struct socket *sock, int level,
+ int optname)
+{
+ return aa_sock_opt_perm(OP_SETSOCKOPT, AA_MAY_SETOPT, sock,
+ level, optname);
+}
+
+/**
+ * apparmor_socket_shutdown - check perms before shutting down @sock conn
+ */
+static int apparmor_socket_shutdown(struct socket *sock, int how)
+{
+ return aa_sock_perm(OP_SHUTDOWN, AA_MAY_SHUTDOWN, sock);
+}
+
+/**
+ * apparmor_socket_sock_rcv_skb - check perms before associating skb to sk
+ *
+ * Note: cannot sleep; may be called with locks held
+ *
+ * don't want protocol-specific code in __skb_recv_datagram()
+ * to deny an incoming connection in socket_sock_rcv_skb()
+ */
+static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ return 0;
+}
+
+
+static struct aa_label *sk_peer_label(struct sock *sk)
+{
+ struct aa_sk_ctx *ctx = SK_CTX(sk);
+
+ if (ctx->peer)
+ return ctx->peer;
+
+ return ERR_PTR(-ENOPROTOOPT);
+}
+
+/**
+ * apparmor_socket_getpeersec_stream - get security context of peer
+ *
+ * Note: for TCP this is only valid if using IPsec or CIPSO on the LAN
+ */
+static int apparmor_socket_getpeersec_stream(struct socket *sock,
+ char __user *optval,
+ int __user *optlen,
+ unsigned int len)
+{
+ char *name;
+ int slen, error = 0;
+ struct aa_label *label;
+ struct aa_label *peer;
+
+ label = begin_current_label_crit_section();
+ peer = sk_peer_label(sock->sk);
+ if (IS_ERR(peer)) {
+ error = PTR_ERR(peer);
+ goto done;
+ }
+ slen = aa_label_asxprint(&name, labels_ns(label), peer,
+ FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
+ FLAG_HIDDEN_UNCONFINED, GFP_KERNEL);
+ /* don't include terminating \0 in slen, it breaks some apps */
+ if (slen < 0) {
+ error = -ENOMEM;
+ } else {
+ if (slen > len) {
+ error = -ERANGE;
+ } else if (copy_to_user(optval, name, slen)) {
+ error = -EFAULT;
+ goto out;
+ }
+ if (put_user(slen, optlen))
+ error = -EFAULT;
+out:
+ kfree(name);
+
+ }
+
+done:
+ end_current_label_crit_section(label);
+
+ return error;
+}
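A hedged usage sketch: this hook is what ultimately services a userspace getsockopt(SOL_SOCKET, SO_PEERSEC) query. On a plain AF_UNIX socketpair it may simply fail with ENOPROTOOPT, matching the sk_peer_label() behaviour above when no peer label has been recorded.

#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
        int sv[2];
        char label[256];
        socklen_t len = sizeof(label);

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0) {
                perror("socketpair");
                return 1;
        }
        if (getsockopt(sv[0], SOL_SOCKET, SO_PEERSEC, label, &len) == 0)
                printf("peer label: %.*s\n", (int)len, label);
        else
                perror("SO_PEERSEC");   /* often ENOPROTOOPT without a peer label */
        return 0;
}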
+
+/**
+ * apparmor_socket_getpeersec_dgram - get security label of packet
+ * @sock: the peer socket
+ * @skb: packet data
+ * @secid: pointer to where to put the secid of the packet
+ *
+ * Sets the netlabel socket state on sk from parent
+ */
+static int apparmor_socket_getpeersec_dgram(struct socket *sock,
+ struct sk_buff *skb, u32 *secid)
+
+{
+ /* TODO: requires secid support */
+ return -ENOPROTOOPT;
+}
+
+/**
+ * apparmor_sock_graft - Initialize newly created socket
+ * @sk: child sock
+ * @parent: parent socket
+ *
+ * Note: we could set this off of SOCK_CTX(parent), but that would require
+ * tracking the inode; instead just set the sk security information off of
+ * the current (creating) process's label. Labeling of sk for the accept
+ * case should probably be sock based instead of task based, because of the
+ * case where an implicitly labeled socket is shared by different tasks.
+ */
+static void apparmor_sock_graft(struct sock *sk, struct socket *parent)
+{
+ struct aa_sk_ctx *ctx = SK_CTX(sk);
+
+ if (!ctx->label)
+ ctx->label = aa_get_current_label();
+}
+
static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check),
LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme),
@@ -776,6 +1151,30 @@ static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(getprocattr, apparmor_getprocattr),
LSM_HOOK_INIT(setprocattr, apparmor_setprocattr),
+ LSM_HOOK_INIT(sk_alloc_security, apparmor_sk_alloc_security),
+ LSM_HOOK_INIT(sk_free_security, apparmor_sk_free_security),
+ LSM_HOOK_INIT(sk_clone_security, apparmor_sk_clone_security),
+
+ LSM_HOOK_INIT(socket_create, apparmor_socket_create),
+ LSM_HOOK_INIT(socket_post_create, apparmor_socket_post_create),
+ LSM_HOOK_INIT(socket_bind, apparmor_socket_bind),
+ LSM_HOOK_INIT(socket_connect, apparmor_socket_connect),
+ LSM_HOOK_INIT(socket_listen, apparmor_socket_listen),
+ LSM_HOOK_INIT(socket_accept, apparmor_socket_accept),
+ LSM_HOOK_INIT(socket_sendmsg, apparmor_socket_sendmsg),
+ LSM_HOOK_INIT(socket_recvmsg, apparmor_socket_recvmsg),
+ LSM_HOOK_INIT(socket_getsockname, apparmor_socket_getsockname),
+ LSM_HOOK_INIT(socket_getpeername, apparmor_socket_getpeername),
+ LSM_HOOK_INIT(socket_getsockopt, apparmor_socket_getsockopt),
+ LSM_HOOK_INIT(socket_setsockopt, apparmor_socket_setsockopt),
+ LSM_HOOK_INIT(socket_shutdown, apparmor_socket_shutdown),
+ LSM_HOOK_INIT(socket_sock_rcv_skb, apparmor_socket_sock_rcv_skb),
+ LSM_HOOK_INIT(socket_getpeersec_stream,
+ apparmor_socket_getpeersec_stream),
+ LSM_HOOK_INIT(socket_getpeersec_dgram,
+ apparmor_socket_getpeersec_dgram),
+ LSM_HOOK_INIT(sock_graft, apparmor_sock_graft),
+
LSM_HOOK_INIT(cred_alloc_blank, apparmor_cred_alloc_blank),
LSM_HOOK_INIT(cred_free, apparmor_cred_free),
LSM_HOOK_INIT(cred_prepare, apparmor_cred_prepare),
@@ -785,6 +1184,8 @@ static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(bprm_committing_creds, apparmor_bprm_committing_creds),
LSM_HOOK_INIT(bprm_committed_creds, apparmor_bprm_committed_creds),
+ LSM_HOOK_INIT(task_free, apparmor_task_free),
+ LSM_HOOK_INIT(task_alloc, apparmor_task_alloc),
LSM_HOOK_INIT(task_setrlimit, apparmor_task_setrlimit),
LSM_HOOK_INIT(task_kill, apparmor_task_kill),
};
@@ -1032,12 +1433,12 @@ static int __init set_init_ctx(void)
struct cred *cred = (struct cred *)current->real_cred;
struct aa_task_ctx *ctx;
- ctx = aa_alloc_task_context(GFP_KERNEL);
+ ctx = aa_alloc_task_ctx(GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- ctx->label = aa_get_label(ns_unconfined(root_ns));
- cred_ctx(cred) = ctx;
+ cred_label(cred) = aa_get_label(ns_unconfined(root_ns));
+ task_ctx(current) = ctx;
return 0;
}
diff --git a/security/apparmor/match.c b/security/apparmor/match.c
index 72c604350e80..280eba082c7b 100644
--- a/security/apparmor/match.c
+++ b/security/apparmor/match.c
@@ -30,6 +30,11 @@ static char nulldfa_src[] = {
};
struct aa_dfa *nulldfa;
+static char stacksplitdfa_src[] = {
+ #include "stacksplitdfa.in"
+};
+struct aa_dfa *stacksplitdfa;
+
int aa_setup_dfa_engine(void)
{
int error;
@@ -37,19 +42,31 @@ int aa_setup_dfa_engine(void)
nulldfa = aa_dfa_unpack(nulldfa_src, sizeof(nulldfa_src),
TO_ACCEPT1_FLAG(YYTD_DATA32) |
TO_ACCEPT2_FLAG(YYTD_DATA32));
- if (!IS_ERR(nulldfa))
- return 0;
+ if (IS_ERR(nulldfa)) {
+ error = PTR_ERR(nulldfa);
+ nulldfa = NULL;
+ return error;
+ }
- error = PTR_ERR(nulldfa);
- nulldfa = NULL;
+ stacksplitdfa = aa_dfa_unpack(stacksplitdfa_src,
+ sizeof(stacksplitdfa_src),
+ TO_ACCEPT1_FLAG(YYTD_DATA32) |
+ TO_ACCEPT2_FLAG(YYTD_DATA32));
+ if (IS_ERR(stacksplitdfa)) {
+ aa_put_dfa(nulldfa);
+ nulldfa = NULL;
+ error = PTR_ERR(stacksplitdfa);
+ stacksplitdfa = NULL;
+ return error;
+ }
- return error;
+ return 0;
}
void aa_teardown_dfa_engine(void)
{
+ aa_put_dfa(stacksplitdfa);
aa_put_dfa(nulldfa);
- nulldfa = NULL;
}
/**
@@ -119,8 +136,8 @@ fail:
}
/**
- * verify_dfa - verify that transitions and states in the tables are in bounds.
- * @dfa: dfa to test (NOT NULL)
+ * verify_table_headers - verify that the table headers are as expected
+ * @tables: array of dfa tables to check (NOT NULL)
* @flags: flags controlling what type of accept table are acceptable
*
* Assumes dfa has gone through the first pass verification done by unpacking
@@ -128,64 +145,98 @@ fail:
*
* Returns: %0 else error code on failure to verify
*/
-static int verify_dfa(struct aa_dfa *dfa, int flags)
+static int verify_table_headers(struct table_header **tables, int flags)
{
- size_t i, state_count, trans_count;
+ size_t state_count, trans_count;
int error = -EPROTO;
/* check that required tables exist */
- if (!(dfa->tables[YYTD_ID_DEF] &&
- dfa->tables[YYTD_ID_BASE] &&
- dfa->tables[YYTD_ID_NXT] && dfa->tables[YYTD_ID_CHK]))
+ if (!(tables[YYTD_ID_DEF] && tables[YYTD_ID_BASE] &&
+ tables[YYTD_ID_NXT] && tables[YYTD_ID_CHK]))
goto out;
/* accept.size == default.size == base.size */
- state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
+ state_count = tables[YYTD_ID_BASE]->td_lolen;
if (ACCEPT1_FLAGS(flags)) {
- if (!dfa->tables[YYTD_ID_ACCEPT])
+ if (!tables[YYTD_ID_ACCEPT])
goto out;
- if (state_count != dfa->tables[YYTD_ID_ACCEPT]->td_lolen)
+ if (state_count != tables[YYTD_ID_ACCEPT]->td_lolen)
goto out;
}
if (ACCEPT2_FLAGS(flags)) {
- if (!dfa->tables[YYTD_ID_ACCEPT2])
+ if (!tables[YYTD_ID_ACCEPT2])
goto out;
- if (state_count != dfa->tables[YYTD_ID_ACCEPT2]->td_lolen)
+ if (state_count != tables[YYTD_ID_ACCEPT2]->td_lolen)
goto out;
}
- if (state_count != dfa->tables[YYTD_ID_DEF]->td_lolen)
+ if (state_count != tables[YYTD_ID_DEF]->td_lolen)
goto out;
/* next.size == chk.size */
- trans_count = dfa->tables[YYTD_ID_NXT]->td_lolen;
- if (trans_count != dfa->tables[YYTD_ID_CHK]->td_lolen)
+ trans_count = tables[YYTD_ID_NXT]->td_lolen;
+ if (trans_count != tables[YYTD_ID_CHK]->td_lolen)
goto out;
/* if equivalence classes then its table size must be 256 */
- if (dfa->tables[YYTD_ID_EC] &&
- dfa->tables[YYTD_ID_EC]->td_lolen != 256)
+ if (tables[YYTD_ID_EC] && tables[YYTD_ID_EC]->td_lolen != 256)
goto out;
- if (flags & DFA_FLAG_VERIFY_STATES) {
- for (i = 0; i < state_count; i++) {
- if (DEFAULT_TABLE(dfa)[i] >= state_count)
- goto out;
- if (base_idx(BASE_TABLE(dfa)[i]) + 255 >= trans_count) {
- printk(KERN_ERR "AppArmor DFA next/check upper "
- "bounds error\n");
- goto out;
- }
+ error = 0;
+out:
+ return error;
+}
+
+/**
+ * verify_dfa - verify that transitions and states in the tables are in bounds.
+ * @dfa: dfa to test (NOT NULL)
+ *
+ * Assumes dfa has gone through the first pass verification done by unpacking
+ * NOTE: this does not validate accept table values
+ *
+ * Returns: %0 else error code on failure to verify
+ */
+static int verify_dfa(struct aa_dfa *dfa)
+{
+ size_t i, state_count, trans_count;
+ int error = -EPROTO;
+
+ state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
+ trans_count = dfa->tables[YYTD_ID_NXT]->td_lolen;
+ for (i = 0; i < state_count; i++) {
+ if (!(BASE_TABLE(dfa)[i] & MATCH_FLAG_DIFF_ENCODE) &&
+ (DEFAULT_TABLE(dfa)[i] >= state_count))
+ goto out;
+ if (base_idx(BASE_TABLE(dfa)[i]) + 255 >= trans_count) {
+ pr_err("AppArmor DFA next/check upper bounds error\n");
+ goto out;
}
+ }
- for (i = 0; i < trans_count; i++) {
- if (NEXT_TABLE(dfa)[i] >= state_count)
- goto out;
- if (CHECK_TABLE(dfa)[i] >= state_count)
+ for (i = 0; i < trans_count; i++) {
+ if (NEXT_TABLE(dfa)[i] >= state_count)
+ goto out;
+ if (CHECK_TABLE(dfa)[i] >= state_count)
+ goto out;
+ }
+
+ /* Now that all the other tables are verified, verify diffencoding */
+ for (i = 0; i < state_count; i++) {
+ size_t j, k;
+
+ for (j = i;
+ (BASE_TABLE(dfa)[j] & MATCH_FLAG_DIFF_ENCODE) &&
+ !(BASE_TABLE(dfa)[j] & MARK_DIFF_ENCODE);
+ j = k) {
+ k = DEFAULT_TABLE(dfa)[j];
+ if (j == k)
goto out;
+ if (k < j)
+ break; /* already verified */
+ BASE_TABLE(dfa)[j] |= MARK_DIFF_ENCODE;
}
}
-
error = 0;
+
out:
return error;
}
@@ -257,6 +308,9 @@ struct aa_dfa *aa_dfa_unpack(void *blob, size_t size, int flags)
goto fail;
dfa->flags = ntohs(*(__be16 *) (data + 12));
+ if (dfa->flags != 0 && dfa->flags != YYTH_FLAG_DIFF_ENCODE)
+ goto fail;
+
data += hsize;
size -= hsize;
@@ -299,11 +353,16 @@ struct aa_dfa *aa_dfa_unpack(void *blob, size_t size, int flags)
size -= table_size(table->td_lolen, table->td_flags);
table = NULL;
}
-
- error = verify_dfa(dfa, flags);
+ error = verify_table_headers(dfa->tables, flags);
if (error)
goto fail;
+ if (flags & DFA_FLAG_VERIFY_STATES) {
+ error = verify_dfa(dfa);
+ if (error)
+ goto fail;
+ }
+
return dfa;
fail:
@@ -312,6 +371,20 @@ fail:
return ERR_PTR(error);
}
+#define match_char(state, def, base, next, check, C) \
+do { \
+ u32 b = (base)[(state)]; \
+ unsigned int pos = base_idx(b) + (C); \
+ if ((check)[pos] != (state)) { \
+ (state) = (def)[(state)]; \
+ if (b & MATCH_FLAG_DIFF_ENCODE) \
+ continue; \
+ break; \
+ } \
+ (state) = (next)[pos]; \
+ break; \
+} while (1)
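
The match_char() macro is the per-character transition step shared by all the matchers below. The following stand-alone function is only an illustrative expansion of that logic (not part of the patch); table types are simplified to plain pointers, and base_idx() and MATCH_FLAG_DIFF_ENCODE are the helpers from the AppArmor match headers:

/* Illustration only: an expanded, function form of match_char(). */
static unsigned int match_char_sketch(unsigned int state, const u16 *def,
				      const u32 *base, const u16 *next,
				      const u16 *check, u8 c)
{
	for (;;) {
		u32 b = base[state];
		unsigned int pos = base_idx(b) + c;

		if (check[pos] == state)
			return next[pos];	/* valid transition */
		state = def[state];		/* fall back to the default state */
		if (!(b & MATCH_FLAG_DIFF_ENCODE))
			return state;		/* plain state: done */
		/* diff-encoded state: retry the same character from default */
	}
}
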
+
/**
* aa_dfa_match_len - traverse @dfa to find state @str stops at
* @dfa: the dfa to match @str against (NOT NULL)
@@ -335,6 +408,118 @@ unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start,
u32 *base = BASE_TABLE(dfa);
u16 *next = NEXT_TABLE(dfa);
u16 *check = CHECK_TABLE(dfa);
+ unsigned int state = start;
+
+ if (state == 0)
+ return 0;
+
+ /* current state is <state>, matching character *str */
+ if (dfa->tables[YYTD_ID_EC]) {
+ /* Equivalence class table defined */
+ u8 *equiv = EQUIV_TABLE(dfa);
+ for (; len; len--)
+ match_char(state, def, base, next, check,
+ equiv[(u8) *str++]);
+ } else {
+ /* default is direct to next state */
+ for (; len; len--)
+ match_char(state, def, base, next, check, (u8) *str++);
+ }
+
+ return state;
+}
+
+/**
+ * aa_dfa_match - traverse @dfa to find state @str stops at
+ * @dfa: the dfa to match @str against (NOT NULL)
+ * @start: the state of the dfa to start matching in
+ * @str: the null terminated string of bytes to match against the dfa (NOT NULL)
+ *
+ * aa_dfa_match will match @str against the dfa and return the state it
+ * finished matching in. The final state can be used to look up the accepting
+ * label, or as the start state of a continuing match.
+ *
+ * Returns: final state reached after input is consumed
+ */
+unsigned int aa_dfa_match(struct aa_dfa *dfa, unsigned int start,
+ const char *str)
+{
+ u16 *def = DEFAULT_TABLE(dfa);
+ u32 *base = BASE_TABLE(dfa);
+ u16 *next = NEXT_TABLE(dfa);
+ u16 *check = CHECK_TABLE(dfa);
+ unsigned int state = start;
+
+ if (state == 0)
+ return 0;
+
+ /* current state is <state>, matching character *str */
+ if (dfa->tables[YYTD_ID_EC]) {
+ /* Equivalence class table defined */
+ u8 *equiv = EQUIV_TABLE(dfa);
+ /* default is direct to next state */
+ while (*str)
+ match_char(state, def, base, next, check,
+ equiv[(u8) *str++]);
+ } else {
+ /* default is direct to next state */
+ while (*str)
+ match_char(state, def, base, next, check, (u8) *str++);
+ }
+
+ return state;
+}
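
A hedged usage sketch (not part of the patch): callers normally start at DFA_START and feed the final state to the accept-table helpers, both of which come from include/match.h:

/* Illustration only: look up the accept entry for a matched string. */
static u32 example_accept(struct aa_dfa *dfa, const char *str)
{
	unsigned int state = aa_dfa_match(dfa, DFA_START, str);

	return ACCEPT_TABLE(dfa)[state];	/* accept entry of the final state */
}
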
+
+/**
+ * aa_dfa_next - step one character to the next state in the dfa
+ * @dfa: the dfa to traverse (NOT NULL)
+ * @state: the state to start in
+ * @c: the input character to transition on
+ *
+ * aa_dfa_next will step through the dfa by one input character @c
+ *
+ * Returns: state reached after input @c
+ */
+unsigned int aa_dfa_next(struct aa_dfa *dfa, unsigned int state,
+ const char c)
+{
+ u16 *def = DEFAULT_TABLE(dfa);
+ u32 *base = BASE_TABLE(dfa);
+ u16 *next = NEXT_TABLE(dfa);
+ u16 *check = CHECK_TABLE(dfa);
+
+ /* current state is <state>, matching character *str */
+ if (dfa->tables[YYTD_ID_EC]) {
+ /* Equivalence class table defined */
+ u8 *equiv = EQUIV_TABLE(dfa);
+ match_char(state, def, base, next, check, equiv[(u8) c]);
+ } else
+ match_char(state, def, base, next, check, (u8) c);
+
+ return state;
+}
+
+/**
+ * aa_dfa_match_until - traverse @dfa until accept state or end of input
+ * @dfa: the dfa to match @str against (NOT NULL)
+ * @start: the state of the dfa to start matching in
+ * @str: the null terminated string of bytes to match against the dfa (NOT NULL)
+ * @retpos: first character in str after match OR end of string
+ *
+ * aa_dfa_match_until will match @str against the dfa and return the state it
+ * finished matching in. The final state can be used to look up the accepting
+ * label, or as the start state of a continuing match.
+ *
+ * Returns: final state reached after input is consumed
+ */
+unsigned int aa_dfa_match_until(struct aa_dfa *dfa, unsigned int start,
+ const char *str, const char **retpos)
+{
+ u16 *def = DEFAULT_TABLE(dfa);
+ u32 *base = BASE_TABLE(dfa);
+ u16 *next = NEXT_TABLE(dfa);
+ u16 *check = CHECK_TABLE(dfa);
+ u32 *accept = ACCEPT_TABLE(dfa);
unsigned int state = start, pos;
if (state == 0)
@@ -345,48 +530,60 @@ unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start,
/* Equivalence class table defined */
u8 *equiv = EQUIV_TABLE(dfa);
/* default is direct to next state */
- for (; len; len--) {
+ while (*str) {
pos = base_idx(base[state]) + equiv[(u8) *str++];
if (check[pos] == state)
state = next[pos];
else
state = def[state];
+ if (accept[state])
+ break;
}
} else {
/* default is direct to next state */
- for (; len; len--) {
+ while (*str) {
pos = base_idx(base[state]) + (u8) *str++;
if (check[pos] == state)
state = next[pos];
else
state = def[state];
+ if (accept[state])
+ break;
}
}
+ *retpos = str;
return state;
}
/**
- * aa_dfa_match - traverse @dfa to find state @str stops at
+ * aa_dfa_matchn_until - traverse @dfa until accept or @n bytes consumed
* @dfa: the dfa to match @str against (NOT NULL)
* @start: the state of the dfa to start matching in
- * @str: the null terminated string of bytes to match against the dfa (NOT NULL)
+ * @str: the string of bytes to match against the dfa (NOT NULL)
+ * @n: length of the string of bytes to match
+ * @retpos: first character in str after match OR str + n
*
- * aa_dfa_match will match @str against the dfa and return the state it
+ * aa_dfa_matchn_until will match @str against the dfa and return the state it
* finished matching in. The final state can be used to look up the accepting
* label, or as the start state of a continuing match.
*
+ * This function will happily match against the 0 byte and only finishes
+ * when @n input is consumed.
+ *
* Returns: final state reached after input is consumed
*/
-unsigned int aa_dfa_match(struct aa_dfa *dfa, unsigned int start,
- const char *str)
+unsigned int aa_dfa_matchn_until(struct aa_dfa *dfa, unsigned int start,
+ const char *str, int n, const char **retpos)
{
u16 *def = DEFAULT_TABLE(dfa);
u32 *base = BASE_TABLE(dfa);
u16 *next = NEXT_TABLE(dfa);
u16 *check = CHECK_TABLE(dfa);
+ u32 *accept = ACCEPT_TABLE(dfa);
unsigned int state = start, pos;
+ *retpos = NULL;
if (state == 0)
return 0;
@@ -395,65 +592,149 @@ unsigned int aa_dfa_match(struct aa_dfa *dfa, unsigned int start,
/* Equivalence class table defined */
u8 *equiv = EQUIV_TABLE(dfa);
/* default is direct to next state */
- while (*str) {
+ for (; n; n--) {
pos = base_idx(base[state]) + equiv[(u8) *str++];
if (check[pos] == state)
state = next[pos];
else
state = def[state];
+ if (accept[state])
+ break;
}
} else {
/* default is direct to next state */
- while (*str) {
+ for (; n; n--) {
pos = base_idx(base[state]) + (u8) *str++;
if (check[pos] == state)
state = next[pos];
else
state = def[state];
+ if (accept[state])
+ break;
}
}
+ *retpos = str;
return state;
}
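
Since aa_dfa_matchn_until() is length-based rather than NUL-terminated, it can walk keys that contain embedded zero bytes. A hedged caller sketch, with dfa, buf and buf_len as purely illustrative names:

/* Illustration only: stop at the first accepting state within n bytes. */
const char *stop;
unsigned int state = aa_dfa_matchn_until(dfa, DFA_START, buf, buf_len, &stop);
size_t consumed = stop - buf;			/* bytes walked before accept or end */
bool accepted = ACCEPT_TABLE(dfa)[state] != 0;
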
-/**
- * aa_dfa_next - step one character to the next state in the dfa
- * @dfa: the dfa to tranverse (NOT NULL)
- * @state: the state to start in
- * @c: the input character to transition on
- *
- * aa_dfa_match will step through the dfa by one input character @c
- *
- * Returns: state reach after input @c
- */
-unsigned int aa_dfa_next(struct aa_dfa *dfa, unsigned int state,
- const char c)
+#define inc_wb_pos(wb) \
+do { \
+ wb->pos = (wb->pos + 1) & (wb->size - 1); \
+ wb->len = (wb->len + 1) & (wb->size - 1); \
+} while (0)
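
Note that inc_wb_pos() masks with (wb->size - 1), which only behaves as a modulo when the workbuf size is a power of two; a tiny sketch of the wraparound it assumes:

/* Illustration only: with size == 8, position 7 wraps back to 0. */
unsigned int size = 8, pos = 7;

pos = (pos + 1) & (size - 1);	/* 0; same as (pos + 1) % size for powers of 2 */
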
+
+/* For DFAs that don't support extended tagging of states */
+static bool is_loop(struct match_workbuf *wb, unsigned int state,
+ unsigned int *adjust)
+{
+ unsigned int pos = wb->pos;
+ unsigned int i;
+
+ if (wb->history[pos] < state)
+ return false;
+
+ for (i = 0; i <= wb->len; i++) {
+ if (wb->history[pos] == state) {
+ *adjust = i;
+ return true;
+ }
+ if (pos == 0)
+ pos = wb->size;
+ pos--;
+ }
+
+ *adjust = i;
+ return true;
+}
+
+static unsigned int leftmatch_fb(struct aa_dfa *dfa, unsigned int start,
+ const char *str, struct match_workbuf *wb,
+ unsigned int *count)
{
u16 *def = DEFAULT_TABLE(dfa);
u32 *base = BASE_TABLE(dfa);
u16 *next = NEXT_TABLE(dfa);
u16 *check = CHECK_TABLE(dfa);
- unsigned int pos;
+ unsigned int state = start, pos;
+
+ AA_BUG(!dfa);
+ AA_BUG(!str);
+ AA_BUG(!wb);
+ AA_BUG(!count);
+
+ *count = 0;
+ if (state == 0)
+ return 0;
/* current state is <state>, matching character *str */
if (dfa->tables[YYTD_ID_EC]) {
/* Equivalence class table defined */
u8 *equiv = EQUIV_TABLE(dfa);
/* default is direct to next state */
+ while (*str) {
+ unsigned int adjust;
- pos = base_idx(base[state]) + equiv[(u8) c];
- if (check[pos] == state)
- state = next[pos];
- else
- state = def[state];
+ wb->history[wb->pos] = state;
+ pos = base_idx(base[state]) + equiv[(u8) *str++];
+ if (check[pos] == state)
+ state = next[pos];
+ else
+ state = def[state];
+ if (is_loop(wb, state, &adjust)) {
+ state = aa_dfa_match(dfa, state, str);
+ *count -= adjust;
+ goto out;
+ }
+ inc_wb_pos(wb);
+ (*count)++;
+ }
} else {
/* default is direct to next state */
- pos = base_idx(base[state]) + (u8) c;
- if (check[pos] == state)
- state = next[pos];
- else
- state = def[state];
+ while (*str) {
+ unsigned int adjust;
+
+ wb->history[wb->pos] = state;
+ pos = base_idx(base[state]) + (u8) *str++;
+ if (check[pos] == state)
+ state = next[pos];
+ else
+ state = def[state];
+ if (is_loop(wb, state, &adjust)) {
+ state = aa_dfa_match(dfa, state, str);
+ *count -= adjust;
+ goto out;
+ }
+ inc_wb_pos(wb);
+ (*count)++;
+ }
}
+out:
+ if (!state)
+ *count = 0;
return state;
}
+
+/**
+ * aa_dfa_leftmatch - traverse @dfa to find state @str stops at
+ * @dfa: the dfa to match @str against (NOT NULL)
+ * @start: the state of the dfa to start matching in
+ * @str: the null terminated string of bytes to match against the dfa (NOT NULL)
+ * @count: set to the number of bytes consumed by the longest left match
+ *
+ * aa_dfa_leftmatch will match @str against the dfa and return the state it
+ * finished matching in. The final state can be used to look up the accepting
+ * label, or as the start state of a continuing match.
+ *
+ * Returns: final state reached after input is consumed
+ */
+unsigned int aa_dfa_leftmatch(struct aa_dfa *dfa, unsigned int start,
+ const char *str, unsigned int *count)
+{
+ DEFINE_MATCH_WB(wb);
+
+ /* TODO: match for extended state dfas */
+
+ return leftmatch_fb(dfa, start, str, &wb, count);
+}
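
A hedged usage sketch of the new entry point (using profile->xmatch as the dfa and name as the string is only an example; DFA_START comes from include/match.h):

/* Illustration only: longest-left match plus the number of bytes consumed. */
unsigned int count;
unsigned int state = aa_dfa_leftmatch(profile->xmatch, DFA_START, name, &count);

/* state indexes the accept tables; count reports how much of @name matched */
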
diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
index 8c558cbce930..6e8c7ac0b33d 100644
--- a/security/apparmor/mount.c
+++ b/security/apparmor/mount.c
@@ -18,7 +18,7 @@
#include "include/apparmor.h"
#include "include/audit.h"
-#include "include/context.h"
+#include "include/cred.h"
#include "include/domain.h"
#include "include/file.h"
#include "include/match.h"
diff --git a/security/apparmor/net.c b/security/apparmor/net.c
new file mode 100644
index 000000000000..bb24cfa0a164
--- /dev/null
+++ b/security/apparmor/net.c
@@ -0,0 +1,187 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor network mediation
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2017 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include "include/apparmor.h"
+#include "include/audit.h"
+#include "include/cred.h"
+#include "include/label.h"
+#include "include/net.h"
+#include "include/policy.h"
+
+#include "net_names.h"
+
+
+struct aa_sfs_entry aa_sfs_entry_network[] = {
+ AA_SFS_FILE_STRING("af_mask", AA_SFS_AF_MASK),
+ { }
+};
+
+static const char * const net_mask_names[] = {
+ "unknown",
+ "send",
+ "receive",
+ "unknown",
+
+ "create",
+ "shutdown",
+ "connect",
+ "unknown",
+
+ "setattr",
+ "getattr",
+ "setcred",
+ "getcred",
+
+ "chmod",
+ "chown",
+ "chgrp",
+ "lock",
+
+ "mmap",
+ "mprot",
+ "unknown",
+ "unknown",
+
+ "accept",
+ "bind",
+ "listen",
+ "unknown",
+
+ "setopt",
+ "getopt",
+ "unknown",
+ "unknown",
+
+ "unknown",
+ "unknown",
+ "unknown",
+ "unknown",
+};
+
+
+/* audit callback for net specific fields */
+void audit_net_cb(struct audit_buffer *ab, void *va)
+{
+ struct common_audit_data *sa = va;
+
+ audit_log_format(ab, " family=");
+ if (address_family_names[sa->u.net->family])
+ audit_log_string(ab, address_family_names[sa->u.net->family]);
+ else
+ audit_log_format(ab, "\"unknown(%d)\"", sa->u.net->family);
+ audit_log_format(ab, " sock_type=");
+ if (sock_type_names[aad(sa)->net.type])
+ audit_log_string(ab, sock_type_names[aad(sa)->net.type]);
+ else
+ audit_log_format(ab, "\"unknown(%d)\"", aad(sa)->net.type);
+ audit_log_format(ab, " protocol=%d", aad(sa)->net.protocol);
+
+ if (aad(sa)->request & NET_PERMS_MASK) {
+ audit_log_format(ab, " requested_mask=");
+ aa_audit_perm_mask(ab, aad(sa)->request, NULL, 0,
+ net_mask_names, NET_PERMS_MASK);
+
+ if (aad(sa)->denied & NET_PERMS_MASK) {
+ audit_log_format(ab, " denied_mask=");
+ aa_audit_perm_mask(ab, aad(sa)->denied, NULL, 0,
+ net_mask_names, NET_PERMS_MASK);
+ }
+ }
+ if (aad(sa)->peer) {
+ audit_log_format(ab, " peer=");
+ aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
+ FLAGS_NONE, GFP_ATOMIC);
+ }
+}
+
+/* Generic af perm */
+int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
+ u32 request, u16 family, int type)
+{
+ struct aa_perms perms = { };
+ unsigned int state;
+ __be16 buffer[2];
+
+ AA_BUG(family >= AF_MAX);
+ AA_BUG(type < 0 || type >= SOCK_MAX);
+
+ if (profile_unconfined(profile))
+ return 0;
+ state = PROFILE_MEDIATES(profile, AA_CLASS_NET);
+ if (!state)
+ return 0;
+
+ buffer[0] = cpu_to_be16(family);
+ buffer[1] = cpu_to_be16((u16) type);
+ state = aa_dfa_match_len(profile->policy.dfa, state, (char *) &buffer,
+ 4);
+ aa_compute_perms(profile->policy.dfa, state, &perms);
+ aa_apply_modes_to_perms(profile, &perms);
+
+ return aa_check_perms(profile, &perms, request, sa, audit_net_cb);
+}
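
The lookup key built above is two big-endian 16-bit values, family then type, matched with aa_dfa_match_len() over all four bytes so the embedded zero bytes of small AF_*/SOCK_* numbers are still consumed. A hedged sketch of the layout:

/* Illustration only: the 4-byte key for AF_INET/SOCK_STREAM. */
__be16 key[2];

key[0] = cpu_to_be16(AF_INET);		/* 0x00 0x02 on the wire */
key[1] = cpu_to_be16(SOCK_STREAM);	/* 0x00 0x01 */
/* aa_dfa_match_len(dfa, state, (char *)key, sizeof(key)) walks all 4 bytes;
 * a NUL-terminated match would stop at the first 0x00 byte.
 */
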
+
+int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
+ int type, int protocol)
+{
+ struct aa_profile *profile;
+ DEFINE_AUDIT_NET(sa, op, NULL, family, type, protocol);
+
+ return fn_for_each_confined(label, profile,
+ aa_profile_af_perm(profile, &sa, request, family,
+ type));
+}
+
+static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request,
+ struct sock *sk)
+{
+ struct aa_profile *profile;
+ DEFINE_AUDIT_SK(sa, op, sk);
+
+ AA_BUG(!label);
+ AA_BUG(!sk);
+
+ if (unconfined(label))
+ return 0;
+
+ return fn_for_each_confined(label, profile,
+ aa_profile_af_sk_perm(profile, &sa, request, sk));
+}
+
+int aa_sk_perm(const char *op, u32 request, struct sock *sk)
+{
+ struct aa_label *label;
+ int error;
+
+ AA_BUG(!sk);
+ AA_BUG(in_interrupt());
+
+ /* TODO: switch to begin_current_label ???? */
+ label = begin_current_label_crit_section();
+ error = aa_label_sk_perm(label, op, request, sk);
+ end_current_label_crit_section(label);
+
+ return error;
+}
+
+
+int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
+ struct socket *sock)
+{
+ AA_BUG(!label);
+ AA_BUG(!sock);
+ AA_BUG(!sock->sk);
+
+ return aa_label_sk_perm(label, op, request, sock->sk);
+}
diff --git a/security/apparmor/nulldfa.in b/security/apparmor/nulldfa.in
index 3cb38022902e..095f42a24cbc 100644
--- a/security/apparmor/nulldfa.in
+++ b/security/apparmor/nulldfa.in
@@ -1 +1,107 @@
-0x1B, 0x5E, 0x78, 0x3D, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x04, 0x90, 0x00, 0x00, 0x6E, 0x6F, 0x74, 0x66, 0x6C, 0x65, 0x78, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+0x1B, 0x5E, 0x78, 0x3D, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x04,
+0x90, 0x00, 0x00, 0x6E, 0x6F, 0x74, 0x66, 0x6C, 0x65, 0x78, 0x00,
+0x00, 0x00, 0x00, 0x01, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x04, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x04, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x08, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index b0b58848c248..c07493ce2376 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -82,7 +82,7 @@
#include "include/apparmor.h"
#include "include/capability.h"
-#include "include/context.h"
+#include "include/cred.h"
#include "include/file.h"
#include "include/ipc.h"
#include "include/match.h"
@@ -210,6 +210,7 @@ static void aa_free_data(void *ptr, void *arg)
void aa_free_profile(struct aa_profile *profile)
{
struct rhashtable *rht;
+ int i;
AA_DEBUG("%s(%p)\n", __func__, profile);
@@ -227,6 +228,9 @@ void aa_free_profile(struct aa_profile *profile)
aa_free_cap_rules(&profile->caps);
aa_free_rlimit_rules(&profile->rlimits);
+ for (i = 0; i < profile->xattr_count; i++)
+ kzfree(profile->xattrs[i]);
+ kzfree(profile->xattrs);
kzfree(profile->dirname);
aa_put_dfa(profile->xmatch);
aa_put_dfa(profile->policy.dfa);
@@ -845,8 +849,9 @@ static struct aa_profile *update_to_newest_parent(struct aa_profile *new)
* @udata: serialized data stream (NOT NULL)
*
* unpack and replace a profile on the profile list and uses of that profile
- * by any aa_task_ctx. If the profile does not exist on the profile list
- * it is added.
+ * by any task cred, by invalidating the old version of the profile; tasks
+ * notice the invalidation and update their own cred. If the profile does
+ * not exist on the profile list it is added.
*
* Returns: size of data consumed else error code on failure.
*/
diff --git a/security/apparmor/policy_ns.c b/security/apparmor/policy_ns.c
index b1e629cba70b..b0f9dc3f765a 100644
--- a/security/apparmor/policy_ns.c
+++ b/security/apparmor/policy_ns.c
@@ -21,7 +21,7 @@
#include <linux/string.h>
#include "include/apparmor.h"
-#include "include/context.h"
+#include "include/cred.h"
#include "include/policy_ns.h"
#include "include/label.h"
#include "include/policy.h"
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 59a1a25b7d43..b9e6b2cafa69 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -23,7 +23,7 @@
#include "include/apparmor.h"
#include "include/audit.h"
-#include "include/context.h"
+#include "include/cred.h"
#include "include/crypto.h"
#include "include/match.h"
#include "include/path.h"
@@ -37,7 +37,8 @@
#define v5 5 /* base version */
#define v6 6 /* per entry policydb mediation check */
-#define v7 7 /* full network masking */
+#define v7 7
+#define v8 8 /* full network masking */
/*
* The AppArmor interface treats data as a type byte followed by the
@@ -164,8 +165,9 @@ static void do_loaddata_free(struct work_struct *work)
}
kzfree(d->hash);
- kfree(d->name);
- kvfree(d);
+ kzfree(d->name);
+ kvfree(d->data);
+ kzfree(d);
}
void aa_loaddata_kref(struct kref *kref)
@@ -180,10 +182,16 @@ void aa_loaddata_kref(struct kref *kref)
struct aa_loaddata *aa_loaddata_alloc(size_t size)
{
- struct aa_loaddata *d = kvzalloc(sizeof(*d) + size, GFP_KERNEL);
+ struct aa_loaddata *d;
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
if (d == NULL)
return ERR_PTR(-ENOMEM);
+ d->data = kvzalloc(size, GFP_KERNEL);
+ if (!d->data) {
+ kfree(d);
+ return ERR_PTR(-ENOMEM);
+ }
kref_init(&d->count);
INIT_LIST_HEAD(&d->list);
@@ -196,6 +204,15 @@ static bool inbounds(struct aa_ext *e, size_t size)
return (size <= e->end - e->pos);
}
+static void *kvmemdup(const void *src, size_t len)
+{
+ void *p = kvmalloc(len, GFP_KERNEL);
+
+ if (p)
+ memcpy(p, src, len);
+ return p;
+}
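
kvmemdup() duplicates with kvmalloc(), so the copy must be released with kvfree(); a minimal hedged sketch of a caller (example_use_blob is a hypothetical name):

/* Illustration only: duplicate a blob and release it with kvfree(). */
static int example_use_blob(const void *blob, size_t size)
{
	void *copy = kvmemdup(blob, size);

	if (!copy)
		return -ENOMEM;
	/* ... consume copy ... */
	kvfree(copy);
	return 0;
}
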
+
/**
* aa_u16_chunck - test and do bounds checking for a u16 size based chunk
* @e: serialized data read head (NOT NULL)
@@ -515,6 +532,35 @@ fail:
return 0;
}
+static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
+{
+ void *pos = e->pos;
+
+ if (unpack_nameX(e, AA_STRUCT, "xattrs")) {
+ int i, size;
+
+ size = unpack_array(e, NULL);
+ profile->xattr_count = size;
+ profile->xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
+ if (!profile->xattrs)
+ goto fail;
+ for (i = 0; i < size; i++) {
+ if (!unpack_strdup(e, &profile->xattrs[i], NULL))
+ goto fail;
+ }
+ if (!unpack_nameX(e, AA_ARRAYEND, NULL))
+ goto fail;
+ if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ }
+
+ return 1;
+
+fail:
+ e->pos = pos;
+ return 0;
+}
+
static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
{
void *pos = e->pos;
@@ -549,15 +595,6 @@ fail:
return 0;
}
-static void *kvmemdup(const void *src, size_t len)
-{
- void *p = kvmalloc(len, GFP_KERNEL);
-
- if (p)
- memcpy(p, src, len);
- return p;
-}
-
static u32 strhash(const void *data, u32 len, u32 seed)
{
const char * const *key = data;
@@ -712,6 +749,11 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
goto fail;
}
+ if (!unpack_xattrs(e, profile)) {
+ info = "failed to unpack profile xattrs";
+ goto fail;
+ }
+
if (!unpack_rlimits(e, profile)) {
info = "failed to unpack profile rlimits";
goto fail;
diff --git a/security/apparmor/procattr.c b/security/apparmor/procattr.c
index d81617379d63..80c34ed373c3 100644
--- a/security/apparmor/procattr.c
+++ b/security/apparmor/procattr.c
@@ -13,7 +13,7 @@
*/
#include "include/apparmor.h"
-#include "include/context.h"
+#include "include/cred.h"
#include "include/policy.h"
#include "include/policy_ns.h"
#include "include/domain.h"
diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c
index cf4d234febe9..d022137143b9 100644
--- a/security/apparmor/resource.c
+++ b/security/apparmor/resource.c
@@ -16,7 +16,7 @@
#include <linux/security.h>
#include "include/audit.h"
-#include "include/context.h"
+#include "include/cred.h"
#include "include/resource.h"
#include "include/policy.h"
diff --git a/security/apparmor/stacksplitdfa.in b/security/apparmor/stacksplitdfa.in
new file mode 100644
index 000000000000..4bddd10b62a9
--- /dev/null
+++ b/security/apparmor/stacksplitdfa.in
@@ -0,0 +1,114 @@
+/* 0x1 [^\000]*[^/\000]//& */ 0x1B, 0x5E, 0x78, 0x3D, 0x00, 0x00,
+0x00, 0x18, 0x00, 0x00, 0x04, 0xD8, 0x00, 0x00, 0x6E, 0x6F, 0x74,
+0x66, 0x6C, 0x65, 0x78, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x04,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x02, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+0x02, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00,
+0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x02, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x02,
+0x00, 0x02, 0x00, 0x02, 0x00, 0x02, 0x00, 0x02, 0x00, 0x08, 0x00,
+0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x05, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x03, 0x00,
+0x04, 0x00, 0x01, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x01, 0x05, 0x00, 0x00, 0x00, 0x01, 0x00,
+0x02, 0x00, 0x03, 0x00, 0x04, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, 0x00, 0x04,
+0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00
diff --git a/security/apparmor/context.c b/security/apparmor/task.c
index c95f1ac6190b..c6b78a14da91 100644
--- a/security/apparmor/context.c
+++ b/security/apparmor/task.c
@@ -1,72 +1,23 @@
/*
* AppArmor security module
*
- * This file contains AppArmor functions used to manipulate object security
- * contexts.
+ * This file contains AppArmor task related definitions and mediation
*
- * Copyright (C) 1998-2008 Novell/SUSE
- * Copyright 2009-2010 Canonical Ltd.
+ * Copyright 2017 Canonical Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*
- *
- * AppArmor sets confinement on every task, via the the aa_task_ctx and
- * the aa_task_ctx.label, both of which are required and are not allowed
- * to be NULL. The aa_task_ctx is not reference counted and is unique
- * to each cred (which is reference count). The label pointed to by
- * the task_ctx is reference counted.
- *
* TODO
* If a task uses change_hat it currently does not return to the old
* cred or task context but instead creates a new one. Ideally the task
* should return to the previous cred if it has not been modified.
- *
*/
-#include "include/context.h"
-#include "include/policy.h"
-
-/**
- * aa_alloc_task_context - allocate a new task_ctx
- * @flags: gfp flags for allocation
- *
- * Returns: allocated buffer or NULL on failure
- */
-struct aa_task_ctx *aa_alloc_task_context(gfp_t flags)
-{
- return kzalloc(sizeof(struct aa_task_ctx), flags);
-}
-
-/**
- * aa_free_task_context - free a task_ctx
- * @ctx: task_ctx to free (MAYBE NULL)
- */
-void aa_free_task_context(struct aa_task_ctx *ctx)
-{
- if (ctx) {
- aa_put_label(ctx->label);
- aa_put_label(ctx->previous);
- aa_put_label(ctx->onexec);
-
- kzfree(ctx);
- }
-}
-
-/**
- * aa_dup_task_context - duplicate a task context, incrementing reference counts
- * @new: a blank task context (NOT NULL)
- * @old: the task context to copy (NOT NULL)
- */
-void aa_dup_task_context(struct aa_task_ctx *new, const struct aa_task_ctx *old)
-{
- *new = *old;
- aa_get_label(new->label);
- aa_get_label(new->previous);
- aa_get_label(new->onexec);
-}
+#include "include/cred.h"
+#include "include/task.h"
/**
* aa_get_task_label - Get another task's label
@@ -93,11 +44,13 @@ struct aa_label *aa_get_task_label(struct task_struct *task)
*/
int aa_replace_current_label(struct aa_label *label)
{
- struct aa_task_ctx *ctx = current_ctx();
+ struct aa_label *old = aa_current_raw_label();
+ struct aa_task_ctx *ctx = task_ctx(current);
struct cred *new;
+
AA_BUG(!label);
- if (ctx->label == label)
+ if (old == label)
return 0;
if (current_cred() != current_real_cred())
@@ -107,27 +60,34 @@ int aa_replace_current_label(struct aa_label *label)
if (!new)
return -ENOMEM;
- ctx = cred_ctx(new);
- if (unconfined(label) || (labels_ns(ctx->label) != labels_ns(label)))
- /* if switching to unconfined or a different label namespace
+ if (ctx->nnp && label_is_stale(ctx->nnp)) {
+ struct aa_label *tmp = ctx->nnp;
+
+ ctx->nnp = aa_get_newest_label(tmp);
+ aa_put_label(tmp);
+ }
+ if (unconfined(label) || (labels_ns(old) != labels_ns(label)))
+ /*
+ * if switching to unconfined or a different label namespace
* clear out context state
*/
- aa_clear_task_ctx_trans(ctx);
+ aa_clear_task_ctx_trans(task_ctx(current));
/*
- * be careful switching ctx->profile, when racing replacement it
- * is possible that ctx->profile->proxy->profile is the reference
- * keeping @profile valid, so make sure to get its reference before
- * dropping the reference on ctx->profile
+ * be careful switching cred label, when racing replacement it
+ * is possible that the cred label's proxy->label is the reference
+ * keeping @label valid, so make sure to get its reference before
+ * dropping the reference on the cred's label
*/
aa_get_label(label);
- aa_put_label(ctx->label);
- ctx->label = label;
+ aa_put_label(cred_label(new));
+ cred_label(new) = label;
commit_creds(new);
return 0;
}
+
/**
* aa_set_current_onexec - set the tasks change_profile to happen onexec
* @label: system label to set at exec (MAYBE NULL to clear value)
@@ -136,18 +96,13 @@ int aa_replace_current_label(struct aa_label *label)
*/
int aa_set_current_onexec(struct aa_label *label, bool stack)
{
- struct aa_task_ctx *ctx;
- struct cred *new = prepare_creds();
- if (!new)
- return -ENOMEM;
+ struct aa_task_ctx *ctx = task_ctx(current);
- ctx = cred_ctx(new);
aa_get_label(label);
- aa_clear_task_ctx_trans(ctx);
+ aa_put_label(ctx->onexec);
ctx->onexec = label;
ctx->token = stack;
- commit_creds(new);
return 0;
}
@@ -163,25 +118,27 @@ int aa_set_current_onexec(struct aa_label *label, bool stack)
*/
int aa_set_current_hat(struct aa_label *label, u64 token)
{
- struct aa_task_ctx *ctx;
- struct cred *new = prepare_creds();
+ struct aa_task_ctx *ctx = task_ctx(current);
+ struct cred *new;
+
+ new = prepare_creds();
if (!new)
return -ENOMEM;
AA_BUG(!label);
- ctx = cred_ctx(new);
if (!ctx->previous) {
/* transfer refcount */
- ctx->previous = ctx->label;
+ ctx->previous = cred_label(new);
ctx->token = token;
} else if (ctx->token == token) {
- aa_put_label(ctx->label);
+ aa_put_label(cred_label(new));
} else {
/* previous_profile && ctx->token != token */
abort_creds(new);
return -EACCES;
}
- ctx->label = aa_get_newest_label(label);
+
+ cred_label(new) = aa_get_newest_label(label);
/* clear exec on switching context */
aa_put_label(ctx->onexec);
ctx->onexec = NULL;
@@ -201,28 +158,26 @@ int aa_set_current_hat(struct aa_label *label, u64 token)
*/
int aa_restore_previous_label(u64 token)
{
- struct aa_task_ctx *ctx;
- struct cred *new = prepare_creds();
- if (!new)
- return -ENOMEM;
+ struct aa_task_ctx *ctx = task_ctx(current);
+ struct cred *new;
- ctx = cred_ctx(new);
- if (ctx->token != token) {
- abort_creds(new);
+ if (ctx->token != token)
return -EACCES;
- }
/* ignore restores when there is no saved label */
- if (!ctx->previous) {
- abort_creds(new);
+ if (!ctx->previous)
return 0;
- }
- aa_put_label(ctx->label);
- ctx->label = aa_get_newest_label(ctx->previous);
- AA_BUG(!ctx->label);
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ aa_put_label(cred_label(new));
+ cred_label(new) = aa_get_newest_label(ctx->previous);
+ AA_BUG(!cred_label(new));
/* clear exec && prev information when restoring to previous context */
aa_clear_task_ctx_trans(ctx);
commit_creds(new);
+
return 0;
}
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 7720e3102bcc..7a111a1b5836 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -987,7 +987,7 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
#define azx_del_card_list(chip) /* NOP */
#endif /* CONFIG_PM */
-#if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
+#ifdef CONFIG_PM_SLEEP
/*
* power management
*/
@@ -1068,9 +1068,7 @@ static int azx_resume(struct device *dev)
trace_azx_resume(chip);
return 0;
}
-#endif /* CONFIG_PM_SLEEP || SUPPORT_VGA_SWITCHEROO */
-#ifdef CONFIG_PM_SLEEP
/* put codec down to D3 at hibernation for Intel SKL+;
* otherwise BIOS may still access the codec and screw up the driver
*/
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index f41079da38c5..d554c11e01ff 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -316,6 +316,7 @@
#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */
#define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */
#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
+#define X86_FEATURE_TME (16*32+13) /* Intel Total Memory Encryption */
#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
@@ -328,6 +329,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
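The two new defines follow the same numbering scheme as the rest of this header: each feature index is (word * 32 + bit), where the word selects one of the 32-bit CPUID feature words (word 16 and word 18 here, per the section comments) and the bit is the position inside it. A minimal standalone sketch of that arithmetic, using only the X86_FEATURE_TME value added above; nothing else in it is part of the kernel headers:

/*
 * Sketch: decompose the (word*32 + bit) feature index back into its
 * CPUID feature word and bit number.  X86_FEATURE_TME is copied from
 * the hunk above purely for illustration.
 */
#include <stdio.h>

#define X86_FEATURE_TME (16*32 + 13)	/* Intel Total Memory Encryption */

int main(void)
{
	unsigned int word = X86_FEATURE_TME / 32;	/* feature word: 16 */
	unsigned int bit  = X86_FEATURE_TME % 32;	/* bit in word:  13 */

	printf("X86_FEATURE_TME -> word %u, bit %u\n", word, bit);
	return 0;
}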
diff --git a/tools/build/Build.include b/tools/build/Build.include
index 418871d02ebf..a4bbb984941d 100644
--- a/tools/build/Build.include
+++ b/tools/build/Build.include
@@ -12,6 +12,7 @@
# Convenient variables
comma := ,
squote := '
+pound := \#
###
# Name of target with a '.' as filename prefix. foo/bar.o => foo/.bar.o
@@ -43,11 +44,11 @@ echo-cmd = $(if $($(quiet)cmd_$(1)),\
###
# Replace >$< with >$$< to preserve $ when reloading the .cmd file
# (needed for make)
-# Replace >#< with >\#< to avoid starting a comment in the .cmd file
+# Replace >#< with >$(pound)< to avoid starting a comment in the .cmd file
# (needed for make)
# Replace >'< with >'\''< to be able to enclose the whole string in '...'
# (needed for the shell)
-make-cmd = $(call escsq,$(subst \#,\\\#,$(subst $$,$$$$,$(cmd_$(1)))))
+make-cmd = $(call escsq,$(subst $(pound),$$(pound),$(subst $$,$$$$,$(cmd_$(1)))))
###
# Find any prerequisites that is newer than target or that does not exist.
diff --git a/tools/include/tools/config.h b/tools/include/tools/config.h
new file mode 100644
index 000000000000..08ade7df8132
--- /dev/null
+++ b/tools/include/tools/config.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_CONFIG_H
+#define _TOOLS_CONFIG_H
+
+/* Subset of include/linux/kconfig.h */
+
+#define __ARG_PLACEHOLDER_1 0,
+#define __take_second_arg(__ignored, val, ...) val
+
+/*
+ * Helper macros to use CONFIG_ options in C/CPP expressions. Note that
+ * these only work with boolean and tristate options.
+ */
+
+/*
+ * Getting something that works in C and CPP for an arg that may or may
+ * not be defined is tricky. Here, if we have "#define CONFIG_BOOGER 1"
+ * we match on the placeholder define, insert the "0," for arg1 and generate
+ * the triplet (0, 1, 0). Then the last step cherry picks the 2nd arg (a one).
+ * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
+ * the last step cherry picks the 2nd arg, we get a zero.
+ */
+#define __is_defined(x) ___is_defined(x)
+#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
+#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
+
+/*
+ * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0
+ * otherwise. For boolean options, this is equivalent to
+ * IS_ENABLED(CONFIG_FOO).
+ */
+#define IS_BUILTIN(option) __is_defined(option)
+
+#endif /* _TOOLS_CONFIG_H */
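The point of this new header is to let tool code test compile-time defines with the same IS_BUILTIN() idiom the kernel uses; the new perf builtin-version.c later in this diff does exactly that through its STATUS() macro on the HAVE_*_SUPPORT defines added in Makefile.config. A minimal sketch of the expansion, assuming tools/include is on the include path; CONFIG_FOO and CONFIG_BAR are hypothetical options used only for illustration:

/*
 * Sketch of how IS_BUILTIN() resolves at preprocessing time.
 * In the real build the defines come from feature checks in
 * Makefile.config (-DHAVE_..._SUPPORT), not from this file.
 */
#include <stdio.h>
#include <tools/config.h>	/* assumes tools/include is on the include path */

#define CONFIG_FOO 1		/* as if -DCONFIG_FOO=1 had been passed */
/* CONFIG_BAR deliberately left undefined */

int main(void)
{
	printf("FOO: %d\n", IS_BUILTIN(CONFIG_FOO));	/* prints 1 */
	printf("BAR: %d\n", IS_BUILTIN(CONFIG_BAR));	/* prints 0 */
	return 0;
}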
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 536ee4febd74..7f5634ce8e88 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -318,6 +318,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_PERF_OPEN 0x36
#define DRM_I915_PERF_ADD_CONFIG 0x37
#define DRM_I915_PERF_REMOVE_CONFIG 0x38
+#define DRM_I915_QUERY 0x39
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -375,6 +376,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
+#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -1358,7 +1360,9 @@ struct drm_intel_overlay_attrs {
* active on a given plane.
*/
-#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */
+#define I915_SET_COLORKEY_NONE (1<<0) /* Deprecated. Instead set
+ * flags==0 to disable colorkeying.
+ */
#define I915_SET_COLORKEY_DESTINATION (1<<1)
#define I915_SET_COLORKEY_SOURCE (1<<2)
struct drm_intel_sprite_colorkey {
@@ -1604,15 +1608,115 @@ struct drm_i915_perf_oa_config {
__u32 n_flex_regs;
/*
- * These fields are pointers to tuples of u32 values (register
- * address, value). For example the expected length of the buffer
- * pointed by mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
+ * These fields are pointers to tuples of u32 values (register address,
+ * value). For example the expected length of the buffer pointed by
+ * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
*/
__u64 mux_regs_ptr;
__u64 boolean_regs_ptr;
__u64 flex_regs_ptr;
};
+struct drm_i915_query_item {
+ __u64 query_id;
+#define DRM_I915_QUERY_TOPOLOGY_INFO 1
+
+ /*
+ * When set to zero by userspace, this is filled with the size of the
+ * data to be written at the data_ptr pointer. The kernel sets this
+	 * to a negative value to signal an error on a particular query
+ * item.
+ */
+ __s32 length;
+
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 flags;
+
+ /*
+	 * Data will be written at the location pointed to by data_ptr when the
+ * value of length matches the length of the data to be written by the
+ * kernel.
+ */
+ __u64 data_ptr;
+};
+
+struct drm_i915_query {
+ __u32 num_items;
+
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 flags;
+
+ /*
+ * This points to an array of num_items drm_i915_query_item structures.
+ */
+ __u64 items_ptr;
+};
+
+/*
+ * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO :
+ *
+ * data: contains the 3 pieces of information :
+ *
+ * - the slice mask with one bit per slice telling whether a slice is
+ * available. The availability of slice X can be queried with the following
+ * formula :
+ *
+ * (data[X / 8] >> (X % 8)) & 1
+ *
+ * - the subslice mask for each slice with one bit per subslice telling
+ * whether a subslice is available. The availability of subslice Y in slice
+ * X can be queried with the following formula :
+ *
+ * (data[subslice_offset +
+ * X * subslice_stride +
+ * Y / 8] >> (Y % 8)) & 1
+ *
+ * - the EU mask for each subslice in each slice with one bit per EU telling
+ * whether an EU is available. The availability of EU Z in subslice Y in
+ * slice X can be queried with the following formula :
+ *
+ * (data[eu_offset +
+ * (X * max_subslices + Y) * eu_stride +
+ * Z / 8] >> (Z % 8)) & 1
+ */
+struct drm_i915_query_topology_info {
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u16 flags;
+
+ __u16 max_slices;
+ __u16 max_subslices;
+ __u16 max_eus_per_subslice;
+
+ /*
+ * Offset in data[] at which the subslice masks are stored.
+ */
+ __u16 subslice_offset;
+
+ /*
+ * Stride at which each of the subslice masks for each slice are
+ * stored.
+ */
+ __u16 subslice_stride;
+
+ /*
+ * Offset in data[] at which the EU masks are stored.
+ */
+ __u16 eu_offset;
+
+ /*
+ * Stride at which each of the EU masks for each subslice are stored.
+ */
+ __u16 eu_stride;
+
+ __u8 data[];
+};
+
#if defined(__cplusplus)
}
#endif
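To make the intent of the new query uapi concrete, here is a minimal userspace sketch (not part of the patch) that drives DRM_IOCTL_I915_QUERY for DRM_I915_QUERY_TOPOLOGY_INFO using the two-pass length convention and the slice-mask formula documented in the comment above. It assumes an already-open i915 DRM file descriptor and that <drm/i915_drm.h> provides the structures added here; error handling is trimmed to the minimum:

/*
 * Sketch: pass 1 (length == 0) asks the kernel for the buffer size,
 * pass 2 fills the buffer at data_ptr.
 */
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int slice_available(const struct drm_i915_query_topology_info *info, int x)
{
	/* slice-mask formula from the comment above */
	return (info->data[x / 8] >> (x % 8)) & 1;
}

static int count_slices(int fd)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
		.length = 0,		/* pass 1: kernel fills in the size */
	};
	struct drm_i915_query query = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};
	struct drm_i915_query_topology_info *info;
	int x, slices = 0;

	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
		return -1;

	info = calloc(1, item.length);
	if (!info)
		return -1;

	item.data_ptr = (uintptr_t)info;
	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query)) {	/* pass 2: fill data */
		free(info);
		return -1;
	}

	for (x = 0; x < info->max_slices; x++)
		slices += slice_available(info, x);

	free(info);
	return slices;
}

The subslice and EU masks can be walked the same way by applying the subslice_offset/subslice_stride and eu_offset/eu_stride formulas spelled out in the comment.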
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index e6acc281dd37..8ae824dbfca3 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -35,7 +35,7 @@ CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
LDFLAGS += -lelf $(LIBSUBCMD)
# Allow old libelf to be used:
-elfshdr := $(shell echo '\#include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
+elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
CFLAGS += $(if $(elfshdr),,-DLIBELF_USE_DEPRECATED)
AWK = awk
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index e1a660e60849..917e36fde6d8 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -80,6 +80,7 @@ OPTIONS
- comm: command (name) of the task which can be read via /proc/<pid>/comm
- pid: command and tid of the task
- dso: name of library or module executed at the time of sample
+ - dso_size: size of library or module executed at the time of sample
- symbol: name of function executed at the time of sample
- symbol_size: size of function executed at the time of sample
- parent: name of function matched to the parent regex filter. Unmatched
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 5a7035c5c523..115db9e06ecd 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -117,6 +117,9 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
--sched::
Accrue thread runtime and provide a summary at the end of the session.
+--failure::
+ Show only syscalls that failed, i.e. that returned < 0.
+
-i::
--input::
Process events from a given perf data file.
diff --git a/tools/perf/Documentation/perf-version.txt b/tools/perf/Documentation/perf-version.txt
new file mode 100644
index 000000000000..e207b7cfca26
--- /dev/null
+++ b/tools/perf/Documentation/perf-version.txt
@@ -0,0 +1,24 @@
+perf-version(1)
+===============
+
+NAME
+----
+perf-version - display the version of perf binary
+
+SYNOPSIS
+--------
+'perf version' [--build-options]
+
+DESCRIPTION
+-----------
+With no options given, 'perf version' prints the perf version
+on the standard output.
+
+If the option '--build-options' is given, then the status of
+compiled-in libraries is printed on the standard output.
+
+OPTIONS
+-------
+--build-options::
+ Prints the status of compiled-in libraries on the
+ standard output.
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 98ff73648b51..c7abd83a8e19 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -346,12 +346,16 @@ else
ifneq ($(feature-dwarf_getlocations), 1)
msg := $(warning Old libdw.h, finding variables at given 'perf probe' point will not work, install elfutils-devel/libdw-dev >= 0.157);
else
- CFLAGS += -DHAVE_DWARF_GETLOCATIONS
+ CFLAGS += -DHAVE_DWARF_GETLOCATIONS_SUPPORT
endif # dwarf_getlocations
endif # Dwarf support
endif # libelf support
endif # NO_LIBELF
+ifeq ($(feature-glibc), 1)
+ CFLAGS += -DHAVE_GLIBC_SUPPORT
+endif
+
ifdef NO_DWARF
NO_LIBDW_DWARF_UNWIND := 1
endif
@@ -635,6 +639,7 @@ else
else
LDFLAGS += $(PERL_EMBED_LDFLAGS)
EXTLIBS += $(PERL_EMBED_LIBADD)
+ CFLAGS += -DHAVE_LIBPERL_SUPPORT
$(call detected,CONFIG_LIBPERL)
endif
endif
@@ -671,6 +676,7 @@ else
LDFLAGS += $(PYTHON_EMBED_LDFLAGS)
EXTLIBS += $(PYTHON_EMBED_LIBADD)
LANG_BINDINGS += $(obj-perf)python/perf.so
+ CFLAGS += -DHAVE_LIBPYTHON_SUPPORT
$(call detected,CONFIG_LIBPYTHON)
endif
endif
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index f7517e1b73f8..83e453de36f8 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -364,7 +364,8 @@ LIBS = -Wl,--whole-archive $(PERFLIBS) $(EXTRA_PERFLIBS) -Wl,--no-whole-archive
ifeq ($(USE_CLANG), 1)
CLANGLIBS_LIST = AST Basic CodeGen Driver Frontend Lex Tooling Edit Sema Analysis Parse Serialization
- LIBCLANG = $(foreach l,$(CLANGLIBS_LIST),$(wildcard $(shell $(LLVM_CONFIG) --libdir)/libclang$(l).a))
+ CLANGLIBS_NOEXT_LIST = $(foreach l,$(CLANGLIBS_LIST),$(shell $(LLVM_CONFIG) --libdir)/libclang$(l))
+ LIBCLANG = $(foreach l,$(CLANGLIBS_NOEXT_LIST),$(wildcard $(l).a $(l).so))
LIBS += -Wl,--start-group $(LIBCLANG) -Wl,--end-group
endif
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 87b95c9410b4..3ad17ee89403 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -112,6 +112,7 @@ struct trace {
bool multiple_threads;
bool summary;
bool summary_only;
+ bool failure_only;
bool show_comm;
bool print_sample;
bool show_tool_stats;
@@ -1565,7 +1566,7 @@ static int trace__printf_interrupted_entry(struct trace *trace)
struct thread_trace *ttrace;
size_t printed;
- if (trace->current == NULL)
+ if (trace->failure_only || trace->current == NULL)
return 0;
ttrace = thread__priv(trace->current);
@@ -1638,7 +1639,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
args, trace, thread);
if (sc->is_exit) {
- if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) {
+ if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
fprintf(trace->output, "%-70s)\n", ttrace->entry_str);
}
@@ -1742,7 +1743,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
}
}
- if (trace->summary_only)
+ if (trace->summary_only || (ret >= 0 && trace->failure_only))
goto out;
trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
@@ -1961,7 +1962,7 @@ static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
trace->output);
}
- fprintf(trace->output, ")\n");
+ fprintf(trace->output, "\n");
if (callchain_ret > 0)
trace__fprintf_callchain(trace, sample);
@@ -3087,6 +3088,8 @@ int cmd_trace(int argc, const char **argv)
OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_BOOLEAN('T', "time", &trace.full_time,
"Show full timestamp, not time relative to first start"),
+ OPT_BOOLEAN(0, "failure", &trace.failure_only,
+ "Show only syscalls that failed"),
OPT_BOOLEAN('s', "summary", &trace.summary_only,
"Show only syscall summary with statistics"),
OPT_BOOLEAN('S', "with-summary", &trace.summary,
diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c
index 37019c5d675f..2abe3910d6b6 100644
--- a/tools/perf/builtin-version.c
+++ b/tools/perf/builtin-version.c
@@ -1,11 +1,91 @@
// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
#include "perf.h"
+#include "color.h"
#include <linux/compiler.h>
+#include <tools/config.h>
#include <stdio.h>
+#include <string.h>
+#include <subcmd/parse-options.h>
-int cmd_version(int argc __maybe_unused, const char **argv __maybe_unused)
+int version_verbose;
+
+struct version {
+ bool build_options;
+};
+
+static struct version version;
+
+static struct option version_options[] = {
+ OPT_BOOLEAN(0, "build-options", &version.build_options,
+ "display the build options"),
+};
+
+static const char * const version_usage[] = {
+ "perf version [<options>]",
+ NULL
+};
+
+static void on_off_print(const char *status)
+{
+ printf("[ ");
+
+ if (!strcmp(status, "OFF"))
+ color_fprintf(stdout, PERF_COLOR_RED, "%-3s", status);
+ else
+ color_fprintf(stdout, PERF_COLOR_GREEN, "%-3s", status);
+
+ printf(" ]");
+}
+
+static void status_print(const char *name, const char *macro,
+ const char *status)
{
+ printf("%22s: ", name);
+ on_off_print(status);
+ printf(" # %s\n", macro);
+}
+
+#define STATUS(__d, __m) \
+do { \
+ if (IS_BUILTIN(__d)) \
+ status_print(#__m, #__d, "on"); \
+ else \
+ status_print(#__m, #__d, "OFF"); \
+} while (0)
+
+static void library_status(void)
+{
+ STATUS(HAVE_DWARF_SUPPORT, dwarf);
+ STATUS(HAVE_DWARF_GETLOCATIONS_SUPPORT, dwarf_getlocations);
+ STATUS(HAVE_GLIBC_SUPPORT, glibc);
+ STATUS(HAVE_GTK2_SUPPORT, gtk2);
+ STATUS(HAVE_LIBAUDIT_SUPPORT, libaudit);
+ STATUS(HAVE_LIBBFD_SUPPORT, libbfd);
+ STATUS(HAVE_LIBELF_SUPPORT, libelf);
+ STATUS(HAVE_LIBNUMA_SUPPORT, libnuma);
+ STATUS(HAVE_LIBNUMA_SUPPORT, numa_num_possible_cpus);
+ STATUS(HAVE_LIBPERL_SUPPORT, libperl);
+ STATUS(HAVE_LIBPYTHON_SUPPORT, libpython);
+ STATUS(HAVE_SLANG_SUPPORT, libslang);
+ STATUS(HAVE_LIBCRYPTO_SUPPORT, libcrypto);
+ STATUS(HAVE_LIBUNWIND_SUPPORT, libunwind);
+ STATUS(HAVE_DWARF_SUPPORT, libdw-dwarf-unwind);
+ STATUS(HAVE_ZLIB_SUPPORT, zlib);
+ STATUS(HAVE_LZMA_SUPPORT, lzma);
+ STATUS(HAVE_AUXTRACE_SUPPORT, get_cpuid);
+ STATUS(HAVE_LIBBPF_SUPPORT, bpf);
+}
+
+int cmd_version(int argc, const char **argv)
+{
+ argc = parse_options(argc, argv, version_options, version_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
printf("perf version %s\n", perf_version_string);
+
+ if (version.build_options || version_verbose == 1)
+ library_status();
+
return 0;
}
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 1b3fc8ec0fa2..1659029d03fc 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -190,6 +190,12 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
break;
}
+ if (!strcmp(cmd, "-vv")) {
+ (*argv)[0] = "version";
+ version_verbose = 1;
+ break;
+ }
+
/*
* Check remaining flags.
*/
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 8fec1abd0f1f..a1a97956136f 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -84,6 +84,7 @@ struct record_opts {
struct option;
extern const char * const *record_usage;
extern struct option *record_options;
+extern int version_verbose;
int record__parse_freq(const struct option *opt, const char *str, int unset);
#endif
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index 9f6ce29b83b4..4f75561424ed 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -45,11 +45,16 @@ void ui_browser__set_percent_color(struct ui_browser *browser,
ui_browser__set_color(browser, color);
}
-void ui_browser__gotorc(struct ui_browser *browser, int y, int x)
+void ui_browser__gotorc_title(struct ui_browser *browser, int y, int x)
{
SLsmg_gotorc(browser->y + y, browser->x + x);
}
+void ui_browser__gotorc(struct ui_browser *browser, int y, int x)
+{
+ SLsmg_gotorc(browser->y + y + browser->extra_title_lines, browser->x + x);
+}
+
void ui_browser__write_nstring(struct ui_browser *browser __maybe_unused, const char *msg,
unsigned int width)
{
@@ -191,6 +196,7 @@ void ui_browser__refresh_dimensions(struct ui_browser *browser)
{
browser->width = SLtt_Screen_Cols - 1;
browser->height = browser->rows = SLtt_Screen_Rows - 2;
+ browser->rows -= browser->extra_title_lines;
browser->y = 1;
browser->x = 0;
}
@@ -337,8 +343,8 @@ static int __ui_browser__refresh(struct ui_browser *browser)
else
width += 1;
- SLsmg_fill_region(browser->y + row, browser->x,
- browser->height - row, width, ' ');
+ SLsmg_fill_region(browser->y + row + browser->extra_title_lines, browser->x,
+ browser->rows - row, width, ' ');
return 0;
}
diff --git a/tools/perf/ui/browser.h b/tools/perf/ui/browser.h
index 70057178ee34..aa5932e1d62e 100644
--- a/tools/perf/ui/browser.h
+++ b/tools/perf/ui/browser.h
@@ -17,6 +17,7 @@ struct ui_browser {
u64 index, top_idx;
void *top, *entries;
u16 y, x, width, height, rows, columns, horiz_scroll;
+ u8 extra_title_lines;
int current_color;
void *priv;
const char *title;
@@ -38,6 +39,7 @@ bool ui_browser__is_current_entry(struct ui_browser *browser, unsigned row);
void ui_browser__refresh_dimensions(struct ui_browser *browser);
void ui_browser__reset_index(struct ui_browser *browser);
+void ui_browser__gotorc_title(struct ui_browser *browser, int y, int x);
void ui_browser__gotorc(struct ui_browser *browser, int y, int x);
void ui_browser__write_nstring(struct ui_browser *browser, const char *msg,
unsigned int width);
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index c02fb437ac8e..12c099a87f8b 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -218,7 +218,7 @@ static unsigned int annotate_browser__refresh(struct ui_browser *browser)
annotate_browser__draw_current_jump(browser);
ui_browser__set_color(browser, HE_COLORSET_NORMAL);
- __ui_browser__vline(browser, pcnt_width, 0, browser->height - 1);
+ __ui_browser__vline(browser, pcnt_width, 0, browser->rows - 1);
return ret;
}
@@ -592,21 +592,40 @@ bool annotate_browser__continue_search_reverse(struct annotate_browser *browser,
return __annotate_browser__search_reverse(browser);
}
+static int annotate_browser__show(struct ui_browser *browser, char *title, const char *help)
+{
+ struct map_symbol *ms = browser->priv;
+ struct symbol *sym = ms->sym;
+ char symbol_dso[SYM_TITLE_MAX_SIZE];
+
+ if (ui_browser__show(browser, title, help) < 0)
+ return -1;
+
+ sym_title(sym, ms->map, symbol_dso, sizeof(symbol_dso));
+
+ ui_browser__gotorc_title(browser, 0, 0);
+ ui_browser__set_color(browser, HE_COLORSET_ROOT);
+ ui_browser__write_nstring(browser, symbol_dso, browser->width + 1);
+ return 0;
+}
+
static int annotate_browser__run(struct annotate_browser *browser,
struct perf_evsel *evsel,
struct hist_browser_timer *hbt)
{
struct rb_node *nd = NULL;
+ struct hists *hists = evsel__hists(evsel);
struct map_symbol *ms = browser->b.priv;
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(ms->sym);
const char *help = "Press 'h' for help on key bindings";
int delay_secs = hbt ? hbt->refresh : 0;
+ char title[256];
int key;
- char title[SYM_TITLE_MAX_SIZE];
- sym_title(sym, ms->map, title, sizeof(title));
- if (ui_browser__show(&browser->b, title, help) < 0)
+ annotation__scnprintf_samples_period(notes, title, sizeof(title), evsel);
+
+ if (annotate_browser__show(&browser->b, title, help) < 0)
return -1;
annotate_browser__calc_percent(browser, evsel);
@@ -637,8 +656,11 @@ static int annotate_browser__run(struct annotate_browser *browser,
if (hbt)
hbt->timer(hbt->arg);
- if (delay_secs != 0)
+ if (delay_secs != 0) {
symbol__annotate_decay_histogram(sym, evsel->idx);
+ hists__scnprintf_title(hists, title, sizeof(title));
+ annotate_browser__show(&browser->b, title, help);
+ }
continue;
case K_TAB:
if (nd != NULL) {
@@ -812,6 +834,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
.seek = ui_browser__list_head_seek,
.write = annotate_browser__write,
.filter = disasm_line__filter,
+ .extra_title_lines = 1, /* for hists__scnprintf_title() */
.priv = &ms,
.use_navkeypressed = true,
},
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 8b4e82548f8e..0eec06c105c6 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -32,8 +32,7 @@
extern void hist_browser__init_hpp(void);
-static int perf_evsel_browser_title(struct hist_browser *browser,
- char *bf, size_t size);
+static int hists_browser__scnprintf_title(struct hist_browser *browser, char *bf, size_t size);
static void hist_browser__update_nr_entries(struct hist_browser *hb);
static struct rb_node *hists__filter_entries(struct rb_node *nd,
@@ -62,6 +61,15 @@ static int hist_browser__get_folding(struct hist_browser *browser)
return unfolded_rows;
}
+static void hist_browser__set_title_space(struct hist_browser *hb)
+{
+ struct ui_browser *browser = &hb->b;
+ struct hists *hists = hb->hists;
+ struct perf_hpp_list *hpp_list = hists->hpp_list;
+
+ browser->extra_title_lines = hb->show_headers ? hpp_list->nr_header_lines : 0;
+}
+
static u32 hist_browser__nr_entries(struct hist_browser *hb)
{
u32 nr_entries;
@@ -82,10 +90,16 @@ static void hist_browser__update_rows(struct hist_browser *hb)
struct ui_browser *browser = &hb->b;
struct hists *hists = hb->hists;
struct perf_hpp_list *hpp_list = hists->hpp_list;
- u16 header_offset, index_row;
+ u16 index_row;
- header_offset = hb->show_headers ? hpp_list->nr_header_lines : 0;
- browser->rows = browser->height - header_offset;
+ if (!hb->show_headers) {
+ browser->rows += browser->extra_title_lines;
+ browser->extra_title_lines = 0;
+ return;
+ }
+
+ browser->extra_title_lines = hpp_list->nr_header_lines;
+ browser->rows -= browser->extra_title_lines;
/*
* Verify if we were at the last line and that line isn't
	 * visible because we now show the header line(s).
@@ -108,17 +122,6 @@ static void hist_browser__refresh_dimensions(struct ui_browser *browser)
* changeset.
*/
ui_browser__refresh_dimensions(browser);
- hist_browser__update_rows(hb);
-}
-
-static void hist_browser__gotorc(struct hist_browser *browser, int row, int column)
-{
- struct hists *hists = browser->hists;
- struct perf_hpp_list *hpp_list = hists->hpp_list;
- u16 header_offset;
-
- header_offset = browser->show_headers ? hpp_list->nr_header_lines : 0;
- ui_browser__gotorc(&browser->b, row + header_offset, column);
}
static void hist_browser__reset(struct hist_browser *browser)
@@ -656,9 +659,10 @@ int hist_browser__run(struct hist_browser *browser, const char *help,
struct hist_entry *h = rb_entry(browser->b.top,
struct hist_entry, rb_node);
ui_helpline__pop();
- ui_helpline__fpush("%d: nr_ent=(%d,%d), rows=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d",
+ ui_helpline__fpush("%d: nr_ent=(%d,%d), etl: %d, rows=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d",
seq++, browser->b.nr_entries,
browser->hists->nr_entries,
+ browser->b.extra_title_lines,
browser->b.rows,
browser->b.index,
browser->b.top_idx,
@@ -733,7 +737,7 @@ static void hist_browser__show_callchain_entry(struct hist_browser *browser,
}
ui_browser__set_color(&browser->b, color);
- hist_browser__gotorc(browser, row, 0);
+ ui_browser__gotorc(&browser->b, row, 0);
ui_browser__write_nstring(&browser->b, " ", offset);
ui_browser__printf(&browser->b, "%c", folded_sign);
ui_browser__write_graph(&browser->b, show_annotated ? SLSMG_RARROW_CHAR : ' ');
@@ -1249,7 +1253,7 @@ static int hist_browser__show_entry(struct hist_browser *browser,
};
int column = 0;
- hist_browser__gotorc(browser, row, 0);
+ ui_browser__gotorc(&browser->b, row, 0);
hists__for_each_format(browser->hists, fmt) {
char s[2048];
@@ -1358,7 +1362,7 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser,
goto show_callchain;
}
- hist_browser__gotorc(browser, row, 0);
+ ui_browser__gotorc(&browser->b, row, 0);
if (current_entry && browser->b.navkeypressed)
ui_browser__set_color(&browser->b, HE_COLORSET_SELECTED);
@@ -1507,7 +1511,7 @@ static int hist_browser__show_no_entry(struct hist_browser *browser,
browser->selection = NULL;
}
- hist_browser__gotorc(browser, row, 0);
+ ui_browser__gotorc(&browser->b, row, 0);
if (current_entry && browser->b.navkeypressed)
ui_browser__set_color(&browser->b, HE_COLORSET_SELECTED);
@@ -1713,7 +1717,7 @@ static void hists_browser__headers(struct hist_browser *browser)
hists_browser__scnprintf_headers(browser, headers,
sizeof(headers), line);
- ui_browser__gotorc(&browser->b, line, 0);
+ ui_browser__gotorc_title(&browser->b, line, 0);
ui_browser__set_color(&browser->b, HE_COLORSET_ROOT);
ui_browser__write_nstring(&browser->b, headers, browser->b.width + 1);
}
@@ -1740,17 +1744,11 @@ static void ui_browser__hists_init_top(struct ui_browser *browser)
static unsigned int hist_browser__refresh(struct ui_browser *browser)
{
unsigned row = 0;
- u16 header_offset = 0;
struct rb_node *nd;
struct hist_browser *hb = container_of(browser, struct hist_browser, b);
- struct hists *hists = hb->hists;
-
- if (hb->show_headers) {
- struct perf_hpp_list *hpp_list = hists->hpp_list;
+ if (hb->show_headers)
hist_browser__show_headers(hb);
- header_offset = hpp_list->nr_header_lines;
- }
ui_browser__hists_init_top(browser);
hb->he_selection = NULL;
@@ -1788,7 +1786,7 @@ static unsigned int hist_browser__refresh(struct ui_browser *browser)
break;
}
- return row + header_offset;
+ return row;
}
static struct rb_node *hists__filter_entries(struct rb_node *nd,
@@ -2143,6 +2141,7 @@ void hist_browser__init(struct hist_browser *browser,
browser->b.seek = ui_browser__hists_seek;
browser->b.use_navkeypressed = true;
browser->show_headers = symbol_conf.show_hist_headers;
+ hist_browser__set_title_space(browser);
if (symbol_conf.report_hierarchy) {
struct perf_hpp_list_node *fmt_node;
@@ -2183,7 +2182,7 @@ perf_evsel_browser__new(struct perf_evsel *evsel,
if (browser) {
browser->hbt = hbt;
browser->env = env;
- browser->title = perf_evsel_browser_title;
+ browser->title = hists_browser__scnprintf_title;
}
return browser;
}
@@ -2209,84 +2208,11 @@ static inline bool is_report_browser(void *timer)
return timer == NULL;
}
-static int perf_evsel_browser_title(struct hist_browser *browser,
- char *bf, size_t size)
+static int hists_browser__scnprintf_title(struct hist_browser *browser, char *bf, size_t size)
{
struct hist_browser_timer *hbt = browser->hbt;
- struct hists *hists = browser->hists;
- char unit;
- int printed;
- const struct dso *dso = hists->dso_filter;
- const struct thread *thread = hists->thread_filter;
- int socket_id = hists->socket_filter;
- unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
- u64 nr_events = hists->stats.total_period;
- struct perf_evsel *evsel = hists_to_evsel(hists);
- const char *ev_name = perf_evsel__name(evsel);
- char buf[512], sample_freq_str[64] = "";
- size_t buflen = sizeof(buf);
- char ref[30] = " show reference callgraph, ";
- bool enable_ref = false;
+ int printed = __hists__scnprintf_title(browser->hists, bf, size, !is_report_browser(hbt));
- if (symbol_conf.filter_relative) {
- nr_samples = hists->stats.nr_non_filtered_samples;
- nr_events = hists->stats.total_non_filtered_period;
- }
-
- if (perf_evsel__is_group_event(evsel)) {
- struct perf_evsel *pos;
-
- perf_evsel__group_desc(evsel, buf, buflen);
- ev_name = buf;
-
- for_each_group_member(pos, evsel) {
- struct hists *pos_hists = evsel__hists(pos);
-
- if (symbol_conf.filter_relative) {
- nr_samples += pos_hists->stats.nr_non_filtered_samples;
- nr_events += pos_hists->stats.total_non_filtered_period;
- } else {
- nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
- nr_events += pos_hists->stats.total_period;
- }
- }
- }
-
- if (symbol_conf.show_ref_callgraph &&
- strstr(ev_name, "call-graph=no"))
- enable_ref = true;
-
- if (!is_report_browser(hbt))
- scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->attr.sample_freq);
-
- nr_samples = convert_unit(nr_samples, &unit);
- printed = scnprintf(bf, size,
- "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
- nr_samples, unit, evsel->nr_members > 1 ? "s" : "",
- ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
-
-
- if (hists->uid_filter_str)
- printed += snprintf(bf + printed, size - printed,
- ", UID: %s", hists->uid_filter_str);
- if (thread) {
- if (hists__has(hists, thread)) {
- printed += scnprintf(bf + printed, size - printed,
- ", Thread: %s(%d)",
- (thread->comm_set ? thread__comm_str(thread) : ""),
- thread->tid);
- } else {
- printed += scnprintf(bf + printed, size - printed,
- ", Thread: %s",
- (thread->comm_set ? thread__comm_str(thread) : ""));
- }
- }
- if (dso)
- printed += scnprintf(bf + printed, size - printed,
- ", DSO: %s", dso->short_name);
- if (socket_id > -1)
- printed += scnprintf(bf + printed, size - printed,
- ", Processor Socket: %d", socket_id);
if (!is_report_browser(hbt)) {
struct perf_top *top = hbt->arg;
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 3a428d7c59b9..fbad8dfbb186 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -17,6 +17,7 @@
#include "config.h"
#include "cache.h"
#include "symbol.h"
+#include "units.h"
#include "debug.h"
#include "annotate.h"
#include "evsel.h"
@@ -2324,7 +2325,7 @@ int symbol__tty_annotate2(struct symbol *sym, struct map *map,
struct dso *dso = map->dso;
struct rb_root source_line = RB_ROOT;
struct annotation_options opts = annotation__default_options;
- const char *ev_name = perf_evsel__name(evsel);
+ struct annotation *notes = symbol__annotation(sym);
char buf[1024];
if (symbol__annotate2(sym, map, evsel, &opts, NULL) < 0)
@@ -2336,12 +2337,8 @@ int symbol__tty_annotate2(struct symbol *sym, struct map *map,
print_summary(&source_line, dso->long_name);
}
- if (perf_evsel__is_group_event(evsel)) {
- perf_evsel__group_desc(evsel, buf, sizeof(buf));
- ev_name = buf;
- }
-
- fprintf(stdout, "%s() %s\nEvent: %s\n\n", sym->name, dso->long_name, ev_name);
+ annotation__scnprintf_samples_period(notes, buf, sizeof(buf), evsel);
+ fprintf(stdout, "%s\n%s() %s\n", buf, sym->name, dso->long_name);
symbol__annotate_fprintf2(sym, stdout);
annotated_source__purge(symbol__annotation(sym)->src);
@@ -2597,6 +2594,46 @@ out_free_offsets:
return -1;
}
+int __annotation__scnprintf_samples_period(struct annotation *notes,
+ char *bf, size_t size,
+ struct perf_evsel *evsel,
+ bool show_freq)
+{
+ const char *ev_name = perf_evsel__name(evsel);
+ char buf[1024], ref[30] = " show reference callgraph, ";
+ char sample_freq_str[64] = "";
+ unsigned long nr_samples = 0;
+ int nr_members = 1;
+ bool enable_ref = false;
+ u64 nr_events = 0;
+ char unit;
+ int i;
+
+ if (perf_evsel__is_group_event(evsel)) {
+ perf_evsel__group_desc(evsel, buf, sizeof(buf));
+ ev_name = buf;
+ nr_members = evsel->nr_members;
+ }
+
+ for (i = 0; i < nr_members; i++) {
+ struct sym_hist *ah = annotation__histogram(notes, evsel->idx + i);
+
+ nr_samples += ah->nr_samples;
+ nr_events += ah->period;
+ }
+
+ if (symbol_conf.show_ref_callgraph && strstr(ev_name, "call-graph=no"))
+ enable_ref = true;
+
+ if (show_freq)
+ scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->attr.sample_freq);
+
+ nr_samples = convert_unit(nr_samples, &unit);
+ return scnprintf(bf, size, "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
+ nr_samples, unit, evsel->nr_members > 1 ? "s" : "",
+ ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
+}
+
#define ANNOTATION__CFG(n) \
{ .name = #n, .value = &annotation__default_options.n, }
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index ff7e3df31efa..db8d09bea07e 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -151,6 +151,18 @@ double annotation_line__max_percent(struct annotation_line *al, struct annotatio
void annotation_line__write(struct annotation_line *al, struct annotation *notes,
struct annotation_write_ops *ops);
+int __annotation__scnprintf_samples_period(struct annotation *notes,
+ char *bf, size_t size,
+ struct perf_evsel *evsel,
+ bool show_freq);
+
+static inline int annotation__scnprintf_samples_period(struct annotation *notes,
+ char *bf, size_t size,
+ struct perf_evsel *evsel)
+{
+ return __annotation__scnprintf_samples_period(notes, bf, size, evsel, true);
+}
+
int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw);
size_t disasm__fprintf(struct list_head *head, FILE *fp);
void symbol__calc_percent(struct symbol *sym, struct perf_evsel *evsel);
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index fb357a00dd86..857de69a5361 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -302,13 +302,27 @@ static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
return 0;
}
+static bool filter_cpu(struct perf_session *session, int cpu)
+{
+ unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;
+
+ return cpu_bitmap && cpu != -1 && !test_bit(cpu, cpu_bitmap);
+}
+
static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
struct perf_session *session,
unsigned int idx,
struct auxtrace_buffer *buffer,
struct auxtrace_buffer **buffer_ptr)
{
- int err;
+ int err = -ENOMEM;
+
+ if (filter_cpu(session, buffer->cpu))
+ return 0;
+
+ buffer = memdup(buffer, sizeof(*buffer));
+ if (!buffer)
+ return -ENOMEM;
if (session->one_mmap) {
buffer->data = buffer->data_offset - session->one_mmap_offset +
@@ -316,31 +330,28 @@ static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
} else if (perf_data__is_pipe(session->data)) {
buffer->data = auxtrace_copy_data(buffer->size, session);
if (!buffer->data)
- return -ENOMEM;
+ goto out_free;
buffer->data_needs_freeing = true;
} else if (BITS_PER_LONG == 32 &&
buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
err = auxtrace_queues__split_buffer(queues, idx, buffer);
if (err)
- return err;
+ goto out_free;
}
err = auxtrace_queues__queue_buffer(queues, idx, buffer);
if (err)
- return err;
+ goto out_free;
/* FIXME: Doesn't work for split buffer */
if (buffer_ptr)
*buffer_ptr = buffer;
return 0;
-}
-static bool filter_cpu(struct perf_session *session, int cpu)
-{
- unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;
-
- return cpu_bitmap && cpu != -1 && !test_bit(cpu, cpu_bitmap);
+out_free:
+ auxtrace_buffer__free(buffer);
+ return err;
}
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
@@ -348,36 +359,19 @@ int auxtrace_queues__add_event(struct auxtrace_queues *queues,
union perf_event *event, off_t data_offset,
struct auxtrace_buffer **buffer_ptr)
{
- struct auxtrace_buffer *buffer;
- unsigned int idx;
- int err;
-
- if (filter_cpu(session, event->auxtrace.cpu))
- return 0;
-
- buffer = zalloc(sizeof(struct auxtrace_buffer));
- if (!buffer)
- return -ENOMEM;
-
- buffer->pid = -1;
- buffer->tid = event->auxtrace.tid;
- buffer->cpu = event->auxtrace.cpu;
- buffer->data_offset = data_offset;
- buffer->offset = event->auxtrace.offset;
- buffer->reference = event->auxtrace.reference;
- buffer->size = event->auxtrace.size;
- idx = event->auxtrace.idx;
-
- err = auxtrace_queues__add_buffer(queues, session, idx, buffer,
- buffer_ptr);
- if (err)
- goto out_err;
-
- return 0;
+ struct auxtrace_buffer buffer = {
+ .pid = -1,
+ .tid = event->auxtrace.tid,
+ .cpu = event->auxtrace.cpu,
+ .data_offset = data_offset,
+ .offset = event->auxtrace.offset,
+ .reference = event->auxtrace.reference,
+ .size = event->auxtrace.size,
+ };
+ unsigned int idx = event->auxtrace.idx;
-out_err:
- auxtrace_buffer__free(buffer);
- return err;
+ return auxtrace_queues__add_buffer(queues, session, idx, &buffer,
+ buffer_ptr);
}
static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
diff --git a/tools/perf/util/c++/clang-test.cpp b/tools/perf/util/c++/clang-test.cpp
index a4014d786676..7b042a5ebc68 100644
--- a/tools/perf/util/c++/clang-test.cpp
+++ b/tools/perf/util/c++/clang-test.cpp
@@ -41,7 +41,7 @@ int test__clang_to_IR(void)
if (!M)
return -1;
for (llvm::Function& F : *M)
- if (F.getName() == "bpf_func__SyS_epoll_wait")
+ if (F.getName() == "bpf_func__SyS_epoll_pwait")
return 0;
return -1;
}
diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp
index 1bfc946e37dc..bf31ceab33bd 100644
--- a/tools/perf/util/c++/clang.cpp
+++ b/tools/perf/util/c++/clang.cpp
@@ -9,6 +9,7 @@
* Copyright (C) 2016 Huawei Inc.
*/
+#include "clang/Basic/Version.h"
#include "clang/CodeGen/CodeGenAction.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/CompilerInstance.h"
@@ -58,7 +59,8 @@ createCompilerInvocation(llvm::opt::ArgStringList CFlags, StringRef& Path,
FrontendOptions& Opts = CI->getFrontendOpts();
Opts.Inputs.clear();
- Opts.Inputs.emplace_back(Path, IK_C);
+ Opts.Inputs.emplace_back(Path,
+ FrontendOptions::getInputKindForExtension("c"));
return CI;
}
@@ -71,10 +73,17 @@ getModuleFromSource(llvm::opt::ArgStringList CFlags,
Clang.setVirtualFileSystem(&*VFS);
+#if CLANG_VERSION_MAJOR < 4
IntrusiveRefCntPtr<CompilerInvocation> CI =
createCompilerInvocation(std::move(CFlags), Path,
Clang.getDiagnostics());
Clang.setInvocation(&*CI);
+#else
+ std::shared_ptr<CompilerInvocation> CI(
+ createCompilerInvocation(std::move(CFlags), Path,
+ Clang.getDiagnostics()));
+ Clang.setInvocation(CI);
+#endif
std::unique_ptr<CodeGenAction> Act(new EmitLLVMOnlyAction(&*LLVMCtx));
if (!Clang.ExecuteAction(*Act))
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index f5acda13dcfa..7eb7de5aee44 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -979,7 +979,7 @@ int die_get_varname(Dwarf_Die *vr_die, struct strbuf *buf)
return ret < 0 ? ret : strbuf_addf(buf, "\t%s", dwarf_diename(vr_die));
}
-#ifdef HAVE_DWARF_GETLOCATIONS
+#ifdef HAVE_DWARF_GETLOCATIONS_SUPPORT
/**
* die_get_var_innermost_scope - Get innermost scope range of given variable DIE
* @sp_die: a subprogram DIE
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 7d968892ee39..4d602fba40b2 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -6,6 +6,7 @@
#include "session.h"
#include "namespaces.h"
#include "sort.h"
+#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
@@ -14,6 +15,7 @@
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
+#include <inttypes.h>
#include <sys/param.h>
static bool hists__filter_entry_by_dso(struct hists *hists,
@@ -2454,6 +2456,85 @@ u64 hists__total_period(struct hists *hists)
hists->stats.total_period;
}
+int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
+{
+ char unit;
+ int printed;
+ const struct dso *dso = hists->dso_filter;
+ const struct thread *thread = hists->thread_filter;
+ int socket_id = hists->socket_filter;
+ unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ u64 nr_events = hists->stats.total_period;
+ struct perf_evsel *evsel = hists_to_evsel(hists);
+ const char *ev_name = perf_evsel__name(evsel);
+ char buf[512], sample_freq_str[64] = "";
+ size_t buflen = sizeof(buf);
+ char ref[30] = " show reference callgraph, ";
+ bool enable_ref = false;
+
+ if (symbol_conf.filter_relative) {
+ nr_samples = hists->stats.nr_non_filtered_samples;
+ nr_events = hists->stats.total_non_filtered_period;
+ }
+
+ if (perf_evsel__is_group_event(evsel)) {
+ struct perf_evsel *pos;
+
+ perf_evsel__group_desc(evsel, buf, buflen);
+ ev_name = buf;
+
+ for_each_group_member(pos, evsel) {
+ struct hists *pos_hists = evsel__hists(pos);
+
+ if (symbol_conf.filter_relative) {
+ nr_samples += pos_hists->stats.nr_non_filtered_samples;
+ nr_events += pos_hists->stats.total_non_filtered_period;
+ } else {
+ nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ nr_events += pos_hists->stats.total_period;
+ }
+ }
+ }
+
+ if (symbol_conf.show_ref_callgraph &&
+ strstr(ev_name, "call-graph=no"))
+ enable_ref = true;
+
+ if (show_freq)
+ scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->attr.sample_freq);
+
+ nr_samples = convert_unit(nr_samples, &unit);
+ printed = scnprintf(bf, size,
+ "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
+ nr_samples, unit, evsel->nr_members > 1 ? "s" : "",
+ ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
+
+
+ if (hists->uid_filter_str)
+ printed += snprintf(bf + printed, size - printed,
+ ", UID: %s", hists->uid_filter_str);
+ if (thread) {
+ if (hists__has(hists, thread)) {
+ printed += scnprintf(bf + printed, size - printed,
+ ", Thread: %s(%d)",
+ (thread->comm_set ? thread__comm_str(thread) : ""),
+ thread->tid);
+ } else {
+ printed += scnprintf(bf + printed, size - printed,
+ ", Thread: %s",
+ (thread->comm_set ? thread__comm_str(thread) : ""));
+ }
+ }
+ if (dso)
+ printed += scnprintf(bf + printed, size - printed,
+ ", DSO: %s", dso->short_name);
+ if (socket_id > -1)
+ printed += scnprintf(bf + printed, size - printed,
+ ", Processor Socket: %d", socket_id);
+
+ return printed;
+}
+
int parse_filter_percentage(const struct option *opt __maybe_unused,
const char *arg, int unset __maybe_unused)
{
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index e869cad4d89f..fbabfd8a215d 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -61,6 +61,7 @@ enum hist_column {
HISTC_SRCLINE_TO,
HISTC_TRACE,
HISTC_SYM_SIZE,
+ HISTC_DSO_SIZE,
HISTC_NR_COLS, /* Last entry */
};
@@ -503,5 +504,11 @@ int __hpp__slsmg_color_printf(struct perf_hpp *hpp, const char *fmt, ...);
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
struct perf_hpp_list *hpp_list);
int hists__fprintf_headers(struct hists *hists, FILE *fp);
+int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq);
+
+static inline int hists__scnprintf_title(struct hists *hists, char *bf, size_t size)
+{
+ return __hists__scnprintf_title(hists, bf, size, true);
+}
#endif /* __PERF_HIST_H */
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index edeb7291c8e1..0e9bbe01b0ab 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -103,6 +103,10 @@ static inline u64 identity__map_ip(struct map *map __maybe_unused, u64 ip)
return ip;
}
+static inline size_t map__size(const struct map *map)
+{
+ return map->end - map->start;
+}
/* rip/ip <-> addr suitable for passing to `objdump --start-address=` */
u64 map__rip_2objdump(struct map *map, u64 rip);
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index c71ced7db152..f4a7a437ee87 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1591,7 +1591,7 @@ static void perf_session__warn_about_errors(const struct perf_session *session)
drop_rate = (double)stats->total_lost_samples /
(double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
if (drop_rate > 0.05) {
- ui__warning("Processed %" PRIu64 " samples and lost %3.2f%% samples!\n\n",
+ ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
drop_rate * 100.0);
}
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index e8514f651865..26a68dfd8a4f 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -1545,6 +1545,46 @@ struct sort_entry sort_sym_size = {
.se_width_idx = HISTC_SYM_SIZE,
};
+/* --sort dso_size */
+
+static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
+{
+ int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
+ int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
+
+ return size_l < size_r ? -1 :
+ size_l == size_r ? 0 : 1;
+}
+
+static int64_t
+sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return _sort__dso_size_cmp(right->ms.map, left->ms.map);
+}
+
+static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
+ size_t bf_size, unsigned int width)
+{
+ if (map && map->dso)
+ return repsep_snprintf(bf, bf_size, "%*d", width,
+ map__size(map));
+
+ return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
+}
+
+static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
+}
+
+struct sort_entry sort_dso_size = {
+ .se_header = "DSO size",
+ .se_cmp = sort__dso_size_cmp,
+ .se_snprintf = hist_entry__dso_size_snprintf,
+ .se_width_idx = HISTC_DSO_SIZE,
+};
+
struct sort_dimension {
const char *name;
@@ -1569,6 +1609,7 @@ static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_TRANSACTION, "transaction", sort_transaction),
DIM(SORT_TRACE, "trace", sort_trace),
DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
+ DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
};
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index f5901c10a563..035b62e2c60b 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -220,6 +220,7 @@ enum sort_type {
SORT_TRANSACTION,
SORT_TRACE,
SORT_SYM_SIZE,
+ SORT_DSO_SIZE,
SORT_CGROUP_ID,
/* branch stack specific sort keys */
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 9496365da3d7..c9626c206208 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -11,8 +11,7 @@
#include <stdlib.h>
#include <stdarg.h>
#include <linux/compiler.h>
-#include <linux/types.h>
-#include "namespaces.h"
+#include <sys/types.h>
/* General helper functions */
void usage(const char *err) __noreturn;
@@ -26,6 +25,7 @@ static inline void *zalloc(size_t size)
#define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
struct dirent;
+struct nsinfo;
struct strlist;
int mkdir_p(char *path, mode_t mode);
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index dd614463d4d6..495066bafbe3 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -120,3 +120,5 @@ ifneq ($(silent),1)
QUIET_UNINST = @printf ' UNINST %s\n' $1;
endif
endif
+
+pound := \#
diff --git a/tools/testing/ktest/config-bisect.pl b/tools/testing/ktest/config-bisect.pl
new file mode 100755
index 000000000000..b28feea7c363
--- /dev/null
+++ b/tools/testing/ktest/config-bisect.pl
@@ -0,0 +1,770 @@
+#!/usr/bin/perl -w
+#
+# Copyright 2015 - Steven Rostedt, Red Hat Inc.
+# Copyright 2017 - Steven Rostedt, VMware, Inc.
+#
+# Licensed under the terms of the GNU GPL License version 2
+#
+
+# usage:
+# config-bisect.pl [options] good-config bad-config [good|bad]
+#
+
+# Compares a good config to a bad config, then takes half of the diffs
+# and produces a config that is somewhere between the good config and
+# the bad config. That is, the resulting config will start with the
+# good config and will try to make half of the differences between
+# the good and bad configs match the bad config. It only "tries" because,
+# due to dependencies between the two configs, it may not be able to change
+# exactly half of the configs that differ between the two config
+# files.
+
+# Here's a normal way to use it:
+#
+# $ cd /path/to/linux/kernel
+# $ config-bisect.pl /path/to/good/config /path/to/bad/config
+
+# This will pull in the good config (blowing away the .config in that
+# directory, so do not make .config itself one of the good or bad configs),
+# and then build the config with "make oldconfig" to make sure it matches
+# the current kernel. It then stores the resulting config as the good
+# config. It does the same for the bad config as well.
+# The algorithm then runs, merging half of the differences between
+# the two configs and building the result with "make oldconfig" to make sure
+# it actually changed (dependencies may reset changes the tool had made).
+# It then copies the result of its good config to /path/to/good/config.tmp
+# and the bad config to /path/to/bad/config.tmp (it just appends ".tmp" to the
+# files passed in). The ".config" that you should test will be in the
+# build directory.
+
+# After the first run, determine if the result is good or bad, then
+# run the same command again, appending the result.
+
+# For good results:
+# $ config-bisect.pl /path/to/good/config /path/to/bad/config good
+
+# For bad results:
+# $ config-bisect.pl /path/to/good/config /path/to/bad/config bad
+
+# Do not change the good-config or bad-config, config-bisect.pl will
+# copy the good-config to a temp file with the same name as good-config
+# but with a ".tmp" after it. It will do the same with the bad-config.
+
+# If "good" or "bad" is not stated at the end, it will copy the good and
+# bad configs to the .tmp versions. If the .tmp versions already exist, it
+# will warn before writing over them (-r will not warn, and just writes over them).
+# If the last config is labeled "good", then it will copy it to the good .tmp
+# version. If the last config is labeled "bad", it will copy it to the bad
+# .tmp version. It will continue this until it cannot merge the two any more
+# without the result being equal to either the good or bad .tmp configs.
+
+my $start = 0;
+my $val = "";
+
+my $pwd = `pwd`;
+chomp $pwd;
+my $tree = $pwd;
+my $build;
+
+my $output_config;
+my $reset_bisect;
+
+sub usage {
+ print << "EOF"
+
+usage: config-bisect.pl [-l linux-tree][-b build-dir] good-config bad-config [good|bad]
+ -l [optional] define location of linux-tree (default is current directory)
+ -b [optional] define location to build (O=build-dir) (default is linux-tree)
+ good-config the config that is considered good
+ bad-config the config that does not work
+ "good" add this if the last run produced a good config
+ "bad" add this if the last run produced a bad config
+ If "good" or "bad" is not specified, then it is the start of a new bisect
+
+ Note, each run will create a copy of the good and bad configs with ".tmp" appended.
+
+EOF
+;
+
+ exit(-1);
+}
+
+sub doprint {
+ print @_;
+}
+
+sub dodie {
+ doprint "CRITICAL FAILURE... ", @_, "\n";
+
+ die @_, "\n";
+}
+
+sub expand_path {
+ my ($file) = @_;
+
+ if ($file =~ m,^/,) {
+ return $file;
+ }
+ return "$pwd/$file";
+}
+
+sub read_prompt {
+ my ($cancel, $prompt) = @_;
+
+ my $ans;
+
+ for (;;) {
+ if ($cancel) {
+ print "$prompt [y/n/C] ";
+ } else {
+ print "$prompt [y/N] ";
+ }
+ $ans = <STDIN>;
+ chomp $ans;
+ if ($ans =~ /^\s*$/) {
+ if ($cancel) {
+ $ans = "c";
+ } else {
+ $ans = "n";
+ }
+ }
+ last if ($ans =~ /^y$/i || $ans =~ /^n$/i);
+ if ($cancel) {
+ last if ($ans =~ /^c$/i);
+ print "Please answer either 'y', 'n' or 'c'.\n";
+ } else {
+ print "Please answer either 'y' or 'n'.\n";
+ }
+ }
+ if ($ans =~ /^c/i) {
+ exit;
+ }
+ if ($ans !~ /^y$/i) {
+ return 0;
+ }
+ return 1;
+}
+
+sub read_yn {
+ my ($prompt) = @_;
+
+ return read_prompt 0, $prompt;
+}
+
+sub read_ync {
+ my ($prompt) = @_;
+
+ return read_prompt 1, $prompt;
+}
+
+sub run_command {
+ my ($command, $redirect) = @_;
+ my $start_time;
+ my $end_time;
+ my $dord = 0;
+ my $pid;
+
+ $start_time = time;
+
+ doprint("$command ... ");
+
+ $pid = open(CMD, "$command 2>&1 |") or
+ dodie "unable to exec $command";
+
+ if (defined($redirect)) {
+ open (RD, ">$redirect") or
+ dodie "failed to write to redirect $redirect";
+ $dord = 1;
+ }
+
+ while (<CMD>) {
+ print RD if ($dord);
+ }
+
+ waitpid($pid, 0);
+ my $failed = $?;
+
+ close(CMD);
+ close(RD) if ($dord);
+
+ $end_time = time;
+ my $delta = $end_time - $start_time;
+
+ if ($delta == 1) {
+ doprint "[1 second] ";
+ } else {
+ doprint "[$delta seconds] ";
+ }
+
+ if ($failed) {
+ doprint "FAILED!\n";
+ } else {
+ doprint "SUCCESS\n";
+ }
+
+ return !$failed;
+}
+
+###### CONFIG BISECT ######
+
+# config_ignore holds the configs that were set (or unset) for
+# a good config and we will ignore these configs for the rest
+# of a config bisect. These configs stay as they were.
+my %config_ignore;
+
+# config_set holds what all configs were set as.
+my %config_set;
+
+# config_off holds the set of configs that the bad config had disabled.
+# We need to record them and set them in the .config when running
+# olddefconfig, because olddefconfig keeps the defaults.
+my %config_off;
+
+# config_off_tmp holds a set of configs to turn off for now
+my @config_off_tmp;
+
+# config_list is the set of configs that are being tested
+my %config_list;
+my %null_config;
+
+my %dependency;
+
+my $make;
+
+sub make_oldconfig {
+
+ if (!run_command "$make olddefconfig") {
+ # Perhaps olddefconfig doesn't exist in this version of the kernel
+ # try oldnoconfig
+ doprint "olddefconfig failed, trying make oldnoconfig\n";
+ if (!run_command "$make oldnoconfig") {
+ doprint "oldnoconfig failed, trying yes '' | make oldconfig\n";
+ # try a yes '' | oldconfig
+ run_command "yes '' | $make oldconfig" or
+ dodie "failed make config oldconfig";
+ }
+ }
+}
+
+sub assign_configs {
+ my ($hash, $config) = @_;
+
+ doprint "Reading configs from $config\n";
+
+ open (IN, $config)
+ or dodie "Failed to read $config";
+
+ while (<IN>) {
+ chomp;
+ if (/^((CONFIG\S*)=.*)/) {
+ ${$hash}{$2} = $1;
+ } elsif (/^(# (CONFIG\S*) is not set)/) {
+ ${$hash}{$2} = $1;
+ }
+ }
+
+ close(IN);
+}
+
+sub process_config_ignore {
+ my ($config) = @_;
+
+ assign_configs \%config_ignore, $config;
+}
+
+sub get_dependencies {
+ my ($config) = @_;
+
+ my $arr = $dependency{$config};
+ if (!defined($arr)) {
+ return ();
+ }
+
+ my @deps = @{$arr};
+
+ foreach my $dep (@{$arr}) {
+ print "ADD DEP $dep\n";
+ @deps = (@deps, get_dependencies $dep);
+ }
+
+ return @deps;
+}
+
+sub save_config {
+ my ($pc, $file) = @_;
+
+ my %configs = %{$pc};
+
+ doprint "Saving configs into $file\n";
+
+ open(OUT, ">$file") or dodie "Can not write to $file";
+
+ foreach my $config (keys %configs) {
+ print OUT "$configs{$config}\n";
+ }
+ close(OUT);
+}
+
+sub create_config {
+ my ($name, $pc) = @_;
+
+ doprint "Creating old config from $name configs\n";
+
+ save_config $pc, $output_config;
+
+ make_oldconfig;
+}
+
+# compare two config hashes, and return configs with different vals.
+# It returns B's config values, but you can use A to see what A was.
+sub diff_config_vals {
+ my ($pa, $pb) = @_;
+
+ # crappy Perl way to pass in hashes.
+ my %a = %{$pa};
+ my %b = %{$pb};
+
+ my %ret;
+
+ foreach my $item (keys %a) {
+ if (defined($b{$item}) && $b{$item} ne $a{$item}) {
+ $ret{$item} = $b{$item};
+ }
+ }
+
+ return %ret;
+}
+
+# compare two config hashes and return the configs in B but not A
+sub diff_configs {
+ my ($pa, $pb) = @_;
+
+ my %ret;
+
+ # crappy Perl way to pass in hashes.
+ my %a = %{$pa};
+ my %b = %{$pb};
+
+ foreach my $item (keys %b) {
+ if (!defined($a{$item})) {
+ $ret{$item} = $b{$item};
+ }
+ }
+
+ return %ret;
+}
+
+# Return whether two configs are equal or not:
+#  0 if they are equal
+# +1 if b has something a does not, or a and b have a different item
+# -1 if a has something b does not
+sub compare_configs {
+ my ($pa, $pb) = @_;
+
+ my %ret;
+
+ # crappy Perl way to pass in hashes.
+ my %a = %{$pa};
+ my %b = %{$pb};
+
+ foreach my $item (keys %b) {
+ if (!defined($a{$item})) {
+ return 1;
+ }
+ if ($a{$item} ne $b{$item}) {
+ return 1;
+ }
+ }
+
+ foreach my $item (keys %a) {
+ if (!defined($b{$item})) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
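+
+# A minimal sketch of the return values (the hash contents are illustrative):
+#   my %a = ( "CONFIG_FOO" => "CONFIG_FOO=y" );
+#   my %b = ( "CONFIG_FOO" => "CONFIG_FOO=y", "CONFIG_BAR" => "CONFIG_BAR=m" );
+#   compare_configs \%a, \%b;   # returns  1 (b has CONFIG_BAR, a does not)
+#   compare_configs \%b, \%a;   # returns -1 (a has something b does not)
+#   compare_configs \%a, \%a;   # returns  0 (equal)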
+
+sub process_failed {
+ my ($config) = @_;
+
+ doprint "\n\n***************************************\n";
+ doprint "Found bad config: $config\n";
+ doprint "***************************************\n\n";
+}
+
+sub process_new_config {
+ my ($tc, $nc, $gc, $bc) = @_;
+
+ my %tmp_config = %{$tc};
+ my %good_configs = %{$gc};
+ my %bad_configs = %{$bc};
+
+ my %new_configs;
+
+ my $runtest = 1;
+ my $ret;
+
+ create_config "tmp_configs", \%tmp_config;
+ assign_configs \%new_configs, $output_config;
+
+ $ret = compare_configs \%new_configs, \%bad_configs;
+ if (!$ret) {
+ doprint "New config equals bad config, try next test\n";
+ $runtest = 0;
+ }
+
+ if ($runtest) {
+ $ret = compare_configs \%new_configs, \%good_configs;
+ if (!$ret) {
+ doprint "New config equals good config, try next test\n";
+ $runtest = 0;
+ }
+ }
+
+ %{$nc} = %new_configs;
+
+ return $runtest;
+}
+
+sub convert_config {
+ my ($config) = @_;
+
+ if ($config =~ /^# (.*) is not set/) {
+ $config = "$1=n";
+ }
+
+ $config =~ s/^CONFIG_//;
+ return $config;
+}
+
+sub print_config {
+ my ($sym, $config) = @_;
+
+ $config = convert_config $config;
+ doprint "$sym$config\n";
+}
+
+sub print_config_compare {
+ my ($good_config, $bad_config) = @_;
+
+ $good_config = convert_config $good_config;
+ $bad_config = convert_config $bad_config;
+
+ my $good_value = $good_config;
+ my $bad_value = $bad_config;
+ $good_value =~ s/(.*)=//;
+ my $config = $1;
+
+ $bad_value =~ s/.*=//;
+
+ doprint " $config $good_value -> $bad_value\n";
+}
+
+# Pass in:
+# $phalf: half of the config names you want to add
+# $oconfigs: The original configs to start with
+# $sconfigs: The source to update $oconfigs with (from $phalf)
+# $which: The name of the half that is being updated (top / bottom)
+# $type: The name of the source type (good / bad)
+sub make_half {
+ my ($phalf, $oconfigs, $sconfigs, $which, $type) = @_;
+
+ my @half = @{$phalf};
+ my %orig_configs = %{$oconfigs};
+ my %source_configs = %{$sconfigs};
+
+ my %tmp_config = %orig_configs;
+
+ doprint "Settings bisect with $which half of $type configs:\n";
+ foreach my $item (@half) {
+ doprint "Updating $item to $source_configs{$item}\n";
+ $tmp_config{$item} = $source_configs{$item};
+ }
+
+ return %tmp_config;
+}
+
+sub run_config_bisect {
+ my ($pgood, $pbad) = @_;
+
+ my %good_configs = %{$pgood};
+ my %bad_configs = %{$pbad};
+
+ my %diff_configs = diff_config_vals \%good_configs, \%bad_configs;
+ my %b_configs = diff_configs \%good_configs, \%bad_configs;
+ my %g_configs = diff_configs \%bad_configs, \%good_configs;
+
+ # diff_arr is what is in both good and bad but with different values (e.g. y -> n)
+ my @diff_arr = keys %diff_configs;
+ my $len_diff = $#diff_arr + 1;
+
+ # b_arr is what is in bad but not in good (has depends)
+ my @b_arr = keys %b_configs;
+ my $len_b = $#b_arr + 1;
+
+ # g_arr is what is in good but not in bad
+ my @g_arr = keys %g_configs;
+ my $len_g = $#g_arr + 1;
+
+ my $runtest = 0;
+ my %new_configs;
+ my $ret;
+
+ # Look at the configs that are different between good and bad.
+ # This does not include those that depend on other configs
+ # (configs depending on other configs that are not set would
+ # not show up even as a "# CONFIG_FOO is not set").
+
+
+ doprint "# of configs to check: $len_diff\n";
+ doprint "# of configs showing only in good: $len_g\n";
+ doprint "# of configs showing only in bad: $len_b\n";
+
+ if ($len_diff > 0) {
+ # Now test for different values
+
+ doprint "Configs left to check:\n";
+ doprint " Good Config\t\t\tBad Config\n";
+ doprint " -----------\t\t\t----------\n";
+ foreach my $item (@diff_arr) {
+ doprint " $good_configs{$item}\t$bad_configs{$item}\n";
+ }
+
+ my $half = int($#diff_arr / 2);
+ my @tophalf = @diff_arr[0 .. $half];
+
+ doprint "Set tmp config to be good config with some bad config values\n";
+
+ my %tmp_config = make_half \@tophalf, \%good_configs,
+ \%bad_configs, "top", "bad";
+
+ $runtest = process_new_config \%tmp_config, \%new_configs,
+ \%good_configs, \%bad_configs;
+
+ if (!$runtest) {
+ doprint "Set tmp config to be bad config with some good config values\n";
+
+ my %tmp_config = make_half \@tophalf, \%bad_configs,
+ \%good_configs, "top", "good";
+
+ $runtest = process_new_config \%tmp_config, \%new_configs,
+ \%good_configs, \%bad_configs;
+ }
+ }
+
+ if (!$runtest && $len_diff > 0) {
+ # do the same thing, but this time with bottom half
+
+ my $half = int($#diff_arr / 2);
+ my @bottomhalf = @diff_arr[$half+1 .. $#diff_arr];
+
+ doprint "Set tmp config to be good config with some bad config values\n";
+
+ my %tmp_config = make_half \@bottomhalf, \%good_configs,
+ \%bad_configs, "bottom", "bad";
+
+ $runtest = process_new_config \%tmp_config, \%new_configs,
+ \%good_configs, \%bad_configs;
+
+ if (!$runtest) {
+ doprint "Set tmp config to be bad config with some good config values\n";
+
+ my %tmp_config = make_half \@bottomhalf, \%bad_configs,
+ \%good_configs, "bottom", "good";
+
+ $runtest = process_new_config \%tmp_config, \%new_configs,
+ \%good_configs, \%bad_configs;
+ }
+ }
+
+ if ($runtest) {
+ make_oldconfig;
+ doprint "READY TO TEST .config IN $build\n";
+ return 0;
+ }
+
+ doprint "\n%%%%%%%% FAILED TO FIND SINGLE BAD CONFIG %%%%%%%%\n";
+ doprint "Hmm, can't make any more changes without making good == bad?\n";
+ doprint "Difference between good (+) and bad (-)\n";
+
+ foreach my $item (keys %bad_configs) {
+ if (!defined($good_configs{$item})) {
+ print_config "-", $bad_configs{$item};
+ }
+ }
+
+ foreach my $item (keys %good_configs) {
+ next if (!defined($bad_configs{$item}));
+ if ($good_configs{$item} ne $bad_configs{$item}) {
+ print_config_compare $good_configs{$item}, $bad_configs{$item};
+ }
+ }
+
+ foreach my $item (keys %good_configs) {
+ if (!defined($bad_configs{$item})) {
+ print_config "+", $good_configs{$item};
+ }
+ }
+ return -1;
+}
+
+sub config_bisect {
+ my ($good_config, $bad_config) = @_;
+ my $ret;
+
+ my %good_configs;
+ my %bad_configs;
+ my %tmp_configs;
+
+ doprint "Run good configs through make oldconfig\n";
+ assign_configs \%tmp_configs, $good_config;
+ create_config "$good_config", \%tmp_configs;
+ assign_configs \%good_configs, $output_config;
+
+ doprint "Run bad configs through make oldconfig\n";
+ assign_configs \%tmp_configs, $bad_config;
+ create_config "$bad_config", \%tmp_configs;
+ assign_configs \%bad_configs, $output_config;
+
+ save_config \%good_configs, $good_config;
+ save_config \%bad_configs, $bad_config;
+
+ return run_config_bisect \%good_configs, \%bad_configs;
+}
+
+while ($#ARGV >= 0) {
+ if ($ARGV[0] !~ m/^-/) {
+ last;
+ }
+ my $opt = shift @ARGV;
+
+ if ($opt eq "-b") {
+ $val = shift @ARGV;
+ if (!defined($val)) {
+ die "-b requires value\n";
+ }
+ $build = $val;
+ }
+
+ elsif ($opt eq "-l") {
+ $val = shift @ARGV;
+ if (!defined($val)) {
+ die "-l requires value\n";
+ }
+ $tree = $val;
+ }
+
+ elsif ($opt eq "-r") {
+ $reset_bisect = 1;
+ }
+
+ elsif ($opt eq "-h") {
+ usage;
+ }
+
+ else {
+ die "Unknow option $opt\n";
+ }
+}
+
+$build = $tree if (!defined($build));
+
+$tree = expand_path $tree;
+$build = expand_path $build;
+
+if ( ! -d $tree ) {
+ die "$tree not a directory\n";
+}
+
+if ( ! -d $build ) {
+ die "$build not a directory\n";
+}
+
+usage if $#ARGV < 1;
+
+if ($#ARGV == 1) {
+ $start = 1;
+} elsif ($#ARGV == 2) {
+ $val = $ARGV[2];
+ if ($val ne "good" && $val ne "bad") {
+ die "Unknown command '$val', bust be either \"good\" or \"bad\"\n";
+ }
+} else {
+ usage;
+}
+
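+# Illustrative invocations (the paths are placeholders; usage() prints the
+# authoritative summary):
+#   ./config-bisect.pl -b /path/to/build -l /path/to/tree good.config bad.config
+#   ./config-bisect.pl good.config bad.config good   # record last tested .config as good
+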
+my $good_start = expand_path $ARGV[0];
+my $bad_start = expand_path $ARGV[1];
+
+my $good = "$good_start.tmp";
+my $bad = "$bad_start.tmp";
+
+$make = "make";
+
+if ($build ne $tree) {
+ $make = "make O=$build"
+}
+
+$output_config = "$build/.config";
+
+if ($start) {
+ if ( ! -f $good_start ) {
+ die "$good_start not found\n";
+ }
+ if ( ! -f $bad_start ) {
+ die "$bad_start not found\n";
+ }
+ if ( -f $good || -f $bad ) {
+ my $p = "";
+
+ if ( -f $good ) {
+ $p = "$good exists\n";
+ }
+
+ if ( -f $bad ) {
+ $p = "$p$bad exists\n";
+ }
+
+ if (!defined($reset_bisect)) {
+ if (!read_yn "${p}Overwrite and start new bisect anyway?") {
+ exit (-1);
+ }
+ }
+ }
+ run_command "cp $good_start $good" or die "failed to copy to $good\n";
+ run_command "cp $bad_start $bad" or die "faield to copy to $bad\n";
+} else {
+ if ( ! -f $good ) {
+ die "Can not find file $good\n";
+ }
+ if ( ! -f $bad ) {
+ die "Can not find file $bad\n";
+ }
+ if ($val eq "good") {
+ run_command "cp $output_config $good" or die "failed to copy $config to $good\n";
+ } elsif ($val eq "bad") {
+ run_command "cp $output_config $bad" or die "failed to copy $config to $bad\n";
+ }
+}
+
+chdir $tree || die "can't change directory to $tree";
+
+my $ret = config_bisect $good, $bad;
+
+if (!$ret) {
+ exit(0);
+}
+
+if ($ret > 0) {
+ doprint "Cleaning temp files\n";
+ run_command "rm $good";
+ run_command "rm $bad";
+ exit(1);
+} else {
+ doprint "See good and bad configs for details:\n";
+ doprint "good: $good\n";
+ doprint "bad: $bad\n";
+ doprint "%%%%%%%% FAILED TO FIND SINGLE BAD CONFIG %%%%%%%%\n";
+}
+exit(2);
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 8809f244bb7c..87af8a68ab25 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -10,6 +10,7 @@ use Fcntl qw(F_GETFL F_SETFL O_NONBLOCK);
use File::Path qw(mkpath);
use File::Copy qw(cp);
use FileHandle;
+use FindBin;
my $VERSION = "0.2";
@@ -22,6 +23,11 @@ my %evals;
#default opts
my %default = (
+ "MAILER" => "sendmail", # default mailer
+ "EMAIL_ON_ERROR" => 1,
+ "EMAIL_WHEN_FINISHED" => 1,
+ "EMAIL_WHEN_CANCELED" => 0,
+ "EMAIL_WHEN_STARTED" => 0,
"NUM_TESTS" => 1,
"TEST_TYPE" => "build",
"BUILD_TYPE" => "randconfig",
@@ -59,6 +65,7 @@ my %default = (
"GRUB_REBOOT" => "grub2-reboot",
"SYSLINUX" => "extlinux",
"SYSLINUX_PATH" => "/boot/extlinux",
+ "CONNECT_TIMEOUT" => 25,
# required, and we will ask users if they don't have them but we keep the default
# value something that is common.
@@ -163,6 +170,8 @@ my $store_failures;
my $store_successes;
my $test_name;
my $timeout;
+my $connect_timeout;
+my $config_bisect_exec;
my $booted_timeout;
my $detect_triplefault;
my $console;
@@ -204,6 +213,20 @@ my $install_time;
my $reboot_time;
my $test_time;
+my $pwd;
+my $dirname = $FindBin::Bin;
+
+my $mailto;
+my $mailer;
+my $mail_path;
+my $mail_command;
+my $email_on_error;
+my $email_when_finished;
+my $email_when_started;
+my $email_when_canceled;
+
+my $script_start_time = localtime();
+
# set when a test is something other that just building or install
# which would require more options.
my $buildonly = 1;
@@ -229,6 +252,14 @@ my $no_reboot = 1;
my $reboot_success = 0;
my %option_map = (
+ "MAILTO" => \$mailto,
+ "MAILER" => \$mailer,
+ "MAIL_PATH" => \$mail_path,
+ "MAIL_COMMAND" => \$mail_command,
+ "EMAIL_ON_ERROR" => \$email_on_error,
+ "EMAIL_WHEN_FINISHED" => \$email_when_finished,
+ "EMAIL_WHEN_STARTED" => \$email_when_started,
+ "EMAIL_WHEN_CANCELED" => \$email_when_canceled,
"MACHINE" => \$machine,
"SSH_USER" => \$ssh_user,
"TMP_DIR" => \$tmpdir,
@@ -296,6 +327,8 @@ my %option_map = (
"STORE_SUCCESSES" => \$store_successes,
"TEST_NAME" => \$test_name,
"TIMEOUT" => \$timeout,
+ "CONNECT_TIMEOUT" => \$connect_timeout,
+ "CONFIG_BISECT_EXEC" => \$config_bisect_exec,
"BOOTED_TIMEOUT" => \$booted_timeout,
"CONSOLE" => \$console,
"CLOSE_CONSOLE_SIGNAL" => \$close_console_signal,
@@ -337,6 +370,7 @@ my %used_options;
# default variables that can be used
chomp ($variable{"PWD"} = `pwd`);
+$pwd = $variable{"PWD"};
$config_help{"MACHINE"} = << "EOF"
The machine hostname that you will test.
@@ -718,22 +752,14 @@ sub set_value {
my $prvalue = process_variables($rvalue);
- if ($buildonly && $lvalue =~ /^TEST_TYPE(\[.*\])?$/ && $prvalue ne "build") {
+ if ($lvalue =~ /^(TEST|BISECT|CONFIG_BISECT)_TYPE(\[.*\])?$/ &&
+ $prvalue !~ /^(config_|)bisect$/ &&
+ $prvalue !~ /^build$/ &&
+ $buildonly) {
+
# Note if a test is something other than build, then we
# will need other mandatory options.
if ($prvalue ne "install") {
- # for bisect, we need to check BISECT_TYPE
- if ($prvalue ne "bisect") {
- $buildonly = 0;
- }
- } else {
- # install still limits some mandatory options.
- $buildonly = 2;
- }
- }
-
- if ($buildonly && $lvalue =~ /^BISECT_TYPE(\[.*\])?$/ && $prvalue ne "build") {
- if ($prvalue ne "install") {
$buildonly = 0;
} else {
# install still limits some mandatory options.
@@ -1140,7 +1166,8 @@ sub __read_config {
sub get_test_case {
print "What test case would you like to run?\n";
print " (build, install or boot)\n";
- print " Other tests are available but require editing the config file\n";
+ print " Other tests are available but require editing ktest.conf\n";
+ print " (see tools/testing/ktest/sample.conf)\n";
my $ans = <STDIN>;
chomp $ans;
$default{"TEST_TYPE"} = $ans;
@@ -1328,8 +1355,8 @@ sub reboot {
my ($time) = @_;
my $powercycle = 0;
- # test if the machine can be connected to within 5 seconds
- my $stat = run_ssh("echo check machine status", 5);
+ # test if the machine can be connected to within a few seconds
+ my $stat = run_ssh("echo check machine status", $connect_timeout);
if (!$stat) {
doprint("power cycle\n");
$powercycle = 1;
@@ -1404,10 +1431,18 @@ sub do_not_reboot {
return $test_type eq "build" || $no_reboot ||
($test_type eq "patchcheck" && $opt{"PATCHCHECK_TYPE[$i]"} eq "build") ||
- ($test_type eq "bisect" && $opt{"BISECT_TYPE[$i]"} eq "build");
+ ($test_type eq "bisect" && $opt{"BISECT_TYPE[$i]"} eq "build") ||
+ ($test_type eq "config_bisect" && $opt{"CONFIG_BISECT_TYPE[$i]"} eq "build");
}
+my $in_die = 0;
+
sub dodie {
+
+ # avoid recursion
+ return if ($in_die);
+ $in_die = 1;
+
doprint "CRITICAL FAILURE... ", @_, "\n";
my $i = $iteration;
@@ -1426,6 +1461,11 @@ sub dodie {
print " See $opt{LOG_FILE} for more info.\n";
}
+ if ($email_on_error) {
+ send_email("KTEST: critical failure for your [$test_type] test",
+ "Your test started at $script_start_time has failed with:\n@_\n");
+ }
+
if ($monitor_cnt) {
# restore terminal settings
system("stty $stty_orig");
@@ -1477,7 +1517,7 @@ sub exec_console {
close($pts);
exec $console or
- die "Can't open console $console";
+ dodie "Can't open console $console";
}
sub open_console {
@@ -1515,6 +1555,9 @@ sub close_console {
doprint "kill child process $pid\n";
kill $close_console_signal, $pid;
+ doprint "wait for child process $pid to exit\n";
+ waitpid($pid, 0);
+
print "closing!\n";
close($fp);
@@ -1625,7 +1668,7 @@ sub save_logs {
if (!-d $dir) {
mkpath($dir) or
- die "can't create $dir";
+ dodie "can't create $dir";
}
my %files = (
@@ -1638,7 +1681,7 @@ sub save_logs {
while (my ($name, $source) = each(%files)) {
if (-f "$source") {
cp "$source", "$dir/$name" or
- die "failed to copy $source";
+ dodie "failed to copy $source";
}
}
@@ -1692,6 +1735,7 @@ sub run_command {
my $end_time;
my $dolog = 0;
my $dord = 0;
+ my $dostdout = 0;
my $pid;
$command =~ s/\$SSH_USER/$ssh_user/g;
@@ -1710,9 +1754,15 @@ sub run_command {
}
if (defined($redirect)) {
- open (RD, ">$redirect") or
- dodie "failed to write to redirect $redirect";
- $dord = 1;
+ if ($redirect eq 1) {
+ $dostdout = 1;
+ # Have the output of the command on its own line
+ doprint "\n";
+ } else {
+ open (RD, ">$redirect") or
+ dodie "failed to write to redirect $redirect";
+ $dord = 1;
+ }
}
my $hit_timeout = 0;
@@ -1734,6 +1784,7 @@ sub run_command {
}
print LOG $line if ($dolog);
print RD $line if ($dord);
+ print $line if ($dostdout);
}
waitpid($pid, 0);
@@ -1812,7 +1863,7 @@ sub get_grub2_index {
$ssh_grub =~ s,\$SSH_COMMAND,cat $grub_file,g;
open(IN, "$ssh_grub |")
- or die "unable to get $grub_file";
+ or dodie "unable to get $grub_file";
my $found = 0;
@@ -1821,13 +1872,13 @@ sub get_grub2_index {
$grub_number++;
$found = 1;
last;
- } elsif (/^menuentry\s/) {
+ } elsif (/^menuentry\s|^submenu\s/) {
$grub_number++;
}
}
close(IN);
- die "Could not find '$grub_menu' in $grub_file on $machine"
+ dodie "Could not find '$grub_menu' in $grub_file on $machine"
if (!$found);
doprint "$grub_number\n";
$last_grub_menu = $grub_menu;
@@ -1855,7 +1906,7 @@ sub get_grub_index {
$ssh_grub =~ s,\$SSH_COMMAND,cat /boot/grub/menu.lst,g;
open(IN, "$ssh_grub |")
- or die "unable to get menu.lst";
+ or dodie "unable to get menu.lst";
my $found = 0;
@@ -1870,7 +1921,7 @@ sub get_grub_index {
}
close(IN);
- die "Could not find '$grub_menu' in /boot/grub/menu on $machine"
+ dodie "Could not find '$grub_menu' in /boot/grub/menu on $machine"
if (!$found);
doprint "$grub_number\n";
$last_grub_menu = $grub_menu;
@@ -1983,7 +2034,7 @@ sub monitor {
my $full_line = "";
open(DMESG, "> $dmesg") or
- die "unable to write to $dmesg";
+ dodie "unable to write to $dmesg";
reboot_to;
@@ -2862,7 +2913,7 @@ sub run_bisect {
sub update_bisect_replay {
my $tmp_log = "$tmpdir/ktest_bisect_log";
run_command "git bisect log > $tmp_log" or
- die "can't create bisect log";
+ dodie "can't create bisect log";
return $tmp_log;
}
@@ -2871,9 +2922,9 @@ sub bisect {
my $result;
- die "BISECT_GOOD[$i] not defined\n" if (!defined($bisect_good));
- die "BISECT_BAD[$i] not defined\n" if (!defined($bisect_bad));
- die "BISECT_TYPE[$i] not defined\n" if (!defined($bisect_type));
+ dodie "BISECT_GOOD[$i] not defined\n" if (!defined($bisect_good));
+ dodie "BISECT_BAD[$i] not defined\n" if (!defined($bisect_bad));
+ dodie "BISECT_TYPE[$i] not defined\n" if (!defined($bisect_type));
my $good = $bisect_good;
my $bad = $bisect_bad;
@@ -2936,7 +2987,7 @@ sub bisect {
if ($check ne "good") {
doprint "TESTING BISECT BAD [$bad]\n";
run_command "git checkout $bad" or
- die "Failed to checkout $bad";
+ dodie "Failed to checkout $bad";
$result = run_bisect $type;
@@ -2948,7 +2999,7 @@ sub bisect {
if ($check ne "bad") {
doprint "TESTING BISECT GOOD [$good]\n";
run_command "git checkout $good" or
- die "Failed to checkout $good";
+ dodie "Failed to checkout $good";
$result = run_bisect $type;
@@ -2959,7 +3010,7 @@ sub bisect {
# checkout where we started
run_command "git checkout $head" or
- die "Failed to checkout $head";
+ dodie "Failed to checkout $head";
}
run_command "git bisect start$start_files" or
@@ -3092,76 +3143,6 @@ sub create_config {
make_oldconfig;
}
-# compare two config hashes, and return configs with different vals.
-# It returns B's config values, but you can use A to see what A was.
-sub diff_config_vals {
- my ($pa, $pb) = @_;
-
- # crappy Perl way to pass in hashes.
- my %a = %{$pa};
- my %b = %{$pb};
-
- my %ret;
-
- foreach my $item (keys %a) {
- if (defined($b{$item}) && $b{$item} ne $a{$item}) {
- $ret{$item} = $b{$item};
- }
- }
-
- return %ret;
-}
-
-# compare two config hashes and return the configs in B but not A
-sub diff_configs {
- my ($pa, $pb) = @_;
-
- my %ret;
-
- # crappy Perl way to pass in hashes.
- my %a = %{$pa};
- my %b = %{$pb};
-
- foreach my $item (keys %b) {
- if (!defined($a{$item})) {
- $ret{$item} = $b{$item};
- }
- }
-
- return %ret;
-}
-
-# return if two configs are equal or not
-# 0 is equal +1 b has something a does not
-# +1 if a and b have a different item.
-# -1 if a has something b does not
-sub compare_configs {
- my ($pa, $pb) = @_;
-
- my %ret;
-
- # crappy Perl way to pass in hashes.
- my %a = %{$pa};
- my %b = %{$pb};
-
- foreach my $item (keys %b) {
- if (!defined($a{$item})) {
- return 1;
- }
- if ($a{$item} ne $b{$item}) {
- return 1;
- }
- }
-
- foreach my $item (keys %a) {
- if (!defined($b{$item})) {
- return -1;
- }
- }
-
- return 0;
-}
-
sub run_config_bisect_test {
my ($type) = @_;
@@ -3174,166 +3155,57 @@ sub run_config_bisect_test {
return $ret;
}
-sub process_failed {
- my ($config) = @_;
+sub config_bisect_end {
+ my ($good, $bad) = @_;
+ my $diffexec = "diff -u";
+ if (-f "$builddir/scripts/diffconfig") {
+ $diffexec = "$builddir/scripts/diffconfig";
+ }
doprint "\n\n***************************************\n";
- doprint "Found bad config: $config\n";
+ doprint "No more config bisecting possible.\n";
+ run_command "$diffexec $good $bad", 1;
doprint "***************************************\n\n";
}
-# used for config bisecting
-my $good_config;
-my $bad_config;
-
-sub process_new_config {
- my ($tc, $nc, $gc, $bc) = @_;
-
- my %tmp_config = %{$tc};
- my %good_configs = %{$gc};
- my %bad_configs = %{$bc};
-
- my %new_configs;
-
- my $runtest = 1;
- my $ret;
-
- create_config "tmp_configs", \%tmp_config;
- assign_configs \%new_configs, $output_config;
-
- $ret = compare_configs \%new_configs, \%bad_configs;
- if (!$ret) {
- doprint "New config equals bad config, try next test\n";
- $runtest = 0;
- }
-
- if ($runtest) {
- $ret = compare_configs \%new_configs, \%good_configs;
- if (!$ret) {
- doprint "New config equals good config, try next test\n";
- $runtest = 0;
- }
- }
-
- %{$nc} = %new_configs;
-
- return $runtest;
-}
-
sub run_config_bisect {
- my ($pgood, $pbad) = @_;
-
- my $type = $config_bisect_type;
-
- my %good_configs = %{$pgood};
- my %bad_configs = %{$pbad};
-
- my %diff_configs = diff_config_vals \%good_configs, \%bad_configs;
- my %b_configs = diff_configs \%good_configs, \%bad_configs;
- my %g_configs = diff_configs \%bad_configs, \%good_configs;
-
- my @diff_arr = keys %diff_configs;
- my $len_diff = $#diff_arr + 1;
-
- my @b_arr = keys %b_configs;
- my $len_b = $#b_arr + 1;
-
- my @g_arr = keys %g_configs;
- my $len_g = $#g_arr + 1;
-
- my $runtest = 1;
- my %new_configs;
+ my ($good, $bad, $last_result) = @_;
+ my $reset = "";
+ my $cmd;
my $ret;
- # First, lets get it down to a single subset.
- # Is the problem with a difference in values?
- # Is the problem with a missing config?
- # Is the problem with a config that breaks things?
-
- # Enable all of one set and see if we get a new bad
- # or good config.
-
- # first set the good config to the bad values.
-
- doprint "d=$len_diff g=$len_g b=$len_b\n";
-
- # first lets enable things in bad config that are enabled in good config
-
- if ($len_diff > 0) {
- if ($len_b > 0 || $len_g > 0) {
- my %tmp_config = %bad_configs;
-
- doprint "Set tmp config to be bad config with good config values\n";
- foreach my $item (@diff_arr) {
- $tmp_config{$item} = $good_configs{$item};
- }
-
- $runtest = process_new_config \%tmp_config, \%new_configs,
- \%good_configs, \%bad_configs;
- }
+ if (!length($last_result)) {
+ $reset = "-r";
}
+ run_command "$config_bisect_exec $reset -b $outputdir $good $bad $last_result", 1;
- if (!$runtest && $len_diff > 0) {
-
- if ($len_diff == 1) {
- process_failed $diff_arr[0];
- return 1;
- }
- my %tmp_config = %bad_configs;
-
- my $half = int($#diff_arr / 2);
- my @tophalf = @diff_arr[0 .. $half];
-
- doprint "Settings bisect with top half:\n";
- doprint "Set tmp config to be bad config with some good config values\n";
- foreach my $item (@tophalf) {
- $tmp_config{$item} = $good_configs{$item};
- }
-
- $runtest = process_new_config \%tmp_config, \%new_configs,
- \%good_configs, \%bad_configs;
-
- if (!$runtest) {
- my %tmp_config = %bad_configs;
-
- doprint "Try bottom half\n";
-
- my @bottomhalf = @diff_arr[$half+1 .. $#diff_arr];
-
- foreach my $item (@bottomhalf) {
- $tmp_config{$item} = $good_configs{$item};
- }
-
- $runtest = process_new_config \%tmp_config, \%new_configs,
- \%good_configs, \%bad_configs;
- }
+ # config-bisect returns:
+ # 0 if there is more to bisect
+ # 1 for finding a good config
+ # 2 if it can not find any more configs
+ # -1 (255) on error
+ if ($run_command_status) {
+ return $run_command_status;
}
- if ($runtest) {
- $ret = run_config_bisect_test $type;
- if ($ret) {
- doprint "NEW GOOD CONFIG\n";
- %good_configs = %new_configs;
- run_command "mv $good_config ${good_config}.last";
- save_config \%good_configs, $good_config;
- %{$pgood} = %good_configs;
- } else {
- doprint "NEW BAD CONFIG\n";
- %bad_configs = %new_configs;
- run_command "mv $bad_config ${bad_config}.last";
- save_config \%bad_configs, $bad_config;
- %{$pbad} = %bad_configs;
- }
- return 0;
+ $ret = run_config_bisect_test $config_bisect_type;
+ if ($ret) {
+ doprint "NEW GOOD CONFIG\n";
+ # Return 3 for good config
+ return 3;
+ } else {
+ doprint "NEW BAD CONFIG\n";
+ # Return 4 for bad config
+ return 4;
}
-
- fail "Hmm, need to do a mix match?\n";
- return -1;
}
sub config_bisect {
my ($i) = @_;
+ my $good_config;
+ my $bad_config;
+
my $type = $config_bisect_type;
my $ret;
@@ -3353,6 +3225,24 @@ sub config_bisect {
$good_config = $output_config;
}
+ if (!defined($config_bisect_exec)) {
+ # First check the location that ktest.pl ran
+ my @locations = ( "$pwd/config-bisect.pl",
+ "$dirname/config-bisect.pl",
+ "$builddir/tools/testing/ktest/config-bisect.pl",
+ undef );
+ foreach my $loc (@locations) {
+ doprint "loc = $loc\n";
+ $config_bisect_exec = $loc;
+ last if (defined($config_bisect_exec && -x $config_bisect_exec));
+ }
+ if (!defined($config_bisect_exec)) {
+ fail "Could not find an executable config-bisect.pl\n",
+ " Set CONFIG_BISECT_EXEC to point to config-bisect.pl";
+ return 1;
+ }
+ }
+
# we don't want min configs to cause issues here.
doprint "Disabling 'MIN_CONFIG' for this test\n";
undef $minconfig;
@@ -3361,21 +3251,31 @@ sub config_bisect {
my %bad_configs;
my %tmp_configs;
+ if (-f "$tmpdir/good_config.tmp" || -f "$tmpdir/bad_config.tmp") {
+ if (read_yn "Interrupted config-bisect. Continue (n - will start new)?") {
+ if (-f "$tmpdir/good_config.tmp") {
+ $good_config = "$tmpdir/good_config.tmp";
+ } else {
+ $good_config = "$tmpdir/good_config";
+ }
+ if (-f "$tmpdir/bad_config.tmp") {
+ $bad_config = "$tmpdir/bad_config.tmp";
+ } else {
+ $bad_config = "$tmpdir/bad_config";
+ }
+ }
+ }
doprint "Run good configs through make oldconfig\n";
assign_configs \%tmp_configs, $good_config;
create_config "$good_config", \%tmp_configs;
- assign_configs \%good_configs, $output_config;
+ $good_config = "$tmpdir/good_config";
+ system("cp $output_config $good_config") == 0 or dodie "cp good config";
doprint "Run bad configs through make oldconfig\n";
assign_configs \%tmp_configs, $bad_config;
create_config "$bad_config", \%tmp_configs;
- assign_configs \%bad_configs, $output_config;
-
- $good_config = "$tmpdir/good_config";
$bad_config = "$tmpdir/bad_config";
-
- save_config \%good_configs, $good_config;
- save_config \%bad_configs, $bad_config;
+ system("cp $output_config $bad_config") == 0 or dodie "cp bad config";
if (defined($config_bisect_check) && $config_bisect_check ne "0") {
if ($config_bisect_check ne "good") {
@@ -3398,10 +3298,21 @@ sub config_bisect {
}
}
+ my $last_run = "";
+
do {
- $ret = run_config_bisect \%good_configs, \%bad_configs;
+ $ret = run_config_bisect $good_config, $bad_config, $last_run;
+ if ($ret == 3) {
+ $last_run = "good";
+ } elsif ($ret == 4) {
+ $last_run = "bad";
+ }
print_times;
- } while (!$ret);
+ } while ($ret == 3 || $ret == 4);
+
+ if ($ret == 2) {
+ config_bisect_end "$good_config.tmp", "$bad_config.tmp";
+ }
return $ret if ($ret < 0);
@@ -3416,9 +3327,9 @@ sub patchcheck_reboot {
sub patchcheck {
my ($i) = @_;
- die "PATCHCHECK_START[$i] not defined\n"
+ dodie "PATCHCHECK_START[$i] not defined\n"
if (!defined($patchcheck_start));
- die "PATCHCHECK_TYPE[$i] not defined\n"
+ dodie "PATCHCHECK_TYPE[$i] not defined\n"
if (!defined($patchcheck_type));
my $start = $patchcheck_start;
@@ -3432,7 +3343,7 @@ sub patchcheck {
if (defined($patchcheck_end)) {
$end = $patchcheck_end;
} elsif ($cherry) {
- die "PATCHCHECK_END must be defined with PATCHCHECK_CHERRY\n";
+ dodie "PATCHCHECK_END must be defined with PATCHCHECK_CHERRY\n";
}
# Get the true sha1's since we can use things like HEAD~3
@@ -3496,7 +3407,7 @@ sub patchcheck {
doprint "\nProcessing commit \"$item\"\n\n";
run_command "git checkout $sha1" or
- die "Failed to checkout $sha1";
+ dodie "Failed to checkout $sha1";
# only clean on the first and last patch
if ($item eq $list[0] ||
@@ -3587,7 +3498,7 @@ sub read_kconfig {
}
open(KIN, "$kconfig")
- or die "Can't open $kconfig";
+ or dodie "Can't open $kconfig";
while (<KIN>) {
chomp;
@@ -3746,7 +3657,7 @@ sub get_depends {
$dep =~ s/^[^$valid]*[$valid]+//;
} else {
- die "this should never happen";
+ dodie "this should never happen";
}
}
@@ -4007,7 +3918,7 @@ sub make_min_config {
# update new ignore configs
if (defined($ignore_config)) {
open (OUT, ">$temp_config")
- or die "Can't write to $temp_config";
+ or dodie "Can't write to $temp_config";
foreach my $config (keys %save_configs) {
print OUT "$save_configs{$config}\n";
}
@@ -4035,7 +3946,7 @@ sub make_min_config {
# Save off all the current mandatory configs
open (OUT, ">$temp_config")
- or die "Can't write to $temp_config";
+ or dodie "Can't write to $temp_config";
foreach my $config (keys %keep_configs) {
print OUT "$keep_configs{$config}\n";
}
@@ -4222,6 +4133,74 @@ sub set_test_option {
return eval_option($name, $option, $i);
}
+sub find_mailer {
+ my ($mailer) = @_;
+
+ my @paths = split /:/, $ENV{PATH};
+
+ # sendmail is usually in /usr/sbin
+ $paths[$#paths + 1] = "/usr/sbin";
+
+ foreach my $path (@paths) {
+ if (-x "$path/$mailer") {
+ return $path;
+ }
+ }
+
+ return undef;
+}
+
+sub do_send_mail {
+ my ($subject, $message) = @_;
+
+ if (!defined($mail_path)) {
+ # find the mailer
+ $mail_path = find_mailer $mailer;
+ if (!defined($mail_path)) {
+ die "\nCan not find $mailer in PATH\n";
+ }
+ }
+
+ if (!defined($mail_command)) {
+ if ($mailer eq "mail" || $mailer eq "mailx") {
+ $mail_command = "\$MAIL_PATH/\$MAILER -s \'\$SUBJECT\' \$MAILTO <<< \'\$MESSAGE\'";
+ } elsif ($mailer eq "sendmail" ) {
+ $mail_command = "echo \'Subject: \$SUBJECT\n\n\$MESSAGE\' | \$MAIL_PATH/\$MAILER -t \$MAILTO";
+ } else {
+ die "\nYour mailer: $mailer is not supported.\n";
+ }
+ }
+
+ $mail_command =~ s/\$MAILER/$mailer/g;
+ $mail_command =~ s/\$MAIL_PATH/$mail_path/g;
+ $mail_command =~ s/\$MAILTO/$mailto/g;
+ $mail_command =~ s/\$SUBJECT/$subject/g;
+ $mail_command =~ s/\$MESSAGE/$message/g;
+
+ run_command $mail_command;
+}
+
+sub send_email {
+
+ if (defined($mailto)) {
+ if (!defined($mailer)) {
+ doprint "No email sent: email or mailer not specified in config.\n";
+ return;
+ }
+ do_send_mail @_;
+ }
+}
+
+sub cancel_test {
+ if ($email_when_canceled) {
+ send_email("KTEST: Your [$test_type] test was cancelled",
+ "Your test started at $script_start_time was cancelled: sig int");
+ }
+ die "\nCaught Sig Int, test interrupted: $!\n"
+}
+
+$SIG{INT} = qw(cancel_test);
+
# First we need to do is the builds
for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
@@ -4245,11 +4224,11 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
$outputdir = set_test_option("OUTPUT_DIR", $i);
$builddir = set_test_option("BUILD_DIR", $i);
- chdir $builddir || die "can't change directory to $builddir";
+ chdir $builddir || dodie "can't change directory to $builddir";
if (!-d $outputdir) {
mkpath($outputdir) or
- die "can't create $outputdir";
+ dodie "can't create $outputdir";
}
$make = "$makecmd O=$outputdir";
@@ -4262,9 +4241,15 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
$start_minconfig_defined = 1;
# The first test may override the PRE_KTEST option
- if (defined($pre_ktest) && $i == 1) {
- doprint "\n";
- run_command $pre_ktest;
+ if ($i == 1) {
+ if (defined($pre_ktest)) {
+ doprint "\n";
+ run_command $pre_ktest;
+ }
+ if ($email_when_started) {
+ send_email("KTEST: Your [$test_type] test was started",
+ "Your test was started on $script_start_time");
+ }
}
# Any test can override the POST_KTEST option
@@ -4280,7 +4265,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
if (!-d $tmpdir) {
mkpath($tmpdir) or
- die "can't create $tmpdir";
+ dodie "can't create $tmpdir";
}
$ENV{"SSH_USER"} = $ssh_user;
@@ -4353,7 +4338,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
if (defined($checkout)) {
run_command "git checkout $checkout" or
- die "failed to checkout $checkout";
+ dodie "failed to checkout $checkout";
}
$no_reboot = 0;
@@ -4428,4 +4413,8 @@ if ($opt{"POWEROFF_ON_SUCCESS"}) {
doprint "\n $successes of $opt{NUM_TESTS} tests were successful\n\n";
+if ($email_when_finished) {
+ send_email("KTEST: Your [$test_type] test has finished!",
+ "$successes of $opt{NUM_TESTS} tests started at $script_start_time were successful!");
+}
exit 0;
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
index 6c58cd8bbbae..6ca6ca0ce695 100644
--- a/tools/testing/ktest/sample.conf
+++ b/tools/testing/ktest/sample.conf
@@ -1,6 +1,11 @@
#
# Config file for ktest.pl
#
+# Place your customized version of this in the working directory that
+# ktest.pl is run from. By default, ktest.pl will look for a file
+# called "ktest.conf", but you can name it anything you like and specify
+# the name of your config file as the first argument of ktest.pl.
+#
# Note, all paths must be absolute
#
@@ -396,6 +401,44 @@
#### Optional Config Options (all have defaults) ####
+# Email options for receiving notifications. Users must set up
+# the specified mailer prior to using this feature.
+#
+# (default undefined)
+#MAILTO =
+#
+# Supported mailers: sendmail, mail, mailx
+# (default sendmail)
+#MAILER = sendmail
+#
+# The directory used to run the mailer from
+# (default: undefined; the mailer is searched for in $PATH and /usr/sbin)
+#MAIL_PATH = /usr/sbin
+#
+# The command used to send mail, which uses the above options,
+# can be modified. By default, if the mailer is "sendmail" then
+# MAIL_COMMAND = echo \'Subject: $SUBJECT\n\n$MESSAGE\' | $MAIL_PATH/$MAILER -t $MAILTO
+# For mail or mailx:
+# MAIL_COMMAND = $MAIL_PATH/$MAILER -s \'$SUBJECT\' $MAILTO <<< \'$MESSAGE\'
+# ktest.pl will do the substitution for MAIL_PATH, MAILER, MAILTO at the time
+# it sends the mail if the "$FOO" format is used. If the "${FOO}" format is used,
+# then the substitutions will occur at the time the config file is read.
+# Note that MAIL_PATH and MAILER must be set in the config file if
+# ${MAIL_PATH} or ${MAILER} are used, but not if $MAIL_PATH or $MAILER are.
+#MAIL_COMMAND = echo \'Subject: $SUBJECT\n\n$MESSAGE\' | $MAIL_PATH/$MAILER -t $MAILTO
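+#
+# As a purely hypothetical example, with MAILTO = user@example.com and the
+# sendmail defaults above, the command that ktest.pl ends up running would be
+# roughly:
+#   echo 'Subject: KTEST: ...' | /usr/sbin/sendmail -t user@example.com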
+#
+# Errors are defined as those that would terminate the script
+# (default 1)
+#EMAIL_ON_ERROR = 1
+# (default 1)
+#EMAIL_WHEN_FINISHED = 1
+# (default 0)
+#EMAIL_WHEN_STARTED = 1
+#
+# Users can cancel the test by pressing Ctrl-C
+# (default 0)
+#EMAIL_WHEN_CANCELED = 1
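+#
+# A minimal working setup might look like this (the address is a placeholder):
+# MAILTO = you@example.com
+# MAILER = sendmail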
+
# Start a test setup. If you leave this off, all options
# will be default and the test will run once.
# This is a label and not really an option (it takes no value).
@@ -725,6 +768,13 @@
# (default 120)
#TIMEOUT = 120
+# The timeout in seconds used when testing whether the box can be
+# rebooted. Before issuing the reboot command, an ssh connection
+# is attempted to see if the target machine is still active.
+# If the target does not respond within this timeout, a power cycle
+# is issued instead of a reboot.
+# CONNECT_TIMEOUT = 25
+
# In between tests, a reboot of the box may occur, and this
# is the time to wait for the console after it stops producing
# output. Some machines may not produce a large lag on reboot
@@ -1167,6 +1217,16 @@
# Set it to "good" to test only the good config and set it
# to "bad" to only test the bad config.
#
+# CONFIG_BISECT_EXEC (optional)
+# The config bisect is a separate program that comes with ktest.pl.
+# By default, it will look for:
+# `pwd`/config-bisect.pl # the location ktest.pl was executed from.
+# If it does not find it there, it will look for:
+# `dirname <ktest.pl>`/config-bisect.pl # The directory that holds ktest.pl
+# If it does not find it there, it will look for:
+# ${BUILD_DIR}/tools/testing/ktest/config-bisect.pl
+# Setting CONFIG_BISECT_EXEC will override where it looks.
+#
# Example:
# TEST_START
# TEST_TYPE = config_bisect
diff --git a/tools/testing/selftests/proc/proc-loadavg-001.c b/tools/testing/selftests/proc/proc-loadavg-001.c
index e38ad6d94d4b..fcff7047000d 100644
--- a/tools/testing/selftests/proc/proc-loadavg-001.c
+++ b/tools/testing/selftests/proc/proc-loadavg-001.c
@@ -1,5 +1,5 @@
/*
- * Copyright _ 2018 Alexey Dobriyan <adobriyan@gmail.com>
+ * Copyright © 2018 Alexey Dobriyan <adobriyan@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/tools/testing/selftests/proc/proc-self-map-files-001.c b/tools/testing/selftests/proc/proc-self-map-files-001.c
index af1d0a6af810..4209c64283d6 100644
--- a/tools/testing/selftests/proc/proc-self-map-files-001.c
+++ b/tools/testing/selftests/proc/proc-self-map-files-001.c
@@ -1,5 +1,5 @@
/*
- * Copyright _ 2018 Alexey Dobriyan <adobriyan@gmail.com>
+ * Copyright © 2018 Alexey Dobriyan <adobriyan@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/tools/testing/selftests/proc/proc-self-map-files-002.c b/tools/testing/selftests/proc/proc-self-map-files-002.c
index aebf4be56111..6f1f4a6e1ecb 100644
--- a/tools/testing/selftests/proc/proc-self-map-files-002.c
+++ b/tools/testing/selftests/proc/proc-self-map-files-002.c
@@ -1,5 +1,5 @@
/*
- * Copyright _ 2018 Alexey Dobriyan <adobriyan@gmail.com>
+ * Copyright © 2018 Alexey Dobriyan <adobriyan@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/tools/testing/selftests/proc/proc-self-syscall.c b/tools/testing/selftests/proc/proc-self-syscall.c
index 05eb6f91f1e9..5ab5f4810e43 100644
--- a/tools/testing/selftests/proc/proc-self-syscall.c
+++ b/tools/testing/selftests/proc/proc-self-syscall.c
@@ -1,3 +1,18 @@
+/*
+ * Copyright © 2018 Alexey Dobriyan <adobriyan@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
diff --git a/tools/testing/selftests/proc/proc-self-wchan.c b/tools/testing/selftests/proc/proc-self-wchan.c
index b8d8728a6869..a38b2fbaa7ad 100644
--- a/tools/testing/selftests/proc/proc-self-wchan.c
+++ b/tools/testing/selftests/proc/proc-self-wchan.c
@@ -1,3 +1,18 @@
+/*
+ * Copyright © 2018 Alexey Dobriyan <adobriyan@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
diff --git a/tools/testing/selftests/proc/proc-uptime-001.c b/tools/testing/selftests/proc/proc-uptime-001.c
index 303f26092306..781f7a50fc3f 100644
--- a/tools/testing/selftests/proc/proc-uptime-001.c
+++ b/tools/testing/selftests/proc/proc-uptime-001.c
@@ -1,5 +1,5 @@
/*
- * Copyright _ 2018 Alexey Dobriyan <adobriyan@gmail.com>
+ * Copyright © 2018 Alexey Dobriyan <adobriyan@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/tools/testing/selftests/proc/proc-uptime-002.c b/tools/testing/selftests/proc/proc-uptime-002.c
index 0cb79e1f1674..30e2b7849089 100644
--- a/tools/testing/selftests/proc/proc-uptime-002.c
+++ b/tools/testing/selftests/proc/proc-uptime-002.c
@@ -1,5 +1,5 @@
/*
- * Copyright _ 2018 Alexey Dobriyan <adobriyan@gmail.com>
+ * Copyright © 2018 Alexey Dobriyan <adobriyan@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/tools/testing/selftests/proc/proc-uptime.h b/tools/testing/selftests/proc/proc-uptime.h
index d584419f50a7..0e464b50e9d9 100644
--- a/tools/testing/selftests/proc/proc-uptime.h
+++ b/tools/testing/selftests/proc/proc-uptime.h
@@ -1,5 +1,5 @@
/*
- * Copyright _ 2018 Alexey Dobriyan <adobriyan@gmail.com>
+ * Copyright © 2018 Alexey Dobriyan <adobriyan@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/tools/testing/selftests/proc/read.c b/tools/testing/selftests/proc/read.c
index 12e397f78592..1e73c2232097 100644
--- a/tools/testing/selftests/proc/read.c
+++ b/tools/testing/selftests/proc/read.c
@@ -1,5 +1,5 @@
/*
- * Copyright _ 2018 Alexey Dobriyan <adobriyan@gmail.com>
+ * Copyright © 2018 Alexey Dobriyan <adobriyan@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above