-rw-r--r--CREDITS3
-rw-r--r--Documentation/arm64/tagged-pointers.txt14
-rw-r--r--Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt10
-rw-r--r--Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt10
-rw-r--r--Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt (renamed from Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt)8
-rw-r--r--Documentation/devicetree/bindings/pci/designware-pcie.txt2
-rw-r--r--Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt (renamed from Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt)0
-rw-r--r--Documentation/filesystems/vfs.txt14
-rw-r--r--Documentation/kernel-parameters.txt4
-rw-r--r--Documentation/networking/bonding.txt6
-rw-r--r--Documentation/scheduler/sched-design-CFS.txt4
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt6
-rw-r--r--MAINTAINERS20
-rw-r--r--Makefile2
-rw-r--r--arch/Kconfig3
-rw-r--r--arch/arm/Kconfig3
-rw-r--r--arch/arm/boot/dts/Makefile1
-rw-r--r--arch/arm/boot/dts/am335x-bone-common.dtsi262
-rw-r--r--arch/arm/boot/dts/am335x-bone.dts256
-rw-r--r--arch/arm/boot/dts/am335x-boneblack.dts17
-rw-r--r--arch/arm/boot/dts/imx27.dtsi6
-rw-r--r--arch/arm/boot/dts/imx51.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6q-pinfunc.h4
-rw-r--r--arch/arm/boot/dts/omap3-beagle-xm.dts2
-rw-r--r--arch/arm/boot/dts/omap3-igep.dtsi14
-rw-r--r--arch/arm/boot/dts/omap4-panda-common.dtsi46
-rw-r--r--arch/arm/boot/dts/omap4-sdp.dts39
-rw-r--r--arch/arm/boot/dts/omap5.dtsi7
-rw-r--r--arch/arm/configs/multi_v7_defconfig2
-rw-r--r--arch/arm/crypto/Makefile14
-rw-r--r--arch/arm/crypto/aes-armv4.S6
-rw-r--r--arch/arm/crypto/aes_glue.c22
-rw-r--r--arch/arm/crypto/aes_glue.h19
-rw-r--r--arch/arm/crypto/aesbs-core.S_shipped2544
-rw-r--r--arch/arm/crypto/aesbs-glue.c434
-rw-r--r--arch/arm/crypto/bsaes-armv7.pl2467
-rw-r--r--arch/arm/include/asm/Kbuild1
-rw-r--r--arch/arm/include/asm/uaccess.h7
-rw-r--r--arch/arm/kernel/entry-common.S4
-rw-r--r--arch/arm/kernel/entry-header.S8
-rw-r--r--arch/arm/mach-imx/clk-fixup-mux.c1
-rw-r--r--arch/arm/mach-imx/clk-imx27.c2
-rw-r--r--arch/arm/mach-imx/clk-imx51-imx53.c4
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c9
-rw-r--r--arch/arm/mach-imx/system.c11
-rw-r--r--arch/arm/mach-omap2/cclock44xx_data.c2
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c2
-rw-r--r--arch/arm/mach-omap2/gpmc.c4
-rw-r--r--arch/arm/mach-omap2/mux34xx.c2
-rw-r--r--arch/arm/mach-omap2/omap-smp.c2
-rw-r--r--arch/arm/mach-omap2/omap_device.c2
-rw-r--r--arch/arm/mach-sa1100/collie.c2
-rw-r--r--arch/arm/mach-shmobile/clock-r8a73a4.c2
-rw-r--r--arch/arm/mach-shmobile/clock-sh73a0.c2
-rw-r--r--arch/arm/mach-u300/Kconfig10
-rw-r--r--arch/arm/mach-ux500/cache-l2x0.c1
-rw-r--r--arch/arm64/include/asm/hwcap.h2
-rw-r--r--arch/arm64/kernel/process.c21
-rw-r--r--arch/arm64/kernel/setup.c2
-rw-r--r--arch/arm64/mm/fault.c2
-rw-r--r--arch/mips/Makefile3
-rw-r--r--arch/mips/alchemy/common/usb.c3
-rw-r--r--arch/mips/bcm63xx/cpu.c4
l---------arch/mips/boot/dts/include/dt-bindings2
-rw-r--r--arch/mips/cavium-octeon/csrc-octeon.c1
-rw-r--r--arch/mips/dec/prom/init.c1
-rw-r--r--arch/mips/include/asm/cpu-features.h8
-rw-r--r--arch/mips/include/asm/cpu-info.h1
-rw-r--r--arch/mips/include/asm/cpu-type.h203
-rw-r--r--arch/mips/include/asm/cpu.h38
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1000.h4
-rw-r--r--arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mipsregs.h7
-rw-r--r--arch/mips/include/asm/pci.h12
-rw-r--r--arch/mips/include/asm/timex.h33
-rw-r--r--arch/mips/include/asm/vga.h3
-rw-r--r--arch/mips/kernel/cpu-probe.c58
-rw-r--r--arch/mips/kernel/idle.c3
-rw-r--r--arch/mips/kernel/time.c1
-rw-r--r--arch/mips/kernel/traps.c3
-rw-r--r--arch/mips/mm/c-octeon.c6
-rw-r--r--arch/mips/mm/c-r4k.c48
-rw-r--r--arch/mips/mm/dma-default.c13
-rw-r--r--arch/mips/mm/page.c1
-rw-r--r--arch/mips/mm/sc-mips.c3
-rw-r--r--arch/mips/mm/tlb-r4k.c1
-rw-r--r--arch/mips/mm/tlbex.c1
-rw-r--r--arch/mips/mti-malta/malta-time.c5
-rw-r--r--arch/mips/mti-sead3/sead3-time.c3
-rw-r--r--arch/mips/netlogic/xlr/fmn-config.c3
-rw-r--r--arch/mips/oprofile/common.c1
-rw-r--r--arch/mips/pci/pci-bcm1480.c1
-rw-r--r--arch/mips/sibyte/bcm1480/setup.c3
-rw-r--r--arch/mips/sibyte/sb1250/setup.c3
-rw-r--r--arch/mips/sni/setup.c3
-rw-r--r--arch/openrisc/include/asm/prom.h44
-rw-r--r--arch/powerpc/boot/Makefile4
-rw-r--r--arch/powerpc/boot/epapr-wrapper.c9
-rw-r--r--arch/powerpc/boot/epapr.c4
-rw-r--r--arch/powerpc/boot/of.c16
-rwxr-xr-xarch/powerpc/boot/wrapper9
-rw-r--r--arch/powerpc/include/asm/irq.h4
-rw-r--r--arch/powerpc/include/asm/processor.h4
-rw-r--r--arch/powerpc/kernel/asm-offsets.c3
-rw-r--r--arch/powerpc/kernel/irq.c100
-rw-r--r--arch/powerpc/kernel/misc_32.S25
-rw-r--r--arch/powerpc/kernel/misc_64.S10
-rw-r--r--arch/powerpc/kernel/process.c3
-rw-r--r--arch/powerpc/kernel/prom_init.c21
-rw-r--r--arch/powerpc/lib/sstep.c3
-rw-r--r--arch/powerpc/platforms/pseries/smp.c26
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/include/asm/mutex.h2
-rw-r--r--arch/s390/include/asm/processor.h2
-rw-r--r--arch/s390/include/asm/spinlock.h5
-rw-r--r--arch/tile/Kconfig2
-rw-r--r--arch/tile/gxio/iorpc_mpipe.c90
-rw-r--r--arch/tile/gxio/iorpc_mpipe_info.c15
-rw-r--r--arch/tile/gxio/iorpc_trio.c28
-rw-r--r--arch/tile/gxio/iorpc_usb_host.c8
-rw-r--r--arch/tile/gxio/usb_host.c8
-rw-r--r--arch/tile/include/arch/mpipe.h24
-rw-r--r--arch/tile/include/arch/mpipe_constants.h6
-rw-r--r--arch/tile/include/arch/mpipe_shm.h54
-rw-r--r--arch/tile/include/arch/trio_constants.h10
-rw-r--r--arch/tile/include/asm/page.h5
-rw-r--r--arch/tile/include/asm/pgtable_32.h12
-rw-r--r--arch/tile/include/asm/pgtable_64.h4
-rw-r--r--arch/tile/include/gxio/iorpc_mpipe.h52
-rw-r--r--arch/tile/include/gxio/iorpc_mpipe_info.h12
-rw-r--r--arch/tile/include/gxio/iorpc_trio.h28
-rw-r--r--arch/tile/include/gxio/iorpc_usb_host.h8
-rw-r--r--arch/tile/include/gxio/usb_host.h8
-rw-r--r--arch/tile/kernel/compat.c2
-rw-r--r--arch/tile/kernel/futex_64.S55
-rw-r--r--arch/tile/kernel/setup.c3
-rw-r--r--arch/tile/kernel/unaligned.c4
-rw-r--r--arch/tile/mm/fault.c2
-rw-r--r--arch/tile/mm/init.c4
-rw-r--r--arch/tile/mm/pgtable.c3
-rw-r--r--arch/x86/Kconfig5
-rw-r--r--arch/x86/include/asm/xen/page.h31
-rw-r--r--arch/x86/kernel/cpu/perf_event.c12
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c5
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c1
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c10
-rw-r--r--arch/x86/kernel/entry_64.S15
-rw-r--r--arch/x86/kernel/microcode_amd.c1
-rw-r--r--arch/x86/kernel/reboot.c18
-rw-r--r--arch/x86/kernel/smpboot.c3
-rw-r--r--arch/x86/kvm/emulate.c14
-rw-r--r--arch/x86/kvm/paging_tmpl.h20
-rw-r--r--arch/x86/kvm/vmx.c13
-rw-r--r--arch/x86/platform/efi/efi.c11
-rw-r--r--arch/x86/xen/p2m.c10
-rw-r--r--arch/x86/xen/spinlock.c26
-rw-r--r--block/blk-cgroup.c25
-rw-r--r--block/blk-core.c6
-rw-r--r--block/blk-exec.c4
-rw-r--r--block/cfq-iosched.c4
-rw-r--r--block/deadline-iosched.c2
-rw-r--r--block/elevator.c2
-rw-r--r--block/genhd.c3
-rw-r--r--crypto/Kconfig16
-rw-r--r--drivers/acpi/acpi_ipmi.c24
-rw-r--r--drivers/acpi/scan.c2
-rw-r--r--drivers/ata/sata_promise.c2
-rw-r--r--drivers/atm/he.c13
-rw-r--r--drivers/atm/nicstar.c2
-rw-r--r--drivers/base/core.c14
-rw-r--r--drivers/bcma/scan.c12
-rw-r--r--drivers/block/cciss.c1
-rw-r--r--drivers/block/cpqarray.c1
-rw-r--r--drivers/block/rbd.c77
-rw-r--r--drivers/char/tpm/xen-tpmfront.c36
-rw-r--r--drivers/clocksource/Kconfig1
-rw-r--r--drivers/clocksource/clksrc-of.c3
-rw-r--r--drivers/clocksource/em_sti.c2
-rw-r--r--drivers/clocksource/exynos_mct.c10
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c4
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c7
-rw-r--r--drivers/cpufreq/cpufreq.c34
-rw-r--r--drivers/cpufreq/exynos5440-cpufreq.c2
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c7
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h2
-rw-r--r--drivers/gpu/drm/drm_context.c73
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c8
-rw-r--r--drivers/gpu/drm/drm_fops.c21
-rw-r--r--drivers/gpu/drm/drm_stub.c10
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c5
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c19
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c6
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c68
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c46
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c13
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c2
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c1
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c6
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c63
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c8
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c10
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_kms.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c58
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h8
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c41
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c24
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c24
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c35
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c23
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c57
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.h2
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c32
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c39
-rw-r--r--drivers/gpu/drm/radeon/cik.c53
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c12
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c164
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.h1
-rw-r--r--drivers/gpu/drm/radeon/kv_smc.c8
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c30
-rw-r--r--drivers/gpu/drm/radeon/ppsmc.h2
-rw-r--r--drivers/gpu/drm/radeon/r100.c15
-rw-r--r--drivers/gpu/drm/radeon/r420.c7
-rw-r--r--drivers/gpu/drm/radeon/r600.c19
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c40
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c20
-rw-r--r--drivers/gpu/drm/radeon/r600d.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h82
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h5
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c66
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c69
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c89
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h27
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c3
-rw-r--r--drivers/gpu/drm/radeon/rs400.c7
-rw-r--r--drivers/gpu/drm/radeon/rs600.c12
-rw-r--r--drivers/gpu/drm/radeon/rs690.c7
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.c112
-rw-r--r--drivers/gpu/drm/radeon/rv515.c8
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c16
-rw-r--r--drivers/gpu/drm/radeon/rv770_smc.c44
-rw-r--r--drivers/gpu/drm/radeon/rv770_smc.h2
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h2
-rw-r--r--drivers/gpu/drm/radeon/si.c21
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c30
-rw-r--r--drivers/gpu/drm/radeon/si_smc.c43
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c17
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.h2
-rw-r--r--drivers/gpu/drm/radeon/trinity_smc.c8
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c2
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c1
-rw-r--r--drivers/hid/hid-core.c74
-rw-r--r--drivers/hid/hid-input.c11
-rw-r--r--drivers/hid/hid-lenovo-tpkbd.c25
-rw-r--r--drivers/hid/hid-lg2ff.c19
-rw-r--r--drivers/hid/hid-lg3ff.c29
-rw-r--r--drivers/hid/hid-lg4ff.c20
-rw-r--r--drivers/hid/hid-lgff.c17
-rw-r--r--drivers/hid/hid-logitech-dj.c10
-rw-r--r--drivers/hid/hid-multitouch.c26
-rw-r--r--drivers/hid/hid-sony.c4
-rw-r--r--drivers/hid/hid-steelseries.c5
-rw-r--r--drivers/hid/hid-zpff.c18
-rw-r--r--drivers/hv/connection.c2
-rw-r--r--drivers/hv/hv_kvp.c38
-rw-r--r--drivers/hv/hv_snapshot.c6
-rw-r--r--drivers/hv/hv_util.c71
-rw-r--r--drivers/hwmon/applesmc.c11
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c26
-rw-r--r--drivers/i2c/busses/i2c-ismt.c3
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c16
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c2
-rw-r--r--drivers/iio/accel/bma180.c4
-rw-r--r--drivers/iio/adc/at91_adc.c11
-rw-r--r--drivers/iio/buffer_cb.c2
-rw-r--r--drivers/iio/dac/mcp4725.c12
-rw-r--r--drivers/iio/iio_core.h4
-rw-r--r--drivers/iio/industrialio-buffer.c30
-rw-r--r--drivers/iio/industrialio-core.c31
-rw-r--r--drivers/iio/industrialio-event.c20
-rw-r--r--drivers/iio/temperature/tmp006.c6
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c4
-rw-r--r--drivers/isdn/hisax/amd7930_fn.c4
-rw-r--r--drivers/isdn/hisax/avm_pci.c4
-rw-r--r--drivers/isdn/hisax/config.c2
-rw-r--r--drivers/isdn/hisax/diva.c4
-rw-r--r--drivers/isdn/hisax/elsa.c2
-rw-r--r--drivers/isdn/hisax/elsa_ser.c2
-rw-r--r--drivers/isdn/hisax/hfc_pci.c2
-rw-r--r--drivers/isdn/hisax/hfc_sx.c2
-rw-r--r--drivers/isdn/hisax/hscx_irq.c4
-rw-r--r--drivers/isdn/hisax/icc.c4
-rw-r--r--drivers/isdn/hisax/ipacx.c8
-rw-r--r--drivers/isdn/hisax/isac.c4
-rw-r--r--drivers/isdn/hisax/isar.c6
-rw-r--r--drivers/isdn/hisax/jade.c18
-rw-r--r--drivers/isdn/hisax/jade_irq.c4
-rw-r--r--drivers/isdn/hisax/l3_1tr6.c50
-rw-r--r--drivers/isdn/hisax/netjet.c2
-rw-r--r--drivers/isdn/hisax/q931.c6
-rw-r--r--drivers/isdn/hisax/w6692.c8
-rw-r--r--drivers/mailbox/mailbox-omap2.c1
-rw-r--r--drivers/md/bcache/bcache.h7
-rw-r--r--drivers/md/bcache/bset.c39
-rw-r--r--drivers/md/bcache/btree.c4
-rw-r--r--drivers/md/bcache/journal.c33
-rw-r--r--drivers/md/bcache/request.c15
-rw-r--r--drivers/md/bcache/sysfs.c9
-rw-r--r--drivers/md/bcache/util.c11
-rw-r--r--drivers/md/bcache/util.h12
-rw-r--r--drivers/md/bcache/writeback.c42
-rw-r--r--drivers/md/dm-io.c7
-rw-r--r--drivers/md/dm-mpath.c18
-rw-r--r--drivers/md/dm-snap-persistent.c2
-rw-r--r--drivers/md/dm-snap.c5
-rw-r--r--drivers/md/dm-stats.c23
-rw-r--r--drivers/md/dm-thin.c14
-rw-r--r--drivers/md/dm.c71
-rw-r--r--drivers/md/dm.h3
-rw-r--r--drivers/misc/mei/amthif.c1
-rw-r--r--drivers/misc/mei/bus.c5
-rw-r--r--drivers/misc/mei/client.h6
-rw-r--r--drivers/misc/mei/hbm.c10
-rw-r--r--drivers/misc/mei/init.c3
-rw-r--r--drivers/misc/mei/main.c11
-rw-r--r--drivers/misc/mei/mei_dev.h6
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c7
-rw-r--r--drivers/net/bonding/bond_alb.c2
-rw-r--r--drivers/net/bonding/bond_alb.h9
-rw-r--r--drivers/net/bonding/bond_main.c1
-rw-r--r--drivers/net/bonding/bond_sysfs.c39
-rw-r--r--drivers/net/bonding/bonding.h1
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c4
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c2
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c9
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c44
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h37
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c14
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/hp/hp100.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c13
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c25
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h1
-rw-r--r--drivers/net/ethernet/lantiq_etop.c6
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c3
-rw-r--r--drivers/net/ethernet/marvell/skge.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c16
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c2
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c3
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c3
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c1
-rw-r--r--drivers/net/ethernet/realtek/r8169.c1
-rw-r--r--drivers/net/ethernet/sfc/Kconfig2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c58
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c2
-rw-r--r--drivers/net/ethernet/sfc/nic.h3
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h2
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c3
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c2
-rw-r--r--drivers/net/irda/mcs7780.c40
-rw-r--r--drivers/net/loopback.c1
-rw-r--r--drivers/net/netconsole.c5
-rw-r--r--drivers/net/phy/cicada.c4
-rw-r--r--drivers/net/ppp/pptp.c2
-rw-r--r--drivers/net/tun.c11
-rw-r--r--drivers/net/usb/cdc_ether.c115
-rw-r--r--drivers/net/vxlan.c40
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig4
-rw-r--r--drivers/net/wireless/cw1200/cw1200_spi.c28
-rw-r--r--drivers/net/wireless/cw1200/fwio.c2
-rw-r--r--drivers/net/wireless/cw1200/hwbus.h1
-rw-r--r--drivers/net/wireless/cw1200/hwio.c15
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c11
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c15
-rw-r--r--drivers/net/xen-netback/common.h1
-rw-r--r--drivers/net/xen-netback/interface.c28
-rw-r--r--drivers/net/xen-netback/netback.c94
-rw-r--r--drivers/net/xen-netback/xenbus.c17
-rw-r--r--drivers/pci/pci-acpi.c6
-rw-r--r--drivers/pci/pci.c8
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c3
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c3
-rw-r--r--drivers/staging/comedi/Kconfig33
-rw-r--r--drivers/staging/dgap/dgap_driver.c17
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c4
-rw-r--r--drivers/staging/iio/Kconfig2
-rw-r--r--drivers/staging/iio/light/isl29018.c1
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.c2
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c2
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c11
-rw-r--r--drivers/staging/line6/toneport.c10
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c2
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c2
-rw-r--r--drivers/staging/lustre/lustre/Kconfig4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c4
-rw-r--r--drivers/staging/lustre/lustre/libcfs/workitem.c2
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c8
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c6
-rw-r--r--drivers/staging/octeon-usb/cvmx-usb.c2
-rw-r--r--drivers/staging/octeon/ethernet-mem.c7
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c4
-rw-r--r--drivers/staging/octeon/ethernet-rx.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c14
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mp.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c2
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h1
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c1
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c2
-rw-r--r--drivers/staging/vt6656/card.c4
-rw-r--r--drivers/staging/vt6656/iwctl.c3
-rw-r--r--drivers/staging/vt6656/main_usb.c3
-rw-r--r--drivers/staging/vt6656/rxtx.c2
-rw-r--r--drivers/staging/xillybus/xillybus_core.c2
-rw-r--r--drivers/staging/zram/zram_drv.c1
-rw-r--r--drivers/tty/n_tty.c3
-rw-r--r--drivers/tty/serial/pch_uart.c13
-rw-r--r--drivers/tty/serial/serial-tegra.c4
-rw-r--r--drivers/tty/tty_io.c3
-rw-r--r--drivers/tty/tty_ioctl.c3
-rw-r--r--drivers/usb/chipidea/Kconfig2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c7
-rw-r--r--drivers/usb/chipidea/core.c1
-rw-r--r--drivers/usb/chipidea/udc.c4
-rw-r--r--drivers/usb/core/devio.c16
-rw-r--r--drivers/usb/core/hub.c3
-rw-r--r--drivers/usb/dwc3/Kconfig1
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/dwc3/gadget.c6
-rw-r--r--drivers/usb/gadget/cdc2.c19
-rw-r--r--drivers/usb/gadget/dummy_hcd.c7
-rw-r--r--drivers/usb/gadget/f_ecm.c2
-rw-r--r--drivers/usb/gadget/f_eem.c2
-rw-r--r--drivers/usb/gadget/f_fs.c60
-rw-r--r--drivers/usb/gadget/f_mass_storage.c2
-rw-r--r--drivers/usb/gadget/fotg210-udc.c2
-rw-r--r--drivers/usb/gadget/fusb300_udc.c2
-rw-r--r--drivers/usb/gadget/multi.c8
-rw-r--r--drivers/usb/gadget/mv_u3d_core.c3
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c13
-rw-r--r--drivers/usb/host/ehci-fsl.c19
-rw-r--r--drivers/usb/host/ehci-grlib.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c2
-rw-r--r--drivers/usb/host/ehci-mv.c2
-rw-r--r--drivers/usb/host/ehci-octeon.c2
-rw-r--r--drivers/usb/host/ehci-pci.c2
-rw-r--r--drivers/usb/host/ehci-pmcmsp.c2
-rw-r--r--drivers/usb/host/ehci-ppc-of.c2
-rw-r--r--drivers/usb/host/ehci-ps3.c2
-rw-r--r--drivers/usb/host/ehci-q.c5
-rw-r--r--drivers/usb/host/ehci-sead3.c2
-rw-r--r--drivers/usb/host/ehci-sh.c2
-rw-r--r--drivers/usb/host/ehci-tilegx.c2
-rw-r--r--drivers/usb/host/ehci-w90x900.c2
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c2
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c6
-rw-r--r--drivers/usb/host/imx21-hcd.c8
-rw-r--r--drivers/usb/host/ohci-hcd.c22
-rw-r--r--drivers/usb/host/ohci-q.c26
-rw-r--r--drivers/usb/host/uhci-pci.c2
-rw-r--r--drivers/usb/host/uhci-q.c12
-rw-r--r--drivers/usb/host/xhci-hub.c47
-rw-r--r--drivers/usb/host/xhci-mem.c2
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/host/xhci-ring.c37
-rw-r--r--drivers/usb/host/xhci.c25
-rw-r--r--drivers/usb/host/xhci.h11
-rw-r--r--drivers/usb/phy/phy-omap-usb3.c2
-rw-r--r--drivers/usb/serial/Kconfig2
-rw-r--r--drivers/usb/serial/pl2303.c43
-rw-r--r--drivers/vhost/scsi.c43
-rw-r--r--drivers/vhost/vhost.c4
-rw-r--r--drivers/video/mmp/hw/mmp_ctrl.c17
-rw-r--r--drivers/video/mxsfb.c1
-rw-r--r--drivers/video/neofb.c4
-rw-r--r--drivers/video/of_display_timing.c6
-rw-r--r--drivers/video/omap2/displays-new/Kconfig1
-rw-r--r--drivers/video/omap2/displays-new/connector-analog-tv.c2
-rw-r--r--drivers/video/omap2/displays-new/connector-dvi.c2
-rw-r--r--drivers/video/omap2/displays-new/connector-hdmi.c2
-rw-r--r--drivers/video/omap2/dss/dispc.c1
-rw-r--r--drivers/video/s3fb.c9
-rw-r--r--drivers/xen/balloon.c23
-rw-r--r--fs/9p/v9fs.c7
-rw-r--r--fs/9p/vfs_inode_dotl.c8
-rw-r--r--fs/autofs4/waitq.c13
-rw-r--r--fs/bio-integrity.c2
-rw-r--r--fs/bio.c4
-rw-r--r--fs/btrfs/btrfs_inode.h5
-rw-r--r--fs/btrfs/ctree.c7
-rw-r--r--fs/btrfs/ctree.h17
-rw-r--r--fs/btrfs/dev-replace.c4
-rw-r--r--fs/btrfs/disk-io.c2
-rw-r--r--fs/btrfs/extent-tree.c57
-rw-r--r--fs/btrfs/extent_io.c8
-rw-r--r--fs/btrfs/file.c4
-rw-r--r--fs/btrfs/free-space-cache.c67
-rw-r--r--fs/btrfs/free-space-cache.h5
-rw-r--r--fs/btrfs/inode.c16
-rw-r--r--fs/btrfs/ioctl.c80
-rw-r--r--fs/btrfs/ordered-data.c24
-rw-r--r--fs/btrfs/ordered-data.h5
-rw-r--r--fs/btrfs/relocation.c43
-rw-r--r--fs/btrfs/scrub.c112
-rw-r--r--fs/btrfs/super.c21
-rw-r--r--fs/btrfs/transaction.c2
-rw-r--r--fs/btrfs/tree-log.c52
-rw-r--r--fs/btrfs/volumes.c7
-rw-r--r--fs/cachefiles/namei.c2
-rw-r--r--fs/cachefiles/xattr.c29
-rw-r--r--fs/cifs/dir.c1
-rw-r--r--fs/fscache/cookie.c3
-rw-r--r--fs/gfs2/inode.c4
-rw-r--r--fs/namei.c34
-rw-r--r--fs/nfs/dir.c3
-rw-r--r--fs/ocfs2/super.c2
-rw-r--r--fs/open.c21
-rw-r--r--fs/pstore/platform.c29
-rw-r--r--fs/reiserfs/journal.c67
-rw-r--r--fs/udf/ialloc.c16
-rw-r--r--fs/udf/super.c64
-rw-r--r--fs/udf/udf_sb.h2
-rw-r--r--fs/xfs/xfs_buf_item.c1
-rw-r--r--fs/xfs/xfs_da_btree.c5
-rw-r--r--fs/xfs/xfs_fs.h2
-rw-r--r--fs/xfs/xfs_icache.c9
-rw-r--r--fs/xfs/xfs_log_recover.c73
-rw-r--r--include/drm/drmP.h7
-rw-r--r--include/drm/drm_pciids.h3
-rw-r--r--include/linux/blkdev.h11
-rw-r--r--include/linux/ceph/osd_client.h2
-rw-r--r--include/linux/device-mapper.h3
-rw-r--r--include/linux/hid.h4
-rw-r--r--include/linux/hyperv.h7
-rw-r--r--include/linux/kvm_host.h1
-rw-r--r--include/linux/memcontrol.h55
-rw-r--r--include/linux/mutex.h6
-rw-r--r--include/linux/netdevice.h8
-rw-r--r--include/linux/netfilter/ipset/ip_set.h6
-rw-r--r--include/linux/of_irq.h20
-rw-r--r--include/linux/smp.h6
-rw-r--r--include/linux/timex.h1
-rw-r--r--include/net/ip.h12
-rw-r--r--include/net/netfilter/nf_conntrack_extend.h2
-rw-r--r--include/trace/events/block.h6
-rw-r--r--include/trace/events/btrfs.h1
-rw-r--r--include/uapi/drm/radeon_drm.h2
-rw-r--r--include/uapi/linux/perf_event.h17
-rw-r--r--ipc/msg.c19
-rw-r--r--ipc/sem.c34
-rw-r--r--ipc/shm.c17
-rw-r--r--ipc/util.c32
-rw-r--r--ipc/util.h10
-rw-r--r--kernel/audit.c5
-rw-r--r--kernel/context_tracking.c12
-rw-r--r--kernel/events/core.c21
-rw-r--r--kernel/params.c6
-rw-r--r--kernel/reboot.c9
-rw-r--r--kernel/sched/debug.c6
-rw-r--r--kernel/sched/fair.c23
-rw-r--r--kernel/sched/stats.h5
-rw-r--r--kernel/time/ntp.c6
-rw-r--r--kernel/time/timekeeping.c2
-rw-r--r--kernel/watchdog.c60
-rw-r--r--lib/kobject.c5
-rw-r--r--lib/lockref.c23
-rw-r--r--mm/memcontrol.c560
-rw-r--r--mm/mlock.c1
-rw-r--r--mm/vmscan.c83
-rw-r--r--net/batman-adv/soft-interface.c2
-rw-r--r--net/bridge/br_netlink.c4
-rw-r--r--net/bridge/br_private.h8
-rw-r--r--net/bridge/br_stp.c23
-rw-r--r--net/bridge/br_stp_if.c12
-rw-r--r--net/ceph/osd_client.c11
-rw-r--r--net/core/netpoll.c11
-rw-r--r--net/dccp/ipv6.c1
-rw-r--r--net/ipv4/igmp.c4
-rw-r--r--net/ipv4/inetpeer.c4
-rw-r--r--net/ipv4/ip_output.c8
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/tcp_metrics.c4
-rw-r--r--net/ipv4/xfrm4_mode_tunnel.c2
-rw-r--r--net/ipv6/ip6_tunnel.c4
-rw-r--r--net/ipv6/netfilter/nf_nat_proto_icmpv6.c4
-rw-r--r--net/netfilter/ipset/ip_set_core.c5
-rw-r--r--net/netfilter/ipset/ip_set_getport.c4
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h28
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c4
-rw-r--r--net/netfilter/ipset/ip_set_hash_net.c4
-rw-r--r--net/netfilter/ipset/ip_set_hash_netiface.c4
-rw-r--r--net/netfilter/ipset/ip_set_hash_netport.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c2
-rw-r--r--net/netfilter/nfnetlink_queue_core.c2
-rw-r--r--net/sctp/input.c3
-rw-r--r--net/sctp/ipv6.c44
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c11
-rwxr-xr-xscripts/checkpatch.pl4
-rw-r--r--sound/core/compress_offload.c15
-rw-r--r--sound/pci/hda/patch_cirrus.c72
-rw-r--r--sound/pci/hda/patch_hdmi.c47
-rw-r--r--sound/pci/hda/patch_realtek.c16
-rw-r--r--tools/lib/lk/debugfs.c1
-rw-r--r--tools/perf/arch/x86/util/tsc.c6
-rw-r--r--tools/perf/builtin-inject.c2
-rw-r--r--tools/perf/builtin-kmem.c2
-rw-r--r--tools/perf/builtin-report.c5
-rw-r--r--tools/perf/builtin-script.c2
-rw-r--r--tools/perf/builtin-trace.c18
-rw-r--r--tools/perf/config/Makefile5
-rw-r--r--tools/perf/config/feature-tests.mak10
-rw-r--r--tools/perf/util/annotate.c2
-rw-r--r--tools/perf/util/dwarf-aux.c19
-rw-r--r--tools/perf/util/dwarf-aux.h3
-rw-r--r--tools/perf/util/header.c41
-rw-r--r--tools/perf/util/hist.c2
-rw-r--r--tools/perf/util/machine.c2
-rw-r--r--tools/perf/util/probe-finder.c89
-rw-r--r--tools/perf/util/probe-finder.h3
-rw-r--r--tools/perf/util/session.c9
-rw-r--r--tools/perf/util/session.h4
-rw-r--r--tools/perf/util/symbol-elf.c16
-rw-r--r--tools/perf/util/trace-event-parse.c2
-rw-r--r--virt/kvm/async_pf.c5
-rw-r--r--virt/kvm/kvm_main.c14
674 files changed, 11905 insertions, 3537 deletions
diff --git a/CREDITS b/CREDITS
index 9416a9a8b95e..0640e1650483 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2808,8 +2808,7 @@ S: Ottawa, Ontario
S: Canada K2P 0X8
N: Mikael Pettersson
-E: mikpe@it.uu.se
-W: http://user.it.uu.se/~mikpe/linux/
+E: mikpelinux@gmail.com
D: Miscellaneous fixes
N: Reed H. Petty
diff --git a/Documentation/arm64/tagged-pointers.txt b/Documentation/arm64/tagged-pointers.txt
index 264e9841563a..d9995f1f51b3 100644
--- a/Documentation/arm64/tagged-pointers.txt
+++ b/Documentation/arm64/tagged-pointers.txt
@@ -18,17 +18,17 @@ this byte for application use, with the following caveats:
parameters containing user virtual addresses *must* have
their top byte cleared before trapping to the kernel.
- (2) Tags are not guaranteed to be preserved when delivering
- signals. This means that signal handlers in applications
- making use of tags cannot rely on the tag information for
- user virtual addresses being maintained for fields inside
- siginfo_t. One exception to this rule is for signals raised
- in response to debug exceptions, where the tag information
+ (2) Non-zero tags are not preserved when delivering signals.
+ This means that signal handlers in applications making use
+ of tags cannot rely on the tag information for user virtual
+ addresses being maintained for fields inside siginfo_t.
+ One exception to this rule is for signals raised in response
+ to watchpoint debug exceptions, where the tag information
will be preserved.
(3) Special care should be taken when using tagged pointers,
since it is likely that C compilers will not hazard two
- addresses differing only in the upper bits.
+ virtual addresses differing only in the upper byte.
The architecture prevents the use of a tagged PC, so the upper byte will
be set to a sign-extension of bit 55 on exception return.
diff --git a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
index 6d1c0988cfc7..c67b975c8906 100644
--- a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
@@ -1,11 +1,11 @@
-* Samsung Exynos specific extensions to the Synopsis Designware Mobile
+* Samsung Exynos specific extensions to the Synopsys Designware Mobile
Storage Host Controller
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsis dw mshc controller properties described
-by synopsis-dw-mshc.txt and the properties used by the Samsung Exynos specific
-extensions to the Synopsis Designware Mobile Storage Host Controller.
+differences between the core Synopsys dw mshc controller properties described
+by synopsys-dw-mshc.txt and the properties used by the Samsung Exynos specific
+extensions to the Synopsys Designware Mobile Storage Host Controller.
Required Properties:
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
index 8a3d91d47b6a..c559f3f36309 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
@@ -1,11 +1,11 @@
-* Rockchip specific extensions to the Synopsis Designware Mobile
+* Rockchip specific extensions to the Synopsys Designware Mobile
Storage Host Controller
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsis dw mshc controller properties described
-by synopsis-dw-mshc.txt and the properties used by the Rockchip specific
-extensions to the Synopsis Designware Mobile Storage Host Controller.
+differences between the core Synopsys dw mshc controller properties described
+by synopsys-dw-mshc.txt and the properties used by the Rockchip specific
+extensions to the Synopsys Designware Mobile Storage Host Controller.
Required Properties:
diff --git a/Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
index cdcebea9c6f5..066a78b034ca 100644
--- a/Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
@@ -1,14 +1,14 @@
-* Synopsis Designware Mobile Storage Host Controller
+* Synopsys Designware Mobile Storage Host Controller
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
differences between the core mmc properties described by mmc.txt and the
-properties used by the Synopsis Designware Mobile Storage Host Controller.
+properties used by the Synopsys Designware Mobile Storage Host Controller.
Required Properties:
* compatible: should be
- - snps,dw-mshc: for controllers compliant with synopsis dw-mshc.
+ - snps,dw-mshc: for controllers compliant with synopsys dw-mshc.
* #address-cells: should be 1.
* #size-cells: should be 0.
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index eabcb4b5db6e..e216af356847 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -1,4 +1,4 @@
-* Synopsis Designware PCIe interface
+* Synopsys Designware PCIe interface
Required properties:
- compatible: should contain "snps,dw-pcie" to identify the
diff --git a/Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt
index c5e032c85bf9..c5e032c85bf9 100644
--- a/Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt
+++ b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index f93a88250a44..deb48b5fd883 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -359,11 +359,9 @@ struct inode_operations {
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*removexattr) (struct dentry *, const char *);
void (*update_time)(struct inode *, struct timespec *, int);
- int (*atomic_open)(struct inode *, struct dentry *,
+ int (*atomic_open)(struct inode *, struct dentry *, struct file *,
+ unsigned open_flag, umode_t create_mode, int *opened);
int (*tmpfile) (struct inode *, struct dentry *, umode_t);
-} ____cacheline_aligned;
- struct file *, unsigned open_flag,
- umode_t create_mode, int *opened);
};
Again, all methods are called without any locks being held, unless
@@ -470,9 +468,11 @@ otherwise noted.
method the filesystem can look up, possibly create and open the file in
one atomic operation. If it cannot perform this (e.g. the file type
turned out to be wrong) it may signal this by returning 1 instead of
- usual 0 or -ve . This method is only called if the last
- component is negative or needs lookup. Cached positive dentries are
- still handled by f_op->open().
+ usual 0 or -ve . This method is only called if the last component is
+ negative or needs lookup. Cached positive dentries are still handled by
+ f_op->open(). If the file was created, the FILE_CREATED flag should be
+ set in "opened". In case of O_EXCL the method must only succeed if the
+ file didn't exist and hence FILE_CREATED shall always be set on success.
tmpfile: called in the end of O_TMPFILE open(). Optional, equivalent to
atomically creating, opening and unlinking a file in given directory.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 1a036cd972fb..539a23631990 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3485,6 +3485,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
the unplug protocol
never -- do not unplug even if version check succeeds
+ xen_nopvspin [X86,XEN]
+ Disables the ticketlock slowpath using Xen PV
+ optimizations.
+
xirc2ps_cs= [NET,PCMCIA]
Format:
<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 87bbcfee2e06..9b28e714831a 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -1362,6 +1362,12 @@ To add ARP targets:
To remove an ARP target:
# echo -192.168.0.100 > /sys/class/net/bond0/bonding/arp_ip_target
+To configure the interval between learning packet transmits:
+# echo 12 > /sys/class/net/bond0/bonding/lp_interval
+ NOTE: the lp_interval is the number of seconds between instances where
+the bonding driver sends learning packets to each slave's peer switch. The
+default interval is 1 second.
+
Example Configuration
---------------------
We begin with the same example that is shown in section 3.3,
diff --git a/Documentation/scheduler/sched-design-CFS.txt b/Documentation/scheduler/sched-design-CFS.txt
index d529e02d928d..f14f49304222 100644
--- a/Documentation/scheduler/sched-design-CFS.txt
+++ b/Documentation/scheduler/sched-design-CFS.txt
@@ -66,9 +66,7 @@ rq->cfs.load value, which is the sum of the weights of the tasks queued on the
runqueue.
CFS maintains a time-ordered rbtree, where all runnable tasks are sorted by the
-p->se.vruntime key (there is a subtraction using rq->cfs.min_vruntime to
-account for possible wraparounds). CFS picks the "leftmost" task from this
-tree and sticks to it.
+p->se.vruntime key. CFS picks the "leftmost" task from this tree and sticks to it.
As the system progresses forwards, the executed tasks are put into the tree
more and more to the right --- slowly but surely giving a chance for every task
to become the "leftmost task" and thus get on the CPU within a deterministic
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index a46ddb85e83a..f911e3656209 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -296,6 +296,12 @@ Cirrus Logic CS4206/4207
imac27 IMac 27 Inch
auto BIOS setup (default)
+Cirrus Logic CS4208
+===================
+ mba6 MacBook Air 6,1 and 6,2
+ gpio0 Enable GPIO 0 amp
+ auto BIOS setup (default)
+
VIA VT17xx/VT18xx/VT20xx
========================
auto BIOS setup (default)
diff --git a/MAINTAINERS b/MAINTAINERS
index e61c2e83fc2b..284969fa2896 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1812,7 +1812,8 @@ S: Supported
F: drivers/net/ethernet/broadcom/bnx2x/
BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
-M: Christian Daudt <csd@broadcom.com>
+M: Christian Daudt <bcm@fixthebug.org>
+L: bcm-kernel-feedback-list@broadcom.com
T: git git://git.github.com/broadcom/bcm11351
S: Maintained
F: arch/arm/mach-bcm/
@@ -2639,6 +2640,18 @@ F: include/linux/device-mapper.h
F: include/linux/dm-*.h
F: include/uapi/linux/dm-*.h
+DIGI NEO AND CLASSIC PCI PRODUCTS
+M: Lidza Louina <lidza.louina@gmail.com>
+L: driverdev-devel@linuxdriverproject.org
+S: Maintained
+F: drivers/staging/dgnc/
+
+DIGI EPCA PCI PRODUCTS
+M: Lidza Louina <lidza.louina@gmail.com>
+L: driverdev-devel@linuxdriverproject.org
+S: Maintained
+F: drivers/staging/dgap/
+
DIOLAN U2C-12 I2C DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-i2c@vger.kernel.org
@@ -6595,7 +6608,7 @@ S: Obsolete
F: drivers/net/wireless/prism54/
PROMISE SATA TX2/TX4 CONTROLLER LIBATA DRIVER
-M: Mikael Pettersson <mikpe@it.uu.se>
+M: Mikael Pettersson <mikpelinux@gmail.com>
L: linux-ide@vger.kernel.org
S: Maintained
F: drivers/ata/sata_promise.*
@@ -8724,9 +8737,8 @@ F: Documentation/hid/hiddev.txt
F: drivers/hid/usbhid/
USB/IP DRIVERS
-M: Matt Mooney <mfm@muteddisk.com>
L: linux-usb@vger.kernel.org
-S: Maintained
+S: Orphan
F: drivers/staging/usbip/
USB ISP116X DRIVER
diff --git a/Makefile b/Makefile
index de004ceb6b5e..2ae108d4f2af 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 12
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
NAME = One Giant Leap for Frogkind
# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 1feb169274fe..af2cc6eabcc7 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -286,9 +286,6 @@ config HAVE_PERF_USER_STACK_DUMP
config HAVE_ARCH_JUMP_LABEL
bool
-config HAVE_ARCH_MUTEX_CPU_RELAX
- bool
-
config HAVE_RCU_TABLE_FREE
bool
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b3135378ac39..22efc5d9c952 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2243,8 +2243,7 @@ config NEON
config KERNEL_MODE_NEON
bool "Support for NEON in kernel mode"
- default n
- depends on NEON
+ depends on NEON && AEABI
help
Say Y to include support for NEON in kernel mode.
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index cc0f1fb61753..e95af3f5433b 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -183,6 +183,7 @@ dtb-$(CONFIG_ARCH_OMAP2PLUS) += omap2420-h4.dtb \
am335x-evm.dtb \
am335x-evmsk.dtb \
am335x-bone.dtb \
+ am335x-boneblack.dtb \
am3517-evm.dtb \
am3517_mt_ventoux.dtb \
am43x-epos-evm.dtb
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
new file mode 100644
index 000000000000..2f66deda9f5c
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/ {
+ model = "TI AM335x BeagleBone";
+ compatible = "ti,am335x-bone", "ti,am33xx";
+
+ cpus {
+ cpu@0 {
+ cpu0-supply = <&dcdc2_reg>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x10000000>; /* 256 MB */
+ };
+
+ am33xx_pinmux: pinmux@44e10800 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&clkout2_pin>;
+
+ user_leds_s0: user_leds_s0 {
+ pinctrl-single,pins = <
+ 0x54 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a5.gpio1_21 */
+ 0x58 (PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a6.gpio1_22 */
+ 0x5c (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a7.gpio1_23 */
+ 0x60 (PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a8.gpio1_24 */
+ >;
+ };
+
+ i2c0_pins: pinmux_i2c0_pins {
+ pinctrl-single,pins = <
+ 0x188 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
+ 0x18c (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ >;
+ };
+
+ uart0_pins: pinmux_uart0_pins {
+ pinctrl-single,pins = <
+ 0x170 (PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
+ 0x174 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ >;
+ };
+
+ clkout2_pin: pinmux_clkout2_pin {
+ pinctrl-single,pins = <
+ 0x1b4 (PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */
+ >;
+ };
+
+ cpsw_default: cpsw_default {
+ pinctrl-single,pins = <
+ /* Slave 1 */
+ 0x110 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxerr.mii1_rxerr */
+ 0x114 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txen.mii1_txen */
+ 0x118 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxdv.mii1_rxdv */
+ 0x11c (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd3.mii1_txd3 */
+ 0x120 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd2.mii1_txd2 */
+ 0x124 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd1.mii1_txd1 */
+ 0x128 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd0.mii1_txd0 */
+ 0x12c (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_txclk.mii1_txclk */
+ 0x130 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxclk.mii1_rxclk */
+ 0x134 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd3.mii1_rxd3 */
+ 0x138 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd2.mii1_rxd2 */
+ 0x13c (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd1.mii1_rxd1 */
+ 0x140 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd0.mii1_rxd0 */
+ >;
+ };
+
+ cpsw_sleep: cpsw_sleep {
+ pinctrl-single,pins = <
+ /* Slave 1 reset value */
+ 0x110 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x114 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x118 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x11c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x120 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x124 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x128 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x12c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x130 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x134 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x138 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x13c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x140 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ >;
+ };
+
+ davinci_mdio_default: davinci_mdio_default {
+ pinctrl-single,pins = <
+ /* MDIO */
+ 0x148 (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
+ 0x14c (PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
+ >;
+ };
+
+ davinci_mdio_sleep: davinci_mdio_sleep {
+ pinctrl-single,pins = <
+ /* MDIO reset value */
+ 0x148 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ 0x14c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+ >;
+ };
+ };
+
+ ocp {
+ uart0: serial@44e09000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins>;
+
+ status = "okay";
+ };
+
+ musb: usb@47400000 {
+ status = "okay";
+
+ control@44e10000 {
+ status = "okay";
+ };
+
+ usb-phy@47401300 {
+ status = "okay";
+ };
+
+ usb-phy@47401b00 {
+ status = "okay";
+ };
+
+ usb@47401000 {
+ status = "okay";
+ };
+
+ usb@47401800 {
+ status = "okay";
+ dr_mode = "host";
+ };
+
+ dma-controller@07402000 {
+ status = "okay";
+ };
+ };
+
+ i2c0: i2c@44e0b000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+
+ status = "okay";
+ clock-frequency = <400000>;
+
+ tps: tps@24 {
+ reg = <0x24>;
+ };
+
+ };
+ };
+
+ leds {
+ pinctrl-names = "default";
+ pinctrl-0 = <&user_leds_s0>;
+
+ compatible = "gpio-leds";
+
+ led@2 {
+ label = "beaglebone:green:heartbeat";
+ gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
+ };
+
+ led@3 {
+ label = "beaglebone:green:mmc0";
+ gpios = <&gpio1 22 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc0";
+ default-state = "off";
+ };
+
+ led@4 {
+ label = "beaglebone:green:usr2";
+ gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ led@5 {
+ label = "beaglebone:green:usr3";
+ gpios = <&gpio1 24 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ };
+};
+
+/include/ "tps65217.dtsi"
+
+&tps {
+ regulators {
+ dcdc1_reg: regulator@0 {
+ regulator-always-on;
+ };
+
+ dcdc2_reg: regulator@1 {
+ /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
+ regulator-name = "vdd_mpu";
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <1325000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ dcdc3_reg: regulator@2 {
+ /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
+ regulator-name = "vdd_core";
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1_reg: regulator@3 {
+ regulator-always-on;
+ };
+
+ ldo2_reg: regulator@4 {
+ regulator-always-on;
+ };
+
+ ldo3_reg: regulator@5 {
+ regulator-always-on;
+ };
+
+ ldo4_reg: regulator@6 {
+ regulator-always-on;
+ };
+ };
+};
+
+&cpsw_emac0 {
+ phy_id = <&davinci_mdio>, <0>;
+ phy-mode = "mii";
+};
+
+&cpsw_emac1 {
+ phy_id = <&davinci_mdio>, <1>;
+ phy-mode = "mii";
+};
+
+&mac {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&cpsw_default>;
+ pinctrl-1 = <&cpsw_sleep>;
+
+};
+
+&davinci_mdio {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&davinci_mdio_default>;
+ pinctrl-1 = <&davinci_mdio_sleep>;
+};
diff --git a/arch/arm/boot/dts/am335x-bone.dts b/arch/arm/boot/dts/am335x-bone.dts
index d318987d44a1..7993c489982c 100644
--- a/arch/arm/boot/dts/am335x-bone.dts
+++ b/arch/arm/boot/dts/am335x-bone.dts
@@ -8,258 +8,4 @@
/dts-v1/;
#include "am33xx.dtsi"
-
-/ {
- model = "TI AM335x BeagleBone";
- compatible = "ti,am335x-bone", "ti,am33xx";
-
- cpus {
- cpu@0 {
- cpu0-supply = <&dcdc2_reg>;
- };
- };
-
- memory {
- device_type = "memory";
- reg = <0x80000000 0x10000000>; /* 256 MB */
- };
-
- am33xx_pinmux: pinmux@44e10800 {
- pinctrl-names = "default";
- pinctrl-0 = <&clkout2_pin>;
-
- user_leds_s0: user_leds_s0 {
- pinctrl-single,pins = <
- 0x54 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a5.gpio1_21 */
- 0x58 (PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a6.gpio1_22 */
- 0x5c (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a7.gpio1_23 */
- 0x60 (PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a8.gpio1_24 */
- >;
- };
-
- i2c0_pins: pinmux_i2c0_pins {
- pinctrl-single,pins = <
- 0x188 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
- 0x18c (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
- >;
- };
-
- uart0_pins: pinmux_uart0_pins {
- pinctrl-single,pins = <
- 0x170 (PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
- 0x174 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
- >;
- };
-
- clkout2_pin: pinmux_clkout2_pin {
- pinctrl-single,pins = <
- 0x1b4 (PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */
- >;
- };
-
- cpsw_default: cpsw_default {
- pinctrl-single,pins = <
- /* Slave 1 */
- 0x110 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxerr.mii1_rxerr */
- 0x114 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txen.mii1_txen */
- 0x118 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxdv.mii1_rxdv */
- 0x11c (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd3.mii1_txd3 */
- 0x120 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd2.mii1_txd2 */
- 0x124 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd1.mii1_txd1 */
- 0x128 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd0.mii1_txd0 */
- 0x12c (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_txclk.mii1_txclk */
- 0x130 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxclk.mii1_rxclk */
- 0x134 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd3.mii1_rxd3 */
- 0x138 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd2.mii1_rxd2 */
- 0x13c (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd1.mii1_rxd1 */
- 0x140 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd0.mii1_rxd0 */
- >;
- };
-
- cpsw_sleep: cpsw_sleep {
- pinctrl-single,pins = <
- /* Slave 1 reset value */
- 0x110 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x114 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x118 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x11c (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x120 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x124 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x128 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x12c (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x130 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x134 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x138 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x13c (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x140 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- >;
- };
-
- davinci_mdio_default: davinci_mdio_default {
- pinctrl-single,pins = <
- /* MDIO */
- 0x148 (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
- 0x14c (PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
- >;
- };
-
- davinci_mdio_sleep: davinci_mdio_sleep {
- pinctrl-single,pins = <
- /* MDIO reset value */
- 0x148 (PIN_INPUT_PULLDOWN | MUX_MODE7)
- 0x14c (PIN_INPUT_PULLDOWN | MUX_MODE7)
- >;
- };
- };
-
- ocp {
- uart0: serial@44e09000 {
- pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins>;
-
- status = "okay";
- };
-
- musb: usb@47400000 {
- status = "okay";
-
- control@44e10000 {
- status = "okay";
- };
-
- usb-phy@47401300 {
- status = "okay";
- };
-
- usb-phy@47401b00 {
- status = "okay";
- };
-
- usb@47401000 {
- status = "okay";
- };
-
- usb@47401800 {
- status = "okay";
- dr_mode = "host";
- };
-
- dma-controller@07402000 {
- status = "okay";
- };
- };
-
- i2c0: i2c@44e0b000 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0_pins>;
-
- status = "okay";
- clock-frequency = <400000>;
-
- tps: tps@24 {
- reg = <0x24>;
- };
-
- };
- };
-
- leds {
- pinctrl-names = "default";
- pinctrl-0 = <&user_leds_s0>;
-
- compatible = "gpio-leds";
-
- led@2 {
- label = "beaglebone:green:heartbeat";
- gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "heartbeat";
- default-state = "off";
- };
-
- led@3 {
- label = "beaglebone:green:mmc0";
- gpios = <&gpio1 22 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "mmc0";
- default-state = "off";
- };
-
- led@4 {
- label = "beaglebone:green:usr2";
- gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>;
- default-state = "off";
- };
-
- led@5 {
- label = "beaglebone:green:usr3";
- gpios = <&gpio1 24 GPIO_ACTIVE_HIGH>;
- default-state = "off";
- };
- };
-};
-
-/include/ "tps65217.dtsi"
-
-&tps {
- regulators {
- dcdc1_reg: regulator@0 {
- regulator-always-on;
- };
-
- dcdc2_reg: regulator@1 {
- /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
- regulator-name = "vdd_mpu";
- regulator-min-microvolt = <925000>;
- regulator-max-microvolt = <1325000>;
- regulator-boot-on;
- regulator-always-on;
- };
-
- dcdc3_reg: regulator@2 {
- /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
- regulator-name = "vdd_core";
- regulator-min-microvolt = <925000>;
- regulator-max-microvolt = <1150000>;
- regulator-boot-on;
- regulator-always-on;
- };
-
- ldo1_reg: regulator@3 {
- regulator-always-on;
- };
-
- ldo2_reg: regulator@4 {
- regulator-always-on;
- };
-
- ldo3_reg: regulator@5 {
- regulator-always-on;
- };
-
- ldo4_reg: regulator@6 {
- regulator-always-on;
- };
- };
-};
-
-&cpsw_emac0 {
- phy_id = <&davinci_mdio>, <0>;
- phy-mode = "mii";
-};
-
-&cpsw_emac1 {
- phy_id = <&davinci_mdio>, <1>;
- phy-mode = "mii";
-};
-
-&mac {
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&cpsw_default>;
- pinctrl-1 = <&cpsw_sleep>;
-
-};
-
-&davinci_mdio {
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&davinci_mdio_default>;
- pinctrl-1 = <&davinci_mdio_sleep>;
-};
+#include "am335x-bone-common.dtsi"
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts
new file mode 100644
index 000000000000..197cadf72d2c
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-boneblack.dts
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+#include "am33xx.dtsi"
+#include "am335x-bone-common.dtsi"
+
+&ldo3_reg {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+};
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index c037c223619a..b7a1c6d950b9 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -187,7 +187,7 @@
compatible = "fsl,imx27-cspi";
reg = <0x1000e000 0x1000>;
interrupts = <16>;
- clocks = <&clks 53>, <&clks 53>;
+ clocks = <&clks 53>, <&clks 60>;
clock-names = "ipg", "per";
status = "disabled";
};
@@ -198,7 +198,7 @@
compatible = "fsl,imx27-cspi";
reg = <0x1000f000 0x1000>;
interrupts = <15>;
- clocks = <&clks 52>, <&clks 52>;
+ clocks = <&clks 52>, <&clks 60>;
clock-names = "ipg", "per";
status = "disabled";
};
@@ -309,7 +309,7 @@
compatible = "fsl,imx27-cspi";
reg = <0x10017000 0x1000>;
interrupts = <6>;
- clocks = <&clks 51>, <&clks 51>;
+ clocks = <&clks 51>, <&clks 60>;
clock-names = "ipg", "per";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index a85abb424c34..54cee6517902 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -474,7 +474,7 @@
compatible = "fsl,imx51-pata", "fsl,imx27-pata";
reg = <0x83fe0000 0x4000>;
interrupts = <70>;
- clocks = <&clks 161>;
+ clocks = <&clks 172>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx6q-pinfunc.h b/arch/arm/boot/dts/imx6q-pinfunc.h
index c0e38a45e4bb..9bbe82bdee41 100644
--- a/arch/arm/boot/dts/imx6q-pinfunc.h
+++ b/arch/arm/boot/dts/imx6q-pinfunc.h
@@ -207,8 +207,8 @@
#define MX6QDL_PAD_EIM_D29__ECSPI4_SS0 0x0c8 0x3dc 0x824 0x2 0x1
#define MX6QDL_PAD_EIM_D29__UART2_RTS_B 0x0c8 0x3dc 0x924 0x4 0x1
#define MX6QDL_PAD_EIM_D29__UART2_CTS_B 0x0c8 0x3dc 0x000 0x4 0x0
-#define MX6QDL_PAD_EIM_D29__UART2_DTE_RTS_B 0x0c4 0x3dc 0x000 0x4 0x0
-#define MX6QDL_PAD_EIM_D29__UART2_DTE_CTS_B 0x0c4 0x3dc 0x924 0x4 0x1
+#define MX6QDL_PAD_EIM_D29__UART2_DTE_RTS_B 0x0c8 0x3dc 0x000 0x4 0x0
+#define MX6QDL_PAD_EIM_D29__UART2_DTE_CTS_B 0x0c8 0x3dc 0x924 0x4 0x1
#define MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x0c8 0x3dc 0x000 0x5 0x0
#define MX6QDL_PAD_EIM_D29__IPU2_CSI1_VSYNC 0x0c8 0x3dc 0x8e4 0x6 0x0
#define MX6QDL_PAD_EIM_D29__IPU1_DI0_PIN14 0x0c8 0x3dc 0x000 0x7 0x0
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index afdb16417d4e..0c514dc8460c 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -11,7 +11,7 @@
/ {
model = "TI OMAP3 BeagleBoard xM";
- compatible = "ti,omap3-beagle-xm, ti,omap3-beagle", "ti,omap3";
+ compatible = "ti,omap3-beagle-xm", "ti,omap3-beagle", "ti,omap3";
cpus {
cpu@0 {
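Because of the missing quotes, the BeagleBoard xM compatible list held a single malformed string ("ti,omap3-beagle-xm, ti,omap3-beagle"), so matching on either value failed; the fix restores two separate entries. A sketch of the consumer side (illustrative helper, not from this patch):

#include <linux/of.h>

/*
 * of_machine_is_compatible() compares its argument against each string in
 * the root node's compatible list, so both checks below only succeed once
 * the list really contains two entries.
 */
static bool example_is_beagle(void)
{
	return of_machine_is_compatible("ti,omap3-beagle-xm") ||
	       of_machine_is_compatible("ti,omap3-beagle");
}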
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index bc48b114eae6..2326d11462a5 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -48,6 +48,15 @@
>;
};
+ mcbsp2_pins: pinmux_mcbsp2_pins {
+ pinctrl-single,pins = <
+ 0x10c (PIN_INPUT | MUX_MODE0) /* mcbsp2_fsx.mcbsp2_fsx */
+ 0x10e (PIN_INPUT | MUX_MODE0) /* mcbsp2_clkx.mcbsp2_clkx */
+ 0x110 (PIN_INPUT | MUX_MODE0) /* mcbsp2_dr.mcbsp2_dr */
+ 0x112 (PIN_OUTPUT | MUX_MODE0) /* mcbsp2_dx.mcbsp2_dx */
+ >;
+ };
+
mmc1_pins: pinmux_mmc1_pins {
pinctrl-single,pins = <
0x114 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_clk.sdmmc1_clk */
@@ -93,6 +102,11 @@
clock-frequency = <400000>;
};
+&mcbsp2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcbsp2_pins>;
+};
+
&mmc1 {
pinctrl-names = "default";
pinctrl-0 = <&mmc1_pins>;
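The IGEP addition wires McBSP2 to a "default" pinctrl state, while the relocated BeagleBone CPSW/MDIO nodes above also carry a "sleep" state. The "default" state is applied by the driver core before probe; switching to an optional "sleep" state is up to the driver, roughly as in this hedged sketch (function and flow are illustrative, not from this patch):

#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

/*
 * Look up the optional "sleep" state named by pinctrl-names and select it,
 * e.g. from a suspend callback.
 */
static int example_pins_suspend(struct device *dev)
{
	struct pinctrl *p = devm_pinctrl_get(dev);
	struct pinctrl_state *sleep;

	if (IS_ERR(p))
		return PTR_ERR(p);

	sleep = pinctrl_lookup_state(p, "sleep");
	if (IS_ERR(sleep))
		return 0;	/* no sleep state defined, nothing to do */

	return pinctrl_select_state(p, sleep);
}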
diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
index faa95b5b242e..814ab67c8c29 100644
--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
+++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
@@ -107,6 +107,19 @@
*/
clock-frequency = <19200000>;
};
+
+ /* regulator for wl12xx on sdio5 */
+ wl12xx_vmmc: wl12xx_vmmc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wl12xx_gpio>;
+ compatible = "regulator-fixed";
+ regulator-name = "vwl1271";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ gpio = <&gpio2 11 0>;
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
};
&omap4_pmx_wkup {
@@ -235,6 +248,33 @@
0x1c (PIN_OUTPUT | MUX_MODE3) /* gpio_wk8 */
>;
};
+
+ /*
+ * wl12xx GPIO outputs for WLAN_EN, BT_EN, FM_EN, BT_WAKEUP
+ * REVISIT: Are the pull-ups needed for GPIO 48 and 49?
+ */
+ wl12xx_gpio: pinmux_wl12xx_gpio {
+ pinctrl-single,pins = <
+ 0x26 (PIN_OUTPUT | MUX_MODE3) /* gpmc_a19.gpio_43 */
+ 0x2c (PIN_OUTPUT | MUX_MODE3) /* gpmc_a22.gpio_46 */
+ 0x30 (PIN_OUTPUT_PULLUP | MUX_MODE3) /* gpmc_a24.gpio_48 */
+ 0x32 (PIN_OUTPUT_PULLUP | MUX_MODE3) /* gpmc_a25.gpio_49 */
+ >;
+ };
+
+ /* wl12xx GPIO inputs and SDIO pins */
+ wl12xx_pins: pinmux_wl12xx_pins {
+ pinctrl-single,pins = <
+ 0x38 (PIN_INPUT | MUX_MODE3) /* gpmc_ncs2.gpio_52 */
+ 0x3a (PIN_INPUT | MUX_MODE3) /* gpmc_ncs3.gpio_53 */
+ 0x108 (PIN_OUTPUT | MUX_MODE0) /* sdmmc5_clk.sdmmc5_clk */
+ 0x10a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_cmd.sdmmc5_cmd */
+ 0x10c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat0.sdmmc5_dat0 */
+ 0x10e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat1.sdmmc5_dat1 */
+ 0x110 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat2.sdmmc5_dat2 */
+ 0x112 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat3.sdmmc5_dat3 */
+ >;
+ };
};
&i2c1 {
@@ -314,8 +354,12 @@
};
&mmc5 {
- ti,non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&wl12xx_pins>;
+ vmmc-supply = <&wl12xx_vmmc>;
+ non-removable;
bus-width = <4>;
+ cap-power-off-card;
};
&emif1 {
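mmc5 now references the wl12xx fixed regulator through vmmc-supply and uses the generic MMC bindings ("non-removable", "cap-power-off-card", "bus-width") in place of the TI-specific "ti,non-removable". Hosts that call mmc_of_parse() pick these up as capability flags; a hedged sketch (illustrative helper only):

#include <linux/bug.h>
#include <linux/mmc/host.h>

/*
 * Illustrative only: mmc_of_parse() translates the generic DT properties
 * used above into host capability flags.
 */
static void example_parse_sdio_host(struct mmc_host *mmc)
{
	mmc_of_parse(mmc);	/* fills caps/caps2 from the host's DT node */

	/* For the wl12xx SDIO function the node above should yield: */
	WARN_ON(!(mmc->caps & MMC_CAP_4_BIT_DATA));
	WARN_ON(!(mmc->caps & MMC_CAP_NONREMOVABLE));
	WARN_ON(!(mmc->caps & MMC_CAP_POWER_OFF_CARD));
}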
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index 7951b4ea500a..4f78380ecdb8 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -140,6 +140,19 @@
"DMic", "Digital Mic",
"Digital Mic", "Digital Mic1 Bias";
};
+
+ /* regulator for wl12xx on sdio5 */
+ wl12xx_vmmc: wl12xx_vmmc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wl12xx_gpio>;
+ compatible = "regulator-fixed";
+ regulator-name = "vwl1271";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ gpio = <&gpio2 22 0>;
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
};
&omap4_pmx_wkup {
@@ -295,6 +308,26 @@
0xf0 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c4_sda */
>;
};
+
+ /* wl12xx GPIO output for WLAN_EN */
+ wl12xx_gpio: pinmux_wl12xx_gpio {
+ pinctrl-single,pins = <
+ 0x3c (PIN_OUTPUT | MUX_MODE3) /* gpmc_nwp.gpio_54 */
+ >;
+ };
+
+ /* wl12xx GPIO inputs and SDIO pins */
+ wl12xx_pins: pinmux_wl12xx_pins {
+ pinctrl-single,pins = <
+ 0x3a (PIN_INPUT | MUX_MODE3) /* gpmc_ncs3.gpio_53 */
+ 0x108 (PIN_OUTPUT | MUX_MODE3) /* sdmmc5_clk.sdmmc5_clk */
+ 0x10a (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_cmd.sdmmc5_cmd */
+ 0x10c (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat0.sdmmc5_dat0 */
+ 0x10e (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat1.sdmmc5_dat1 */
+ 0x110 (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat2.sdmmc5_dat2 */
+ 0x112 (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat3.sdmmc5_dat3 */
+ >;
+ };
};
&i2c1 {
@@ -420,8 +453,12 @@
};
&mmc5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wl12xx_pins>;
+ vmmc-supply = <&wl12xx_vmmc>;
+ non-removable;
bus-width = <4>;
- ti,non-removable;
+ cap-power-off-card;
};
&emif1 {
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 07be2cd7b318..7cdea1bfea09 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -637,7 +637,7 @@
omap_dwc3@4a020000 {
compatible = "ti,dwc3";
ti,hwmods = "usb_otg_ss";
- reg = <0x4a020000 0x1000>;
+ reg = <0x4a020000 0x10000>;
interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <1>;
@@ -645,17 +645,18 @@
ranges;
dwc3@4a030000 {
compatible = "snps,dwc3";
- reg = <0x4a030000 0x1000>;
+ reg = <0x4a030000 0x10000>;
interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
usb-phy = <&usb2_phy>, <&usb3_phy>;
tx-fifo-resize;
};
};
- ocp2scp {
+ ocp2scp@4a080000 {
compatible = "ti,omap-ocp2scp";
#address-cells = <1>;
#size-cells = <1>;
+ reg = <0x4a080000 0x20>;
ranges;
ti,hwmods = "ocp2scp1";
usb2_phy: usb2phy@4a084000 {
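The second cell of each reg entry is the size of the register window: the DWC3 blocks span 64 KiB, so the old 0x1000 value truncated the mapping, and ocp2scp gains the reg entry implied by its new unit address. A sketch of how that pair reaches a driver (illustrative fragment, not from this patch):

#include <linux/io.h>
#include <linux/platform_device.h>

/*
 * The <address size> pair becomes a struct resource; the driver maps
 * exactly that many bytes, so registers above the first 4 KiB of the
 * 64 KiB window were unreachable before the size fix.
 */
static void __iomem *example_map_regs(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	return devm_ioremap_resource(&pdev->dev, res);
}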
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 6e572c64cf5a..f3935b46df29 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -36,6 +36,7 @@ CONFIG_ARCH_TEGRA_114_SOC=y
CONFIG_TEGRA_PCI=y
CONFIG_TEGRA_EMC_SCALING_ENABLE=y
CONFIG_ARCH_U8500=y
+CONFIG_MACH_HREFV60=y
CONFIG_MACH_SNOWBALL=y
CONFIG_MACH_UX500_DT=y
CONFIG_ARCH_VEXPRESS=y
@@ -46,6 +47,7 @@ CONFIG_ARCH_ZYNQ=y
CONFIG_SMP=y
CONFIG_HIGHPTE=y
CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_NET=y
CONFIG_UNIX=y
CONFIG_INET=y
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index a2c83851bc90..81cda39860c5 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -3,7 +3,17 @@
#
obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
+obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
-aes-arm-y := aes-armv4.o aes_glue.o
-sha1-arm-y := sha1-armv4-large.o sha1_glue.o
+aes-arm-y := aes-armv4.o aes_glue.o
+aes-arm-bs-y := aesbs-core.o aesbs-glue.o
+sha1-arm-y := sha1-armv4-large.o sha1_glue.o
+
+quiet_cmd_perl = PERL $@
+ cmd_perl = $(PERL) $(<) > $(@)
+
+$(src)/aesbs-core.S_shipped: $(src)/bsaes-armv7.pl
+ $(call cmd,perl)
+
+.PRECIOUS: $(obj)/aesbs-core.S
diff --git a/arch/arm/crypto/aes-armv4.S b/arch/arm/crypto/aes-armv4.S
index 19d6cd6f29f9..3a14ea8fe97e 100644
--- a/arch/arm/crypto/aes-armv4.S
+++ b/arch/arm/crypto/aes-armv4.S
@@ -148,7 +148,7 @@ AES_Te:
@ const AES_KEY *key) {
.align 5
ENTRY(AES_encrypt)
- sub r3,pc,#8 @ AES_encrypt
+ adr r3,AES_encrypt
stmdb sp!,{r1,r4-r12,lr}
mov r12,r0 @ inp
mov r11,r2
@@ -381,7 +381,7 @@ _armv4_AES_encrypt:
.align 5
ENTRY(private_AES_set_encrypt_key)
_armv4_AES_set_encrypt_key:
- sub r3,pc,#8 @ AES_set_encrypt_key
+ adr r3,_armv4_AES_set_encrypt_key
teq r0,#0
moveq r0,#-1
beq .Labrt
@@ -843,7 +843,7 @@ AES_Td:
@ const AES_KEY *key) {
.align 5
ENTRY(AES_decrypt)
- sub r3,pc,#8 @ AES_decrypt
+ adr r3,AES_decrypt
stmdb sp!,{r1,r4-r12,lr}
mov r12,r0 @ inp
mov r11,r2
diff --git a/arch/arm/crypto/aes_glue.c b/arch/arm/crypto/aes_glue.c
index 59f7877ead6a..3003fa1f6fb4 100644
--- a/arch/arm/crypto/aes_glue.c
+++ b/arch/arm/crypto/aes_glue.c
@@ -6,22 +6,12 @@
#include <linux/crypto.h>
#include <crypto/aes.h>
-#define AES_MAXNR 14
+#include "aes_glue.h"
-typedef struct {
- unsigned int rd_key[4 *(AES_MAXNR + 1)];
- int rounds;
-} AES_KEY;
-
-struct AES_CTX {
- AES_KEY enc_key;
- AES_KEY dec_key;
-};
-
-asmlinkage void AES_encrypt(const u8 *in, u8 *out, AES_KEY *ctx);
-asmlinkage void AES_decrypt(const u8 *in, u8 *out, AES_KEY *ctx);
-asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
-asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
+EXPORT_SYMBOL(AES_encrypt);
+EXPORT_SYMBOL(AES_decrypt);
+EXPORT_SYMBOL(private_AES_set_encrypt_key);
+EXPORT_SYMBOL(private_AES_set_decrypt_key);
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
@@ -81,7 +71,7 @@ static struct crypto_alg aes_alg = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
- .cia_setkey = aes_set_key,
+ .cia_setkey = aes_set_key,
.cia_encrypt = aes_encrypt,
.cia_decrypt = aes_decrypt
}
diff --git a/arch/arm/crypto/aes_glue.h b/arch/arm/crypto/aes_glue.h
new file mode 100644
index 000000000000..cca3e51eb606
--- /dev/null
+++ b/arch/arm/crypto/aes_glue.h
@@ -0,0 +1,19 @@
+
+#define AES_MAXNR 14
+
+struct AES_KEY {
+ unsigned int rd_key[4 * (AES_MAXNR + 1)];
+ int rounds;
+};
+
+struct AES_CTX {
+ struct AES_KEY enc_key;
+ struct AES_KEY dec_key;
+};
+
+asmlinkage void AES_encrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
+asmlinkage void AES_decrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
+asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey,
+ const int bits, struct AES_KEY *key);
+asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey,
+ const int bits, struct AES_KEY *key);
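With the declarations shared through aes_glue.h and the entry points exported from aes_glue.c, the new bit-sliced NEON glue can fall back to the scalar assembler routines. A hedged sketch of such a consumer (names and tfm wiring are illustrative, not the code added by this series):

#include <linux/crypto.h>
#include <crypto/aes.h>

#include "aes_glue.h"

static int example_set_key(struct crypto_tfm *tfm, const u8 *key,
			   unsigned int len)
{
	struct AES_CTX *ctx = crypto_tfm_ctx(tfm);

	if (private_AES_set_encrypt_key(key, len * 8, &ctx->enc_key) ||
	    private_AES_set_decrypt_key(key, len * 8, &ctx->dec_key)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}
	return 0;
}

static void example_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct AES_CTX *ctx = crypto_tfm_ctx(tfm);

	AES_encrypt(src, dst, &ctx->enc_key);
}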
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped
new file mode 100644
index 000000000000..64205d453260
--- /dev/null
+++ b/arch/arm/crypto/aesbs-core.S_shipped
@@ -0,0 +1,2544 @@
+
+@ ====================================================================
+@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+@ project. The module is, however, dual licensed under OpenSSL and
+@ CRYPTOGAMS licenses depending on where you obtain it. For further
+@ details see http://www.openssl.org/~appro/cryptogams/.
+@
+@ Specific modes and adaptation for Linux kernel by Ard Biesheuvel
+@ <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is
+@ granted.
+@ ====================================================================
+
+@ Bit-sliced AES for ARM NEON
+@
+@ February 2012.
+@
+@ This implementation is direct adaptation of bsaes-x86_64 module for
+@ ARM NEON. Except that this module is endian-neutral [in sense that
+@ it can be compiled for either endianness] by courtesy of vld1.8's
+@ neutrality. Initial version doesn't implement interface to OpenSSL,
+@ only low-level primitives and unsupported entry points, just enough
+@ to collect performance results, which for Cortex-A8 core are:
+@
+@ encrypt 19.5 cycles per byte processed with 128-bit key
+@ decrypt 22.1 cycles per byte processed with 128-bit key
+@ key conv. 440 cycles per 128-bit key/0.18 of 8x block
+@
+@ Snapdragon S4 encrypts byte in 17.6 cycles and decrypts in 19.7,
+@ which is [much] worse than anticipated (for further details see
+@ http://www.openssl.org/~appro/Snapdragon-S4.html).
+@
+@ Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
+@ manages in 20.0 cycles].
+@
+@ When comparing to x86_64 results keep in mind that NEON unit is
+@ [mostly] single-issue and thus can't [fully] benefit from
+@ instruction-level parallelism. And when comparing to aes-armv4
+@ results keep in mind key schedule conversion overhead (see
+@ bsaes-x86_64.pl for further details)...
+@
+@ <appro@openssl.org>
+
+@ April-August 2013
+@
+@ Add CBC, CTR and XTS subroutines, adapt for kernel use.
+@
+@ <ard.biesheuvel@linaro.org>
+
+#ifndef __KERNEL__
+# include "arm_arch.h"
+
+# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
+# define VFP_ABI_POP vldmia sp!,{d8-d15}
+# define VFP_ABI_FRAME 0x40
+#else
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+# define VFP_ABI_FRAME 0
+# define BSAES_ASM_EXTENDED_KEY
+# define XTS_CHAIN_TWEAK
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+#endif
+
+#ifdef __thumb__
+# define adrl adr
+#endif
+
+#if __ARM_ARCH__>=7
+.text
+.syntax unified @ ARMv7-capable assembler is expected to handle this
+#ifdef __thumb2__
+.thumb
+#else
+.code 32
+#endif
+
+.fpu neon
+
+.type _bsaes_decrypt8,%function
+.align 4
+_bsaes_decrypt8:
+ adr r6,_bsaes_decrypt8
+ vldmia r4!, {q9} @ round 0 key
+ add r6,r6,#.LM0ISR-_bsaes_decrypt8
+
+ vldmia r6!, {q8} @ .LM0ISR
+ veor q10, q0, q9 @ xor with round0 key
+ veor q11, q1, q9
+ vtbl.8 d0, {q10}, d16
+ vtbl.8 d1, {q10}, d17
+ veor q12, q2, q9
+ vtbl.8 d2, {q11}, d16
+ vtbl.8 d3, {q11}, d17
+ veor q13, q3, q9
+ vtbl.8 d4, {q12}, d16
+ vtbl.8 d5, {q12}, d17
+ veor q14, q4, q9
+ vtbl.8 d6, {q13}, d16
+ vtbl.8 d7, {q13}, d17
+ veor q15, q5, q9
+ vtbl.8 d8, {q14}, d16
+ vtbl.8 d9, {q14}, d17
+ veor q10, q6, q9
+ vtbl.8 d10, {q15}, d16
+ vtbl.8 d11, {q15}, d17
+ veor q11, q7, q9
+ vtbl.8 d12, {q10}, d16
+ vtbl.8 d13, {q10}, d17
+ vtbl.8 d14, {q11}, d16
+ vtbl.8 d15, {q11}, d17
+ vmov.i8 q8,#0x55 @ compose .LBS0
+ vmov.i8 q9,#0x33 @ compose .LBS1
+ vshr.u64 q10, q6, #1
+ vshr.u64 q11, q4, #1
+ veor q10, q10, q7
+ veor q11, q11, q5
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #1
+ veor q5, q5, q11
+ vshl.u64 q11, q11, #1
+ veor q6, q6, q10
+ veor q4, q4, q11
+ vshr.u64 q10, q2, #1
+ vshr.u64 q11, q0, #1
+ veor q10, q10, q3
+ veor q11, q11, q1
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q3, q3, q10
+ vshl.u64 q10, q10, #1
+ veor q1, q1, q11
+ vshl.u64 q11, q11, #1
+ veor q2, q2, q10
+ veor q0, q0, q11
+ vmov.i8 q8,#0x0f @ compose .LBS2
+ vshr.u64 q10, q5, #2
+ vshr.u64 q11, q4, #2
+ veor q10, q10, q7
+ veor q11, q11, q6
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #2
+ veor q6, q6, q11
+ vshl.u64 q11, q11, #2
+ veor q5, q5, q10
+ veor q4, q4, q11
+ vshr.u64 q10, q1, #2
+ vshr.u64 q11, q0, #2
+ veor q10, q10, q3
+ veor q11, q11, q2
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q3, q3, q10
+ vshl.u64 q10, q10, #2
+ veor q2, q2, q11
+ vshl.u64 q11, q11, #2
+ veor q1, q1, q10
+ veor q0, q0, q11
+ vshr.u64 q10, q3, #4
+ vshr.u64 q11, q2, #4
+ veor q10, q10, q7
+ veor q11, q11, q6
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #4
+ veor q6, q6, q11
+ vshl.u64 q11, q11, #4
+ veor q3, q3, q10
+ veor q2, q2, q11
+ vshr.u64 q10, q1, #4
+ vshr.u64 q11, q0, #4
+ veor q10, q10, q5
+ veor q11, q11, q4
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #4
+ veor q4, q4, q11
+ vshl.u64 q11, q11, #4
+ veor q1, q1, q10
+ veor q0, q0, q11
+ sub r5,r5,#1
+ b .Ldec_sbox
+.align 4
+.Ldec_loop:
+ vldmia r4!, {q8-q11}
+ veor q8, q8, q0
+ veor q9, q9, q1
+ vtbl.8 d0, {q8}, d24
+ vtbl.8 d1, {q8}, d25
+ vldmia r4!, {q8}
+ veor q10, q10, q2
+ vtbl.8 d2, {q9}, d24
+ vtbl.8 d3, {q9}, d25
+ vldmia r4!, {q9}
+ veor q11, q11, q3
+ vtbl.8 d4, {q10}, d24
+ vtbl.8 d5, {q10}, d25
+ vldmia r4!, {q10}
+ vtbl.8 d6, {q11}, d24
+ vtbl.8 d7, {q11}, d25
+ vldmia r4!, {q11}
+ veor q8, q8, q4
+ veor q9, q9, q5
+ vtbl.8 d8, {q8}, d24
+ vtbl.8 d9, {q8}, d25
+ veor q10, q10, q6
+ vtbl.8 d10, {q9}, d24
+ vtbl.8 d11, {q9}, d25
+ veor q11, q11, q7
+ vtbl.8 d12, {q10}, d24
+ vtbl.8 d13, {q10}, d25
+ vtbl.8 d14, {q11}, d24
+ vtbl.8 d15, {q11}, d25
+.Ldec_sbox:
+ veor q1, q1, q4
+ veor q3, q3, q4
+
+ veor q4, q4, q7
+ veor q1, q1, q6
+ veor q2, q2, q7
+ veor q6, q6, q4
+
+ veor q0, q0, q1
+ veor q2, q2, q5
+ veor q7, q7, q6
+ veor q3, q3, q0
+ veor q5, q5, q0
+ veor q1, q1, q3
+ veor q11, q3, q0
+ veor q10, q7, q4
+ veor q9, q1, q6
+ veor q13, q4, q0
+ vmov q8, q10
+ veor q12, q5, q2
+
+ vorr q10, q10, q9
+ veor q15, q11, q8
+ vand q14, q11, q12
+ vorr q11, q11, q12
+ veor q12, q12, q9
+ vand q8, q8, q9
+ veor q9, q6, q2
+ vand q15, q15, q12
+ vand q13, q13, q9
+ veor q9, q3, q7
+ veor q12, q1, q5
+ veor q11, q11, q13
+ veor q10, q10, q13
+ vand q13, q9, q12
+ vorr q9, q9, q12
+ veor q11, q11, q15
+ veor q8, q8, q13
+ veor q10, q10, q14
+ veor q9, q9, q15
+ veor q8, q8, q14
+ vand q12, q4, q6
+ veor q9, q9, q14
+ vand q13, q0, q2
+ vand q14, q7, q1
+ vorr q15, q3, q5
+ veor q11, q11, q12
+ veor q9, q9, q14
+ veor q8, q8, q15
+ veor q10, q10, q13
+
+ @ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3
+
+ @ new smaller inversion
+
+ vand q14, q11, q9
+ vmov q12, q8
+
+ veor q13, q10, q14
+ veor q15, q8, q14
+ veor q14, q8, q14 @ q14=q15
+
+ vbsl q13, q9, q8
+ vbsl q15, q11, q10
+ veor q11, q11, q10
+
+ vbsl q12, q13, q14
+ vbsl q8, q14, q13
+
+ vand q14, q12, q15
+ veor q9, q9, q8
+
+ veor q14, q14, q11
+ veor q12, q5, q2
+ veor q8, q1, q6
+ veor q10, q15, q14
+ vand q10, q10, q5
+ veor q5, q5, q1
+ vand q11, q1, q15
+ vand q5, q5, q14
+ veor q1, q11, q10
+ veor q5, q5, q11
+ veor q15, q15, q13
+ veor q14, q14, q9
+ veor q11, q15, q14
+ veor q10, q13, q9
+ vand q11, q11, q12
+ vand q10, q10, q2
+ veor q12, q12, q8
+ veor q2, q2, q6
+ vand q8, q8, q15
+ vand q6, q6, q13
+ vand q12, q12, q14
+ vand q2, q2, q9
+ veor q8, q8, q12
+ veor q2, q2, q6
+ veor q12, q12, q11
+ veor q6, q6, q10
+ veor q5, q5, q12
+ veor q2, q2, q12
+ veor q1, q1, q8
+ veor q6, q6, q8
+
+ veor q12, q3, q0
+ veor q8, q7, q4
+ veor q11, q15, q14
+ veor q10, q13, q9
+ vand q11, q11, q12
+ vand q10, q10, q0
+ veor q12, q12, q8
+ veor q0, q0, q4
+ vand q8, q8, q15
+ vand q4, q4, q13
+ vand q12, q12, q14
+ vand q0, q0, q9
+ veor q8, q8, q12
+ veor q0, q0, q4
+ veor q12, q12, q11
+ veor q4, q4, q10
+ veor q15, q15, q13
+ veor q14, q14, q9
+ veor q10, q15, q14
+ vand q10, q10, q3
+ veor q3, q3, q7
+ vand q11, q7, q15
+ vand q3, q3, q14
+ veor q7, q11, q10
+ veor q3, q3, q11
+ veor q3, q3, q12
+ veor q0, q0, q12
+ veor q7, q7, q8
+ veor q4, q4, q8
+ veor q1, q1, q7
+ veor q6, q6, q5
+
+ veor q4, q4, q1
+ veor q2, q2, q7
+ veor q5, q5, q7
+ veor q4, q4, q2
+ veor q7, q7, q0
+ veor q4, q4, q5
+ veor q3, q3, q6
+ veor q6, q6, q1
+ veor q3, q3, q4
+
+ veor q4, q4, q0
+ veor q7, q7, q3
+ subs r5,r5,#1
+ bcc .Ldec_done
+ @ multiplication by 0x05-0x00-0x04-0x00
+ vext.8 q8, q0, q0, #8
+ vext.8 q14, q3, q3, #8
+ vext.8 q15, q5, q5, #8
+ veor q8, q8, q0
+ vext.8 q9, q1, q1, #8
+ veor q14, q14, q3
+ vext.8 q10, q6, q6, #8
+ veor q15, q15, q5
+ vext.8 q11, q4, q4, #8
+ veor q9, q9, q1
+ vext.8 q12, q2, q2, #8
+ veor q10, q10, q6
+ vext.8 q13, q7, q7, #8
+ veor q11, q11, q4
+ veor q12, q12, q2
+ veor q13, q13, q7
+
+ veor q0, q0, q14
+ veor q1, q1, q14
+ veor q6, q6, q8
+ veor q2, q2, q10
+ veor q4, q4, q9
+ veor q1, q1, q15
+ veor q6, q6, q15
+ veor q2, q2, q14
+ veor q7, q7, q11
+ veor q4, q4, q14
+ veor q3, q3, q12
+ veor q2, q2, q15
+ veor q7, q7, q15
+ veor q5, q5, q13
+ vext.8 q8, q0, q0, #12 @ x0 <<< 32
+ vext.8 q9, q1, q1, #12
+ veor q0, q0, q8 @ x0 ^ (x0 <<< 32)
+ vext.8 q10, q6, q6, #12
+ veor q1, q1, q9
+ vext.8 q11, q4, q4, #12
+ veor q6, q6, q10
+ vext.8 q12, q2, q2, #12
+ veor q4, q4, q11
+ vext.8 q13, q7, q7, #12
+ veor q2, q2, q12
+ vext.8 q14, q3, q3, #12
+ veor q7, q7, q13
+ vext.8 q15, q5, q5, #12
+ veor q3, q3, q14
+
+ veor q9, q9, q0
+ veor q5, q5, q15
+ vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64
+ veor q10, q10, q1
+ veor q8, q8, q5
+ veor q9, q9, q5
+ vext.8 q1, q1, q1, #8
+ veor q13, q13, q2
+ veor q0, q0, q8
+ veor q14, q14, q7
+ veor q1, q1, q9
+ vext.8 q8, q2, q2, #8
+ veor q12, q12, q4
+ vext.8 q9, q7, q7, #8
+ veor q15, q15, q3
+ vext.8 q2, q4, q4, #8
+ veor q11, q11, q6
+ vext.8 q7, q5, q5, #8
+ veor q12, q12, q5
+ vext.8 q4, q3, q3, #8
+ veor q11, q11, q5
+ vext.8 q3, q6, q6, #8
+ veor q5, q9, q13
+ veor q11, q11, q2
+ veor q7, q7, q15
+ veor q6, q4, q14
+ veor q4, q8, q12
+ veor q2, q3, q10
+ vmov q3, q11
+ @ vmov q5, q9
+ vldmia r6, {q12} @ .LISR
+ ite eq @ Thumb2 thing, sanity check in ARM
+ addeq r6,r6,#0x10
+ bne .Ldec_loop
+ vldmia r6, {q12} @ .LISRM0
+ b .Ldec_loop
+.align 4
+.Ldec_done:
+ vmov.i8 q8,#0x55 @ compose .LBS0
+ vmov.i8 q9,#0x33 @ compose .LBS1
+ vshr.u64 q10, q3, #1
+ vshr.u64 q11, q2, #1
+ veor q10, q10, q5
+ veor q11, q11, q7
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #1
+ veor q7, q7, q11
+ vshl.u64 q11, q11, #1
+ veor q3, q3, q10
+ veor q2, q2, q11
+ vshr.u64 q10, q6, #1
+ vshr.u64 q11, q0, #1
+ veor q10, q10, q4
+ veor q11, q11, q1
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q4, q4, q10
+ vshl.u64 q10, q10, #1
+ veor q1, q1, q11
+ vshl.u64 q11, q11, #1
+ veor q6, q6, q10
+ veor q0, q0, q11
+ vmov.i8 q8,#0x0f @ compose .LBS2
+ vshr.u64 q10, q7, #2
+ vshr.u64 q11, q2, #2
+ veor q10, q10, q5
+ veor q11, q11, q3
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #2
+ veor q3, q3, q11
+ vshl.u64 q11, q11, #2
+ veor q7, q7, q10
+ veor q2, q2, q11
+ vshr.u64 q10, q1, #2
+ vshr.u64 q11, q0, #2
+ veor q10, q10, q4
+ veor q11, q11, q6
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q4, q4, q10
+ vshl.u64 q10, q10, #2
+ veor q6, q6, q11
+ vshl.u64 q11, q11, #2
+ veor q1, q1, q10
+ veor q0, q0, q11
+ vshr.u64 q10, q4, #4
+ vshr.u64 q11, q6, #4
+ veor q10, q10, q5
+ veor q11, q11, q3
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #4
+ veor q3, q3, q11
+ vshl.u64 q11, q11, #4
+ veor q4, q4, q10
+ veor q6, q6, q11
+ vshr.u64 q10, q1, #4
+ vshr.u64 q11, q0, #4
+ veor q10, q10, q7
+ veor q11, q11, q2
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #4
+ veor q2, q2, q11
+ vshl.u64 q11, q11, #4
+ veor q1, q1, q10
+ veor q0, q0, q11
+ vldmia r4, {q8} @ last round key
+ veor q6, q6, q8
+ veor q4, q4, q8
+ veor q2, q2, q8
+ veor q7, q7, q8
+ veor q3, q3, q8
+ veor q5, q5, q8
+ veor q0, q0, q8
+ veor q1, q1, q8
+ bx lr
+.size _bsaes_decrypt8,.-_bsaes_decrypt8
+
+.type _bsaes_const,%object
+.align 6
+_bsaes_const:
+.LM0ISR: @ InvShiftRows constants
+ .quad 0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISR:
+ .quad 0x0504070602010003, 0x0f0e0d0c080b0a09
+.LISRM0:
+ .quad 0x01040b0e0205080f, 0x0306090c00070a0d
+.LM0SR: @ ShiftRows constants
+ .quad 0x0a0e02060f03070b, 0x0004080c05090d01
+.LSR:
+ .quad 0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+ .quad 0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0:
+ .quad 0x02060a0e03070b0f, 0x0004080c0105090d
+.LREVM0SR:
+ .quad 0x090d01050c000408, 0x03070b0f060a0e02
+.asciz "Bit-sliced AES for NEON, CRYPTOGAMS by <appro@openssl.org>"
+.align 6
+.size _bsaes_const,.-_bsaes_const
+
+.type _bsaes_encrypt8,%function
+.align 4
+_bsaes_encrypt8:
+ adr r6,_bsaes_encrypt8
+ vldmia r4!, {q9} @ round 0 key
+ sub r6,r6,#_bsaes_encrypt8-.LM0SR
+
+ vldmia r6!, {q8} @ .LM0SR
+_bsaes_encrypt8_alt:
+ veor q10, q0, q9 @ xor with round0 key
+ veor q11, q1, q9
+ vtbl.8 d0, {q10}, d16
+ vtbl.8 d1, {q10}, d17
+ veor q12, q2, q9
+ vtbl.8 d2, {q11}, d16
+ vtbl.8 d3, {q11}, d17
+ veor q13, q3, q9
+ vtbl.8 d4, {q12}, d16
+ vtbl.8 d5, {q12}, d17
+ veor q14, q4, q9
+ vtbl.8 d6, {q13}, d16
+ vtbl.8 d7, {q13}, d17
+ veor q15, q5, q9
+ vtbl.8 d8, {q14}, d16
+ vtbl.8 d9, {q14}, d17
+ veor q10, q6, q9
+ vtbl.8 d10, {q15}, d16
+ vtbl.8 d11, {q15}, d17
+ veor q11, q7, q9
+ vtbl.8 d12, {q10}, d16
+ vtbl.8 d13, {q10}, d17
+ vtbl.8 d14, {q11}, d16
+ vtbl.8 d15, {q11}, d17
+_bsaes_encrypt8_bitslice:
+ vmov.i8 q8,#0x55 @ compose .LBS0
+ vmov.i8 q9,#0x33 @ compose .LBS1
+ vshr.u64 q10, q6, #1
+ vshr.u64 q11, q4, #1
+ veor q10, q10, q7
+ veor q11, q11, q5
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #1
+ veor q5, q5, q11
+ vshl.u64 q11, q11, #1
+ veor q6, q6, q10
+ veor q4, q4, q11
+ vshr.u64 q10, q2, #1
+ vshr.u64 q11, q0, #1
+ veor q10, q10, q3
+ veor q11, q11, q1
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q3, q3, q10
+ vshl.u64 q10, q10, #1
+ veor q1, q1, q11
+ vshl.u64 q11, q11, #1
+ veor q2, q2, q10
+ veor q0, q0, q11
+ vmov.i8 q8,#0x0f @ compose .LBS2
+ vshr.u64 q10, q5, #2
+ vshr.u64 q11, q4, #2
+ veor q10, q10, q7
+ veor q11, q11, q6
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #2
+ veor q6, q6, q11
+ vshl.u64 q11, q11, #2
+ veor q5, q5, q10
+ veor q4, q4, q11
+ vshr.u64 q10, q1, #2
+ vshr.u64 q11, q0, #2
+ veor q10, q10, q3
+ veor q11, q11, q2
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q3, q3, q10
+ vshl.u64 q10, q10, #2
+ veor q2, q2, q11
+ vshl.u64 q11, q11, #2
+ veor q1, q1, q10
+ veor q0, q0, q11
+ vshr.u64 q10, q3, #4
+ vshr.u64 q11, q2, #4
+ veor q10, q10, q7
+ veor q11, q11, q6
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #4
+ veor q6, q6, q11
+ vshl.u64 q11, q11, #4
+ veor q3, q3, q10
+ veor q2, q2, q11
+ vshr.u64 q10, q1, #4
+ vshr.u64 q11, q0, #4
+ veor q10, q10, q5
+ veor q11, q11, q4
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #4
+ veor q4, q4, q11
+ vshl.u64 q11, q11, #4
+ veor q1, q1, q10
+ veor q0, q0, q11
+ sub r5,r5,#1
+ b .Lenc_sbox
+.align 4
+.Lenc_loop:
+ vldmia r4!, {q8-q11}
+ veor q8, q8, q0
+ veor q9, q9, q1
+ vtbl.8 d0, {q8}, d24
+ vtbl.8 d1, {q8}, d25
+ vldmia r4!, {q8}
+ veor q10, q10, q2
+ vtbl.8 d2, {q9}, d24
+ vtbl.8 d3, {q9}, d25
+ vldmia r4!, {q9}
+ veor q11, q11, q3
+ vtbl.8 d4, {q10}, d24
+ vtbl.8 d5, {q10}, d25
+ vldmia r4!, {q10}
+ vtbl.8 d6, {q11}, d24
+ vtbl.8 d7, {q11}, d25
+ vldmia r4!, {q11}
+ veor q8, q8, q4
+ veor q9, q9, q5
+ vtbl.8 d8, {q8}, d24
+ vtbl.8 d9, {q8}, d25
+ veor q10, q10, q6
+ vtbl.8 d10, {q9}, d24
+ vtbl.8 d11, {q9}, d25
+ veor q11, q11, q7
+ vtbl.8 d12, {q10}, d24
+ vtbl.8 d13, {q10}, d25
+ vtbl.8 d14, {q11}, d24
+ vtbl.8 d15, {q11}, d25
+.Lenc_sbox:
+ veor q2, q2, q1
+ veor q5, q5, q6
+ veor q3, q3, q0
+ veor q6, q6, q2
+ veor q5, q5, q0
+
+ veor q6, q6, q3
+ veor q3, q3, q7
+ veor q7, q7, q5
+ veor q3, q3, q4
+ veor q4, q4, q5
+
+ veor q2, q2, q7
+ veor q3, q3, q1
+ veor q1, q1, q5
+ veor q11, q7, q4
+ veor q10, q1, q2
+ veor q9, q5, q3
+ veor q13, q2, q4
+ vmov q8, q10
+ veor q12, q6, q0
+
+ vorr q10, q10, q9
+ veor q15, q11, q8
+ vand q14, q11, q12
+ vorr q11, q11, q12
+ veor q12, q12, q9
+ vand q8, q8, q9
+ veor q9, q3, q0
+ vand q15, q15, q12
+ vand q13, q13, q9
+ veor q9, q7, q1
+ veor q12, q5, q6
+ veor q11, q11, q13
+ veor q10, q10, q13
+ vand q13, q9, q12
+ vorr q9, q9, q12
+ veor q11, q11, q15
+ veor q8, q8, q13
+ veor q10, q10, q14
+ veor q9, q9, q15
+ veor q8, q8, q14
+ vand q12, q2, q3
+ veor q9, q9, q14
+ vand q13, q4, q0
+ vand q14, q1, q5
+ vorr q15, q7, q6
+ veor q11, q11, q12
+ veor q9, q9, q14
+ veor q8, q8, q15
+ veor q10, q10, q13
+
+ @ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3
+
+ @ new smaller inversion
+
+ vand q14, q11, q9
+ vmov q12, q8
+
+ veor q13, q10, q14
+ veor q15, q8, q14
+ veor q14, q8, q14 @ q14=q15
+
+ vbsl q13, q9, q8
+ vbsl q15, q11, q10
+ veor q11, q11, q10
+
+ vbsl q12, q13, q14
+ vbsl q8, q14, q13
+
+ vand q14, q12, q15
+ veor q9, q9, q8
+
+ veor q14, q14, q11
+ veor q12, q6, q0
+ veor q8, q5, q3
+ veor q10, q15, q14
+ vand q10, q10, q6
+ veor q6, q6, q5
+ vand q11, q5, q15
+ vand q6, q6, q14
+ veor q5, q11, q10
+ veor q6, q6, q11
+ veor q15, q15, q13
+ veor q14, q14, q9
+ veor q11, q15, q14
+ veor q10, q13, q9
+ vand q11, q11, q12
+ vand q10, q10, q0
+ veor q12, q12, q8
+ veor q0, q0, q3
+ vand q8, q8, q15
+ vand q3, q3, q13
+ vand q12, q12, q14
+ vand q0, q0, q9
+ veor q8, q8, q12
+ veor q0, q0, q3
+ veor q12, q12, q11
+ veor q3, q3, q10
+ veor q6, q6, q12
+ veor q0, q0, q12
+ veor q5, q5, q8
+ veor q3, q3, q8
+
+ veor q12, q7, q4
+ veor q8, q1, q2
+ veor q11, q15, q14
+ veor q10, q13, q9
+ vand q11, q11, q12
+ vand q10, q10, q4
+ veor q12, q12, q8
+ veor q4, q4, q2
+ vand q8, q8, q15
+ vand q2, q2, q13
+ vand q12, q12, q14
+ vand q4, q4, q9
+ veor q8, q8, q12
+ veor q4, q4, q2
+ veor q12, q12, q11
+ veor q2, q2, q10
+ veor q15, q15, q13
+ veor q14, q14, q9
+ veor q10, q15, q14
+ vand q10, q10, q7
+ veor q7, q7, q1
+ vand q11, q1, q15
+ vand q7, q7, q14
+ veor q1, q11, q10
+ veor q7, q7, q11
+ veor q7, q7, q12
+ veor q4, q4, q12
+ veor q1, q1, q8
+ veor q2, q2, q8
+ veor q7, q7, q0
+ veor q1, q1, q6
+ veor q6, q6, q0
+ veor q4, q4, q7
+ veor q0, q0, q1
+
+ veor q1, q1, q5
+ veor q5, q5, q2
+ veor q2, q2, q3
+ veor q3, q3, q5
+ veor q4, q4, q5
+
+ veor q6, q6, q3
+ subs r5,r5,#1
+ bcc .Lenc_done
+ vext.8 q8, q0, q0, #12 @ x0 <<< 32
+ vext.8 q9, q1, q1, #12
+ veor q0, q0, q8 @ x0 ^ (x0 <<< 32)
+ vext.8 q10, q4, q4, #12
+ veor q1, q1, q9
+ vext.8 q11, q6, q6, #12
+ veor q4, q4, q10
+ vext.8 q12, q3, q3, #12
+ veor q6, q6, q11
+ vext.8 q13, q7, q7, #12
+ veor q3, q3, q12
+ vext.8 q14, q2, q2, #12
+ veor q7, q7, q13
+ vext.8 q15, q5, q5, #12
+ veor q2, q2, q14
+
+ veor q9, q9, q0
+ veor q5, q5, q15
+ vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64
+ veor q10, q10, q1
+ veor q8, q8, q5
+ veor q9, q9, q5
+ vext.8 q1, q1, q1, #8
+ veor q13, q13, q3
+ veor q0, q0, q8
+ veor q14, q14, q7
+ veor q1, q1, q9
+ vext.8 q8, q3, q3, #8
+ veor q12, q12, q6
+ vext.8 q9, q7, q7, #8
+ veor q15, q15, q2
+ vext.8 q3, q6, q6, #8
+ veor q11, q11, q4
+ vext.8 q7, q5, q5, #8
+ veor q12, q12, q5
+ vext.8 q6, q2, q2, #8
+ veor q11, q11, q5
+ vext.8 q2, q4, q4, #8
+ veor q5, q9, q13
+ veor q4, q8, q12
+ veor q3, q3, q11
+ veor q7, q7, q15
+ veor q6, q6, q14
+ @ vmov q4, q8
+ veor q2, q2, q10
+ @ vmov q5, q9
+ vldmia r6, {q12} @ .LSR
+ ite eq @ Thumb2 thing, sanity check in ARM
+ addeq r6,r6,#0x10
+ bne .Lenc_loop
+ vldmia r6, {q12} @ .LSRM0
+ b .Lenc_loop
+.align 4
+.Lenc_done:
+ vmov.i8 q8,#0x55 @ compose .LBS0
+ vmov.i8 q9,#0x33 @ compose .LBS1
+ vshr.u64 q10, q2, #1
+ vshr.u64 q11, q3, #1
+ veor q10, q10, q5
+ veor q11, q11, q7
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #1
+ veor q7, q7, q11
+ vshl.u64 q11, q11, #1
+ veor q2, q2, q10
+ veor q3, q3, q11
+ vshr.u64 q10, q4, #1
+ vshr.u64 q11, q0, #1
+ veor q10, q10, q6
+ veor q11, q11, q1
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q6, q6, q10
+ vshl.u64 q10, q10, #1
+ veor q1, q1, q11
+ vshl.u64 q11, q11, #1
+ veor q4, q4, q10
+ veor q0, q0, q11
+ vmov.i8 q8,#0x0f @ compose .LBS2
+ vshr.u64 q10, q7, #2
+ vshr.u64 q11, q3, #2
+ veor q10, q10, q5
+ veor q11, q11, q2
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #2
+ veor q2, q2, q11
+ vshl.u64 q11, q11, #2
+ veor q7, q7, q10
+ veor q3, q3, q11
+ vshr.u64 q10, q1, #2
+ vshr.u64 q11, q0, #2
+ veor q10, q10, q6
+ veor q11, q11, q4
+ vand q10, q10, q9
+ vand q11, q11, q9
+ veor q6, q6, q10
+ vshl.u64 q10, q10, #2
+ veor q4, q4, q11
+ vshl.u64 q11, q11, #2
+ veor q1, q1, q10
+ veor q0, q0, q11
+ vshr.u64 q10, q6, #4
+ vshr.u64 q11, q4, #4
+ veor q10, q10, q5
+ veor q11, q11, q2
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q5, q5, q10
+ vshl.u64 q10, q10, #4
+ veor q2, q2, q11
+ vshl.u64 q11, q11, #4
+ veor q6, q6, q10
+ veor q4, q4, q11
+ vshr.u64 q10, q1, #4
+ vshr.u64 q11, q0, #4
+ veor q10, q10, q7
+ veor q11, q11, q3
+ vand q10, q10, q8
+ vand q11, q11, q8
+ veor q7, q7, q10
+ vshl.u64 q10, q10, #4
+ veor q3, q3, q11
+ vshl.u64 q11, q11, #4
+ veor q1, q1, q10
+ veor q0, q0, q11
+ vldmia r4, {q8} @ last round key
+ veor q4, q4, q8
+ veor q6, q6, q8
+ veor q3, q3, q8
+ veor q7, q7, q8
+ veor q2, q2, q8
+ veor q5, q5, q8
+ veor q0, q0, q8
+ veor q1, q1, q8
+ bx lr
+.size _bsaes_encrypt8,.-_bsaes_encrypt8
+.type _bsaes_key_convert,%function
+.align 4
+_bsaes_key_convert:
+ adr r6,_bsaes_key_convert
+ vld1.8 {q7}, [r4]! @ load round 0 key
+ sub r6,r6,#_bsaes_key_convert-.LM0
+ vld1.8 {q15}, [r4]! @ load round 1 key
+
+ vmov.i8 q8, #0x01 @ bit masks
+ vmov.i8 q9, #0x02
+ vmov.i8 q10, #0x04
+ vmov.i8 q11, #0x08
+ vmov.i8 q12, #0x10
+ vmov.i8 q13, #0x20
+ vldmia r6, {q14} @ .LM0
+
+#ifdef __ARMEL__
+ vrev32.8 q7, q7
+ vrev32.8 q15, q15
+#endif
+ sub r5,r5,#1
+ vstmia r12!, {q7} @ save round 0 key
+ b .Lkey_loop
+
+.align 4
+.Lkey_loop:
+ vtbl.8 d14,{q15},d28
+ vtbl.8 d15,{q15},d29
+ vmov.i8 q6, #0x40
+ vmov.i8 q15, #0x80
+
+ vtst.8 q0, q7, q8
+ vtst.8 q1, q7, q9
+ vtst.8 q2, q7, q10
+ vtst.8 q3, q7, q11
+ vtst.8 q4, q7, q12
+ vtst.8 q5, q7, q13
+ vtst.8 q6, q7, q6
+ vtst.8 q7, q7, q15
+ vld1.8 {q15}, [r4]! @ load next round key
+ vmvn q0, q0 @ "pnot"
+ vmvn q1, q1
+ vmvn q5, q5
+ vmvn q6, q6
+#ifdef __ARMEL__
+ vrev32.8 q15, q15
+#endif
+ subs r5,r5,#1
+ vstmia r12!,{q0-q7} @ write bit-sliced round key
+ bne .Lkey_loop
+
+ vmov.i8 q7,#0x63 @ compose .L63
+ @ don't save last round key
+ bx lr
+.size _bsaes_key_convert,.-_bsaes_key_convert
+.extern AES_cbc_encrypt
+.extern AES_decrypt
+
+.global bsaes_cbc_encrypt
+.type bsaes_cbc_encrypt,%function
+.align 5
+bsaes_cbc_encrypt:
+#ifndef __KERNEL__
+ cmp r2, #128
+#ifndef __thumb__
+ blo AES_cbc_encrypt
+#else
+ bhs 1f
+ b AES_cbc_encrypt
+1:
+#endif
+#endif
+
+ @ it is up to the caller to make sure we are called with enc == 0
+
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr}
+ VFP_ABI_PUSH
+ ldr r8, [ip] @ IV is 1st arg on the stack
+ mov r2, r2, lsr#4 @ len in 16 byte blocks
+ sub sp, #0x10 @ scratch space to carry over the IV
+ mov r9, sp @ save sp
+
+ ldr r10, [r3, #240] @ get # of rounds
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key
+ add r12, #96 @ size of bit-sliced key schedule
+
+ @ populate the key schedule
+ mov r4, r3 @ pass key
+ mov r5, r10 @ pass # of rounds
+ mov sp, r12 @ sp is sp
+ bl _bsaes_key_convert
+ vldmia sp, {q6}
+ vstmia r12, {q15} @ save last round key
+ veor q7, q7, q6 @ fix up round 0 key
+ vstmia sp, {q7}
+#else
+ ldr r12, [r3, #244]
+ eors r12, #1
+ beq 0f
+
+ @ populate the key schedule
+ str r12, [r3, #244]
+ mov r4, r3 @ pass key
+ mov r5, r10 @ pass # of rounds
+ add r12, r3, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ add r4, r3, #248
+ vldmia r4, {q6}
+ vstmia r12, {q15} @ save last round key
+ veor q7, q7, q6 @ fix up round 0 key
+ vstmia r4, {q7}
+
+.align 2
+0:
+#endif
+
+ vld1.8 {q15}, [r8] @ load IV
+ b .Lcbc_dec_loop
+
+.align 4
+.Lcbc_dec_loop:
+ subs r2, r2, #0x8
+ bmi .Lcbc_dec_loop_finish
+
+ vld1.8 {q0-q1}, [r0]! @ load input
+ vld1.8 {q2-q3}, [r0]!
+#ifndef BSAES_ASM_EXTENDED_KEY
+ mov r4, sp @ pass the key
+#else
+ add r4, r3, #248
+#endif
+ vld1.8 {q4-q5}, [r0]!
+ mov r5, r10
+ vld1.8 {q6-q7}, [r0]
+ sub r0, r0, #0x60
+ vstmia r9, {q15} @ put aside IV
+
+ bl _bsaes_decrypt8
+
+ vldmia r9, {q14} @ reload IV
+ vld1.8 {q8-q9}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q10-q11}, [r0]!
+ veor q1, q1, q8
+ veor q6, q6, q9
+ vld1.8 {q12-q13}, [r0]!
+ veor q4, q4, q10
+ veor q2, q2, q11
+ vld1.8 {q14-q15}, [r0]!
+ veor q7, q7, q12
+ vst1.8 {q0-q1}, [r1]! @ write output
+ veor q3, q3, q13
+ vst1.8 {q6}, [r1]!
+ veor q5, q5, q14
+ vst1.8 {q4}, [r1]!
+ vst1.8 {q2}, [r1]!
+ vst1.8 {q7}, [r1]!
+ vst1.8 {q3}, [r1]!
+ vst1.8 {q5}, [r1]!
+
+ b .Lcbc_dec_loop
+
+.Lcbc_dec_loop_finish:
+ adds r2, r2, #8
+ beq .Lcbc_dec_done
+
+ vld1.8 {q0}, [r0]! @ load input
+ cmp r2, #2
+ blo .Lcbc_dec_one
+ vld1.8 {q1}, [r0]!
+#ifndef BSAES_ASM_EXTENDED_KEY
+ mov r4, sp @ pass the key
+#else
+ add r4, r3, #248
+#endif
+ mov r5, r10
+ vstmia r9, {q15} @ put aside IV
+ beq .Lcbc_dec_two
+ vld1.8 {q2}, [r0]!
+ cmp r2, #4
+ blo .Lcbc_dec_three
+ vld1.8 {q3}, [r0]!
+ beq .Lcbc_dec_four
+ vld1.8 {q4}, [r0]!
+ cmp r2, #6
+ blo .Lcbc_dec_five
+ vld1.8 {q5}, [r0]!
+ beq .Lcbc_dec_six
+ vld1.8 {q6}, [r0]!
+ sub r0, r0, #0x70
+
+ bl _bsaes_decrypt8
+
+ vldmia r9, {q14} @ reload IV
+ vld1.8 {q8-q9}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q10-q11}, [r0]!
+ veor q1, q1, q8
+ veor q6, q6, q9
+ vld1.8 {q12-q13}, [r0]!
+ veor q4, q4, q10
+ veor q2, q2, q11
+ vld1.8 {q15}, [r0]!
+ veor q7, q7, q12
+ vst1.8 {q0-q1}, [r1]! @ write output
+ veor q3, q3, q13
+ vst1.8 {q6}, [r1]!
+ vst1.8 {q4}, [r1]!
+ vst1.8 {q2}, [r1]!
+ vst1.8 {q7}, [r1]!
+ vst1.8 {q3}, [r1]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_six:
+ sub r0, r0, #0x60
+ bl _bsaes_decrypt8
+ vldmia r9,{q14} @ reload IV
+ vld1.8 {q8-q9}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q10-q11}, [r0]!
+ veor q1, q1, q8
+ veor q6, q6, q9
+ vld1.8 {q12}, [r0]!
+ veor q4, q4, q10
+ veor q2, q2, q11
+ vld1.8 {q15}, [r0]!
+ veor q7, q7, q12
+ vst1.8 {q0-q1}, [r1]! @ write output
+ vst1.8 {q6}, [r1]!
+ vst1.8 {q4}, [r1]!
+ vst1.8 {q2}, [r1]!
+ vst1.8 {q7}, [r1]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_five:
+ sub r0, r0, #0x50
+ bl _bsaes_decrypt8
+ vldmia r9, {q14} @ reload IV
+ vld1.8 {q8-q9}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q10-q11}, [r0]!
+ veor q1, q1, q8
+ veor q6, q6, q9
+ vld1.8 {q15}, [r0]!
+ veor q4, q4, q10
+ vst1.8 {q0-q1}, [r1]! @ write output
+ veor q2, q2, q11
+ vst1.8 {q6}, [r1]!
+ vst1.8 {q4}, [r1]!
+ vst1.8 {q2}, [r1]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_four:
+ sub r0, r0, #0x40
+ bl _bsaes_decrypt8
+ vldmia r9, {q14} @ reload IV
+ vld1.8 {q8-q9}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q10}, [r0]!
+ veor q1, q1, q8
+ veor q6, q6, q9
+ vld1.8 {q15}, [r0]!
+ veor q4, q4, q10
+ vst1.8 {q0-q1}, [r1]! @ write output
+ vst1.8 {q6}, [r1]!
+ vst1.8 {q4}, [r1]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_three:
+ sub r0, r0, #0x30
+ bl _bsaes_decrypt8
+ vldmia r9, {q14} @ reload IV
+ vld1.8 {q8-q9}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q15}, [r0]!
+ veor q1, q1, q8
+ veor q6, q6, q9
+ vst1.8 {q0-q1}, [r1]! @ write output
+ vst1.8 {q6}, [r1]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_two:
+ sub r0, r0, #0x20
+ bl _bsaes_decrypt8
+ vldmia r9, {q14} @ reload IV
+ vld1.8 {q8}, [r0]! @ reload input
+ veor q0, q0, q14 @ ^= IV
+ vld1.8 {q15}, [r0]! @ reload input
+ veor q1, q1, q8
+ vst1.8 {q0-q1}, [r1]! @ write output
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_one:
+ sub r0, r0, #0x10
+ mov r10, r1 @ save original out pointer
+ mov r1, r9 @ use the iv scratch space as out buffer
+ mov r2, r3
+ vmov q4,q15 @ just in case ensure that IV
+ vmov q5,q0 @ and input are preserved
+ bl AES_decrypt
+ vld1.8 {q0}, [r9,:64] @ load result
+ veor q0, q0, q4 @ ^= IV
+ vmov q15, q5 @ q5 holds input
+ vst1.8 {q0}, [r10] @ write output
+
+.Lcbc_dec_done:
+#ifndef BSAES_ASM_EXTENDED_KEY
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+.Lcbc_dec_bzero: @ wipe key schedule [if any]
+ vstmia sp!, {q0-q1}
+ cmp sp, r9
+ bne .Lcbc_dec_bzero
+#endif
+
+ mov sp, r9
+ add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb
+ vst1.8 {q15}, [r8] @ return IV
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc}
+.size bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+.extern AES_encrypt
+.global bsaes_ctr32_encrypt_blocks
+.type bsaes_ctr32_encrypt_blocks,%function
+.align 5
+bsaes_ctr32_encrypt_blocks:
+ cmp r2, #8 @ use plain AES for
+ blo .Lctr_enc_short @ small sizes
+
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr}
+ VFP_ABI_PUSH
+ ldr r8, [ip] @ ctr is 1st arg on the stack
+ sub sp, sp, #0x10 @ scratch space to carry over the ctr
+ mov r9, sp @ save sp
+
+ ldr r10, [r3, #240] @ get # of rounds
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key
+ add r12, #96 @ size of bit-sliced key schedule
+
+ @ populate the key schedule
+ mov r4, r3 @ pass key
+ mov r5, r10 @ pass # of rounds
+ mov sp, r12 @ sp is sp
+ bl _bsaes_key_convert
+ veor q7,q7,q15 @ fix up last round key
+ vstmia r12, {q7} @ save last round key
+
+ vld1.8 {q0}, [r8] @ load counter
+ add r8, r6, #.LREVM0SR-.LM0 @ borrow r8
+ vldmia sp, {q4} @ load round0 key
+#else
+ ldr r12, [r3, #244]
+ eors r12, #1
+ beq 0f
+
+ @ populate the key schedule
+ str r12, [r3, #244]
+ mov r4, r3 @ pass key
+ mov r5, r10 @ pass # of rounds
+ add r12, r3, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ veor q7,q7,q15 @ fix up last round key
+ vstmia r12, {q7} @ save last round key
+
+.align 2
+0: add r12, r3, #248
+ vld1.8 {q0}, [r8] @ load counter
+ adrl r8, .LREVM0SR @ borrow r8
+ vldmia r12, {q4} @ load round0 key
+ sub sp, #0x10 @ place for adjusted round0 key
+#endif
+
+ vmov.i32 q8,#1 @ compose 1<<96
+ veor q9,q9,q9
+ vrev32.8 q0,q0
+ vext.8 q8,q9,q8,#4
+ vrev32.8 q4,q4
+ vadd.u32 q9,q8,q8 @ compose 2<<96
+ vstmia sp, {q4} @ save adjusted round0 key
+ b .Lctr_enc_loop
+
+.align 4
+.Lctr_enc_loop:
+ vadd.u32 q10, q8, q9 @ compose 3<<96
+ vadd.u32 q1, q0, q8 @ +1
+ vadd.u32 q2, q0, q9 @ +2
+ vadd.u32 q3, q0, q10 @ +3
+ vadd.u32 q4, q1, q10
+ vadd.u32 q5, q2, q10
+ vadd.u32 q6, q3, q10
+ vadd.u32 q7, q4, q10
+ vadd.u32 q10, q5, q10 @ next counter
+
+ @ Borrow prologue from _bsaes_encrypt8 to use the opportunity
+ @ to flip byte order in 32-bit counter
+
+ vldmia sp, {q9} @ load round0 key
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x10 @ pass next round key
+#else
+ add r4, r3, #264
+#endif
+ vldmia r8, {q8} @ .LREVM0SR
+ mov r5, r10 @ pass rounds
+ vstmia r9, {q10} @ save next counter
+ sub r6, r8, #.LREVM0SR-.LSR @ pass constants
+
+ bl _bsaes_encrypt8_alt
+
+ subs r2, r2, #8
+ blo .Lctr_enc_loop_done
+
+ vld1.8 {q8-q9}, [r0]! @ load input
+ vld1.8 {q10-q11}, [r0]!
+ veor q0, q8
+ veor q1, q9
+ vld1.8 {q12-q13}, [r0]!
+ veor q4, q10
+ veor q6, q11
+ vld1.8 {q14-q15}, [r0]!
+ veor q3, q12
+ vst1.8 {q0-q1}, [r1]! @ write output
+ veor q7, q13
+ veor q2, q14
+ vst1.8 {q4}, [r1]!
+ veor q5, q15
+ vst1.8 {q6}, [r1]!
+ vmov.i32 q8, #1 @ compose 1<<96
+ vst1.8 {q3}, [r1]!
+ veor q9, q9, q9
+ vst1.8 {q7}, [r1]!
+ vext.8 q8, q9, q8, #4
+ vst1.8 {q2}, [r1]!
+ vadd.u32 q9,q8,q8 @ compose 2<<96
+ vst1.8 {q5}, [r1]!
+ vldmia r9, {q0} @ load counter
+
+ bne .Lctr_enc_loop
+ b .Lctr_enc_done
+
+.align 4
+.Lctr_enc_loop_done:
+ add r2, r2, #8
+ vld1.8 {q8}, [r0]! @ load input
+ veor q0, q8
+ vst1.8 {q0}, [r1]! @ write output
+ cmp r2, #2
+ blo .Lctr_enc_done
+ vld1.8 {q9}, [r0]!
+ veor q1, q9
+ vst1.8 {q1}, [r1]!
+ beq .Lctr_enc_done
+ vld1.8 {q10}, [r0]!
+ veor q4, q10
+ vst1.8 {q4}, [r1]!
+ cmp r2, #4
+ blo .Lctr_enc_done
+ vld1.8 {q11}, [r0]!
+ veor q6, q11
+ vst1.8 {q6}, [r1]!
+ beq .Lctr_enc_done
+ vld1.8 {q12}, [r0]!
+ veor q3, q12
+ vst1.8 {q3}, [r1]!
+ cmp r2, #6
+ blo .Lctr_enc_done
+ vld1.8 {q13}, [r0]!
+ veor q7, q13
+ vst1.8 {q7}, [r1]!
+ beq .Lctr_enc_done
+ vld1.8 {q14}, [r0]
+ veor q2, q14
+ vst1.8 {q2}, [r1]!
+
+.Lctr_enc_done:
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+#ifndef BSAES_ASM_EXTENDED_KEY
+.Lctr_enc_bzero: @ wipe key schedule [if any]
+ vstmia sp!, {q0-q1}
+ cmp sp, r9
+ bne .Lctr_enc_bzero
+#else
+ vstmia sp, {q0-q1}
+#endif
+
+ mov sp, r9
+ add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc} @ return
+
+.align 4
+.Lctr_enc_short:
+ ldr ip, [sp] @ ctr pointer is passed on stack
+ stmdb sp!, {r4-r8, lr}
+
+ mov r4, r0 @ copy arguments
+ mov r5, r1
+ mov r6, r2
+ mov r7, r3
+ ldr r8, [ip, #12] @ load counter LSW
+ vld1.8 {q1}, [ip] @ load whole counter value
+#ifdef __ARMEL__
+ rev r8, r8
+#endif
+ sub sp, sp, #0x10
+ vst1.8 {q1}, [sp,:64] @ copy counter value
+ sub sp, sp, #0x10
+
+.Lctr_enc_short_loop:
+ add r0, sp, #0x10 @ input counter value
+ mov r1, sp @ output on the stack
+ mov r2, r7 @ key
+
+ bl AES_encrypt
+
+ vld1.8 {q0}, [r4]! @ load input
+ vld1.8 {q1}, [sp,:64] @ load encrypted counter
+ add r8, r8, #1
+#ifdef __ARMEL__
+ rev r0, r8
+ str r0, [sp, #0x1c] @ next counter value
+#else
+ str r8, [sp, #0x1c] @ next counter value
+#endif
+ veor q0,q0,q1
+ vst1.8 {q0}, [r5]! @ store output
+ subs r6, r6, #1
+ bne .Lctr_enc_short_loop
+
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+ vstmia sp!, {q0-q1}
+
+ ldmia sp!, {r4-r8, pc}
+.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
+.globl bsaes_xts_encrypt
+.type bsaes_xts_encrypt,%function
+.align 4
+bsaes_xts_encrypt:
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr} @ 0x20
+ VFP_ABI_PUSH
+ mov r6, sp @ future r3
+
+ mov r7, r0
+ mov r8, r1
+ mov r9, r2
+ mov r10, r3
+
+ sub r0, sp, #0x10 @ 0x10
+ bic r0, #0xf @ align at 16 bytes
+ mov sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+ ldr r0, [ip] @ pointer to input tweak
+#else
+ @ generate initial tweak
+ ldr r0, [ip, #4] @ iv[]
+ mov r1, sp
+ ldr r2, [ip, #0] @ key2
+ bl AES_encrypt
+ mov r0,sp @ pointer to initial tweak
+#endif
+
+ ldr r1, [r10, #240] @ get # of rounds
+ mov r3, r6
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, r1, lsl#7 @ 128 bytes per inner round key
+ @ add r12, #96 @ size of bit-sliced key schedule
+ sub r12, #48 @ place for tweak[9]
+
+ @ populate the key schedule
+ mov r4, r10 @ pass key
+ mov r5, r1 @ pass # of rounds
+ mov sp, r12
+ add r12, #0x90 @ pass key schedule
+ bl _bsaes_key_convert
+ veor q7, q7, q15 @ fix up last round key
+ vstmia r12, {q7} @ save last round key
+#else
+ ldr r12, [r10, #244]
+ eors r12, #1
+ beq 0f
+
+ str r12, [r10, #244]
+ mov r4, r10 @ pass key
+ mov r5, r1 @ pass # of rounds
+ add r12, r10, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ veor q7, q7, q15 @ fix up last round key
+ vstmia r12, {q7}
+
+.align 2
+0: sub sp, #0x90 @ place for tweak[9]
+#endif
+
+ vld1.8 {q8}, [r0] @ initial tweak
+ adr r2, .Lxts_magic
+
+ subs r9, #0x80
+ blo .Lxts_enc_short
+ b .Lxts_enc_loop
+
+.align 4
+.Lxts_enc_loop:
+ vldmia r2, {q5} @ load XTS magic
+ vshr.s64 q6, q8, #63
+ mov r0, sp
+ vand q6, q6, q5
+ vadd.u64 q9, q8, q8
+ vst1.64 {q8}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q9, #63
+ veor q9, q9, q6
+ vand q7, q7, q5
+ vadd.u64 q10, q9, q9
+ vst1.64 {q9}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q10, #63
+ veor q10, q10, q7
+ vand q6, q6, q5
+ vld1.8 {q0}, [r7]!
+ vadd.u64 q11, q10, q10
+ vst1.64 {q10}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q11, #63
+ veor q11, q11, q6
+ vand q7, q7, q5
+ vld1.8 {q1}, [r7]!
+ veor q0, q0, q8
+ vadd.u64 q12, q11, q11
+ vst1.64 {q11}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q12, #63
+ veor q12, q12, q7
+ vand q6, q6, q5
+ vld1.8 {q2}, [r7]!
+ veor q1, q1, q9
+ vadd.u64 q13, q12, q12
+ vst1.64 {q12}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q13, #63
+ veor q13, q13, q6
+ vand q7, q7, q5
+ vld1.8 {q3}, [r7]!
+ veor q2, q2, q10
+ vadd.u64 q14, q13, q13
+ vst1.64 {q13}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q14, #63
+ veor q14, q14, q7
+ vand q6, q6, q5
+ vld1.8 {q4}, [r7]!
+ veor q3, q3, q11
+ vadd.u64 q15, q14, q14
+ vst1.64 {q14}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q15, #63
+ veor q15, q15, q6
+ vand q7, q7, q5
+ vld1.8 {q5}, [r7]!
+ veor q4, q4, q12
+ vadd.u64 q8, q15, q15
+ vst1.64 {q15}, [r0,:128]!
+ vswp d15,d14
+ veor q8, q8, q7
+ vst1.64 {q8}, [r0,:128] @ next round tweak
+
+ vld1.8 {q6-q7}, [r7]!
+ veor q5, q5, q13
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q6, q6, q14
+ mov r5, r1 @ pass rounds
+ veor q7, q7, q15
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12-q13}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q4, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q6, q11
+ vld1.64 {q14-q15}, [r0,:128]!
+ veor q10, q3, q12
+ vst1.8 {q8-q9}, [r8]!
+ veor q11, q7, q13
+ veor q12, q2, q14
+ vst1.8 {q10-q11}, [r8]!
+ veor q13, q5, q15
+ vst1.8 {q12-q13}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+
+ subs r9, #0x80
+ bpl .Lxts_enc_loop
+
+.Lxts_enc_short:
+ adds r9, #0x70
+ bmi .Lxts_enc_done
+
+ vldmia r2, {q5} @ load XTS magic
+ vshr.s64 q7, q8, #63
+ mov r0, sp
+ vand q7, q7, q5
+ vadd.u64 q9, q8, q8
+ vst1.64 {q8}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q9, #63
+ veor q9, q9, q7
+ vand q6, q6, q5
+ vadd.u64 q10, q9, q9
+ vst1.64 {q9}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q10, #63
+ veor q10, q10, q6
+ vand q7, q7, q5
+ vld1.8 {q0}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_enc_1
+ vadd.u64 q11, q10, q10
+ vst1.64 {q10}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q11, #63
+ veor q11, q11, q7
+ vand q6, q6, q5
+ vld1.8 {q1}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_enc_2
+ veor q0, q0, q8
+ vadd.u64 q12, q11, q11
+ vst1.64 {q11}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q12, #63
+ veor q12, q12, q6
+ vand q7, q7, q5
+ vld1.8 {q2}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_enc_3
+ veor q1, q1, q9
+ vadd.u64 q13, q12, q12
+ vst1.64 {q12}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q13, #63
+ veor q13, q13, q7
+ vand q6, q6, q5
+ vld1.8 {q3}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_enc_4
+ veor q2, q2, q10
+ vadd.u64 q14, q13, q13
+ vst1.64 {q13}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q14, #63
+ veor q14, q14, q6
+ vand q7, q7, q5
+ vld1.8 {q4}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_enc_5
+ veor q3, q3, q11
+ vadd.u64 q15, q14, q14
+ vst1.64 {q14}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q15, #63
+ veor q15, q15, q7
+ vand q6, q6, q5
+ vld1.8 {q5}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_enc_6
+ veor q4, q4, q12
+ sub r9, #0x10
+ vst1.64 {q15}, [r0,:128] @ next round tweak
+
+ vld1.8 {q6}, [r7]!
+ veor q5, q5, q13
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q6, q6, q14
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12-q13}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q4, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q6, q11
+ vld1.64 {q14}, [r0,:128]!
+ veor q10, q3, q12
+ vst1.8 {q8-q9}, [r8]!
+ veor q11, q7, q13
+ veor q12, q2, q14
+ vst1.8 {q10-q11}, [r8]!
+ vst1.8 {q12}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_6:
+ vst1.64 {q14}, [r0,:128] @ next round tweak
+
+ veor q4, q4, q12
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q5, q5, q13
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12-q13}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q4, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q6, q11
+ veor q10, q3, q12
+ vst1.8 {q8-q9}, [r8]!
+ veor q11, q7, q13
+ vst1.8 {q10-q11}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+
+@ put this in range for both ARM and Thumb mode adr instructions
+.align 5
+.Lxts_magic:
+ .quad 1, 0x87
+
+.align 5
+.Lxts_enc_5:
+ vst1.64 {q13}, [r0,:128] @ next round tweak
+
+ veor q3, q3, q11
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q4, q4, q12
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q4, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q6, q11
+ veor q10, q3, q12
+ vst1.8 {q8-q9}, [r8]!
+ vst1.8 {q10}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_4:
+ vst1.64 {q12}, [r0,:128] @ next round tweak
+
+ veor q2, q2, q10
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q3, q3, q11
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ veor q1, q1, q9
+ veor q8, q4, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q6, q11
+ vst1.8 {q8-q9}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_3:
+ vst1.64 {q11}, [r0,:128] @ next round tweak
+
+ veor q1, q1, q9
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q2, q2, q10
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10}, [r0,:128]!
+ veor q0, q0, q8
+ veor q1, q1, q9
+ veor q8, q4, q10
+ vst1.8 {q0-q1}, [r8]!
+ vst1.8 {q8}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_2:
+ vst1.64 {q10}, [r0,:128] @ next round tweak
+
+ veor q0, q0, q8
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q1, q1, q9
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ veor q0, q0, q8
+ veor q1, q1, q9
+ vst1.8 {q0-q1}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_1:
+ mov r0, sp
+ veor q0, q8
+ mov r1, sp
+ vst1.8 {q0}, [sp,:128]
+ mov r2, r10
+ mov r4, r3 @ preserve fp
+
+ bl AES_encrypt
+
+ vld1.8 {q0}, [sp,:128]
+ veor q0, q0, q8
+ vst1.8 {q0}, [r8]!
+ mov r3, r4
+
+ vmov q8, q9 @ next round tweak
+
+.Lxts_enc_done:
+#ifndef XTS_CHAIN_TWEAK
+ adds r9, #0x10
+ beq .Lxts_enc_ret
+ sub r6, r8, #0x10
+
+.Lxts_enc_steal:
+ ldrb r0, [r7], #1
+ ldrb r1, [r8, #-0x10]
+ strb r0, [r8, #-0x10]
+ strb r1, [r8], #1
+
+ subs r9, #1
+ bhi .Lxts_enc_steal
+
+ vld1.8 {q0}, [r6]
+ mov r0, sp
+ veor q0, q0, q8
+ mov r1, sp
+ vst1.8 {q0}, [sp,:128]
+ mov r2, r10
+ mov r4, r3 @ preserve fp
+
+ bl AES_encrypt
+
+ vld1.8 {q0}, [sp,:128]
+ veor q0, q0, q8
+ vst1.8 {q0}, [r6]
+ mov r3, r4
+#endif
+
+.Lxts_enc_ret:
+ bic r0, r3, #0xf
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+#ifdef XTS_CHAIN_TWEAK
+ ldr r1, [r3, #0x20+VFP_ABI_FRAME] @ chain tweak
+#endif
+.Lxts_enc_bzero: @ wipe key schedule [if any]
+ vstmia sp!, {q0-q1}
+ cmp sp, r0
+ bne .Lxts_enc_bzero
+
+ mov sp, r3
+#ifdef XTS_CHAIN_TWEAK
+ vst1.8 {q8}, [r1]
+#endif
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc} @ return
+
+.size bsaes_xts_encrypt,.-bsaes_xts_encrypt
+
+.globl bsaes_xts_decrypt
+.type bsaes_xts_decrypt,%function
+.align 4
+bsaes_xts_decrypt:
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr} @ 0x20
+ VFP_ABI_PUSH
+ mov r6, sp @ future r3
+
+ mov r7, r0
+ mov r8, r1
+ mov r9, r2
+ mov r10, r3
+
+ sub r0, sp, #0x10 @ 0x10
+ bic r0, #0xf @ align at 16 bytes
+ mov sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+ ldr r0, [ip] @ pointer to input tweak
+#else
+ @ generate initial tweak
+ ldr r0, [ip, #4] @ iv[]
+ mov r1, sp
+ ldr r2, [ip, #0] @ key2
+ bl AES_encrypt
+ mov r0, sp @ pointer to initial tweak
+#endif
+
+ ldr r1, [r10, #240] @ get # of rounds
+ mov r3, r6
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, r1, lsl#7 @ 128 bytes per inner round key
+ @ add r12, #96 @ size of bit-sliced key schedule
+ sub r12, #48 @ place for tweak[9]
+
+ @ populate the key schedule
+ mov r4, r10 @ pass key
+ mov r5, r1 @ pass # of rounds
+ mov sp, r12
+ add r12, #0x90 @ pass key schedule
+ bl _bsaes_key_convert
+ add r4, sp, #0x90
+ vldmia r4, {q6}
+ vstmia r12, {q15} @ save last round key
+ veor q7, q7, q6 @ fix up round 0 key
+ vstmia r4, {q7}
+#else
+ ldr r12, [r10, #244]
+ eors r12, #1
+ beq 0f
+
+ str r12, [r10, #244]
+ mov r4, r10 @ pass key
+ mov r5, r1 @ pass # of rounds
+ add r12, r10, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ add r4, r10, #248
+ vldmia r4, {q6}
+ vstmia r12, {q15} @ save last round key
+ veor q7, q7, q6 @ fix up round 0 key
+ vstmia r4, {q7}
+
+.align 2
+0: sub sp, #0x90 @ place for tweak[9]
+#endif
+ vld1.8 {q8}, [r0] @ initial tweak
+ adr r2, .Lxts_magic
+
+ tst r9, #0xf @ if not multiple of 16
+ it ne @ Thumb2 thing, sanity check in ARM
+ subne r9, #0x10 @ subtract another 16 bytes
+ subs r9, #0x80
+
+ blo .Lxts_dec_short
+ b .Lxts_dec_loop
+
+.align 4
+.Lxts_dec_loop:
+ vldmia r2, {q5} @ load XTS magic
+ vshr.s64 q6, q8, #63
+ mov r0, sp
+ vand q6, q6, q5
+ vadd.u64 q9, q8, q8
+ vst1.64 {q8}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q9, #63
+ veor q9, q9, q6
+ vand q7, q7, q5
+ vadd.u64 q10, q9, q9
+ vst1.64 {q9}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q10, #63
+ veor q10, q10, q7
+ vand q6, q6, q5
+ vld1.8 {q0}, [r7]!
+ vadd.u64 q11, q10, q10
+ vst1.64 {q10}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q11, #63
+ veor q11, q11, q6
+ vand q7, q7, q5
+ vld1.8 {q1}, [r7]!
+ veor q0, q0, q8
+ vadd.u64 q12, q11, q11
+ vst1.64 {q11}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q12, #63
+ veor q12, q12, q7
+ vand q6, q6, q5
+ vld1.8 {q2}, [r7]!
+ veor q1, q1, q9
+ vadd.u64 q13, q12, q12
+ vst1.64 {q12}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q13, #63
+ veor q13, q13, q6
+ vand q7, q7, q5
+ vld1.8 {q3}, [r7]!
+ veor q2, q2, q10
+ vadd.u64 q14, q13, q13
+ vst1.64 {q13}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q14, #63
+ veor q14, q14, q7
+ vand q6, q6, q5
+ vld1.8 {q4}, [r7]!
+ veor q3, q3, q11
+ vadd.u64 q15, q14, q14
+ vst1.64 {q14}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q15, #63
+ veor q15, q15, q6
+ vand q7, q7, q5
+ vld1.8 {q5}, [r7]!
+ veor q4, q4, q12
+ vadd.u64 q8, q15, q15
+ vst1.64 {q15}, [r0,:128]!
+ vswp d15,d14
+ veor q8, q8, q7
+ vst1.64 {q8}, [r0,:128] @ next round tweak
+
+ vld1.8 {q6-q7}, [r7]!
+ veor q5, q5, q13
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q6, q6, q14
+ mov r5, r1 @ pass rounds
+ veor q7, q7, q15
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12-q13}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q6, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q4, q11
+ vld1.64 {q14-q15}, [r0,:128]!
+ veor q10, q2, q12
+ vst1.8 {q8-q9}, [r8]!
+ veor q11, q7, q13
+ veor q12, q3, q14
+ vst1.8 {q10-q11}, [r8]!
+ veor q13, q5, q15
+ vst1.8 {q12-q13}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+
+ subs r9, #0x80
+ bpl .Lxts_dec_loop
+
+.Lxts_dec_short:
+ adds r9, #0x70
+ bmi .Lxts_dec_done
+
+ vldmia r2, {q5} @ load XTS magic
+ vshr.s64 q7, q8, #63
+ mov r0, sp
+ vand q7, q7, q5
+ vadd.u64 q9, q8, q8
+ vst1.64 {q8}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q9, #63
+ veor q9, q9, q7
+ vand q6, q6, q5
+ vadd.u64 q10, q9, q9
+ vst1.64 {q9}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q10, #63
+ veor q10, q10, q6
+ vand q7, q7, q5
+ vld1.8 {q0}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_dec_1
+ vadd.u64 q11, q10, q10
+ vst1.64 {q10}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q11, #63
+ veor q11, q11, q7
+ vand q6, q6, q5
+ vld1.8 {q1}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_dec_2
+ veor q0, q0, q8
+ vadd.u64 q12, q11, q11
+ vst1.64 {q11}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q12, #63
+ veor q12, q12, q6
+ vand q7, q7, q5
+ vld1.8 {q2}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_dec_3
+ veor q1, q1, q9
+ vadd.u64 q13, q12, q12
+ vst1.64 {q12}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q13, #63
+ veor q13, q13, q7
+ vand q6, q6, q5
+ vld1.8 {q3}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_dec_4
+ veor q2, q2, q10
+ vadd.u64 q14, q13, q13
+ vst1.64 {q13}, [r0,:128]!
+ vswp d13,d12
+ vshr.s64 q7, q14, #63
+ veor q14, q14, q6
+ vand q7, q7, q5
+ vld1.8 {q4}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_dec_5
+ veor q3, q3, q11
+ vadd.u64 q15, q14, q14
+ vst1.64 {q14}, [r0,:128]!
+ vswp d15,d14
+ vshr.s64 q6, q15, #63
+ veor q15, q15, q7
+ vand q6, q6, q5
+ vld1.8 {q5}, [r7]!
+ subs r9, #0x10
+ bmi .Lxts_dec_6
+ veor q4, q4, q12
+ sub r9, #0x10
+ vst1.64 {q15}, [r0,:128] @ next round tweak
+
+ vld1.8 {q6}, [r7]!
+ veor q5, q5, q13
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q6, q6, q14
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12-q13}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q6, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q4, q11
+ vld1.64 {q14}, [r0,:128]!
+ veor q10, q2, q12
+ vst1.8 {q8-q9}, [r8]!
+ veor q11, q7, q13
+ veor q12, q3, q14
+ vst1.8 {q10-q11}, [r8]!
+ vst1.8 {q12}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_6:
+ vst1.64 {q14}, [r0,:128] @ next round tweak
+
+ veor q4, q4, q12
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q5, q5, q13
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12-q13}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q6, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q4, q11
+ veor q10, q2, q12
+ vst1.8 {q8-q9}, [r8]!
+ veor q11, q7, q13
+ vst1.8 {q10-q11}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_5:
+ vst1.64 {q13}, [r0,:128] @ next round tweak
+
+ veor q3, q3, q11
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q4, q4, q12
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ vld1.64 {q12}, [r0,:128]!
+ veor q1, q1, q9
+ veor q8, q6, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q4, q11
+ veor q10, q2, q12
+ vst1.8 {q8-q9}, [r8]!
+ vst1.8 {q10}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_4:
+ vst1.64 {q12}, [r0,:128] @ next round tweak
+
+ veor q2, q2, q10
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q3, q3, q11
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10-q11}, [r0,:128]!
+ veor q0, q0, q8
+ veor q1, q1, q9
+ veor q8, q6, q10
+ vst1.8 {q0-q1}, [r8]!
+ veor q9, q4, q11
+ vst1.8 {q8-q9}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_3:
+ vst1.64 {q11}, [r0,:128] @ next round tweak
+
+ veor q1, q1, q9
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q2, q2, q10
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ vld1.64 {q10}, [r0,:128]!
+ veor q0, q0, q8
+ veor q1, q1, q9
+ veor q8, q6, q10
+ vst1.8 {q0-q1}, [r8]!
+ vst1.8 {q8}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_2:
+ vst1.64 {q10}, [r0,:128] @ next round tweak
+
+ veor q0, q0, q8
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, r10, #248 @ pass key schedule
+#endif
+ veor q1, q1, q9
+ mov r5, r1 @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {q8-q9}, [r0,:128]!
+ veor q0, q0, q8
+ veor q1, q1, q9
+ vst1.8 {q0-q1}, [r8]!
+
+ vld1.64 {q8}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_1:
+ mov r0, sp
+ veor q0, q8
+ mov r1, sp
+ vst1.8 {q0}, [sp,:128]
+ mov r2, r10
+ mov r4, r3 @ preserve fp
+ mov r5, r2 @ preserve magic
+
+ bl AES_decrypt
+
+ vld1.8 {q0}, [sp,:128]
+ veor q0, q0, q8
+ vst1.8 {q0}, [r8]!
+ mov r3, r4
+ mov r2, r5
+
+ vmov q8, q9 @ next round tweak
+
+.Lxts_dec_done:
+#ifndef XTS_CHAIN_TWEAK
+ adds r9, #0x10
+ beq .Lxts_dec_ret
+
+ @ calculate one round of extra tweak for the stolen ciphertext
+ vldmia r2, {q5}
+ vshr.s64 q6, q8, #63
+ vand q6, q6, q5
+ vadd.u64 q9, q8, q8
+ vswp d13,d12
+ veor q9, q9, q6
+
+ @ perform the final decryption with the last tweak value
+ vld1.8 {q0}, [r7]!
+ mov r0, sp
+ veor q0, q0, q9
+ mov r1, sp
+ vst1.8 {q0}, [sp,:128]
+ mov r2, r10
+ mov r4, r3 @ preserve fp
+
+ bl AES_decrypt
+
+ vld1.8 {q0}, [sp,:128]
+ veor q0, q0, q9
+ vst1.8 {q0}, [r8]
+
+ mov r6, r8
+.Lxts_dec_steal:
+ ldrb r1, [r8]
+ ldrb r0, [r7], #1
+ strb r1, [r8, #0x10]
+ strb r0, [r8], #1
+
+ subs r9, #1
+ bhi .Lxts_dec_steal
+
+ vld1.8 {q0}, [r6]
+ mov r0, sp
+ veor q0, q8
+ mov r1, sp
+ vst1.8 {q0}, [sp,:128]
+ mov r2, r10
+
+ bl AES_decrypt
+
+ vld1.8 {q0}, [sp,:128]
+ veor q0, q0, q8
+ vst1.8 {q0}, [r6]
+ mov r3, r4
+#endif
+
+.Lxts_dec_ret:
+ bic r0, r3, #0xf
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+#ifdef XTS_CHAIN_TWEAK
+ ldr r1, [r3, #0x20+VFP_ABI_FRAME] @ chain tweak
+#endif
+.Lxts_dec_bzero: @ wipe key schedule [if any]
+ vstmia sp!, {q0-q1}
+ cmp sp, r0
+ bne .Lxts_dec_bzero
+
+ mov sp, r3
+#ifdef XTS_CHAIN_TWEAK
+ vst1.8 {q8}, [r1]
+#endif
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc} @ return
+
+.size bsaes_xts_decrypt,.-bsaes_xts_decrypt
+#endif
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
new file mode 100644
index 000000000000..4522366da759
--- /dev/null
+++ b/arch/arm/crypto/aesbs-glue.c
@@ -0,0 +1,434 @@
+/*
+ * linux/arch/arm/crypto/aesbs-glue.c - glue code for NEON bit sliced AES
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <crypto/aes.h>
+#include <crypto/ablk_helper.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+
+#include "aes_glue.h"
+
+#define BIT_SLICED_KEY_MAXSIZE (128 * (AES_MAXNR - 1) + 2 * AES_BLOCK_SIZE)
+
+struct BS_KEY {
+ struct AES_KEY rk;
+ int converted;
+ u8 __aligned(8) bs[BIT_SLICED_KEY_MAXSIZE];
+} __aligned(8);
+
+asmlinkage void bsaes_enc_key_convert(u8 out[], struct AES_KEY const *in);
+asmlinkage void bsaes_dec_key_convert(u8 out[], struct AES_KEY const *in);
+
+asmlinkage void bsaes_cbc_encrypt(u8 const in[], u8 out[], u32 bytes,
+ struct BS_KEY *key, u8 iv[]);
+
+asmlinkage void bsaes_ctr32_encrypt_blocks(u8 const in[], u8 out[], u32 blocks,
+ struct BS_KEY *key, u8 const iv[]);
+
+asmlinkage void bsaes_xts_encrypt(u8 const in[], u8 out[], u32 bytes,
+ struct BS_KEY *key, u8 tweak[]);
+
+asmlinkage void bsaes_xts_decrypt(u8 const in[], u8 out[], u32 bytes,
+ struct BS_KEY *key, u8 tweak[]);
+
+struct aesbs_cbc_ctx {
+ struct AES_KEY enc;
+ struct BS_KEY dec;
+};
+
+struct aesbs_ctr_ctx {
+ struct BS_KEY enc;
+};
+
+struct aesbs_xts_ctx {
+ struct BS_KEY enc;
+ struct BS_KEY dec;
+ struct AES_KEY twkey;
+};
+
+static int aesbs_cbc_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ int bits = key_len * 8;
+
+ if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+ ctx->dec.rk = ctx->enc;
+ private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
+ ctx->dec.converted = 0;
+ return 0;
+}
+
+static int aesbs_ctr_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct aesbs_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+ int bits = key_len * 8;
+
+ if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+ ctx->enc.converted = 0;
+ return 0;
+}
+
+static int aesbs_xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+ int bits = key_len * 4;
+
+ if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+ ctx->dec.rk = ctx->enc.rk;
+ private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
+ private_AES_set_encrypt_key(in_key + key_len / 2, bits, &ctx->twkey);
+ ctx->enc.converted = ctx->dec.converted = 0;
+ return 0;
+}
+
+static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while (walk.nbytes) {
+ u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
+ u8 *src = walk.src.virt.addr;
+
+ if (walk.dst.virt.addr == walk.src.virt.addr) {
+ u8 *iv = walk.iv;
+
+ do {
+ crypto_xor(src, iv, AES_BLOCK_SIZE);
+ AES_encrypt(src, src, &ctx->enc);
+ iv = src;
+ src += AES_BLOCK_SIZE;
+ } while (--blocks);
+ memcpy(walk.iv, iv, AES_BLOCK_SIZE);
+ } else {
+ u8 *dst = walk.dst.virt.addr;
+
+ do {
+ crypto_xor(walk.iv, src, AES_BLOCK_SIZE);
+ AES_encrypt(walk.iv, dst, &ctx->enc);
+ memcpy(walk.iv, dst, AES_BLOCK_SIZE);
+ src += AES_BLOCK_SIZE;
+ dst += AES_BLOCK_SIZE;
+ } while (--blocks);
+ }
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ return err;
+}
+
+static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+ while ((walk.nbytes / AES_BLOCK_SIZE) >= 8) {
+ kernel_neon_begin();
+ bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+ walk.nbytes, &ctx->dec, walk.iv);
+ kernel_neon_end();
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ while (walk.nbytes) {
+ u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
+ u8 *dst = walk.dst.virt.addr;
+ u8 *src = walk.src.virt.addr;
+ u8 bk[2][AES_BLOCK_SIZE];
+ u8 *iv = walk.iv;
+
+ do {
+ if (walk.dst.virt.addr == walk.src.virt.addr)
+ memcpy(bk[blocks & 1], src, AES_BLOCK_SIZE);
+
+ AES_decrypt(src, dst, &ctx->dec.rk);
+ crypto_xor(dst, iv, AES_BLOCK_SIZE);
+
+ if (walk.dst.virt.addr == walk.src.virt.addr)
+ iv = bk[blocks & 1];
+ else
+ iv = src;
+
+ dst += AES_BLOCK_SIZE;
+ src += AES_BLOCK_SIZE;
+ } while (--blocks);
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ return err;
+}
+
+static void inc_be128_ctr(__be32 ctr[], u32 addend)
+{
+ int i;
+
+ for (i = 3; i >= 0; i--, addend = 1) {
+ u32 n = be32_to_cpu(ctr[i]) + addend;
+
+ ctr[i] = cpu_to_be32(n);
+ if (n >= addend)
+ break;
+ }
+}
+
+static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct aesbs_ctr_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ u32 blocks;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+ while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
+ u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+ __be32 *ctr = (__be32 *)walk.iv;
+ u32 headroom = UINT_MAX - be32_to_cpu(ctr[3]);
+
+ /* avoid 32 bit counter overflow in the NEON code */
+ if (unlikely(headroom < blocks)) {
+ blocks = headroom + 1;
+ tail = walk.nbytes - blocks * AES_BLOCK_SIZE;
+ }
+ kernel_neon_begin();
+ bsaes_ctr32_encrypt_blocks(walk.src.virt.addr,
+ walk.dst.virt.addr, blocks,
+ &ctx->enc, walk.iv);
+ kernel_neon_end();
+ inc_be128_ctr(ctr, blocks);
+
+ nbytes -= blocks * AES_BLOCK_SIZE;
+ if (nbytes && nbytes == tail && nbytes <= AES_BLOCK_SIZE)
+ break;
+
+ err = blkcipher_walk_done(desc, &walk, tail);
+ }
+ if (walk.nbytes) {
+ u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+ u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+ u8 ks[AES_BLOCK_SIZE];
+
+ AES_encrypt(walk.iv, ks, &ctx->enc.rk);
+ if (tdst != tsrc)
+ memcpy(tdst, tsrc, nbytes);
+ crypto_xor(tdst, ks, nbytes);
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ return err;
+}
+
+static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+ /* generate the initial tweak */
+ AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
+
+ while (walk.nbytes) {
+ kernel_neon_begin();
+ bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+ walk.nbytes, &ctx->enc, walk.iv);
+ kernel_neon_end();
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ return err;
+}
+
+static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+ /* generate the initial tweak */
+ AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
+
+ while (walk.nbytes) {
+ kernel_neon_begin();
+ bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
+ walk.nbytes, &ctx->dec, walk.iv);
+ kernel_neon_end();
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ return err;
+}
+
+static struct crypto_alg aesbs_algs[] = { {
+ .cra_name = "__cbc-aes-neonbs",
+ .cra_driver_name = "__driver-cbc-aes-neonbs",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aesbs_cbc_set_key,
+ .encrypt = aesbs_cbc_encrypt,
+ .decrypt = aesbs_cbc_decrypt,
+ },
+}, {
+ .cra_name = "__ctr-aes-neonbs",
+ .cra_driver_name = "__driver-ctr-aes-neonbs",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aesbs_ctr_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aesbs_ctr_set_key,
+ .encrypt = aesbs_ctr_encrypt,
+ .decrypt = aesbs_ctr_encrypt,
+ },
+}, {
+ .cra_name = "__xts-aes-neonbs",
+ .cra_driver_name = "__driver-xts-aes-neonbs",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aesbs_xts_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aesbs_xts_set_key,
+ .encrypt = aesbs_xts_encrypt,
+ .decrypt = aesbs_xts_decrypt,
+ },
+}, {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-neonbs",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = __ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+}, {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-neonbs",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+}, {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-neonbs",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct async_helper_ctx),
+ .cra_alignmask = 7,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = ablk_init,
+ .cra_exit = ablk_exit,
+ .cra_ablkcipher = {
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ }
+} };
+
+static int __init aesbs_mod_init(void)
+{
+ if (!cpu_has_neon())
+ return -ENODEV;
+
+ return crypto_register_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+}
+
+static void __exit aesbs_mod_exit(void)
+{
+ crypto_unregister_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+}
+
+module_init(aesbs_mod_init);
+module_exit(aesbs_mod_exit);
+
+MODULE_DESCRIPTION("Bit sliced AES in CBC/CTR/XTS modes using NEON");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl
new file mode 100644
index 000000000000..f3d96d932573
--- /dev/null
+++ b/arch/arm/crypto/bsaes-armv7.pl
@@ -0,0 +1,2467 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Specific modes and adaptation for Linux kernel by Ard Biesheuvel
+# <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is
+# granted.
+# ====================================================================
+
+# Bit-sliced AES for ARM NEON
+#
+# February 2012.
+#
+# This implementation is a direct adaptation of the bsaes-x86_64 module
+# for ARM NEON, except that this module is endian-neutral [in the sense
+# that it can be compiled for either endianness] courtesy of vld1.8's
+# neutrality. The initial version doesn't implement an interface to
+# OpenSSL, only low-level primitives and unsupported entry points, just
+# enough to collect performance results, which for the Cortex-A8 core are:
+#
+# encrypt 19.5 cycles per byte processed with 128-bit key
+# decrypt 22.1 cycles per byte processed with 128-bit key
+# key conv. 440 cycles per 128-bit key/0.18 of 8x block
+#
+# Snapdragon S4 encrypts a byte in 17.6 cycles and decrypts in 19.7,
+# which is [much] worse than anticipated (for further details see
+# http://www.openssl.org/~appro/Snapdragon-S4.html).
+#
+# Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
+# manages in 20.0 cycles].
+#
+# When comparing to x86_64 results, keep in mind that the NEON unit is
+# [mostly] single-issue and thus can't [fully] benefit from
+# instruction-level parallelism. And when comparing to aes-armv4
+# results keep in mind key schedule conversion overhead (see
+# bsaes-x86_64.pl for further details)...
+#
+# <appro@openssl.org>
+
+# April-August 2013
+#
+# Add CBC, CTR and XTS subroutines, adapt for kernel use.
+#
+# <ard.biesheuvel@linaro.org>
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+my ($inp,$out,$len,$key)=("r0","r1","r2","r3");
+my @XMM=map("q$_",(0..15));
+
+{
+my ($key,$rounds,$const)=("r4","r5","r6");
+
+sub Dlo() { shift=~m|q([1]?[0-9])|?"d".($1*2):""; }
+sub Dhi() { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; }
+
+sub Sbox {
+# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b0, b1, b4, b6, b3, b7, b2, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+ &InBasisChange (@b);
+ &Inv_GF256 (@b[6,5,0,3,7,1,4,2],@t,@s);
+ &OutBasisChange (@b[7,1,4,2,6,5,0,3]);
+}
+
+sub InBasisChange {
+# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb
+my @b=@_[0..7];
+$code.=<<___;
+ veor @b[2], @b[2], @b[1]
+ veor @b[5], @b[5], @b[6]
+ veor @b[3], @b[3], @b[0]
+ veor @b[6], @b[6], @b[2]
+ veor @b[5], @b[5], @b[0]
+
+ veor @b[6], @b[6], @b[3]
+ veor @b[3], @b[3], @b[7]
+ veor @b[7], @b[7], @b[5]
+ veor @b[3], @b[3], @b[4]
+ veor @b[4], @b[4], @b[5]
+
+ veor @b[2], @b[2], @b[7]
+ veor @b[3], @b[3], @b[1]
+ veor @b[1], @b[1], @b[5]
+___
+}
+
+sub OutBasisChange {
+# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b1, b2, b4, b7, b0, b3, b5] < msb
+my @b=@_[0..7];
+$code.=<<___;
+ veor @b[0], @b[0], @b[6]
+ veor @b[1], @b[1], @b[4]
+ veor @b[4], @b[4], @b[6]
+ veor @b[2], @b[2], @b[0]
+ veor @b[6], @b[6], @b[1]
+
+ veor @b[1], @b[1], @b[5]
+ veor @b[5], @b[5], @b[3]
+ veor @b[3], @b[3], @b[7]
+ veor @b[7], @b[7], @b[5]
+ veor @b[2], @b[2], @b[5]
+
+ veor @b[4], @b[4], @b[7]
+___
+}
+
+sub InvSbox {
+# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b0, b1, b6, b4, b2, b7, b3, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+ &InvInBasisChange (@b);
+ &Inv_GF256 (@b[5,1,2,6,3,7,0,4],@t,@s);
+ &InvOutBasisChange (@b[3,7,0,4,5,1,2,6]);
+}
+
+sub InvInBasisChange { # OutBasisChange in reverse (with twist)
+my @b=@_[5,1,2,6,3,7,0,4];
+$code.=<<___
+ veor @b[1], @b[1], @b[7]
+ veor @b[4], @b[4], @b[7]
+
+ veor @b[7], @b[7], @b[5]
+ veor @b[1], @b[1], @b[3]
+ veor @b[2], @b[2], @b[5]
+ veor @b[3], @b[3], @b[7]
+
+ veor @b[6], @b[6], @b[1]
+ veor @b[2], @b[2], @b[0]
+ veor @b[5], @b[5], @b[3]
+ veor @b[4], @b[4], @b[6]
+ veor @b[0], @b[0], @b[6]
+ veor @b[1], @b[1], @b[4]
+___
+}
+
+sub InvOutBasisChange { # InBasisChange in reverse
+my @b=@_[2,5,7,3,6,1,0,4];
+$code.=<<___;
+ veor @b[1], @b[1], @b[5]
+ veor @b[2], @b[2], @b[7]
+
+ veor @b[3], @b[3], @b[1]
+ veor @b[4], @b[4], @b[5]
+ veor @b[7], @b[7], @b[5]
+ veor @b[3], @b[3], @b[4]
+ veor @b[5], @b[5], @b[0]
+ veor @b[3], @b[3], @b[7]
+ veor @b[6], @b[6], @b[2]
+ veor @b[2], @b[2], @b[1]
+ veor @b[6], @b[6], @b[3]
+
+ veor @b[3], @b[3], @b[0]
+ veor @b[5], @b[5], @b[6]
+___
+}
+
+sub Mul_GF4 {
+#;*************************************************************
+#;* Mul_GF4: Input x0-x1,y0-y1 Output x0-x1 Temp t0 (8) *
+#;*************************************************************
+my ($x0,$x1,$y0,$y1,$t0,$t1)=@_;
+$code.=<<___;
+ veor $t0, $y0, $y1
+ vand $t0, $t0, $x0
+ veor $x0, $x0, $x1
+ vand $t1, $x1, $y0
+ vand $x0, $x0, $y1
+ veor $x1, $t1, $t0
+ veor $x0, $x0, $t1
+___
+}
+
+sub Mul_GF4_N { # not used, see next subroutine
+# multiply and scale by N
+my ($x0,$x1,$y0,$y1,$t0)=@_;
+$code.=<<___;
+ veor $t0, $y0, $y1
+ vand $t0, $t0, $x0
+ veor $x0, $x0, $x1
+ vand $x1, $x1, $y0
+ vand $x0, $x0, $y1
+ veor $x1, $x1, $x0
+ veor $x0, $x0, $t0
+___
+}
+
+sub Mul_GF4_N_GF4 {
+# interleaved Mul_GF4_N and Mul_GF4
+my ($x0,$x1,$y0,$y1,$t0,
+ $x2,$x3,$y2,$y3,$t1)=@_;
+$code.=<<___;
+ veor $t0, $y0, $y1
+ veor $t1, $y2, $y3
+ vand $t0, $t0, $x0
+ vand $t1, $t1, $x2
+ veor $x0, $x0, $x1
+ veor $x2, $x2, $x3
+ vand $x1, $x1, $y0
+ vand $x3, $x3, $y2
+ vand $x0, $x0, $y1
+ vand $x2, $x2, $y3
+ veor $x1, $x1, $x0
+ veor $x2, $x2, $x3
+ veor $x0, $x0, $t0
+ veor $x3, $x3, $t1
+___
+}
+sub Mul_GF16_2 {
+my @x=@_[0..7];
+my @y=@_[8..11];
+my @t=@_[12..15];
+$code.=<<___;
+ veor @t[0], @x[0], @x[2]
+ veor @t[1], @x[1], @x[3]
+___
+ &Mul_GF4 (@x[0], @x[1], @y[0], @y[1], @t[2..3]);
+$code.=<<___;
+ veor @y[0], @y[0], @y[2]
+ veor @y[1], @y[1], @y[3]
+___
+ Mul_GF4_N_GF4 (@t[0], @t[1], @y[0], @y[1], @t[3],
+ @x[2], @x[3], @y[2], @y[3], @t[2]);
+$code.=<<___;
+ veor @x[0], @x[0], @t[0]
+ veor @x[2], @x[2], @t[0]
+ veor @x[1], @x[1], @t[1]
+ veor @x[3], @x[3], @t[1]
+
+ veor @t[0], @x[4], @x[6]
+ veor @t[1], @x[5], @x[7]
+___
+ &Mul_GF4_N_GF4 (@t[0], @t[1], @y[0], @y[1], @t[3],
+ @x[6], @x[7], @y[2], @y[3], @t[2]);
+$code.=<<___;
+ veor @y[0], @y[0], @y[2]
+ veor @y[1], @y[1], @y[3]
+___
+ &Mul_GF4 (@x[4], @x[5], @y[0], @y[1], @t[2..3]);
+$code.=<<___;
+ veor @x[4], @x[4], @t[0]
+ veor @x[6], @x[6], @t[0]
+ veor @x[5], @x[5], @t[1]
+ veor @x[7], @x[7], @t[1]
+___
+}
+sub Inv_GF256 {
+#;********************************************************************
+#;* Inv_GF256: Input x0-x7 Output x0-x7 Temp t0-t3,s0-s3 (144) *
+#;********************************************************************
+my @x=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+# direct optimizations from hardware
+$code.=<<___;
+ veor @t[3], @x[4], @x[6]
+ veor @t[2], @x[5], @x[7]
+ veor @t[1], @x[1], @x[3]
+ veor @s[1], @x[7], @x[6]
+ vmov @t[0], @t[2]
+ veor @s[0], @x[0], @x[2]
+
+ vorr @t[2], @t[2], @t[1]
+ veor @s[3], @t[3], @t[0]
+ vand @s[2], @t[3], @s[0]
+ vorr @t[3], @t[3], @s[0]
+ veor @s[0], @s[0], @t[1]
+ vand @t[0], @t[0], @t[1]
+ veor @t[1], @x[3], @x[2]
+ vand @s[3], @s[3], @s[0]
+ vand @s[1], @s[1], @t[1]
+ veor @t[1], @x[4], @x[5]
+ veor @s[0], @x[1], @x[0]
+ veor @t[3], @t[3], @s[1]
+ veor @t[2], @t[2], @s[1]
+ vand @s[1], @t[1], @s[0]
+ vorr @t[1], @t[1], @s[0]
+ veor @t[3], @t[3], @s[3]
+ veor @t[0], @t[0], @s[1]
+ veor @t[2], @t[2], @s[2]
+ veor @t[1], @t[1], @s[3]
+ veor @t[0], @t[0], @s[2]
+ vand @s[0], @x[7], @x[3]
+ veor @t[1], @t[1], @s[2]
+ vand @s[1], @x[6], @x[2]
+ vand @s[2], @x[5], @x[1]
+ vorr @s[3], @x[4], @x[0]
+ veor @t[3], @t[3], @s[0]
+ veor @t[1], @t[1], @s[2]
+ veor @t[0], @t[0], @s[3]
+ veor @t[2], @t[2], @s[1]
+
+ @ Inv_GF16 \t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3
+
+ @ new smaller inversion
+
+ vand @s[2], @t[3], @t[1]
+ vmov @s[0], @t[0]
+
+ veor @s[1], @t[2], @s[2]
+ veor @s[3], @t[0], @s[2]
+ veor @s[2], @t[0], @s[2] @ @s[2]=@s[3]
+
+ vbsl @s[1], @t[1], @t[0]
+ vbsl @s[3], @t[3], @t[2]
+ veor @t[3], @t[3], @t[2]
+
+ vbsl @s[0], @s[1], @s[2]
+ vbsl @t[0], @s[2], @s[1]
+
+ vand @s[2], @s[0], @s[3]
+ veor @t[1], @t[1], @t[0]
+
+ veor @s[2], @s[2], @t[3]
+___
+# output in s3, s2, s1, t1
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \t2, \t3, \t0, \t1, \s0, \s1, \s2, \s3
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3
+ &Mul_GF16_2(@x,@s[3,2,1],@t[1],@s[0],@t[0,2,3]);
+
+### output msb > [x3,x2,x1,x0,x7,x6,x5,x4] < lsb
+}
+
+# AES linear components
+
+sub ShiftRows {
+my @x=@_[0..7];
+my @t=@_[8..11];
+my $mask=pop;
+$code.=<<___;
+ vldmia $key!, {@t[0]-@t[3]}
+ veor @t[0], @t[0], @x[0]
+ veor @t[1], @t[1], @x[1]
+ vtbl.8 `&Dlo(@x[0])`, {@t[0]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[0])`, {@t[0]}, `&Dhi($mask)`
+ vldmia $key!, {@t[0]}
+ veor @t[2], @t[2], @x[2]
+ vtbl.8 `&Dlo(@x[1])`, {@t[1]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[1])`, {@t[1]}, `&Dhi($mask)`
+ vldmia $key!, {@t[1]}
+ veor @t[3], @t[3], @x[3]
+ vtbl.8 `&Dlo(@x[2])`, {@t[2]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[2])`, {@t[2]}, `&Dhi($mask)`
+ vldmia $key!, {@t[2]}
+ vtbl.8 `&Dlo(@x[3])`, {@t[3]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[3])`, {@t[3]}, `&Dhi($mask)`
+ vldmia $key!, {@t[3]}
+ veor @t[0], @t[0], @x[4]
+ veor @t[1], @t[1], @x[5]
+ vtbl.8 `&Dlo(@x[4])`, {@t[0]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[4])`, {@t[0]}, `&Dhi($mask)`
+ veor @t[2], @t[2], @x[6]
+ vtbl.8 `&Dlo(@x[5])`, {@t[1]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[5])`, {@t[1]}, `&Dhi($mask)`
+ veor @t[3], @t[3], @x[7]
+ vtbl.8 `&Dlo(@x[6])`, {@t[2]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[6])`, {@t[2]}, `&Dhi($mask)`
+ vtbl.8 `&Dlo(@x[7])`, {@t[3]}, `&Dlo($mask)`
+ vtbl.8 `&Dhi(@x[7])`, {@t[3]}, `&Dhi($mask)`
+___
+}
+
+sub MixColumns {
+# modified to emit output in order suitable for feeding back to aesenc[last]
+my @x=@_[0..7];
+my @t=@_[8..15];
+my $inv=@_[16]; # optional
+$code.=<<___;
+ vext.8 @t[0], @x[0], @x[0], #12 @ x0 <<< 32
+ vext.8 @t[1], @x[1], @x[1], #12
+ veor @x[0], @x[0], @t[0] @ x0 ^ (x0 <<< 32)
+ vext.8 @t[2], @x[2], @x[2], #12
+ veor @x[1], @x[1], @t[1]
+ vext.8 @t[3], @x[3], @x[3], #12
+ veor @x[2], @x[2], @t[2]
+ vext.8 @t[4], @x[4], @x[4], #12
+ veor @x[3], @x[3], @t[3]
+ vext.8 @t[5], @x[5], @x[5], #12
+ veor @x[4], @x[4], @t[4]
+ vext.8 @t[6], @x[6], @x[6], #12
+ veor @x[5], @x[5], @t[5]
+ vext.8 @t[7], @x[7], @x[7], #12
+ veor @x[6], @x[6], @t[6]
+
+ veor @t[1], @t[1], @x[0]
+ veor @x[7], @x[7], @t[7]
+ vext.8 @x[0], @x[0], @x[0], #8 @ (x0 ^ (x0 <<< 32)) <<< 64)
+ veor @t[2], @t[2], @x[1]
+ veor @t[0], @t[0], @x[7]
+ veor @t[1], @t[1], @x[7]
+ vext.8 @x[1], @x[1], @x[1], #8
+ veor @t[5], @t[5], @x[4]
+ veor @x[0], @x[0], @t[0]
+ veor @t[6], @t[6], @x[5]
+ veor @x[1], @x[1], @t[1]
+ vext.8 @t[0], @x[4], @x[4], #8
+ veor @t[4], @t[4], @x[3]
+ vext.8 @t[1], @x[5], @x[5], #8
+ veor @t[7], @t[7], @x[6]
+ vext.8 @x[4], @x[3], @x[3], #8
+ veor @t[3], @t[3], @x[2]
+ vext.8 @x[5], @x[7], @x[7], #8
+ veor @t[4], @t[4], @x[7]
+ vext.8 @x[3], @x[6], @x[6], #8
+ veor @t[3], @t[3], @x[7]
+ vext.8 @x[6], @x[2], @x[2], #8
+ veor @x[7], @t[1], @t[5]
+___
+$code.=<<___ if (!$inv);
+ veor @x[2], @t[0], @t[4]
+ veor @x[4], @x[4], @t[3]
+ veor @x[5], @x[5], @t[7]
+ veor @x[3], @x[3], @t[6]
+ @ vmov @x[2], @t[0]
+ veor @x[6], @x[6], @t[2]
+ @ vmov @x[7], @t[1]
+___
+$code.=<<___ if ($inv);
+ veor @t[3], @t[3], @x[4]
+ veor @x[5], @x[5], @t[7]
+ veor @x[2], @x[3], @t[6]
+ veor @x[3], @t[0], @t[4]
+ veor @x[4], @x[6], @t[2]
+ vmov @x[6], @t[3]
+ @ vmov @x[7], @t[1]
+___
+}
+
+sub InvMixColumns_orig {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+$code.=<<___;
+ @ multiplication by 0x0e
+ vext.8 @t[7], @x[7], @x[7], #12
+ vmov @t[2], @x[2]
+ veor @x[2], @x[2], @x[5] @ 2 5
+ veor @x[7], @x[7], @x[5] @ 7 5
+ vext.8 @t[0], @x[0], @x[0], #12
+ vmov @t[5], @x[5]
+ veor @x[5], @x[5], @x[0] @ 5 0 [1]
+ veor @x[0], @x[0], @x[1] @ 0 1
+ vext.8 @t[1], @x[1], @x[1], #12
+ veor @x[1], @x[1], @x[2] @ 1 25
+ veor @x[0], @x[0], @x[6] @ 01 6 [2]
+ vext.8 @t[3], @x[3], @x[3], #12
+ veor @x[1], @x[1], @x[3] @ 125 3 [4]
+ veor @x[2], @x[2], @x[0] @ 25 016 [3]
+ veor @x[3], @x[3], @x[7] @ 3 75
+ veor @x[7], @x[7], @x[6] @ 75 6 [0]
+ vext.8 @t[6], @x[6], @x[6], #12
+ vmov @t[4], @x[4]
+ veor @x[6], @x[6], @x[4] @ 6 4
+ veor @x[4], @x[4], @x[3] @ 4 375 [6]
+ veor @x[3], @x[3], @x[7] @ 375 756=36
+ veor @x[6], @x[6], @t[5] @ 64 5 [7]
+ veor @x[3], @x[3], @t[2] @ 36 2
+ vext.8 @t[5], @t[5], @t[5], #12
+ veor @x[3], @x[3], @t[4] @ 362 4 [5]
+___
+ my @y = @x[7,5,0,2,1,3,4,6];
+$code.=<<___;
+ @ multiplication by 0x0b
+ veor @y[1], @y[1], @y[0]
+ veor @y[0], @y[0], @t[0]
+ vext.8 @t[2], @t[2], @t[2], #12
+ veor @y[1], @y[1], @t[1]
+ veor @y[0], @y[0], @t[5]
+ vext.8 @t[4], @t[4], @t[4], #12
+ veor @y[1], @y[1], @t[6]
+ veor @y[0], @y[0], @t[7]
+ veor @t[7], @t[7], @t[6] @ clobber t[7]
+
+ veor @y[3], @y[3], @t[0]
+ veor @y[1], @y[1], @y[0]
+ vext.8 @t[0], @t[0], @t[0], #12
+ veor @y[2], @y[2], @t[1]
+ veor @y[4], @y[4], @t[1]
+ vext.8 @t[1], @t[1], @t[1], #12
+ veor @y[2], @y[2], @t[2]
+ veor @y[3], @y[3], @t[2]
+ veor @y[5], @y[5], @t[2]
+ veor @y[2], @y[2], @t[7]
+ vext.8 @t[2], @t[2], @t[2], #12
+ veor @y[3], @y[3], @t[3]
+ veor @y[6], @y[6], @t[3]
+ veor @y[4], @y[4], @t[3]
+ veor @y[7], @y[7], @t[4]
+ vext.8 @t[3], @t[3], @t[3], #12
+ veor @y[5], @y[5], @t[4]
+ veor @y[7], @y[7], @t[7]
+ veor @t[7], @t[7], @t[5] @ clobber t[7] even more
+ veor @y[3], @y[3], @t[5]
+ veor @y[4], @y[4], @t[4]
+
+ veor @y[5], @y[5], @t[7]
+ vext.8 @t[4], @t[4], @t[4], #12
+ veor @y[6], @y[6], @t[7]
+ veor @y[4], @y[4], @t[7]
+
+ veor @t[7], @t[7], @t[5]
+ vext.8 @t[5], @t[5], @t[5], #12
+
+ @ multiplication by 0x0d
+ veor @y[4], @y[4], @y[7]
+ veor @t[7], @t[7], @t[6] @ restore t[7]
+ veor @y[7], @y[7], @t[4]
+ vext.8 @t[6], @t[6], @t[6], #12
+ veor @y[2], @y[2], @t[0]
+ veor @y[7], @y[7], @t[5]
+ vext.8 @t[7], @t[7], @t[7], #12
+ veor @y[2], @y[2], @t[2]
+
+ veor @y[3], @y[3], @y[1]
+ veor @y[1], @y[1], @t[1]
+ veor @y[0], @y[0], @t[0]
+ veor @y[3], @y[3], @t[0]
+ veor @y[1], @y[1], @t[5]
+ veor @y[0], @y[0], @t[5]
+ vext.8 @t[0], @t[0], @t[0], #12
+ veor @y[1], @y[1], @t[7]
+ veor @y[0], @y[0], @t[6]
+ veor @y[3], @y[3], @y[1]
+ veor @y[4], @y[4], @t[1]
+ vext.8 @t[1], @t[1], @t[1], #12
+
+ veor @y[7], @y[7], @t[7]
+ veor @y[4], @y[4], @t[2]
+ veor @y[5], @y[5], @t[2]
+ veor @y[2], @y[2], @t[6]
+ veor @t[6], @t[6], @t[3] @ clobber t[6]
+ vext.8 @t[2], @t[2], @t[2], #12
+ veor @y[4], @y[4], @y[7]
+ veor @y[3], @y[3], @t[6]
+
+ veor @y[6], @y[6], @t[6]
+ veor @y[5], @y[5], @t[5]
+ vext.8 @t[5], @t[5], @t[5], #12
+ veor @y[6], @y[6], @t[4]
+ vext.8 @t[4], @t[4], @t[4], #12
+ veor @y[5], @y[5], @t[6]
+ veor @y[6], @y[6], @t[7]
+ vext.8 @t[7], @t[7], @t[7], #12
+ veor @t[6], @t[6], @t[3] @ restore t[6]
+ vext.8 @t[3], @t[3], @t[3], #12
+
+ @ multiplication by 0x09
+ veor @y[4], @y[4], @y[1]
+ veor @t[1], @t[1], @y[1] @ t[1]=y[1]
+ veor @t[0], @t[0], @t[5] @ clobber t[0]
+ vext.8 @t[6], @t[6], @t[6], #12
+ veor @t[1], @t[1], @t[5]
+ veor @y[3], @y[3], @t[0]
+ veor @t[0], @t[0], @y[0] @ t[0]=y[0]
+ veor @t[1], @t[1], @t[6]
+ veor @t[6], @t[6], @t[7] @ clobber t[6]
+ veor @y[4], @y[4], @t[1]
+ veor @y[7], @y[7], @t[4]
+ veor @y[6], @y[6], @t[3]
+ veor @y[5], @y[5], @t[2]
+ veor @t[4], @t[4], @y[4] @ t[4]=y[4]
+ veor @t[3], @t[3], @y[3] @ t[3]=y[3]
+ veor @t[5], @t[5], @y[5] @ t[5]=y[5]
+ veor @t[2], @t[2], @y[2] @ t[2]=y[2]
+ veor @t[3], @t[3], @t[7]
+ veor @XMM[5], @t[5], @t[6]
+ veor @XMM[6], @t[6], @y[6] @ t[6]=y[6]
+ veor @XMM[2], @t[2], @t[6]
+ veor @XMM[7], @t[7], @y[7] @ t[7]=y[7]
+
+ vmov @XMM[0], @t[0]
+ vmov @XMM[1], @t[1]
+ @ vmov @XMM[2], @t[2]
+ vmov @XMM[3], @t[3]
+ vmov @XMM[4], @t[4]
+ @ vmov @XMM[5], @t[5]
+ @ vmov @XMM[6], @t[6]
+ @ vmov @XMM[7], @t[7]
+___
+}
+
+sub InvMixColumns {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+# Thanks to Jussi Kivilinna for providing a pointer to
+#
+# | 0e 0b 0d 09 | | 02 03 01 01 | | 05 00 04 00 |
+# | 09 0e 0b 0d | = | 01 02 03 01 | x | 00 05 00 04 |
+# | 0d 09 0e 0b | | 01 01 02 03 | | 04 00 05 00 |
+# | 0b 0d 09 0e | | 03 01 01 02 | | 00 04 00 05 |
+
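The factorization quoted above can be sanity-checked with a small standalone program outside the kernel tree. The sketch below is only a verification aid and not part of this patch: gmul() is a generic textbook GF(2^8) multiply and the three tables are copied from the matrices in the comment. It multiplies the two right-hand circulant matrices over GF(2^8) and compares the product against the InvMixColumns matrix, which is the identity that lets the code below premultiply by the 0x05-0x00-0x04-0x00 pattern and then reuse the forward MixColumns logic.

/* invmix_check.c - standalone sketch, not part of this patch */
#include <stdint.h>
#include <stdio.h>

/* generic GF(2^8) multiply, AES polynomial 0x11b */
static uint8_t gmul(uint8_t a, uint8_t b)
{
	uint8_t p = 0;

	while (b) {
		if (b & 1)
			p ^= a;
		a = (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1b : 0x00));
		b >>= 1;
	}
	return p;
}

int main(void)
{
	/* matrices as quoted in the comment above */
	static const uint8_t inv[4][4] = {
		{ 0x0e, 0x0b, 0x0d, 0x09 }, { 0x09, 0x0e, 0x0b, 0x0d },
		{ 0x0d, 0x09, 0x0e, 0x0b }, { 0x0b, 0x0d, 0x09, 0x0e },
	};
	static const uint8_t mix[4][4] = {
		{ 0x02, 0x03, 0x01, 0x01 }, { 0x01, 0x02, 0x03, 0x01 },
		{ 0x01, 0x01, 0x02, 0x03 }, { 0x03, 0x01, 0x01, 0x02 },
	};
	static const uint8_t pre[4][4] = {
		{ 0x05, 0x00, 0x04, 0x00 }, { 0x00, 0x05, 0x00, 0x04 },
		{ 0x04, 0x00, 0x05, 0x00 }, { 0x00, 0x04, 0x00, 0x05 },
	};
	int i, j, k;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 4; j++) {
			uint8_t acc = 0;

			for (k = 0; k < 4; k++)
				acc ^= gmul(mix[i][k], pre[k][j]);
			if (acc != inv[i][j]) {
				printf("mismatch at row %d, col %d\n", i, j);
				return 1;
			}
		}
	}
	printf("verified: InvMixColumns == MixColumns x (05 00 04 00) over GF(2^8)\n");
	return 0;
}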
+$code.=<<___;
+ @ multiplication by 0x05-0x00-0x04-0x00
+ vext.8 @t[0], @x[0], @x[0], #8
+ vext.8 @t[6], @x[6], @x[6], #8
+ vext.8 @t[7], @x[7], @x[7], #8
+ veor @t[0], @t[0], @x[0]
+ vext.8 @t[1], @x[1], @x[1], #8
+ veor @t[6], @t[6], @x[6]
+ vext.8 @t[2], @x[2], @x[2], #8
+ veor @t[7], @t[7], @x[7]
+ vext.8 @t[3], @x[3], @x[3], #8
+ veor @t[1], @t[1], @x[1]
+ vext.8 @t[4], @x[4], @x[4], #8
+ veor @t[2], @t[2], @x[2]
+ vext.8 @t[5], @x[5], @x[5], #8
+ veor @t[3], @t[3], @x[3]
+ veor @t[4], @t[4], @x[4]
+ veor @t[5], @t[5], @x[5]
+
+ veor @x[0], @x[0], @t[6]
+ veor @x[1], @x[1], @t[6]
+ veor @x[2], @x[2], @t[0]
+ veor @x[4], @x[4], @t[2]
+ veor @x[3], @x[3], @t[1]
+ veor @x[1], @x[1], @t[7]
+ veor @x[2], @x[2], @t[7]
+ veor @x[4], @x[4], @t[6]
+ veor @x[5], @x[5], @t[3]
+ veor @x[3], @x[3], @t[6]
+ veor @x[6], @x[6], @t[4]
+ veor @x[4], @x[4], @t[7]
+ veor @x[5], @x[5], @t[7]
+ veor @x[7], @x[7], @t[5]
+___
+ &MixColumns (@x,@t,1); # flipped 2<->3 and 4<->6
+}
+
+sub swapmove {
+my ($a,$b,$n,$mask,$t)=@_;
+$code.=<<___;
+ vshr.u64 $t, $b, #$n
+ veor $t, $t, $a
+ vand $t, $t, $mask
+ veor $a, $a, $t
+ vshl.u64 $t, $t, #$n
+ veor $b, $b, $t
+___
+}
+sub swapmove2x {
+my ($a0,$b0,$a1,$b1,$n,$mask,$t0,$t1)=@_;
+$code.=<<___;
+ vshr.u64 $t0, $b0, #$n
+ vshr.u64 $t1, $b1, #$n
+ veor $t0, $t0, $a0
+ veor $t1, $t1, $a1
+ vand $t0, $t0, $mask
+ vand $t1, $t1, $mask
+ veor $a0, $a0, $t0
+ vshl.u64 $t0, $t0, #$n
+ veor $a1, $a1, $t1
+ vshl.u64 $t1, $t1, #$n
+ veor $b0, $b0, $t0
+ veor $b1, $b1, $t1
+___
+}
+
+sub bitslice {
+my @x=reverse(@_[0..7]);
+my ($t0,$t1,$t2,$t3)=@_[8..11];
+$code.=<<___;
+ vmov.i8 $t0,#0x55 @ compose .LBS0
+ vmov.i8 $t1,#0x33 @ compose .LBS1
+___
+ &swapmove2x(@x[0,1,2,3],1,$t0,$t2,$t3);
+ &swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+$code.=<<___;
+ vmov.i8 $t0,#0x0f @ compose .LBS2
+___
+ &swapmove2x(@x[0,2,1,3],2,$t1,$t2,$t3);
+ &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+
+ &swapmove2x(@x[0,4,1,5],4,$t0,$t2,$t3);
+ &swapmove2x(@x[2,6,3,7],4,$t0,$t2,$t3);
+}
+
+$code.=<<___;
+#ifndef __KERNEL__
+# include "arm_arch.h"
+
+# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
+# define VFP_ABI_POP vldmia sp!,{d8-d15}
+# define VFP_ABI_FRAME 0x40
+#else
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+# define VFP_ABI_FRAME 0
+# define BSAES_ASM_EXTENDED_KEY
+# define XTS_CHAIN_TWEAK
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+#endif
+
+#ifdef __thumb__
+# define adrl adr
+#endif
+
+#if __ARM_ARCH__>=7
+.text
+.syntax unified @ ARMv7-capable assembler is expected to handle this
+#ifdef __thumb2__
+.thumb
+#else
+.code 32
+#endif
+
+.fpu neon
+
+.type _bsaes_decrypt8,%function
+.align 4
+_bsaes_decrypt8:
+ adr $const,_bsaes_decrypt8
+ vldmia $key!, {@XMM[9]} @ round 0 key
+ add $const,$const,#.LM0ISR-_bsaes_decrypt8
+
+ vldmia $const!, {@XMM[8]} @ .LM0ISR
+ veor @XMM[10], @XMM[0], @XMM[9] @ xor with round0 key
+ veor @XMM[11], @XMM[1], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+ veor @XMM[12], @XMM[2], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+ veor @XMM[13], @XMM[3], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
+ veor @XMM[14], @XMM[4], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
+ veor @XMM[15], @XMM[5], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
+ veor @XMM[10], @XMM[6], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
+ veor @XMM[11], @XMM[7], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+ vtbl.8 `&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+___
+ &bitslice (@XMM[0..7, 8..11]);
+$code.=<<___;
+ sub $rounds,$rounds,#1
+ b .Ldec_sbox
+.align 4
+.Ldec_loop:
+___
+ &ShiftRows (@XMM[0..7, 8..12]);
+$code.=".Ldec_sbox:\n";
+ &InvSbox (@XMM[0..7, 8..15]);
+$code.=<<___;
+ subs $rounds,$rounds,#1
+ bcc .Ldec_done
+___
+ &InvMixColumns (@XMM[0,1,6,4,2,7,3,5, 8..15]);
+$code.=<<___;
+ vldmia $const, {@XMM[12]} @ .LISR
+ ite eq @ Thumb2 thing, sanity check in ARM
+ addeq $const,$const,#0x10
+ bne .Ldec_loop
+ vldmia $const, {@XMM[12]} @ .LISRM0
+ b .Ldec_loop
+.align 4
+.Ldec_done:
+___
+ &bitslice (@XMM[0,1,6,4,2,7,3,5, 8..11]);
+$code.=<<___;
+ vldmia $key, {@XMM[8]} @ last round key
+ veor @XMM[6], @XMM[6], @XMM[8]
+ veor @XMM[4], @XMM[4], @XMM[8]
+ veor @XMM[2], @XMM[2], @XMM[8]
+ veor @XMM[7], @XMM[7], @XMM[8]
+ veor @XMM[3], @XMM[3], @XMM[8]
+ veor @XMM[5], @XMM[5], @XMM[8]
+ veor @XMM[0], @XMM[0], @XMM[8]
+ veor @XMM[1], @XMM[1], @XMM[8]
+ bx lr
+.size _bsaes_decrypt8,.-_bsaes_decrypt8
+
+.type _bsaes_const,%object
+.align 6
+_bsaes_const:
+.LM0ISR: @ InvShiftRows constants
+ .quad 0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISR:
+ .quad 0x0504070602010003, 0x0f0e0d0c080b0a09
+.LISRM0:
+ .quad 0x01040b0e0205080f, 0x0306090c00070a0d
+.LM0SR: @ ShiftRows constants
+ .quad 0x0a0e02060f03070b, 0x0004080c05090d01
+.LSR:
+ .quad 0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+ .quad 0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0:
+ .quad 0x02060a0e03070b0f, 0x0004080c0105090d
+.LREVM0SR:
+ .quad 0x090d01050c000408, 0x03070b0f060a0e02
+.asciz "Bit-sliced AES for NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align 6
+.size _bsaes_const,.-_bsaes_const
+
+.type _bsaes_encrypt8,%function
+.align 4
+_bsaes_encrypt8:
+ adr $const,_bsaes_encrypt8
+ vldmia $key!, {@XMM[9]} @ round 0 key
+ sub $const,$const,#_bsaes_encrypt8-.LM0SR
+
+ vldmia $const!, {@XMM[8]} @ .LM0SR
+_bsaes_encrypt8_alt:
+ veor @XMM[10], @XMM[0], @XMM[9] @ xor with round0 key
+ veor @XMM[11], @XMM[1], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+ veor @XMM[12], @XMM[2], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+ veor @XMM[13], @XMM[3], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
+ veor @XMM[14], @XMM[4], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
+ veor @XMM[15], @XMM[5], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
+ veor @XMM[10], @XMM[6], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
+ veor @XMM[11], @XMM[7], @XMM[9]
+ vtbl.8 `&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+ vtbl.8 `&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+ vtbl.8 `&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+_bsaes_encrypt8_bitslice:
+___
+ &bitslice (@XMM[0..7, 8..11]);
+$code.=<<___;
+ sub $rounds,$rounds,#1
+ b .Lenc_sbox
+.align 4
+.Lenc_loop:
+___
+ &ShiftRows (@XMM[0..7, 8..12]);
+$code.=".Lenc_sbox:\n";
+ &Sbox (@XMM[0..7, 8..15]);
+$code.=<<___;
+ subs $rounds,$rounds,#1
+ bcc .Lenc_done
+___
+ &MixColumns (@XMM[0,1,4,6,3,7,2,5, 8..15]);
+$code.=<<___;
+ vldmia $const, {@XMM[12]} @ .LSR
+	ite	eq			@ Thumb2 thing, sanity check in ARM
+ addeq $const,$const,#0x10
+ bne .Lenc_loop
+ vldmia $const, {@XMM[12]} @ .LSRM0
+ b .Lenc_loop
+.align 4
+.Lenc_done:
+___
+ # output in lsb > [t0, t1, t4, t6, t3, t7, t2, t5] < msb
+ &bitslice (@XMM[0,1,4,6,3,7,2,5, 8..11]);
+$code.=<<___;
+ vldmia $key, {@XMM[8]} @ last round key
+ veor @XMM[4], @XMM[4], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[8]
+ veor @XMM[3], @XMM[3], @XMM[8]
+ veor @XMM[7], @XMM[7], @XMM[8]
+ veor @XMM[2], @XMM[2], @XMM[8]
+ veor @XMM[5], @XMM[5], @XMM[8]
+ veor @XMM[0], @XMM[0], @XMM[8]
+ veor @XMM[1], @XMM[1], @XMM[8]
+ bx lr
+.size _bsaes_encrypt8,.-_bsaes_encrypt8
+___
+}
+{
+my ($out,$inp,$rounds,$const)=("r12","r4","r5","r6");
+
+sub bitslice_key {
+my @x=reverse(@_[0..7]);
+my ($bs0,$bs1,$bs2,$t2,$t3)=@_[8..12];
+
+ &swapmove (@x[0,1],1,$bs0,$t2,$t3);
+$code.=<<___;
+ @ &swapmove(@x[2,3],1,$t0,$t2,$t3);
+ vmov @x[2], @x[0]
+ vmov @x[3], @x[1]
+___
+ #&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+
+ &swapmove2x (@x[0,2,1,3],2,$bs1,$t2,$t3);
+$code.=<<___;
+ @ &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+ vmov @x[4], @x[0]
+ vmov @x[6], @x[2]
+ vmov @x[5], @x[1]
+ vmov @x[7], @x[3]
+___
+ &swapmove2x (@x[0,4,1,5],4,$bs2,$t2,$t3);
+ &swapmove2x (@x[2,6,3,7],4,$bs2,$t2,$t3);
+}
+
+$code.=<<___;
+.type _bsaes_key_convert,%function
+.align 4
+_bsaes_key_convert:
+ adr $const,_bsaes_key_convert
+ vld1.8 {@XMM[7]}, [$inp]! @ load round 0 key
+ sub $const,$const,#_bsaes_key_convert-.LM0
+ vld1.8 {@XMM[15]}, [$inp]! @ load round 1 key
+
+ vmov.i8 @XMM[8], #0x01 @ bit masks
+ vmov.i8 @XMM[9], #0x02
+ vmov.i8 @XMM[10], #0x04
+ vmov.i8 @XMM[11], #0x08
+ vmov.i8 @XMM[12], #0x10
+ vmov.i8 @XMM[13], #0x20
+ vldmia $const, {@XMM[14]} @ .LM0
+
+#ifdef __ARMEL__
+ vrev32.8 @XMM[7], @XMM[7]
+ vrev32.8 @XMM[15], @XMM[15]
+#endif
+ sub $rounds,$rounds,#1
+ vstmia $out!, {@XMM[7]} @ save round 0 key
+ b .Lkey_loop
+
+.align 4
+.Lkey_loop:
+ vtbl.8 `&Dlo(@XMM[7])`,{@XMM[15]},`&Dlo(@XMM[14])`
+ vtbl.8 `&Dhi(@XMM[7])`,{@XMM[15]},`&Dhi(@XMM[14])`
+ vmov.i8 @XMM[6], #0x40
+ vmov.i8 @XMM[15], #0x80
+
+ vtst.8 @XMM[0], @XMM[7], @XMM[8]
+ vtst.8 @XMM[1], @XMM[7], @XMM[9]
+ vtst.8 @XMM[2], @XMM[7], @XMM[10]
+ vtst.8 @XMM[3], @XMM[7], @XMM[11]
+ vtst.8 @XMM[4], @XMM[7], @XMM[12]
+ vtst.8 @XMM[5], @XMM[7], @XMM[13]
+ vtst.8 @XMM[6], @XMM[7], @XMM[6]
+ vtst.8 @XMM[7], @XMM[7], @XMM[15]
+ vld1.8 {@XMM[15]}, [$inp]! @ load next round key
+ vmvn @XMM[0], @XMM[0] @ "pnot"
+ vmvn @XMM[1], @XMM[1]
+ vmvn @XMM[5], @XMM[5]
+ vmvn @XMM[6], @XMM[6]
+#ifdef __ARMEL__
+ vrev32.8 @XMM[15], @XMM[15]
+#endif
+ subs $rounds,$rounds,#1
+ vstmia $out!,{@XMM[0]-@XMM[7]} @ write bit-sliced round key
+ bne .Lkey_loop
+
+ vmov.i8 @XMM[7],#0x63 @ compose .L63
+ @ don't save last round key
+ bx lr
+.size _bsaes_key_convert,.-_bsaes_key_convert
+___
+}
+
+if (0) {			# the following four functions are an unsupported interface
+ # used for benchmarking...
+$code.=<<___;
+.globl bsaes_enc_key_convert
+.type bsaes_enc_key_convert,%function
+.align 4
+bsaes_enc_key_convert:
+ stmdb sp!,{r4-r6,lr}
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+
+ ldr r5,[$inp,#240] @ pass rounds
+ mov r4,$inp @ pass key
+ mov r12,$out @ pass key schedule
+ bl _bsaes_key_convert
+ veor @XMM[7],@XMM[7],@XMM[15] @ fix up last round key
+ vstmia r12, {@XMM[7]} @ save last round key
+
+ vldmia sp!,{d8-d15}
+ ldmia sp!,{r4-r6,pc}
+.size bsaes_enc_key_convert,.-bsaes_enc_key_convert
+
+.globl bsaes_encrypt_128
+.type bsaes_encrypt_128,%function
+.align 4
+bsaes_encrypt_128:
+ stmdb sp!,{r4-r6,lr}
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+.Lenc128_loop:
+ vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input
+ vld1.8 {@XMM[2]-@XMM[3]}, [$inp]!
+ mov r4,$key @ pass the key
+ vld1.8 {@XMM[4]-@XMM[5]}, [$inp]!
+ mov r5,#10 @ pass rounds
+ vld1.8 {@XMM[6]-@XMM[7]}, [$inp]!
+
+ bl _bsaes_encrypt8
+
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ vst1.8 {@XMM[4]}, [$out]!
+ vst1.8 {@XMM[6]}, [$out]!
+ vst1.8 {@XMM[3]}, [$out]!
+ vst1.8 {@XMM[7]}, [$out]!
+ vst1.8 {@XMM[2]}, [$out]!
+ subs $len,$len,#0x80
+ vst1.8 {@XMM[5]}, [$out]!
+ bhi .Lenc128_loop
+
+ vldmia sp!,{d8-d15}
+ ldmia sp!,{r4-r6,pc}
+.size bsaes_encrypt_128,.-bsaes_encrypt_128
+
+.globl bsaes_dec_key_convert
+.type bsaes_dec_key_convert,%function
+.align 4
+bsaes_dec_key_convert:
+ stmdb sp!,{r4-r6,lr}
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+
+ ldr r5,[$inp,#240] @ pass rounds
+ mov r4,$inp @ pass key
+ mov r12,$out @ pass key schedule
+ bl _bsaes_key_convert
+ vldmia $out, {@XMM[6]}
+ vstmia r12, {@XMM[15]} @ save last round key
+ veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
+ vstmia $out, {@XMM[7]}
+
+ vldmia sp!,{d8-d15}
+ ldmia sp!,{r4-r6,pc}
+.size bsaes_dec_key_convert,.-bsaes_dec_key_convert
+
+.globl bsaes_decrypt_128
+.type bsaes_decrypt_128,%function
+.align 4
+bsaes_decrypt_128:
+ stmdb sp!,{r4-r6,lr}
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+.Ldec128_loop:
+ vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input
+ vld1.8 {@XMM[2]-@XMM[3]}, [$inp]!
+ mov r4,$key @ pass the key
+ vld1.8 {@XMM[4]-@XMM[5]}, [$inp]!
+ mov r5,#10 @ pass rounds
+ vld1.8 {@XMM[6]-@XMM[7]}, [$inp]!
+
+ bl _bsaes_decrypt8
+
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ vst1.8 {@XMM[6]}, [$out]!
+ vst1.8 {@XMM[4]}, [$out]!
+ vst1.8 {@XMM[2]}, [$out]!
+ vst1.8 {@XMM[7]}, [$out]!
+ vst1.8 {@XMM[3]}, [$out]!
+ subs $len,$len,#0x80
+ vst1.8 {@XMM[5]}, [$out]!
+ bhi .Ldec128_loop
+
+ vldmia sp!,{d8-d15}
+ ldmia sp!,{r4-r6,pc}
+.size bsaes_decrypt_128,.-bsaes_decrypt_128
+___
+}
+{
+my ($inp,$out,$len,$key, $ivp,$fp,$rounds)=map("r$_",(0..3,8..10));
+my ($keysched)=("sp");
+
+$code.=<<___;
+.extern AES_cbc_encrypt
+.extern AES_decrypt
+
+.global bsaes_cbc_encrypt
+.type bsaes_cbc_encrypt,%function
+.align 5
+bsaes_cbc_encrypt:
+#ifndef __KERNEL__
+ cmp $len, #128
+#ifndef __thumb__
+ blo AES_cbc_encrypt
+#else
+ bhs 1f
+ b AES_cbc_encrypt
+1:
+#endif
+#endif
+
+ @ it is up to the caller to make sure we are called with enc == 0
+
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr}
+ VFP_ABI_PUSH
+ ldr $ivp, [ip] @ IV is 1st arg on the stack
+ mov $len, $len, lsr#4 @ len in 16 byte blocks
+ sub sp, #0x10 @ scratch space to carry over the IV
+ mov $fp, sp @ save sp
+
+ ldr $rounds, [$key, #240] @ get # of rounds
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key
+	add	r12, #`128-32`	@ size of bit-sliced key schedule
+
+ @ populate the key schedule
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ mov sp, r12 @ sp is $keysched
+ bl _bsaes_key_convert
+ vldmia $keysched, {@XMM[6]}
+ vstmia r12, {@XMM[15]} @ save last round key
+ veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
+ vstmia $keysched, {@XMM[7]}
+#else
+ ldr r12, [$key, #244]
+ eors r12, #1
+ beq 0f
+
+ @ populate the key schedule
+ str r12, [$key, #244]
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ add r12, $key, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ add r4, $key, #248
+ vldmia r4, {@XMM[6]}
+ vstmia r12, {@XMM[15]} @ save last round key
+ veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
+ vstmia r4, {@XMM[7]}
+
+.align 2
+0:
+#endif
+
+ vld1.8 {@XMM[15]}, [$ivp] @ load IV
+ b .Lcbc_dec_loop
+
+.align 4
+.Lcbc_dec_loop:
+ subs $len, $len, #0x8
+ bmi .Lcbc_dec_loop_finish
+
+ vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input
+ vld1.8 {@XMM[2]-@XMM[3]}, [$inp]!
+#ifndef BSAES_ASM_EXTENDED_KEY
+ mov r4, $keysched @ pass the key
+#else
+ add r4, $key, #248
+#endif
+ vld1.8 {@XMM[4]-@XMM[5]}, [$inp]!
+ mov r5, $rounds
+ vld1.8 {@XMM[6]-@XMM[7]}, [$inp]
+ sub $inp, $inp, #0x60
+ vstmia $fp, {@XMM[15]} @ put aside IV
+
+ bl _bsaes_decrypt8
+
+ vldmia $fp, {@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
+ veor @XMM[1], @XMM[1], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[9]
+ vld1.8 {@XMM[12]-@XMM[13]}, [$inp]!
+ veor @XMM[4], @XMM[4], @XMM[10]
+ veor @XMM[2], @XMM[2], @XMM[11]
+ vld1.8 {@XMM[14]-@XMM[15]}, [$inp]!
+ veor @XMM[7], @XMM[7], @XMM[12]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ veor @XMM[3], @XMM[3], @XMM[13]
+ vst1.8 {@XMM[6]}, [$out]!
+ veor @XMM[5], @XMM[5], @XMM[14]
+ vst1.8 {@XMM[4]}, [$out]!
+ vst1.8 {@XMM[2]}, [$out]!
+ vst1.8 {@XMM[7]}, [$out]!
+ vst1.8 {@XMM[3]}, [$out]!
+ vst1.8 {@XMM[5]}, [$out]!
+
+ b .Lcbc_dec_loop
+
+.Lcbc_dec_loop_finish:
+ adds $len, $len, #8
+ beq .Lcbc_dec_done
+
+ vld1.8 {@XMM[0]}, [$inp]! @ load input
+ cmp $len, #2
+ blo .Lcbc_dec_one
+ vld1.8 {@XMM[1]}, [$inp]!
+#ifndef BSAES_ASM_EXTENDED_KEY
+ mov r4, $keysched @ pass the key
+#else
+ add r4, $key, #248
+#endif
+ mov r5, $rounds
+ vstmia $fp, {@XMM[15]} @ put aside IV
+ beq .Lcbc_dec_two
+ vld1.8 {@XMM[2]}, [$inp]!
+ cmp $len, #4
+ blo .Lcbc_dec_three
+ vld1.8 {@XMM[3]}, [$inp]!
+ beq .Lcbc_dec_four
+ vld1.8 {@XMM[4]}, [$inp]!
+ cmp $len, #6
+ blo .Lcbc_dec_five
+ vld1.8 {@XMM[5]}, [$inp]!
+ beq .Lcbc_dec_six
+ vld1.8 {@XMM[6]}, [$inp]!
+ sub $inp, $inp, #0x70
+
+ bl _bsaes_decrypt8
+
+ vldmia $fp, {@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
+ veor @XMM[1], @XMM[1], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[9]
+ vld1.8 {@XMM[12]-@XMM[13]}, [$inp]!
+ veor @XMM[4], @XMM[4], @XMM[10]
+ veor @XMM[2], @XMM[2], @XMM[11]
+ vld1.8 {@XMM[15]}, [$inp]!
+ veor @XMM[7], @XMM[7], @XMM[12]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ veor @XMM[3], @XMM[3], @XMM[13]
+ vst1.8 {@XMM[6]}, [$out]!
+ vst1.8 {@XMM[4]}, [$out]!
+ vst1.8 {@XMM[2]}, [$out]!
+ vst1.8 {@XMM[7]}, [$out]!
+ vst1.8 {@XMM[3]}, [$out]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_six:
+ sub $inp, $inp, #0x60
+ bl _bsaes_decrypt8
+ vldmia $fp,{@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
+ veor @XMM[1], @XMM[1], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[9]
+ vld1.8 {@XMM[12]}, [$inp]!
+ veor @XMM[4], @XMM[4], @XMM[10]
+ veor @XMM[2], @XMM[2], @XMM[11]
+ vld1.8 {@XMM[15]}, [$inp]!
+ veor @XMM[7], @XMM[7], @XMM[12]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ vst1.8 {@XMM[6]}, [$out]!
+ vst1.8 {@XMM[4]}, [$out]!
+ vst1.8 {@XMM[2]}, [$out]!
+ vst1.8 {@XMM[7]}, [$out]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_five:
+ sub $inp, $inp, #0x50
+ bl _bsaes_decrypt8
+ vldmia $fp, {@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
+ veor @XMM[1], @XMM[1], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[9]
+ vld1.8 {@XMM[15]}, [$inp]!
+ veor @XMM[4], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ veor @XMM[2], @XMM[2], @XMM[11]
+ vst1.8 {@XMM[6]}, [$out]!
+ vst1.8 {@XMM[4]}, [$out]!
+ vst1.8 {@XMM[2]}, [$out]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_four:
+ sub $inp, $inp, #0x40
+ bl _bsaes_decrypt8
+ vldmia $fp, {@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[10]}, [$inp]!
+ veor @XMM[1], @XMM[1], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[9]
+ vld1.8 {@XMM[15]}, [$inp]!
+ veor @XMM[4], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ vst1.8 {@XMM[6]}, [$out]!
+ vst1.8 {@XMM[4]}, [$out]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_three:
+ sub $inp, $inp, #0x30
+ bl _bsaes_decrypt8
+ vldmia $fp, {@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[15]}, [$inp]!
+ veor @XMM[1], @XMM[1], @XMM[8]
+ veor @XMM[6], @XMM[6], @XMM[9]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ vst1.8 {@XMM[6]}, [$out]!
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_two:
+ sub $inp, $inp, #0x20
+ bl _bsaes_decrypt8
+ vldmia $fp, {@XMM[14]} @ reload IV
+ vld1.8 {@XMM[8]}, [$inp]! @ reload input
+ veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
+ vld1.8 {@XMM[15]}, [$inp]! @ reload input
+ veor @XMM[1], @XMM[1], @XMM[8]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ b .Lcbc_dec_done
+.align 4
+.Lcbc_dec_one:
+ sub $inp, $inp, #0x10
+ mov $rounds, $out @ save original out pointer
+ mov $out, $fp @ use the iv scratch space as out buffer
+ mov r2, $key
+ vmov @XMM[4],@XMM[15] @ just in case ensure that IV
+ vmov @XMM[5],@XMM[0] @ and input are preserved
+ bl AES_decrypt
+ vld1.8 {@XMM[0]}, [$fp,:64] @ load result
+ veor @XMM[0], @XMM[0], @XMM[4] @ ^= IV
+ vmov @XMM[15], @XMM[5] @ @XMM[5] holds input
+ vst1.8 {@XMM[0]}, [$rounds] @ write output
+
+.Lcbc_dec_done:
+#ifndef BSAES_ASM_EXTENDED_KEY
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+.Lcbc_dec_bzero: @ wipe key schedule [if any]
+ vstmia $keysched!, {q0-q1}
+ cmp $keysched, $fp
+ bne .Lcbc_dec_bzero
+#endif
+
+ mov sp, $fp
+ add sp, #0x10 @ add sp,$fp,#0x10 is no good for thumb
+ vst1.8 {@XMM[15]}, [$ivp] @ return IV
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc}
+.size bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+___
+}
+{
+my ($inp,$out,$len,$key, $ctr,$fp,$rounds)=(map("r$_",(0..3,8..10)));
+my $const = "r6"; # shared with _bsaes_encrypt8_alt
+my $keysched = "sp";
+
+$code.=<<___;
+.extern AES_encrypt
+.global bsaes_ctr32_encrypt_blocks
+.type bsaes_ctr32_encrypt_blocks,%function
+.align 5
+bsaes_ctr32_encrypt_blocks:
+ cmp $len, #8 @ use plain AES for
+ blo .Lctr_enc_short @ small sizes
+
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr}
+ VFP_ABI_PUSH
+ ldr $ctr, [ip] @ ctr is 1st arg on the stack
+ sub sp, sp, #0x10 @ scratch space to carry over the ctr
+ mov $fp, sp @ save sp
+
+ ldr $rounds, [$key, #240] @ get # of rounds
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key
+ add r12, #`128-32` @ size of bit-sliced key schedule
+
+ @ populate the key schedule
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ mov sp, r12 @ sp is $keysched
+ bl _bsaes_key_convert
+ veor @XMM[7],@XMM[7],@XMM[15] @ fix up last round key
+ vstmia r12, {@XMM[7]} @ save last round key
+
+ vld1.8 {@XMM[0]}, [$ctr] @ load counter
+ add $ctr, $const, #.LREVM0SR-.LM0 @ borrow $ctr
+ vldmia $keysched, {@XMM[4]} @ load round0 key
+#else
+ ldr r12, [$key, #244]
+ eors r12, #1
+ beq 0f
+
+ @ populate the key schedule
+ str r12, [$key, #244]
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ add r12, $key, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ veor @XMM[7],@XMM[7],@XMM[15] @ fix up last round key
+ vstmia r12, {@XMM[7]} @ save last round key
+
+.align 2
+0: add r12, $key, #248
+ vld1.8 {@XMM[0]}, [$ctr] @ load counter
+ adrl $ctr, .LREVM0SR @ borrow $ctr
+ vldmia r12, {@XMM[4]} @ load round0 key
+ sub sp, #0x10 @ place for adjusted round0 key
+#endif
+
+ vmov.i32 @XMM[8],#1 @ compose 1<<96
+ veor @XMM[9],@XMM[9],@XMM[9]
+ vrev32.8 @XMM[0],@XMM[0]
+ vext.8 @XMM[8],@XMM[9],@XMM[8],#4
+ vrev32.8 @XMM[4],@XMM[4]
+ vadd.u32 @XMM[9],@XMM[8],@XMM[8] @ compose 2<<96
+ vstmia $keysched, {@XMM[4]} @ save adjusted round0 key
+ b .Lctr_enc_loop
+
+.align 4
+.Lctr_enc_loop:
+ vadd.u32 @XMM[10], @XMM[8], @XMM[9] @ compose 3<<96
+ vadd.u32 @XMM[1], @XMM[0], @XMM[8] @ +1
+ vadd.u32 @XMM[2], @XMM[0], @XMM[9] @ +2
+ vadd.u32 @XMM[3], @XMM[0], @XMM[10] @ +3
+ vadd.u32 @XMM[4], @XMM[1], @XMM[10]
+ vadd.u32 @XMM[5], @XMM[2], @XMM[10]
+ vadd.u32 @XMM[6], @XMM[3], @XMM[10]
+ vadd.u32 @XMM[7], @XMM[4], @XMM[10]
+ vadd.u32 @XMM[10], @XMM[5], @XMM[10] @ next counter
+
+ @ Borrow prologue from _bsaes_encrypt8 to use the opportunity
+ @ to flip byte order in 32-bit counter
+
+ vldmia $keysched, {@XMM[9]} @ load round0 key
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, $keysched, #0x10 @ pass next round key
+#else
+ add r4, $key, #`248+16`
+#endif
+ vldmia $ctr, {@XMM[8]} @ .LREVM0SR
+ mov r5, $rounds @ pass rounds
+ vstmia $fp, {@XMM[10]} @ save next counter
+ sub $const, $ctr, #.LREVM0SR-.LSR @ pass constants
+
+ bl _bsaes_encrypt8_alt
+
+ subs $len, $len, #8
+ blo .Lctr_enc_loop_done
+
+ vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ load input
+ vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
+ veor @XMM[0], @XMM[8]
+ veor @XMM[1], @XMM[9]
+ vld1.8 {@XMM[12]-@XMM[13]}, [$inp]!
+ veor @XMM[4], @XMM[10]
+ veor @XMM[6], @XMM[11]
+ vld1.8 {@XMM[14]-@XMM[15]}, [$inp]!
+ veor @XMM[3], @XMM[12]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
+ veor @XMM[7], @XMM[13]
+ veor @XMM[2], @XMM[14]
+ vst1.8 {@XMM[4]}, [$out]!
+ veor @XMM[5], @XMM[15]
+ vst1.8 {@XMM[6]}, [$out]!
+ vmov.i32 @XMM[8], #1 @ compose 1<<96
+ vst1.8 {@XMM[3]}, [$out]!
+ veor @XMM[9], @XMM[9], @XMM[9]
+ vst1.8 {@XMM[7]}, [$out]!
+ vext.8 @XMM[8], @XMM[9], @XMM[8], #4
+ vst1.8 {@XMM[2]}, [$out]!
+ vadd.u32 @XMM[9],@XMM[8],@XMM[8] @ compose 2<<96
+ vst1.8 {@XMM[5]}, [$out]!
+ vldmia $fp, {@XMM[0]} @ load counter
+
+ bne .Lctr_enc_loop
+ b .Lctr_enc_done
+
+.align 4
+.Lctr_enc_loop_done:
+ add $len, $len, #8
+ vld1.8 {@XMM[8]}, [$inp]! @ load input
+ veor @XMM[0], @XMM[8]
+ vst1.8 {@XMM[0]}, [$out]! @ write output
+ cmp $len, #2
+ blo .Lctr_enc_done
+ vld1.8 {@XMM[9]}, [$inp]!
+ veor @XMM[1], @XMM[9]
+ vst1.8 {@XMM[1]}, [$out]!
+ beq .Lctr_enc_done
+ vld1.8 {@XMM[10]}, [$inp]!
+ veor @XMM[4], @XMM[10]
+ vst1.8 {@XMM[4]}, [$out]!
+ cmp $len, #4
+ blo .Lctr_enc_done
+ vld1.8 {@XMM[11]}, [$inp]!
+ veor @XMM[6], @XMM[11]
+ vst1.8 {@XMM[6]}, [$out]!
+ beq .Lctr_enc_done
+ vld1.8 {@XMM[12]}, [$inp]!
+ veor @XMM[3], @XMM[12]
+ vst1.8 {@XMM[3]}, [$out]!
+ cmp $len, #6
+ blo .Lctr_enc_done
+ vld1.8 {@XMM[13]}, [$inp]!
+ veor @XMM[7], @XMM[13]
+ vst1.8 {@XMM[7]}, [$out]!
+ beq .Lctr_enc_done
+ vld1.8 {@XMM[14]}, [$inp]
+ veor @XMM[2], @XMM[14]
+ vst1.8 {@XMM[2]}, [$out]!
+
+.Lctr_enc_done:
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+#ifndef BSAES_ASM_EXTENDED_KEY
+.Lctr_enc_bzero: @ wipe key schedule [if any]
+ vstmia $keysched!, {q0-q1}
+ cmp $keysched, $fp
+ bne .Lctr_enc_bzero
+#else
+ vstmia $keysched, {q0-q1}
+#endif
+
+ mov sp, $fp
+ add sp, #0x10 @ add sp,$fp,#0x10 is no good for thumb
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc} @ return
+
+.align 4
+.Lctr_enc_short:
+ ldr ip, [sp] @ ctr pointer is passed on stack
+ stmdb sp!, {r4-r8, lr}
+
+ mov r4, $inp @ copy arguments
+ mov r5, $out
+ mov r6, $len
+ mov r7, $key
+ ldr r8, [ip, #12] @ load counter LSW
+ vld1.8 {@XMM[1]}, [ip] @ load whole counter value
+#ifdef __ARMEL__
+ rev r8, r8
+#endif
+ sub sp, sp, #0x10
+ vst1.8 {@XMM[1]}, [sp,:64] @ copy counter value
+ sub sp, sp, #0x10
+
+.Lctr_enc_short_loop:
+ add r0, sp, #0x10 @ input counter value
+ mov r1, sp @ output on the stack
+ mov r2, r7 @ key
+
+ bl AES_encrypt
+
+ vld1.8 {@XMM[0]}, [r4]! @ load input
+ vld1.8 {@XMM[1]}, [sp,:64] @ load encrypted counter
+ add r8, r8, #1
+#ifdef __ARMEL__
+ rev r0, r8
+ str r0, [sp, #0x1c] @ next counter value
+#else
+ str r8, [sp, #0x1c] @ next counter value
+#endif
+ veor @XMM[0],@XMM[0],@XMM[1]
+ vst1.8 {@XMM[0]}, [r5]! @ store output
+ subs r6, r6, #1
+ bne .Lctr_enc_short_loop
+
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+ vstmia sp!, {q0-q1}
+
+ ldmia sp!, {r4-r8, pc}
+.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
+___
+}
+{
+######################################################################
+# void bsaes_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+# const AES_KEY *key1, const AES_KEY *key2,
+# const unsigned char iv[16]);
+#
+my ($inp,$out,$len,$key,$rounds,$magic,$fp)=(map("r$_",(7..10,1..3)));
+my $const="r6"; # returned by _bsaes_key_convert
+my $twmask=@XMM[5];
+my @T=@XMM[6..7];
+
+$code.=<<___;
+.globl bsaes_xts_encrypt
+.type bsaes_xts_encrypt,%function
+.align 4
+bsaes_xts_encrypt:
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr} @ 0x20
+ VFP_ABI_PUSH
+ mov r6, sp @ future $fp
+
+ mov $inp, r0
+ mov $out, r1
+ mov $len, r2
+ mov $key, r3
+
+ sub r0, sp, #0x10 @ 0x10
+ bic r0, #0xf @ align at 16 bytes
+ mov sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+ ldr r0, [ip] @ pointer to input tweak
+#else
+ @ generate initial tweak
+ ldr r0, [ip, #4] @ iv[]
+ mov r1, sp
+ ldr r2, [ip, #0] @ key2
+ bl AES_encrypt
+ mov r0,sp @ pointer to initial tweak
+#endif
+
+ ldr $rounds, [$key, #240] @ get # of rounds
+ mov $fp, r6
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key
+ @ add r12, #`128-32` @ size of bit-sliced key schedule
+ sub r12, #`32+16` @ place for tweak[9]
+
+ @ populate the key schedule
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ mov sp, r12
+ add r12, #0x90 @ pass key schedule
+ bl _bsaes_key_convert
+ veor @XMM[7], @XMM[7], @XMM[15] @ fix up last round key
+ vstmia r12, {@XMM[7]} @ save last round key
+#else
+ ldr r12, [$key, #244]
+ eors r12, #1
+ beq 0f
+
+ str r12, [$key, #244]
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ add r12, $key, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ veor @XMM[7], @XMM[7], @XMM[15] @ fix up last round key
+ vstmia r12, {@XMM[7]}
+
+.align 2
+0: sub sp, #0x90 @ place for tweak[9]
+#endif
+
+ vld1.8 {@XMM[8]}, [r0] @ initial tweak
+ adr $magic, .Lxts_magic
+
+ subs $len, #0x80
+ blo .Lxts_enc_short
+ b .Lxts_enc_loop
+
+.align 4
+.Lxts_enc_loop:
+ vldmia $magic, {$twmask} @ load XTS magic
+ vshr.s64 @T[0], @XMM[8], #63
+ mov r0, sp
+ vand @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+ vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+ vst1.64 {@XMM[$i-1]}, [r0,:128]!
+ vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+ vshr.s64 @T[1], @XMM[$i], #63
+ veor @XMM[$i], @XMM[$i], @T[0]
+ vand @T[1], @T[1], $twmask
+___
+ @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+ vld1.8 {@XMM[$i-10]}, [$inp]!
+___
+$code.=<<___ if ($i>=11);
+ veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+ vadd.u64 @XMM[8], @XMM[15], @XMM[15]
+ vst1.64 {@XMM[15]}, [r0,:128]!
+ vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+ veor @XMM[8], @XMM[8], @T[0]
+ vst1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+
+ vld1.8 {@XMM[6]-@XMM[7]}, [$inp]!
+ veor @XMM[5], @XMM[5], @XMM[13]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[6], @XMM[6], @XMM[14]
+ mov r5, $rounds @ pass rounds
+ veor @XMM[7], @XMM[7], @XMM[15]
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[6], @XMM[11]
+ vld1.64 {@XMM[14]-@XMM[15]}, [r0,:128]!
+ veor @XMM[10], @XMM[3], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ veor @XMM[11], @XMM[7], @XMM[13]
+ veor @XMM[12], @XMM[2], @XMM[14]
+ vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
+ veor @XMM[13], @XMM[5], @XMM[15]
+ vst1.8 {@XMM[12]-@XMM[13]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+
+ subs $len, #0x80
+ bpl .Lxts_enc_loop
+
+.Lxts_enc_short:
+ adds $len, #0x70
+ bmi .Lxts_enc_done
+
+ vldmia $magic, {$twmask} @ load XTS magic
+ vshr.s64 @T[0], @XMM[8], #63
+ mov r0, sp
+ vand @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+ vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+ vst1.64 {@XMM[$i-1]}, [r0,:128]!
+ vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+ vshr.s64 @T[1], @XMM[$i], #63
+ veor @XMM[$i], @XMM[$i], @T[0]
+ vand @T[1], @T[1], $twmask
+___
+ @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+ vld1.8 {@XMM[$i-10]}, [$inp]!
+ subs $len, #0x10
+ bmi .Lxts_enc_`$i-9`
+___
+$code.=<<___ if ($i>=11);
+ veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+ sub $len, #0x10
+ vst1.64 {@XMM[15]}, [r0,:128] @ next round tweak
+
+ vld1.8 {@XMM[6]}, [$inp]!
+ veor @XMM[5], @XMM[5], @XMM[13]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[6], @XMM[6], @XMM[14]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[6], @XMM[11]
+ vld1.64 {@XMM[14]}, [r0,:128]!
+ veor @XMM[10], @XMM[3], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ veor @XMM[11], @XMM[7], @XMM[13]
+ veor @XMM[12], @XMM[2], @XMM[14]
+ vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
+ vst1.8 {@XMM[12]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_6:
+ vst1.64 {@XMM[14]}, [r0,:128] @ next round tweak
+
+ veor @XMM[4], @XMM[4], @XMM[12]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[5], @XMM[5], @XMM[13]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[6], @XMM[11]
+ veor @XMM[10], @XMM[3], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ veor @XMM[11], @XMM[7], @XMM[13]
+ vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+
+@ put this in range for both ARM and Thumb mode adr instructions
+.align 5
+.Lxts_magic:
+ .quad 1, 0x87
+
+.align 5
+.Lxts_enc_5:
+ vst1.64 {@XMM[13]}, [r0,:128] @ next round tweak
+
+ veor @XMM[3], @XMM[3], @XMM[11]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[4], @XMM[4], @XMM[12]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[6], @XMM[11]
+ veor @XMM[10], @XMM[3], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ vst1.8 {@XMM[10]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_4:
+ vst1.64 {@XMM[12]}, [r0,:128] @ next round tweak
+
+ veor @XMM[2], @XMM[2], @XMM[10]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[3], @XMM[3], @XMM[11]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[6], @XMM[11]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_3:
+ vst1.64 {@XMM[11]}, [r0,:128] @ next round tweak
+
+ veor @XMM[1], @XMM[1], @XMM[9]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[2], @XMM[2], @XMM[10]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]!
+ vld1.64 {@XMM[10]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[4], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ vst1.8 {@XMM[8]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_2:
+ vst1.64 {@XMM[10]}, [r0,:128] @ next round tweak
+
+ veor @XMM[0], @XMM[0], @XMM[8]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[1], @XMM[1], @XMM[9]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_encrypt8
+
+ vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_enc_done
+.align 4
+.Lxts_enc_1:
+ mov r0, sp
+ veor @XMM[0], @XMM[8]
+ mov r1, sp
+ vst1.8 {@XMM[0]}, [sp,:128]
+ mov r2, $key
+ mov r4, $fp @ preserve fp
+
+ bl AES_encrypt
+
+ vld1.8 {@XMM[0]}, [sp,:128]
+ veor @XMM[0], @XMM[0], @XMM[8]
+ vst1.8 {@XMM[0]}, [$out]!
+ mov $fp, r4
+
+ vmov @XMM[8], @XMM[9] @ next round tweak
+
+.Lxts_enc_done:
+#ifndef XTS_CHAIN_TWEAK
+ adds $len, #0x10
+ beq .Lxts_enc_ret
+ sub r6, $out, #0x10
+
+.Lxts_enc_steal:
+ ldrb r0, [$inp], #1
+ ldrb r1, [$out, #-0x10]
+ strb r0, [$out, #-0x10]
+ strb r1, [$out], #1
+
+ subs $len, #1
+ bhi .Lxts_enc_steal
+
+ vld1.8 {@XMM[0]}, [r6]
+ mov r0, sp
+ veor @XMM[0], @XMM[0], @XMM[8]
+ mov r1, sp
+ vst1.8 {@XMM[0]}, [sp,:128]
+ mov r2, $key
+ mov r4, $fp @ preserve fp
+
+ bl AES_encrypt
+
+ vld1.8 {@XMM[0]}, [sp,:128]
+ veor @XMM[0], @XMM[0], @XMM[8]
+ vst1.8 {@XMM[0]}, [r6]
+ mov $fp, r4
+#endif
+
+.Lxts_enc_ret:
+ bic r0, $fp, #0xf
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+#ifdef XTS_CHAIN_TWEAK
+ ldr r1, [$fp, #0x20+VFP_ABI_FRAME] @ chain tweak
+#endif
+.Lxts_enc_bzero: @ wipe key schedule [if any]
+ vstmia sp!, {q0-q1}
+ cmp sp, r0
+ bne .Lxts_enc_bzero
+
+ mov sp, $fp
+#ifdef XTS_CHAIN_TWEAK
+ vst1.8 {@XMM[8]}, [r1]
+#endif
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc} @ return
+
+.size bsaes_xts_encrypt,.-bsaes_xts_encrypt
+
+.globl bsaes_xts_decrypt
+.type bsaes_xts_decrypt,%function
+.align 4
+bsaes_xts_decrypt:
+ mov ip, sp
+ stmdb sp!, {r4-r10, lr} @ 0x20
+ VFP_ABI_PUSH
+ mov r6, sp @ future $fp
+
+ mov $inp, r0
+ mov $out, r1
+ mov $len, r2
+ mov $key, r3
+
+ sub r0, sp, #0x10 @ 0x10
+ bic r0, #0xf @ align at 16 bytes
+ mov sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+ ldr r0, [ip] @ pointer to input tweak
+#else
+ @ generate initial tweak
+ ldr r0, [ip, #4] @ iv[]
+ mov r1, sp
+ ldr r2, [ip, #0] @ key2
+ bl AES_encrypt
+ mov r0, sp @ pointer to initial tweak
+#endif
+
+ ldr $rounds, [$key, #240] @ get # of rounds
+ mov $fp, r6
+#ifndef BSAES_ASM_EXTENDED_KEY
+ @ allocate the key schedule on the stack
+ sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key
+ @ add r12, #`128-32` @ size of bit-sliced key schedule
+ sub r12, #`32+16` @ place for tweak[9]
+
+ @ populate the key schedule
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ mov sp, r12
+ add r12, #0x90 @ pass key schedule
+ bl _bsaes_key_convert
+ add r4, sp, #0x90
+ vldmia r4, {@XMM[6]}
+ vstmia r12, {@XMM[15]} @ save last round key
+ veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
+ vstmia r4, {@XMM[7]}
+#else
+ ldr r12, [$key, #244]
+ eors r12, #1
+ beq 0f
+
+ str r12, [$key, #244]
+ mov r4, $key @ pass key
+ mov r5, $rounds @ pass # of rounds
+ add r12, $key, #248 @ pass key schedule
+ bl _bsaes_key_convert
+ add r4, $key, #248
+ vldmia r4, {@XMM[6]}
+ vstmia r12, {@XMM[15]} @ save last round key
+ veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
+ vstmia r4, {@XMM[7]}
+
+.align 2
+0: sub sp, #0x90 @ place for tweak[9]
+#endif
+ vld1.8 {@XMM[8]}, [r0] @ initial tweak
+ adr $magic, .Lxts_magic
+
+ tst $len, #0xf @ if not multiple of 16
+ it ne @ Thumb2 thing, sanity check in ARM
+ subne $len, #0x10 @ subtract another 16 bytes
+ subs $len, #0x80
+
+ blo .Lxts_dec_short
+ b .Lxts_dec_loop
+
+.align 4
+.Lxts_dec_loop:
+ vldmia $magic, {$twmask} @ load XTS magic
+ vshr.s64 @T[0], @XMM[8], #63
+ mov r0, sp
+ vand @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+ vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+ vst1.64 {@XMM[$i-1]}, [r0,:128]!
+ vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+ vshr.s64 @T[1], @XMM[$i], #63
+ veor @XMM[$i], @XMM[$i], @T[0]
+ vand @T[1], @T[1], $twmask
+___
+ @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+ vld1.8 {@XMM[$i-10]}, [$inp]!
+___
+$code.=<<___ if ($i>=11);
+ veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+ vadd.u64 @XMM[8], @XMM[15], @XMM[15]
+ vst1.64 {@XMM[15]}, [r0,:128]!
+ vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+ veor @XMM[8], @XMM[8], @T[0]
+ vst1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+
+ vld1.8 {@XMM[6]-@XMM[7]}, [$inp]!
+ veor @XMM[5], @XMM[5], @XMM[13]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[6], @XMM[6], @XMM[14]
+ mov r5, $rounds @ pass rounds
+ veor @XMM[7], @XMM[7], @XMM[15]
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[6], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[4], @XMM[11]
+ vld1.64 {@XMM[14]-@XMM[15]}, [r0,:128]!
+ veor @XMM[10], @XMM[2], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ veor @XMM[11], @XMM[7], @XMM[13]
+ veor @XMM[12], @XMM[3], @XMM[14]
+ vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
+ veor @XMM[13], @XMM[5], @XMM[15]
+ vst1.8 {@XMM[12]-@XMM[13]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+
+ subs $len, #0x80
+ bpl .Lxts_dec_loop
+
+.Lxts_dec_short:
+ adds $len, #0x70
+ bmi .Lxts_dec_done
+
+ vldmia $magic, {$twmask} @ load XTS magic
+ vshr.s64 @T[0], @XMM[8], #63
+ mov r0, sp
+ vand @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+ vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+ vst1.64 {@XMM[$i-1]}, [r0,:128]!
+ vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+ vshr.s64 @T[1], @XMM[$i], #63
+ veor @XMM[$i], @XMM[$i], @T[0]
+ vand @T[1], @T[1], $twmask
+___
+ @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+ vld1.8 {@XMM[$i-10]}, [$inp]!
+ subs $len, #0x10
+ bmi .Lxts_dec_`$i-9`
+___
+$code.=<<___ if ($i>=11);
+ veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+ sub $len, #0x10
+ vst1.64 {@XMM[15]}, [r0,:128] @ next round tweak
+
+ vld1.8 {@XMM[6]}, [$inp]!
+ veor @XMM[5], @XMM[5], @XMM[13]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[6], @XMM[6], @XMM[14]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[6], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[4], @XMM[11]
+ vld1.64 {@XMM[14]}, [r0,:128]!
+ veor @XMM[10], @XMM[2], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ veor @XMM[11], @XMM[7], @XMM[13]
+ veor @XMM[12], @XMM[3], @XMM[14]
+ vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
+ vst1.8 {@XMM[12]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_6:
+ vst1.64 {@XMM[14]}, [r0,:128] @ next round tweak
+
+ veor @XMM[4], @XMM[4], @XMM[12]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[5], @XMM[5], @XMM[13]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[6], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[4], @XMM[11]
+ veor @XMM[10], @XMM[2], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ veor @XMM[11], @XMM[7], @XMM[13]
+ vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_5:
+ vst1.64 {@XMM[13]}, [r0,:128] @ next round tweak
+
+ veor @XMM[3], @XMM[3], @XMM[11]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[4], @XMM[4], @XMM[12]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ vld1.64 {@XMM[12]}, [r0,:128]!
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[6], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[4], @XMM[11]
+ veor @XMM[10], @XMM[2], @XMM[12]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+ vst1.8 {@XMM[10]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_4:
+ vst1.64 {@XMM[12]}, [r0,:128] @ next round tweak
+
+ veor @XMM[2], @XMM[2], @XMM[10]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[3], @XMM[3], @XMM[11]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+ vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[6], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ veor @XMM[9], @XMM[4], @XMM[11]
+ vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_3:
+ vst1.64 {@XMM[11]}, [r0,:128] @ next round tweak
+
+ veor @XMM[1], @XMM[1], @XMM[9]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[2], @XMM[2], @XMM[10]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]!
+ vld1.64 {@XMM[10]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ veor @XMM[8], @XMM[6], @XMM[10]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+ vst1.8 {@XMM[8]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_2:
+ vst1.64 {@XMM[10]}, [r0,:128] @ next round tweak
+
+ veor @XMM[0], @XMM[0], @XMM[8]
+#ifndef BSAES_ASM_EXTENDED_KEY
+ add r4, sp, #0x90 @ pass key schedule
+#else
+ add r4, $key, #248 @ pass key schedule
+#endif
+ veor @XMM[1], @XMM[1], @XMM[9]
+ mov r5, $rounds @ pass rounds
+ mov r0, sp
+
+ bl _bsaes_decrypt8
+
+ vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]!
+ veor @XMM[0], @XMM[0], @XMM[ 8]
+ veor @XMM[1], @XMM[1], @XMM[ 9]
+ vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
+
+ vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
+ b .Lxts_dec_done
+.align 4
+.Lxts_dec_1:
+ mov r0, sp
+ veor @XMM[0], @XMM[8]
+ mov r1, sp
+ vst1.8 {@XMM[0]}, [sp,:128]
+ mov r2, $key
+ mov r4, $fp @ preserve fp
+ mov r5, $magic @ preserve magic
+
+ bl AES_decrypt
+
+ vld1.8 {@XMM[0]}, [sp,:128]
+ veor @XMM[0], @XMM[0], @XMM[8]
+ vst1.8 {@XMM[0]}, [$out]!
+ mov $fp, r4
+ mov $magic, r5
+
+ vmov @XMM[8], @XMM[9] @ next round tweak
+
+.Lxts_dec_done:
+#ifndef XTS_CHAIN_TWEAK
+ adds $len, #0x10
+ beq .Lxts_dec_ret
+
+ @ calculate one round of extra tweak for the stolen ciphertext
+ vldmia $magic, {$twmask}
+ vshr.s64 @XMM[6], @XMM[8], #63
+ vand @XMM[6], @XMM[6], $twmask
+ vadd.u64 @XMM[9], @XMM[8], @XMM[8]
+ vswp `&Dhi("@XMM[6]")`,`&Dlo("@XMM[6]")`
+ veor @XMM[9], @XMM[9], @XMM[6]
+
+ @ perform the final decryption with the last tweak value
+ vld1.8 {@XMM[0]}, [$inp]!
+ mov r0, sp
+ veor @XMM[0], @XMM[0], @XMM[9]
+ mov r1, sp
+ vst1.8 {@XMM[0]}, [sp,:128]
+ mov r2, $key
+ mov r4, $fp @ preserve fp
+
+ bl AES_decrypt
+
+ vld1.8 {@XMM[0]}, [sp,:128]
+ veor @XMM[0], @XMM[0], @XMM[9]
+ vst1.8 {@XMM[0]}, [$out]
+
+ mov r6, $out
+.Lxts_dec_steal:
+ ldrb r1, [$out]
+ ldrb r0, [$inp], #1
+ strb r1, [$out, #0x10]
+ strb r0, [$out], #1
+
+ subs $len, #1
+ bhi .Lxts_dec_steal
+
+ vld1.8 {@XMM[0]}, [r6]
+ mov r0, sp
+ veor @XMM[0], @XMM[8]
+ mov r1, sp
+ vst1.8 {@XMM[0]}, [sp,:128]
+ mov r2, $key
+
+ bl AES_decrypt
+
+ vld1.8 {@XMM[0]}, [sp,:128]
+ veor @XMM[0], @XMM[0], @XMM[8]
+ vst1.8 {@XMM[0]}, [r6]
+ mov $fp, r4
+#endif
+
+.Lxts_dec_ret:
+ bic r0, $fp, #0xf
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+#ifdef XTS_CHAIN_TWEAK
+ ldr r1, [$fp, #0x20+VFP_ABI_FRAME] @ chain tweak
+#endif
+.Lxts_dec_bzero: @ wipe key schedule [if any]
+ vstmia sp!, {q0-q1}
+ cmp sp, r0
+ bne .Lxts_dec_bzero
+
+ mov sp, $fp
+#ifdef XTS_CHAIN_TWEAK
+ vst1.8 {@XMM[8]}, [r1]
+#endif
+ VFP_ABI_POP
+ ldmia sp!, {r4-r10, pc} @ return
+
+.size bsaes_xts_decrypt,.-bsaes_xts_decrypt
+___
+}
+$code.=<<___;
+#endif
+___
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+open SELF,$0;
+while(<SELF>) {
+ next if (/^#!/);
+ last if (!s/^#/@/ and !/^$/);
+ print;
+}
+close SELF;
+
+print $code;
+
+close STDOUT;
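
For orientation, the entry points emitted above are meant to be driven from C
glue code. A minimal sketch follows: the CTR prototype is inferred from the
register/stack argument layout in the assembly (r0-r3 plus one stack argument
carrying the counter pointer), the XTS prototype is taken from the comment
block above, and the struct AES_KEY forward declaration and helper name are
assumptions, not the kernel's actual glue.

#include <stddef.h>
#include <stdint.h>

struct AES_KEY;         /* expanded key; # of rounds read at byte offset 240 */

/* Inferred from the argument registers used above (assumption). */
extern void bsaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
                                       size_t blocks,
                                       const struct AES_KEY *key,
                                       const uint8_t ctr[16]);

/* As documented in the bsaes_xts_[en|de]crypt comment block above. */
extern void bsaes_xts_encrypt(const char *inp, char *out, size_t len,
                              const struct AES_KEY *key1,
                              const struct AES_KEY *key2,
                              const unsigned char iv[16]);

/* Hypothetical helper: CTR-encrypt whole 16-byte blocks; the assembly
 * itself falls back to single-block AES_encrypt below eight blocks. */
static void ctr_encrypt(const struct AES_KEY *key, const uint8_t ctr[16],
                        const uint8_t *src, uint8_t *dst, size_t blocks)
{
        bsaes_ctr32_encrypt_blocks(src, dst, blocks, key, ctr);
}
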
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index d3db39860b9c..6577b8aeb711 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -24,6 +24,7 @@ generic-y += sembuf.h
generic-y += serial.h
generic-y += shmbuf.h
generic-y += siginfo.h
+generic-y += simd.h
generic-y += sizes.h
generic-y += socket.h
generic-y += sockios.h
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 7e1f76027f66..72abdc541f38 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -19,6 +19,13 @@
#include <asm/unified.h>
#include <asm/compiler.h>
+#if __LINUX_ARM_ARCH__ < 6
+#include <asm-generic/uaccess-unaligned.h>
+#else
+#define __get_user_unaligned __get_user
+#define __put_user_unaligned __put_user
+#endif
+
#define VERIFY_READ 0
#define VERIFY_WRITE 1
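
The hunk above gives ARM a __get_user_unaligned/__put_user_unaligned pair:
pre-ARMv6 cores go through the asm-generic byte-wise helpers, while ARMv6+
cores can reuse the ordinary accessors. A minimal caller sketch, assuming the
usual 0/-EFAULT return convention of __get_user():

#include <linux/types.h>
#include <linux/uaccess.h>

/* Read a possibly unaligned 32-bit value from user space. */
static int read_unaligned_u32(const void __user *p, u32 *out)
{
        return __get_user_unaligned(*out, (const u32 __user *)p);
}
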
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 74ad15d1a065..bc6bd9683ba4 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -442,10 +442,10 @@ local_restart:
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
add r1, sp, #S_OFF
- cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
+2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
bcs arm_syscall
-2: mov why, #0 @ no longer a real syscall
+ mov why, #0 @ no longer a real syscall
b sys_ni_syscall @ not private func
#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index de23a9beed13..39f89fbd5111 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -329,10 +329,10 @@
#ifdef CONFIG_CONTEXT_TRACKING
.if \save
stmdb sp!, {r0-r3, ip, lr}
- bl user_exit
+ bl context_tracking_user_exit
ldmia sp!, {r0-r3, ip, lr}
.else
- bl user_exit
+ bl context_tracking_user_exit
.endif
#endif
.endm
@@ -341,10 +341,10 @@
#ifdef CONFIG_CONTEXT_TRACKING
.if \save
stmdb sp!, {r0-r3, ip, lr}
- bl user_enter
+ bl context_tracking_user_enter
ldmia sp!, {r0-r3, ip, lr}
.else
- bl user_enter
+ bl context_tracking_user_enter
.endif
#endif
.endm
diff --git a/arch/arm/mach-imx/clk-fixup-mux.c b/arch/arm/mach-imx/clk-fixup-mux.c
index deb4b8093b30..0d40b35c557c 100644
--- a/arch/arm/mach-imx/clk-fixup-mux.c
+++ b/arch/arm/mach-imx/clk-fixup-mux.c
@@ -90,6 +90,7 @@ struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg,
init.ops = &clk_fixup_mux_ops;
init.parent_names = parents;
init.num_parents = num_parents;
+ init.flags = 0;
fixup_mux->mux.reg = reg;
fixup_mux->mux.shift = shift;
diff --git a/arch/arm/mach-imx/clk-imx27.c b/arch/arm/mach-imx/clk-imx27.c
index c3cfa4116dc0..c6b40f386786 100644
--- a/arch/arm/mach-imx/clk-imx27.c
+++ b/arch/arm/mach-imx/clk-imx27.c
@@ -285,7 +285,7 @@ int __init mx27_clocks_init(unsigned long fref)
clk_register_clkdev(clk[ata_ahb_gate], "ata", NULL);
clk_register_clkdev(clk[rtc_ipg_gate], NULL, "imx21-rtc");
clk_register_clkdev(clk[scc_ipg_gate], "scc", NULL);
- clk_register_clkdev(clk[cpu_div], NULL, "cpufreq-cpu0.0");
+ clk_register_clkdev(clk[cpu_div], NULL, "cpu0");
clk_register_clkdev(clk[emi_ahb_gate], "emi_ahb" , NULL);
mxc_timer_init(MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR), MX27_INT_GPT1);
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c
index 1a56a3319997..7c0dc4540aa4 100644
--- a/arch/arm/mach-imx/clk-imx51-imx53.c
+++ b/arch/arm/mach-imx/clk-imx51-imx53.c
@@ -328,7 +328,7 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "imx-ssi.2");
clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
- clk_register_clkdev(clk[cpu_podf], NULL, "cpufreq-cpu0.0");
+ clk_register_clkdev(clk[cpu_podf], NULL, "cpu0");
clk_register_clkdev(clk[iim_gate], "iim", NULL);
clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.0");
clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.1");
@@ -397,7 +397,7 @@ int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
mx51_spdif_xtal_sel, ARRAY_SIZE(mx51_spdif_xtal_sel));
clk[spdif1_sel] = imx_clk_mux("spdif1_sel", MXC_CCM_CSCMR2, 2, 2,
spdif_sel, ARRAY_SIZE(spdif_sel));
- clk[spdif1_pred] = imx_clk_divider("spdif1_podf", "spdif1_sel", MXC_CCM_CDCDR, 16, 3);
+ clk[spdif1_pred] = imx_clk_divider("spdif1_pred", "spdif1_sel", MXC_CCM_CDCDR, 16, 3);
clk[spdif1_podf] = imx_clk_divider("spdif1_podf", "spdif1_pred", MXC_CCM_CDCDR, 9, 6);
clk[spdif1_com_sel] = imx_clk_mux("spdif1_com_sel", MXC_CCM_CSCMR2, 5, 1,
mx51_spdif1_com_sel, ARRAY_SIZE(mx51_spdif1_com_sel));
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 85a1b51346c8..90372a21087f 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -233,10 +233,15 @@ put_node:
of_node_put(np);
}
-static void __init imx6q_opp_init(struct device *cpu_dev)
+static void __init imx6q_opp_init(void)
{
struct device_node *np;
+ struct device *cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ pr_warn("failed to get cpu0 device\n");
+ return;
+ }
np = of_node_get(cpu_dev->of_node);
if (!np) {
pr_warn("failed to find cpu0 node\n");
@@ -268,7 +273,7 @@ static void __init imx6q_init_late(void)
imx6q_cpuidle_init();
if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ)) {
- imx6q_opp_init(&imx6q_cpufreq_pdev.dev);
+ imx6q_opp_init();
platform_device_register(&imx6q_cpufreq_pdev);
}
}
diff --git a/arch/arm/mach-imx/system.c b/arch/arm/mach-imx/system.c
index 64ff37ea72b1..80c177c36c5f 100644
--- a/arch/arm/mach-imx/system.c
+++ b/arch/arm/mach-imx/system.c
@@ -117,6 +117,17 @@ void __init imx_init_l2cache(void)
/* Configure the L2 PREFETCH and POWER registers */
val = readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
val |= 0x70800000;
+ /*
+ * The L2 cache controller (PL310) version on the i.MX6D/Q is r3p1-50rel0
+ * The L2 cache controller (PL310) version on the i.MX6DL/SOLO/SL is r3p2
+ * But according to ARM PL310 errata: 752271
+ * ID: 752271: Double linefill feature can cause data corruption
+ * Fault Status: Present in: r3p0, r3p1, r3p1-50rel0. Fixed in r3p2
+ * Workaround: The only workaround to this erratum is to disable the
+ * double linefill feature. This is the default behavior.
+ */
+ if (cpu_is_imx6q())
+ val &= ~(1 << 30 | 1 << 23);
writel_relaxed(val, l2x0_base + L2X0_PREFETCH_CTRL);
val = L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN;
writel_relaxed(val, l2x0_base + L2X0_POWER_CTRL);
diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c
index 1d5b5290d2af..b237950eb8a3 100644
--- a/arch/arm/mach-omap2/cclock44xx_data.c
+++ b/arch/arm/mach-omap2/cclock44xx_data.c
@@ -1632,7 +1632,7 @@ static struct omap_clk omap44xx_clks[] = {
CLK(NULL, "auxclk5_src_ck", &auxclk5_src_ck),
CLK(NULL, "auxclk5_ck", &auxclk5_ck),
CLK(NULL, "auxclkreq5_ck", &auxclkreq5_ck),
- CLK("omap-gpmc", "fck", &dummy_ck),
+ CLK("50000000.gpmc", "fck", &dummy_ck),
CLK("omap_i2c.1", "ick", &dummy_ck),
CLK("omap_i2c.2", "ick", &dummy_ck),
CLK("omap_i2c.3", "ick", &dummy_ck),
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index c443f2e97e10..4c8982ae9529 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -143,7 +143,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
* Call idle CPU cluster PM exit notifier chain
* to restore GIC and wakeupgen context.
*/
- if ((cx->mpu_state == PWRDM_POWER_RET) &&
+ if (dev->cpu == 0 && (cx->mpu_state == PWRDM_POWER_RET) &&
(cx->mpu_logic_state == PWRDM_POWER_OFF))
cpu_cluster_pm_exit();
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 9f4795aff48a..579697adaae7 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1491,8 +1491,8 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
*/
ret = gpmc_cs_remap(cs, res.start);
if (ret < 0) {
- dev_err(&pdev->dev, "cannot remap GPMC CS %d to 0x%x\n",
- cs, res.start);
+ dev_err(&pdev->dev, "cannot remap GPMC CS %d to %pa\n",
+ cs, &res.start);
goto err;
}
diff --git a/arch/arm/mach-omap2/mux34xx.c b/arch/arm/mach-omap2/mux34xx.c
index c53609f46294..be271f1d585b 100644
--- a/arch/arm/mach-omap2/mux34xx.c
+++ b/arch/arm/mach-omap2/mux34xx.c
@@ -620,7 +620,7 @@ static struct omap_mux __initdata omap3_muxmodes[] = {
"uart1_rts", "ssi1_flag_tx", NULL, NULL,
"gpio_149", NULL, NULL, "safe_mode"),
_OMAP3_MUXENTRY(UART1_RX, 151,
- "uart1_rx", "ss1_wake_tx", "mcbsp1_clkr", "mcspi4_clk",
+ "uart1_rx", "ssi1_wake_tx", "mcbsp1_clkr", "mcspi4_clk",
"gpio_151", NULL, NULL, "safe_mode"),
_OMAP3_MUXENTRY(UART1_TX, 148,
"uart1_tx", "ssi1_dat_tx", NULL, NULL,
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 8708b2a9da45..891211093295 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -1,5 +1,5 @@
/*
- * OMAP4 SMP source file. It contains platform specific fucntions
+ * OMAP4 SMP source file. It contains platform specific functions
* needed for the linux smp kernel.
*
* Copyright (C) 2009 Texas Instruments, Inc.
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index f99f68e1e85b..b69dd9abb50a 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -158,7 +158,7 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
}
od = omap_device_alloc(pdev, hwmods, oh_cnt);
- if (!od) {
+ if (IS_ERR(od)) {
dev_err(&pdev->dev, "Cannot allocate omap_device for :%s\n",
oh_name);
ret = PTR_ERR(od);
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index 612a45689770..7fb96ebdc0fb 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -289,7 +289,7 @@ static void collie_flash_exit(void)
}
static struct flash_platform_data collie_flash_data = {
- .map_name = "cfi_probe",
+ .map_name = "jedec_probe",
.init = collie_flash_init,
.set_vpp = collie_set_vpp,
.exit = collie_flash_exit,
diff --git a/arch/arm/mach-shmobile/clock-r8a73a4.c b/arch/arm/mach-shmobile/clock-r8a73a4.c
index 8ea5ef6c79cc..5bd2e851e3c7 100644
--- a/arch/arm/mach-shmobile/clock-r8a73a4.c
+++ b/arch/arm/mach-shmobile/clock-r8a73a4.c
@@ -555,7 +555,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("pll2h", &pll2h_clk),
/* CPU clock */
- CLKDEV_DEV_ID("cpufreq-cpu0", &z_clk),
+ CLKDEV_DEV_ID("cpu0", &z_clk),
/* DIV6 */
CLKDEV_CON_ID("zb", &div6_clks[DIV6_ZB]),
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index 1942eaef5181..c92c023f0d27 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -616,7 +616,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("smp_twd", &twd_clk), /* smp_twd */
/* DIV4 clocks */
- CLKDEV_DEV_ID("cpufreq-cpu0", &div4_clks[DIV4_Z]),
+ CLKDEV_DEV_ID("cpu0", &div4_clks[DIV4_Z]),
/* DIV6 clocks */
CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
diff --git a/arch/arm/mach-u300/Kconfig b/arch/arm/mach-u300/Kconfig
index a85adcd00882..a1659863bfd5 100644
--- a/arch/arm/mach-u300/Kconfig
+++ b/arch/arm/mach-u300/Kconfig
@@ -1,7 +1,3 @@
-menu "ST-Ericsson AB U300/U335 Platform"
-
-comment "ST-Ericsson Mobile Platform Products"
-
config ARCH_U300
bool "ST-Ericsson U300 Series" if ARCH_MULTI_V5
depends on MMU
@@ -25,7 +21,9 @@ config ARCH_U300
help
Support for ST-Ericsson U300 series mobile platforms.
-comment "ST-Ericsson U300/U335 Feature Selections"
+if ARCH_U300
+
+menu "ST-Ericsson AB U300/U335 Platform"
config MACH_U300
depends on ARCH_U300
@@ -53,3 +51,5 @@ config MACH_U300_SPIDUMMY
SPI framework and ARM PL022 support.
endmenu
+
+endif
diff --git a/arch/arm/mach-ux500/cache-l2x0.c b/arch/arm/mach-ux500/cache-l2x0.c
index 82ccf1d98735..264f894c0e3d 100644
--- a/arch/arm/mach-ux500/cache-l2x0.c
+++ b/arch/arm/mach-ux500/cache-l2x0.c
@@ -69,6 +69,7 @@ static int __init ux500_l2x0_init(void)
* some SMI service available.
*/
outer_cache.disable = NULL;
+ outer_cache.set_debug = NULL;
return 0;
}
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 6d4482fa35bc..e2950b098e76 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -43,6 +43,6 @@
COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
-extern unsigned int elf_hwcap;
+extern unsigned long elf_hwcap;
#endif
#endif
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 57fb55c44c90..7ae8a1f00c3c 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -143,15 +143,26 @@ void machine_restart(char *cmd)
void __show_regs(struct pt_regs *regs)
{
- int i;
+ int i, top_reg;
+ u64 lr, sp;
+
+ if (compat_user_mode(regs)) {
+ lr = regs->compat_lr;
+ sp = regs->compat_sp;
+ top_reg = 12;
+ } else {
+ lr = regs->regs[30];
+ sp = regs->sp;
+ top_reg = 29;
+ }
show_regs_print_info(KERN_DEFAULT);
print_symbol("PC is at %s\n", instruction_pointer(regs));
- print_symbol("LR is at %s\n", regs->regs[30]);
+ print_symbol("LR is at %s\n", lr);
printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
- regs->pc, regs->regs[30], regs->pstate);
- printk("sp : %016llx\n", regs->sp);
- for (i = 29; i >= 0; i--) {
+ regs->pc, lr, regs->pstate);
+ printk("sp : %016llx\n", sp);
+ for (i = top_reg; i >= 0; i--) {
printk("x%-2d: %016llx ", i, regs->regs[i]);
if (i % 2 == 0)
printk("\n");
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 12ad8f3d0cfd..055cfb80e05c 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -57,7 +57,7 @@
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
-unsigned int elf_hwcap __read_mostly;
+unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
static const char *cpu_name;
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 6d6acf153bff..c23751b06120 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -130,7 +130,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
force_sig_info(sig, &si, tsk);
}
-void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->active_mm;
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 75a36ad11ff5..ca8f8340d75f 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -288,9 +288,6 @@ endif
vmlinux.32: vmlinux
$(OBJCOPY) -O $(32bit-bfd) $(OBJCOPYFLAGS) $< $@
-
-#obj-$(CONFIG_KPROBES) += kprobes.o
-
#
# The 64-bit ELF tools are pretty broken so at this time we generate 64-bit
# ELF files from 32-bit files by conversion.
diff --git a/arch/mips/alchemy/common/usb.c b/arch/mips/alchemy/common/usb.c
index fcc695626117..2adc7edda49c 100644
--- a/arch/mips/alchemy/common/usb.c
+++ b/arch/mips/alchemy/common/usb.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
+#include <asm/cpu.h>
#include <asm/mach-au1x00/au1000.h>
/* control register offsets */
@@ -358,7 +359,7 @@ static inline int au1200_coherency_bug(void)
{
#if defined(CONFIG_DMA_COHERENT)
/* Au1200 AB USB does not support coherent memory */
- if (!(read_c0_prid() & 0xff)) {
+ if (!(read_c0_prid() & PRID_REV_MASK)) {
printk(KERN_INFO "Au1200 USB: this is chip revision AB !!\n");
printk(KERN_INFO "Au1200 USB: update your board or re-configure"
" the kernel\n");
diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c
index 7e17374a9ae8..b713cd64b087 100644
--- a/arch/mips/bcm63xx/cpu.c
+++ b/arch/mips/bcm63xx/cpu.c
@@ -306,14 +306,14 @@ void __init bcm63xx_cpu_init(void)
switch (c->cputype) {
case CPU_BMIPS3300:
- if ((read_c0_prid() & 0xff00) != PRID_IMP_BMIPS3300_ALT)
+ if ((read_c0_prid() & PRID_IMP_MASK) != PRID_IMP_BMIPS3300_ALT)
__cpu_name[cpu] = "Broadcom BCM6338";
/* fall-through */
case CPU_BMIPS32:
chipid_reg = BCM_6345_PERF_BASE;
break;
case CPU_BMIPS4350:
- switch ((read_c0_prid() & 0xff)) {
+ switch ((read_c0_prid() & PRID_REV_MASK)) {
case 0x04:
chipid_reg = BCM_3368_PERF_BASE;
break;
diff --git a/arch/mips/boot/dts/include/dt-bindings b/arch/mips/boot/dts/include/dt-bindings
index 68ae3887b3e5..08c00e4972fa 120000
--- a/arch/mips/boot/dts/include/dt-bindings
+++ b/arch/mips/boot/dts/include/dt-bindings
@@ -1 +1 @@
-../../../../../include/dt-bindings
+../../../../../include/dt-bindings
\ No newline at end of file
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c
index 02193953eb9e..b752c4ed0b79 100644
--- a/arch/mips/cavium-octeon/csrc-octeon.c
+++ b/arch/mips/cavium-octeon/csrc-octeon.c
@@ -12,6 +12,7 @@
#include <linux/smp.h>
#include <asm/cpu-info.h>
+#include <asm/cpu-type.h>
#include <asm/time.h>
#include <asm/octeon/octeon.h>
diff --git a/arch/mips/dec/prom/init.c b/arch/mips/dec/prom/init.c
index ab169046e442..468f665de7bb 100644
--- a/arch/mips/dec/prom/init.c
+++ b/arch/mips/dec/prom/init.c
@@ -13,6 +13,7 @@
#include <asm/bootinfo.h>
#include <asm/cpu.h>
+#include <asm/cpu-type.h>
#include <asm/processor.h>
#include <asm/dec/prom.h>
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index fa44f3ec5302..d445d060e346 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -13,12 +13,6 @@
#include <asm/cpu-info.h>
#include <cpu-feature-overrides.h>
-#ifndef current_cpu_type
-#define current_cpu_type() current_cpu_data.cputype
-#endif
-
-#define boot_cpu_type() cpu_data[0].cputype
-
/*
* SMP assumption: Options of CPU 0 are a superset of all processors.
* This is true for all known MIPS systems.
@@ -193,7 +187,7 @@
/*
* MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other
- * pre-MIPS32/MIPS53 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
+ * pre-MIPS32/MIPS64 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
* has CLO and CLZ but not DCLO nor DCLZ. For 64-bit kernels
* cpu_has_clo_clz also indicates the availability of DCLO and DCLZ.
*/
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index 41401d8eb7d1..21c8e29c8f91 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -84,6 +84,7 @@ struct cpuinfo_mips {
extern struct cpuinfo_mips cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
+#define boot_cpu_data cpu_data[0]
extern void cpu_probe(void);
extern void cpu_report(void);
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
new file mode 100644
index 000000000000..4a402cc60c03
--- /dev/null
+++ b/arch/mips/include/asm/cpu-type.h
@@ -0,0 +1,203 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2004 Maciej W. Rozycki
+ */
+#ifndef __ASM_CPU_TYPE_H
+#define __ASM_CPU_TYPE_H
+
+#include <linux/smp.h>
+#include <linux/compiler.h>
+
+static inline int __pure __get_cpu_type(const int cpu_type)
+{
+ switch (cpu_type) {
+#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2E) || \
+ defined(CONFIG_SYS_HAS_CPU_LOONGSON2F)
+ case CPU_LOONGSON2:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_LOONGSON1B
+ case CPU_LOONGSON1:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_MIPS32_R1
+ case CPU_4KC:
+ case CPU_ALCHEMY:
+ case CPU_BMIPS3300:
+ case CPU_BMIPS4350:
+ case CPU_PR4450:
+ case CPU_BMIPS32:
+ case CPU_JZRISC:
+#endif
+
+#if defined(CONFIG_SYS_HAS_CPU_MIPS32_R1) || \
+ defined(CONFIG_SYS_HAS_CPU_MIPS32_R2)
+ case CPU_4KEC:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_MIPS32_R2
+ case CPU_4KSC:
+ case CPU_24K:
+ case CPU_34K:
+ case CPU_1004K:
+ case CPU_74K:
+ case CPU_M14KC:
+ case CPU_M14KEC:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1
+ case CPU_5KC:
+ case CPU_5KE:
+ case CPU_20KC:
+ case CPU_25KF:
+ case CPU_SB1:
+ case CPU_SB1A:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R2
+ /*
+ * All MIPS64 R2 processors have their own special symbols. That is,
+ * there currently is no pure R2 core
+ */
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_R3000
+ case CPU_R2000:
+ case CPU_R3000:
+ case CPU_R3000A:
+ case CPU_R3041:
+ case CPU_R3051:
+ case CPU_R3052:
+ case CPU_R3081:
+ case CPU_R3081E:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_TX39XX
+ case CPU_TX3912:
+ case CPU_TX3922:
+ case CPU_TX3927:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_VR41XX
+ case CPU_VR41XX:
+ case CPU_VR4111:
+ case CPU_VR4121:
+ case CPU_VR4122:
+ case CPU_VR4131:
+ case CPU_VR4133:
+ case CPU_VR4181:
+ case CPU_VR4181A:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_R4300
+ case CPU_R4300:
+ case CPU_R4310:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_R4X00
+ case CPU_R4000PC:
+ case CPU_R4000SC:
+ case CPU_R4000MC:
+ case CPU_R4200:
+ case CPU_R4400PC:
+ case CPU_R4400SC:
+ case CPU_R4400MC:
+ case CPU_R4600:
+ case CPU_R4700:
+ case CPU_R4640:
+ case CPU_R4650:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_TX49XX
+ case CPU_TX49XX:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_R5000
+ case CPU_R5000:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_R5432
+ case CPU_R5432:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_R5500
+ case CPU_R5500:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_R6000
+ case CPU_R6000:
+ case CPU_R6000A:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_NEVADA
+ case CPU_NEVADA:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_R8000
+ case CPU_R8000:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_R10000
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_R14000:
+#endif
+#ifdef CONFIG_SYS_HAS_CPU_RM7000
+ case CPU_RM7000:
+ case CPU_SR71000:
+#endif
+#ifdef CONFIG_SYS_HAS_CPU_RM9000
+ case CPU_RM9000:
+#endif
+#ifdef CONFIG_SYS_HAS_CPU_SB1
+ case CPU_SB1:
+ case CPU_SB1A:
+#endif
+#ifdef CONFIG_SYS_HAS_CPU_CAVIUM_OCTEON
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_BMIPS4380
+ case CPU_BMIPS4380:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_BMIPS5000
+ case CPU_BMIPS5000:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_XLP
+ case CPU_XLP:
+#endif
+
+#ifdef CONFIG_SYS_HAS_CPU_XLR
+ case CPU_XLR:
+#endif
+ break;
+ default:
+ unreachable();
+ }
+
+ return cpu_type;
+}
+
+static inline int __pure current_cpu_type(void)
+{
+ const int cpu_type = current_cpu_data.cputype;
+
+ return __get_cpu_type(cpu_type);
+}
+
+static inline int __pure boot_cpu_type(void)
+{
+ const int cpu_type = cpu_data[0].cputype;
+
+ return __get_cpu_type(cpu_type);
+}
+
+#endif /* __ASM_CPU_TYPE_H */
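
A short usage sketch for the new header: because __get_cpu_type() is __pure
and only enumerates the CPU types the platform's CONFIG_SYS_HAS_CPU_* options
allow, a switch on current_cpu_type() can be folded at compile time on
single-CPU-type kernels. The quirk below is purely hypothetical; the pattern
mirrors the cpu-probe.c conversion elsewhere in this series.

#include <asm/cpu.h>
#include <asm/cpu-type.h>

static void apply_cpu_quirks(void)
{
        switch (current_cpu_type()) {
        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
                /* hypothetical R4000-only workaround */
                break;
        default:
                break;
        }
}
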
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 71b9f1998be7..d2035e16502a 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -3,15 +3,14 @@
* various MIPS cpu types.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
- * Copyright (C) 2004 Maciej W. Rozycki
+ * Copyright (C) 2004, 2013 Maciej W. Rozycki
*/
#ifndef _ASM_CPU_H
#define _ASM_CPU_H
-/* Assigned Company values for bits 23:16 of the PRId Register
- (CP0 register 15, select 0). As of the MIPS32 and MIPS64 specs from
- MTI, the PRId register is defined in this (backwards compatible)
- way:
+/*
+ As of the MIPS32 and MIPS64 specs from MTI, the PRId register (CP0
+ register 15, select 0) is defined in this (backwards compatible) way:
+----------------+----------------+----------------+----------------+
| Company Options| Company ID | Processor ID | Revision |
@@ -23,6 +22,14 @@
spec.
*/
+#define PRID_OPT_MASK 0xff000000
+
+/*
+ * Assigned Company values for bits 23:16 of the PRId register.
+ */
+
+#define PRID_COMP_MASK 0xff0000
+
#define PRID_COMP_LEGACY 0x000000
#define PRID_COMP_MIPS 0x010000
#define PRID_COMP_BROADCOM 0x020000
@@ -38,10 +45,17 @@
#define PRID_COMP_INGENIC 0xd00000
/*
- * Assigned values for the product ID register. In order to detect a
- * certain CPU type exactly eventually additional registers may need to
- * be examined. These are valid when 23:16 == PRID_COMP_LEGACY
+ * Assigned Processor ID (implementation) values for bits 15:8 of the PRId
+ * register. In order to detect a certain CPU type exactly, additional
+ * registers may eventually need to be examined.
*/
+
+#define PRID_IMP_MASK 0xff00
+
+/*
+ * These are valid when 23:16 == PRID_COMP_LEGACY
+ */
+
#define PRID_IMP_R2000 0x0100
#define PRID_IMP_AU1_REV1 0x0100
#define PRID_IMP_AU1_REV2 0x0200
@@ -182,11 +196,15 @@
#define PRID_IMP_NETLOGIC_XLP2XX 0x1200
/*
- * Definitions for 7:0 on legacy processors
+ * Particular Revision values for bits 7:0 of the PRId register.
*/
#define PRID_REV_MASK 0x00ff
+/*
+ * Definitions for 7:0 on legacy processors
+ */
+
#define PRID_REV_TX4927 0x0022
#define PRID_REV_TX4937 0x0030
#define PRID_REV_R4400 0x0040
@@ -227,6 +245,8 @@
* 31 16 15 8 7 0
*/
+#define FPIR_IMP_MASK 0xff00
+
#define FPIR_IMP_NONE 0x0000
enum cpu_type_enum {
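
The new PRID_*_MASK (and FPIR_IMP_MASK) constants replace the open-coded
0xff00/0xff masks used throughout the tree; the alchemy, bcm63xx and
cpu-probe.c hunks in this series show the conversion. A small illustrative
decode, assuming it runs in a context where pr_info() is usable:

#include <linux/printk.h>
#include <asm/cpu.h>
#include <asm/mipsregs.h>

static void report_prid(void)
{
        unsigned int prid = read_c0_prid();

        pr_info("PRId: company %#x imp %#x rev %#x\n",
                prid & PRID_COMP_MASK,  /* bits 23:16 */
                prid & PRID_IMP_MASK,   /* bits 15:8  */
                prid & PRID_REV_MASK);  /* bits  7:0  */
}
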
diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h
index 3e11a468cdf8..54f9e84db8ac 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000.h
@@ -43,6 +43,8 @@
#include <linux/io.h>
#include <linux/irq.h>
+#include <asm/cpu.h>
+
/* cpu pipeline flush */
void static inline au_sync(void)
{
@@ -140,7 +142,7 @@ static inline int au1xxx_cpu_needs_config_od(void)
static inline int alchemy_get_cputype(void)
{
- switch (read_c0_prid() & 0xffff0000) {
+ switch (read_c0_prid() & (PRID_OPT_MASK | PRID_COMP_MASK)) {
case 0x00030000:
return ALCHEMY_CPU_AU1000;
break;
diff --git a/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
index f4caacd25552..1bcb6421205e 100644
--- a/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
@@ -8,6 +8,8 @@
#ifndef __ASM_MACH_IP22_CPU_FEATURE_OVERRIDES_H
#define __ASM_MACH_IP22_CPU_FEATURE_OVERRIDES_H
+#include <asm/cpu.h>
+
/*
* IP22 with a variety of processors so we can't use defaults for everything.
*/
diff --git a/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
index 1d2b6ff60d33..d6111aa2e886 100644
--- a/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
@@ -8,6 +8,8 @@
#ifndef __ASM_MACH_IP27_CPU_FEATURE_OVERRIDES_H
#define __ASM_MACH_IP27_CPU_FEATURE_OVERRIDES_H
+#include <asm/cpu.h>
+
/*
* IP27 only comes with R10000 family processors all using the same config
*/
diff --git a/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
index 65e9c856390d..4cec06d133db 100644
--- a/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
@@ -9,6 +9,8 @@
#ifndef __ASM_MACH_IP28_CPU_FEATURE_OVERRIDES_H
#define __ASM_MACH_IP28_CPU_FEATURE_OVERRIDES_H
+#include <asm/cpu.h>
+
/*
* IP28 only comes with R10000 family processors all using the same config
*/
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index fed1c3e9b486..e0331414c7d6 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -603,6 +603,13 @@
#define MIPS_CONF4_MMUEXTDEF (_ULCAST_(3) << 14)
#define MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT (_ULCAST_(1) << 14)
+#define MIPS_CONF5_NF (_ULCAST_(1) << 0)
+#define MIPS_CONF5_UFR (_ULCAST_(1) << 2)
+#define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27)
+#define MIPS_CONF5_EVA (_ULCAST_(1) << 28)
+#define MIPS_CONF5_CV (_ULCAST_(1) << 29)
+#define MIPS_CONF5_K (_ULCAST_(1) << 30)
+
#define MIPS_CONF6_SYND (_ULCAST_(1) << 13)
#define MIPS_CONF7_WII (_ULCAST_(1) << 31)
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index f194c08bd057..12d6842962be 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -83,6 +83,18 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
+#define HAVE_ARCH_PCI_RESOURCE_TO_USER
+
+static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
+ const struct resource *rsrc, resource_size_t *start,
+ resource_size_t *end)
+{
+ phys_t size = resource_size(rsrc);
+
+ *start = fixup_bigphys_addr(rsrc->start, size);
+ *end = rsrc->start + size;
+}
+
/*
* Dynamic DMA mapping stuff.
* MIPS has everything mapped statically.
diff --git a/arch/mips/include/asm/timex.h b/arch/mips/include/asm/timex.h
index 6529704aa73a..c5424757da65 100644
--- a/arch/mips/include/asm/timex.h
+++ b/arch/mips/include/asm/timex.h
@@ -10,7 +10,9 @@
#ifdef __KERNEL__
+#include <asm/cpu-features.h>
#include <asm/mipsregs.h>
+#include <asm/cpu-type.h>
/*
* This is the clock rate of the i8253 PIT. A MIPS system may not have
@@ -33,9 +35,38 @@
typedef unsigned int cycles_t;
+/*
+ * On R4000/R4400 before version 5.0 an erratum exists such that if the
+ * cycle counter is read in the exact moment that it is matching the
+ * compare register, no interrupt will be generated.
+ *
+ * There is a suggested workaround and also the erratum can't strike if
+ * the compare interrupt isn't being used as the clock source device.
+ * However, for now the implementation of this function doesn't get these
+ * fine details right.
+ */
static inline cycles_t get_cycles(void)
{
- return 0;
+ switch (boot_cpu_type()) {
+ case CPU_R4400PC:
+ case CPU_R4400SC:
+ case CPU_R4400MC:
+ if ((read_c0_prid() & 0xff) >= 0x0050)
+ return read_c0_count();
+ break;
+
+ case CPU_R4000PC:
+ case CPU_R4000SC:
+ case CPU_R4000MC:
+ break;
+
+ default:
+ if (cpu_has_counter)
+ return read_c0_count();
+ break;
+ }
+
+ return 0; /* no usable counter */
}
#endif /* __KERNEL__ */
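
With this change get_cycles() returns the CP0 count register whenever that is
safe (R4400 revision 5.0 or later, or any other CPU with a usable counter) and
0 otherwise, so callers must tolerate a zero delta. A minimal sketch, with the
measured function purely illustrative:

#include <asm/timex.h>

static cycles_t cycles_for(void (*fn)(void))
{
        cycles_t before = get_cycles();

        fn();
        return get_cycles() - before;   /* stays 0 on CPUs without a usable counter */
}
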
diff --git a/arch/mips/include/asm/vga.h b/arch/mips/include/asm/vga.h
index f4cff7e4fa8a..f82c83749a08 100644
--- a/arch/mips/include/asm/vga.h
+++ b/arch/mips/include/asm/vga.h
@@ -6,6 +6,7 @@
#ifndef _ASM_VGA_H
#define _ASM_VGA_H
+#include <asm/addrspace.h>
#include <asm/byteorder.h>
/*
@@ -13,7 +14,7 @@
* access the videoram directly without any black magic.
*/
-#define VGA_MAP_MEM(x, s) (0xb0000000L + (unsigned long)(x))
+#define VGA_MAP_MEM(x, s) CKSEG1ADDR(0x10000000L + (unsigned long)(x))
#define vga_readb(x) (*(x))
#define vga_writeb(x, y) (*(y) = (x))
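The VGA_MAP_MEM() change above swaps a hard-coded KSEG1 virtual address for CKSEG1ADDR() applied to the physical bus offset. On 32-bit MIPS the uncached, unmapped CKSEG1 segment starts at 0xa0000000, so CKSEG1ADDR(0x10000000L + x) still resolves to the 0xb0000000 + x value the old literal produced; the rewrite only makes the 0x10000000 physical base explicit and routes it through <asm/addrspace.h>. A minimal standalone sketch of that equivalence, using a simplified 32-bit stand-in for CKSEG1ADDR() rather than the real header definition:

/*
 * Illustration only: a simplified 32-bit stand-in for the kernel's
 * CKSEG1ADDR() from <asm/addrspace.h>, used to show that the new
 * VGA_MAP_MEM() expands to the same address the old 0xb0000000L
 * literal produced, with the physical bus offset made explicit.
 */
#include <assert.h>
#include <stdio.h>

#define CKSEG1          0xa0000000UL
#define CKSEG1ADDR(a)   (((unsigned long)(a) & 0x1fffffffUL) | CKSEG1)

#define OLD_VGA_MAP_MEM(x) (0xb0000000UL + (unsigned long)(x))
#define NEW_VGA_MAP_MEM(x) CKSEG1ADDR(0x10000000UL + (unsigned long)(x))

int main(void)
{
	unsigned long x = 0xa0000;	/* a typical VGA memory offset */

	assert(OLD_VGA_MAP_MEM(x) == NEW_VGA_MAP_MEM(x));
	printf("0x%lx\n", NEW_VGA_MAP_MEM(x));	/* prints 0xb00a0000 */
	return 0;
}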
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 37663c7862a5..5465dc183e5a 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -20,6 +20,7 @@
#include <asm/bugs.h>
#include <asm/cpu.h>
+#include <asm/cpu-type.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/watch.h>
@@ -55,7 +56,7 @@ static inline void check_errata(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
- switch (c->cputype) {
+ switch (current_cpu_type()) {
case CPU_34K:
/*
* Erratum "RPS May Cause Incorrect Instruction Execution"
@@ -122,7 +123,7 @@ static inline unsigned long cpu_get_fpu_id(void)
*/
static inline int __cpu_has_fpu(void)
{
- return ((cpu_get_fpu_id() & 0xff00) != FPIR_IMP_NONE);
+ return ((cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE);
}
static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
@@ -290,6 +291,17 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
return config4 & MIPS_CONF_M;
}
+static inline unsigned int decode_config5(struct cpuinfo_mips *c)
+{
+ unsigned int config5;
+
+ config5 = read_c0_config5();
+ config5 &= ~MIPS_CONF5_UFR;
+ write_c0_config5(config5);
+
+ return config5 & MIPS_CONF_M;
+}
+
static void decode_configs(struct cpuinfo_mips *c)
{
int ok;
@@ -310,6 +322,8 @@ static void decode_configs(struct cpuinfo_mips *c)
ok = decode_config3(c);
if (ok)
ok = decode_config4(c);
+ if (ok)
+ ok = decode_config5(c);
mips_probe_watch_registers(c);
@@ -322,7 +336,7 @@ static void decode_configs(struct cpuinfo_mips *c)
static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
{
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_R2000:
c->cputype = CPU_R2000;
__cpu_name[cpu] = "R2000";
@@ -333,7 +347,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->tlbsize = 64;
break;
case PRID_IMP_R3000:
- if ((c->processor_id & 0xff) == PRID_REV_R3000A) {
+ if ((c->processor_id & PRID_REV_MASK) == PRID_REV_R3000A) {
if (cpu_has_confreg()) {
c->cputype = CPU_R3081E;
__cpu_name[cpu] = "R3081";
@@ -353,7 +367,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
break;
case PRID_IMP_R4000:
if (read_c0_config() & CONF_SC) {
- if ((c->processor_id & 0xff) >= PRID_REV_R4400) {
+ if ((c->processor_id & PRID_REV_MASK) >=
+ PRID_REV_R4400) {
c->cputype = CPU_R4400PC;
__cpu_name[cpu] = "R4400PC";
} else {
@@ -361,7 +376,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R4000PC";
}
} else {
- if ((c->processor_id & 0xff) >= PRID_REV_R4400) {
+ if ((c->processor_id & PRID_REV_MASK) >=
+ PRID_REV_R4400) {
c->cputype = CPU_R4400SC;
__cpu_name[cpu] = "R4400SC";
} else {
@@ -454,7 +470,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "TX3927";
c->tlbsize = 64;
} else {
- switch (c->processor_id & 0xff) {
+ switch (c->processor_id & PRID_REV_MASK) {
case PRID_REV_TX3912:
c->cputype = CPU_TX3912;
__cpu_name[cpu] = "TX3912";
@@ -640,7 +656,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_4KC:
c->cputype = CPU_4KC;
__cpu_name[cpu] = "MIPS 4Kc";
@@ -711,7 +727,7 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_AU1_REV1:
case PRID_IMP_AU1_REV2:
c->cputype = CPU_ALCHEMY;
@@ -730,7 +746,7 @@ static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu)
break;
case 4:
__cpu_name[cpu] = "Au1200";
- if ((c->processor_id & 0xff) == 2)
+ if ((c->processor_id & PRID_REV_MASK) == 2)
__cpu_name[cpu] = "Au1250";
break;
case 5:
@@ -748,12 +764,12 @@ static inline void cpu_probe_sibyte(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_SB1:
c->cputype = CPU_SB1;
__cpu_name[cpu] = "SiByte SB1";
/* FPU in pass1 is known to have issues. */
- if ((c->processor_id & 0xff) < 0x02)
+ if ((c->processor_id & PRID_REV_MASK) < 0x02)
c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR);
break;
case PRID_IMP_SB1A:
@@ -766,7 +782,7 @@ static inline void cpu_probe_sibyte(struct cpuinfo_mips *c, unsigned int cpu)
static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_SR71000:
c->cputype = CPU_SR71000;
__cpu_name[cpu] = "Sandcraft SR71000";
@@ -779,7 +795,7 @@ static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c, unsigned int cpu)
static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_PR4450:
c->cputype = CPU_PR4450;
__cpu_name[cpu] = "Philips PR4450";
@@ -791,7 +807,7 @@ static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu)
static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_BMIPS32_REV4:
case PRID_IMP_BMIPS32_REV8:
c->cputype = CPU_BMIPS32;
@@ -806,7 +822,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
set_elf_platform(cpu, "bmips3300");
break;
case PRID_IMP_BMIPS43XX: {
- int rev = c->processor_id & 0xff;
+ int rev = c->processor_id & PRID_REV_MASK;
if (rev >= PRID_REV_BMIPS4380_LO &&
rev <= PRID_REV_BMIPS4380_HI) {
@@ -832,7 +848,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_CAVIUM_CN38XX:
case PRID_IMP_CAVIUM_CN31XX:
case PRID_IMP_CAVIUM_CN30XX:
@@ -875,7 +891,7 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
decode_configs(c);
/* JZRISC does not implement the CP0 counter. */
c->options &= ~MIPS_CPU_COUNTER;
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_JZRISC:
c->cputype = CPU_JZRISC;
__cpu_name[cpu] = "Ingenic JZRISC";
@@ -890,7 +906,7 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
{
decode_configs(c);
- if ((c->processor_id & 0xff00) == PRID_IMP_NETLOGIC_AU13XX) {
+ if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_NETLOGIC_AU13XX) {
c->cputype = CPU_ALCHEMY;
__cpu_name[cpu] = "Au1300";
/* following stuff is not for Alchemy */
@@ -905,7 +921,7 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
MIPS_CPU_EJTAG |
MIPS_CPU_LLSC);
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_NETLOGIC_XLP2XX:
c->cputype = CPU_XLP;
__cpu_name[cpu] = "Broadcom XLPII";
@@ -984,7 +1000,7 @@ void cpu_probe(void)
c->cputype = CPU_UNKNOWN;
c->processor_id = read_c0_prid();
- switch (c->processor_id & 0xff0000) {
+ switch (c->processor_id & PRID_COMP_MASK) {
case PRID_COMP_LEGACY:
cpu_probe_legacy(c, cpu);
break;
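The repeated conversions in cpu-probe.c from bare masks to PRID_COMP_MASK, PRID_IMP_MASK and PRID_REV_MASK follow the CP0 PRId register layout: company ID in bits 23:16, implementation in bits 15:8, revision in bits 7:0, which is exactly what the 0xff0000, 0xff00 and 0xff literals had been encoding. A minimal sketch of that field split, restating the mask values implied by the replaced literals (the sample PRId value below is hypothetical):

/*
 * Minimal sketch of the PRId field split that the PRID_*_MASK
 * conversions above rely on.  The mask values mirror the literals
 * being replaced (0xff0000, 0xff00, 0xff); they are restated here
 * for illustration only, not taken from a header.
 */
#include <stdio.h>

#define PRID_COMP_MASK	0xff0000	/* company/vendor ID, bits 23:16 */
#define PRID_IMP_MASK	0x00ff00	/* implementation,    bits 15:8  */
#define PRID_REV_MASK	0x0000ff	/* revision,          bits 7:0   */

int main(void)
{
	unsigned int prid = 0x019655;	/* hypothetical read_c0_prid() value */

	printf("company 0x%02x imp 0x%02x rev 0x%02x\n",
	       (prid & PRID_COMP_MASK) >> 16,
	       (prid & PRID_IMP_MASK) >> 8,
	       prid & PRID_REV_MASK);
	return 0;
}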
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 42f8875d2444..f7991d95bff9 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -18,6 +18,7 @@
#include <linux/sched.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
+#include <asm/cpu-type.h>
#include <asm/idle.h>
#include <asm/mipsregs.h>
@@ -136,7 +137,7 @@ void __init check_wait(void)
return;
}
- switch (c->cputype) {
+ switch (current_cpu_type()) {
case CPU_R3081:
case CPU_R3081E:
cpu_wait = r3081_wait;
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 364d26ae4215..dcb8e5d3bb8a 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -24,6 +24,7 @@
#include <linux/export.h>
#include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
#include <asm/div64.h>
#include <asm/smtc_ipi.h>
#include <asm/time.h>
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index aec3408edd4b..524841f02803 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -39,6 +39,7 @@
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
+#include <asm/cpu-type.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
@@ -622,7 +623,7 @@ static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
regs->regs[rt] = read_c0_count();
return 0;
case 3: /* Count register resolution */
- switch (current_cpu_data.cputype) {
+ switch (current_cpu_type()) {
case CPU_20KC:
case CPU_25KF:
regs->regs[rt] = 1;
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 729e7702b1de..c8efdb5b6ee0 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -19,6 +19,7 @@
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
@@ -186,9 +187,10 @@ static void probe_octeon(void)
unsigned long dcache_size;
unsigned int config1;
struct cpuinfo_mips *c = &current_cpu_data;
+ int cputype = current_cpu_type();
config1 = read_c0_config1();
- switch (c->cputype) {
+ switch (cputype) {
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
c->icache.linesz = 2 << ((config1 >> 19) & 7);
@@ -199,7 +201,7 @@ static void probe_octeon(void)
c->icache.sets * c->icache.ways * c->icache.linesz;
c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
c->dcache.linesz = 128;
- if (c->cputype == CPU_CAVIUM_OCTEON_PLUS)
+ if (cputype == CPU_CAVIUM_OCTEON_PLUS)
c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
else
c->dcache.sets = 1; /* CN3XXX has one Dcache set */
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index f749f687ee87..627883bc6d5f 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -12,6 +12,7 @@
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
+#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
@@ -24,6 +25,7 @@
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -601,6 +603,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
/* Catch bad driver code */
BUG_ON(size == 0);
+ preempt_disable();
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size)
r4k_blast_scache();
@@ -621,6 +624,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
R4600_HIT_CACHEOP_WAR_IMPL;
blast_dcache_range(addr, addr + size);
}
+ preempt_enable();
bc_wback_inv(addr, size);
__sync();
@@ -631,6 +635,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
/* Catch bad driver code */
BUG_ON(size == 0);
+ preempt_disable();
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size)
r4k_blast_scache();
@@ -655,6 +660,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
R4600_HIT_CACHEOP_WAR_IMPL;
blast_inv_dcache_range(addr, addr + size);
}
+ preempt_enable();
bc_inv(addr, size);
__sync();
@@ -780,20 +786,30 @@ static inline void rm7k_erratum31(void)
static inline void alias_74k_erratum(struct cpuinfo_mips *c)
{
+ unsigned int imp = c->processor_id & PRID_IMP_MASK;
+ unsigned int rev = c->processor_id & PRID_REV_MASK;
+
/*
* Early versions of the 74K do not update the cache tags on a
* vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
* aliases. In this case it is better to treat the cache as always
* having aliases.
*/
- if ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(2, 4, 0))
- c->dcache.flags |= MIPS_CACHE_VTAG;
- if ((c->processor_id & 0xff) == PRID_REV_ENCODE_332(2, 4, 0))
- write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
- if (((c->processor_id & 0xff00) == PRID_IMP_1074K) &&
- ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(1, 1, 0))) {
- c->dcache.flags |= MIPS_CACHE_VTAG;
- write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
+ switch (imp) {
+ case PRID_IMP_74K:
+ if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
+ c->dcache.flags |= MIPS_CACHE_VTAG;
+ if (rev == PRID_REV_ENCODE_332(2, 4, 0))
+ write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
+ break;
+ case PRID_IMP_1074K:
+ if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
+ c->dcache.flags |= MIPS_CACHE_VTAG;
+ write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
+ }
+ break;
+ default:
+ BUG();
}
}
@@ -809,7 +825,7 @@ static void probe_pcache(void)
unsigned long config1;
unsigned int lsize;
- switch (c->cputype) {
+ switch (current_cpu_type()) {
case CPU_R4600: /* QED style two way caches? */
case CPU_R4700:
case CPU_R5000:
@@ -1025,7 +1041,8 @@ static void probe_pcache(void)
* presumably no vendor is shipping his hardware in the "bad"
* configuration.
*/
- if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
+ if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
+ (prid & PRID_REV_MASK) < PRID_REV_R4400 &&
!(config & CONF_SC) && c->icache.linesz != 16 &&
PAGE_SIZE <= 0x8000)
panic("Improper R4000SC processor configuration detected");
@@ -1045,7 +1062,7 @@ static void probe_pcache(void)
* normally they'd suffer from aliases but magic in the hardware deals
* with that for us so we don't need to take care ourselves.
*/
- switch (c->cputype) {
+ switch (current_cpu_type()) {
case CPU_20KC:
case CPU_25KF:
case CPU_SB1:
@@ -1065,7 +1082,7 @@ static void probe_pcache(void)
case CPU_34K:
case CPU_74K:
case CPU_1004K:
- if (c->cputype == CPU_74K)
+ if (current_cpu_type() == CPU_74K)
alias_74k_erratum(c);
if ((read_c0_config7() & (1 << 16))) {
/* effectively physically indexed dcache,
@@ -1078,7 +1095,7 @@ static void probe_pcache(void)
c->dcache.flags |= MIPS_CACHE_ALIASES;
}
- switch (c->cputype) {
+ switch (current_cpu_type()) {
case CPU_20KC:
/*
* Some older 20Kc chips don't have the 'VI' bit in
@@ -1207,7 +1224,7 @@ static void setup_scache(void)
* processors don't have a S-cache that would be relevant to the
* Linux memory management.
*/
- switch (c->cputype) {
+ switch (current_cpu_type()) {
case CPU_R4000SC:
case CPU_R4000MC:
case CPU_R4400SC:
@@ -1384,9 +1401,8 @@ static void r4k_cache_error_setup(void)
{
extern char __weak except_vec2_generic;
extern char __weak except_vec2_sb1;
- struct cpuinfo_mips *c = &current_cpu_data;
- switch (c->cputype) {
+ switch (current_cpu_type()) {
case CPU_SB1:
case CPU_SB1A:
set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 664e523653d0..5f8b95512580 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -18,6 +18,7 @@
#include <linux/highmem.h>
#include <asm/cache.h>
+#include <asm/cpu-type.h>
#include <asm/io.h>
#include <dma-coherence.h>
@@ -307,12 +308,10 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
{
int i;
- /* Make sure that gcc doesn't leave the empty loop body. */
- for (i = 0; i < nelems; i++, sg++) {
- if (cpu_needs_post_dma_flush(dev))
+ if (cpu_needs_post_dma_flush(dev))
+ for (i = 0; i < nelems; i++, sg++)
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
- }
}
static void mips_dma_sync_sg_for_device(struct device *dev,
@@ -320,12 +319,10 @@ static void mips_dma_sync_sg_for_device(struct device *dev,
{
int i;
- /* Make sure that gcc doesn't leave the empty loop body. */
- for (i = 0; i < nelems; i++, sg++) {
- if (!plat_device_is_coherent(dev))
+ if (!plat_device_is_coherent(dev))
+ for (i = 0; i < nelems; i++, sg++)
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
- }
}
int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 218c2109a55d..cbd81d17793a 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -18,6 +18,7 @@
#include <asm/bugs.h>
#include <asm/cacheops.h>
+#include <asm/cpu-type.h>
#include <asm/inst.h>
#include <asm/io.h>
#include <asm/page.h>
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 5d01392e3518..08d05aee8788 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -6,6 +6,7 @@
#include <linux/sched.h>
#include <linux/mm.h>
+#include <asm/cpu-type.h>
#include <asm/mipsregs.h>
#include <asm/bcache.h>
#include <asm/cacheops.h>
@@ -71,7 +72,7 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
unsigned int tmp;
/* Check the bypass bit (L2B) */
- switch (c->cputype) {
+ switch (current_cpu_type()) {
case CPU_34K:
case CPU_74K:
case CPU_1004K:
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 00b26a67a06d..bb3a5f643e97 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <asm/cpu.h>
+#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 821b45175dc1..9bb3a9363b06 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -30,6 +30,7 @@
#include <linux/cache.h>
#include <asm/cacheflush.h>
+#include <asm/cpu-type.h>
#include <asm/pgtable.h>
#include <asm/war.h>
#include <asm/uasm.h>
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 53aad4a35375..a18af5fce67e 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -27,6 +27,7 @@
#include <linux/timex.h>
#include <linux/mc146818rtc.h>
+#include <asm/cpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/hardirq.h>
@@ -76,7 +77,7 @@ static void __init estimate_frequencies(void)
#endif
#if defined (CONFIG_KVM_GUEST) && defined (CONFIG_KVM_HOST_FREQ)
- unsigned int prid = read_c0_prid() & 0xffff00;
+ unsigned int prid = read_c0_prid() & (PRID_COMP_MASK | PRID_IMP_MASK);
/*
* XXXKYMA: hardwire the CPU frequency to Host Freq/4
@@ -169,7 +170,7 @@ unsigned int get_c0_compare_int(void)
void __init plat_time_init(void)
{
- unsigned int prid = read_c0_prid() & 0xffff00;
+ unsigned int prid = read_c0_prid() & (PRID_COMP_MASK | PRID_IMP_MASK);
unsigned int freq;
estimate_frequencies();
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index a43ea3cc0a3b..552d26c34386 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -7,6 +7,7 @@
*/
#include <linux/init.h>
+#include <asm/cpu.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/irq.h>
@@ -34,7 +35,7 @@ static void __iomem *status_reg = (void __iomem *)0xbf000410;
*/
static unsigned int __init estimate_cpu_frequency(void)
{
- unsigned int prid = read_c0_prid() & 0xffff00;
+ unsigned int prid = read_c0_prid() & (PRID_COMP_MASK | PRID_IMP_MASK);
unsigned int tick = 0;
unsigned int freq;
unsigned int orig;
diff --git a/arch/mips/netlogic/xlr/fmn-config.c b/arch/mips/netlogic/xlr/fmn-config.c
index ed3bf0e3f309..c7622c6e5f67 100644
--- a/arch/mips/netlogic/xlr/fmn-config.c
+++ b/arch/mips/netlogic/xlr/fmn-config.c
@@ -36,6 +36,7 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <asm/cpu.h>
#include <asm/mipsregs.h>
#include <asm/netlogic/xlr/fmn.h>
#include <asm/netlogic/xlr/xlr.h>
@@ -187,7 +188,7 @@ void xlr_board_info_setup(void)
int processor_id, num_core;
num_core = hweight32(nlm_current_node()->coremask);
- processor_id = read_c0_prid() & 0xff00;
+ processor_id = read_c0_prid() & PRID_IMP_MASK;
setup_cpu_fmninfo(cpu, num_core);
switch (processor_id) {
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index 5e5424753b56..4d1736fc1955 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -12,6 +12,7 @@
#include <linux/oprofile.h>
#include <linux/smp.h>
#include <asm/cpu-info.h>
+#include <asm/cpu-type.h>
#include "op_impl.h"
diff --git a/arch/mips/pci/pci-bcm1480.c b/arch/mips/pci/pci-bcm1480.c
index 44dd5aa2e36f..5ec2a7bae02c 100644
--- a/arch/mips/pci/pci-bcm1480.c
+++ b/arch/mips/pci/pci-bcm1480.c
@@ -39,6 +39,7 @@
#include <linux/mm.h>
#include <linux/console.h>
#include <linux/tty.h>
+#include <linux/vt.h>
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_scd.h>
diff --git a/arch/mips/sibyte/bcm1480/setup.c b/arch/mips/sibyte/bcm1480/setup.c
index 05ed92c92b69..8e2e04f77870 100644
--- a/arch/mips/sibyte/bcm1480/setup.c
+++ b/arch/mips/sibyte/bcm1480/setup.c
@@ -22,6 +22,7 @@
#include <linux/string.h>
#include <asm/bootinfo.h>
+#include <asm/cpu.h>
#include <asm/mipsregs.h>
#include <asm/io.h>
#include <asm/sibyte/sb1250.h>
@@ -119,7 +120,7 @@ void __init bcm1480_setup(void)
uint64_t sys_rev;
int plldiv;
- sb1_pass = read_c0_prid() & 0xff;
+ sb1_pass = read_c0_prid() & PRID_REV_MASK;
sys_rev = __raw_readq(IOADDR(A_SCD_SYSTEM_REVISION));
soc_type = SYS_SOC_TYPE(sys_rev);
part_type = G_SYS_PART(sys_rev);
diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c
index a14bd4cb0bc0..3c02b2a77ae9 100644
--- a/arch/mips/sibyte/sb1250/setup.c
+++ b/arch/mips/sibyte/sb1250/setup.c
@@ -22,6 +22,7 @@
#include <linux/string.h>
#include <asm/bootinfo.h>
+#include <asm/cpu.h>
#include <asm/mipsregs.h>
#include <asm/io.h>
#include <asm/sibyte/sb1250.h>
@@ -182,7 +183,7 @@ void __init sb1250_setup(void)
int plldiv;
int bad_config = 0;
- sb1_pass = read_c0_prid() & 0xff;
+ sb1_pass = read_c0_prid() & PRID_REV_MASK;
sys_rev = __raw_readq(IOADDR(A_SCD_SYSTEM_REVISION));
soc_type = SYS_SOC_TYPE(sys_rev);
soc_pass = G_SYS_REVISION(sys_rev);
diff --git a/arch/mips/sni/setup.c b/arch/mips/sni/setup.c
index 5b09b3544edd..efad85c8c823 100644
--- a/arch/mips/sni/setup.c
+++ b/arch/mips/sni/setup.c
@@ -25,6 +25,7 @@
#endif
#include <asm/bootinfo.h>
+#include <asm/cpu.h>
#include <asm/io.h>
#include <asm/reboot.h>
#include <asm/sni.h>
@@ -173,7 +174,7 @@ void __init plat_mem_setup(void)
system_type = "RM300-Cxx";
break;
case SNI_BRD_PCI_DESKTOP:
- switch (read_c0_prid() & 0xff00) {
+ switch (read_c0_prid() & PRID_IMP_MASK) {
case PRID_IMP_R4600:
case PRID_IMP_R4700:
system_type = "RM200-C20";
diff --git a/arch/openrisc/include/asm/prom.h b/arch/openrisc/include/asm/prom.h
index eb59bfe23e85..93c9980e1b6b 100644
--- a/arch/openrisc/include/asm/prom.h
+++ b/arch/openrisc/include/asm/prom.h
@@ -14,53 +14,9 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
-
-#include <linux/of.h> /* linux/of.h gets to determine #include ordering */
-
#ifndef _ASM_OPENRISC_PROM_H
#define _ASM_OPENRISC_PROM_H
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-#include <linux/types.h>
-#include <asm/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/atomic.h>
-#include <linux/of_irq.h>
-#include <linux/of_fdt.h>
-#include <linux/of_address.h>
-#include <linux/proc_fs.h>
-#include <linux/platform_device.h>
#define HAVE_ARCH_DEVTREE_FIXUPS
-/* Other Prototypes */
-extern int early_uartlite_console(void);
-
-/* Parse the ibm,dma-window property of an OF node into the busno, phys and
- * size parameters.
- */
-void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
- unsigned long *busno, unsigned long *phys, unsigned long *size);
-
-extern void kdump_move_device_tree(void);
-
-/* Get the MAC address */
-extern const void *of_get_mac_address(struct device_node *np);
-
-/**
- * of_irq_map_pci - Resolve the interrupt for a PCI device
- * @pdev: the device whose interrupt is to be resolved
- * @out_irq: structure of_irq filled by this function
- *
- * This function resolves the PCI interrupt for a given PCI device. If a
- * device-node exists for a given pci_dev, it will use normal OF tree
- * walking. If not, it will implement standard swizzling and walk up the
- * PCI tree until an device-node is found, at which point it will finish
- * resolving using the OF tree walking.
- */
-struct pci_dev;
-extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
-
-#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
#endif /* _ASM_OPENRISC_PROM_H */
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 6a15c968d214..15ca2255f438 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -74,7 +74,7 @@ src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c
src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c
-src-plat-y := of.c
+src-plat-y := of.c epapr.c
src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
treeboot-walnut.c cuboot-acadia.c \
cuboot-kilauea.c simpleboot.c \
@@ -97,7 +97,7 @@ src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \
prpmc2800.c
src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c
src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c
-src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c
+src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
src-wlib := $(sort $(src-wlib-y))
src-plat := $(sort $(src-plat-y))
diff --git a/arch/powerpc/boot/epapr-wrapper.c b/arch/powerpc/boot/epapr-wrapper.c
new file mode 100644
index 000000000000..c10191006673
--- /dev/null
+++ b/arch/powerpc/boot/epapr-wrapper.c
@@ -0,0 +1,9 @@
+extern void epapr_platform_init(unsigned long r3, unsigned long r4,
+ unsigned long r5, unsigned long r6,
+ unsigned long r7);
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7)
+{
+ epapr_platform_init(r3, r4, r5, r6, r7);
+}
diff --git a/arch/powerpc/boot/epapr.c b/arch/powerpc/boot/epapr.c
index 06c1961bd124..02e91aa2194a 100644
--- a/arch/powerpc/boot/epapr.c
+++ b/arch/powerpc/boot/epapr.c
@@ -48,8 +48,8 @@ static void platform_fixups(void)
fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size);
}
-void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7)
+void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7)
{
epapr_magic = r6;
ima_size = r7;
diff --git a/arch/powerpc/boot/of.c b/arch/powerpc/boot/of.c
index 61d9899aa0d0..62e2f43ec1df 100644
--- a/arch/powerpc/boot/of.c
+++ b/arch/powerpc/boot/of.c
@@ -26,6 +26,9 @@
static unsigned long claim_base;
+void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7);
+
static void *of_try_claim(unsigned long size)
{
unsigned long addr = 0;
@@ -61,7 +64,7 @@ static void of_image_hdr(const void *hdr)
}
}
-void platform_init(unsigned long a1, unsigned long a2, void *promptr)
+static void of_platform_init(unsigned long a1, unsigned long a2, void *promptr)
{
platform_ops.image_hdr = of_image_hdr;
platform_ops.malloc = of_try_claim;
@@ -81,3 +84,14 @@ void platform_init(unsigned long a1, unsigned long a2, void *promptr)
loader_info.initrd_size = a2;
}
}
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7)
+{
+ /* Detect OF vs. ePAPR boot */
+ if (r5)
+ of_platform_init(r3, r4, (void *)r5);
+ else
+ epapr_platform_init(r3, r4, r5, r6, r7);
+}
+
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 6761c746048d..cd7af841ba05 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -148,18 +148,18 @@ make_space=y
case "$platform" in
pseries)
- platformo=$object/of.o
+ platformo="$object/of.o $object/epapr.o"
link_address='0x4000000'
;;
maple)
- platformo=$object/of.o
+ platformo="$object/of.o $object/epapr.o"
link_address='0x400000'
;;
pmac|chrp)
- platformo=$object/of.o
+ platformo="$object/of.o $object/epapr.o"
;;
coff)
- platformo="$object/crt0.o $object/of.o"
+ platformo="$object/crt0.o $object/of.o $object/epapr.o"
lds=$object/zImage.coff.lds
link_address='0x500000'
pie=
@@ -253,6 +253,7 @@ treeboot-iss4xx-mpic)
platformo="$object/treeboot-iss4xx.o"
;;
epapr)
+ platformo="$object/epapr.o $object/epapr-wrapper.o"
link_address='0x20000000'
pie=-pie
;;
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 0e40843a1c6e..41f13cec8a8f 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -69,9 +69,9 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
extern void irq_ctx_init(void);
extern void call_do_softirq(struct thread_info *tp);
-extern int call_handle_irq(int irq, void *p1,
- struct thread_info *tp, void *func);
+extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp);
extern void do_IRQ(struct pt_regs *regs);
+extern void __do_irq(struct pt_regs *regs);
int irq_choose_cpu(const struct cpumask *mask);
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index e378cccfca55..ce4de5aed7b5 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -149,8 +149,6 @@ typedef struct {
struct thread_struct {
unsigned long ksp; /* Kernel stack pointer */
- unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
-
#ifdef CONFIG_PPC64
unsigned long ksp_vsid;
#endif
@@ -162,6 +160,7 @@ struct thread_struct {
#endif
#ifdef CONFIG_PPC32
void *pgdir; /* root of page-table tree */
+ unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
#endif
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
@@ -321,7 +320,6 @@ struct thread_struct {
#else
#define INIT_THREAD { \
.ksp = INIT_SP, \
- .ksp_limit = INIT_SP_LIMIT, \
.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
.fs = KERNEL_DS, \
.fpr = {{0}}, \
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index d8958be5f31a..502c7a4e73f7 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -80,10 +80,11 @@ int main(void)
DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr));
#else
DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
+ DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
+ DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
#endif /* CONFIG_PPC64 */
DEFINE(KSP, offsetof(struct thread_struct, ksp));
- DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
#ifdef CONFIG_BOOKE
DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c69440cef7af..57d286a78f86 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -441,50 +441,6 @@ void migrate_irqs(void)
}
#endif
-static inline void handle_one_irq(unsigned int irq)
-{
- struct thread_info *curtp, *irqtp;
- unsigned long saved_sp_limit;
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
- if (!desc)
- return;
-
- /* Switch to the irq stack to handle this */
- curtp = current_thread_info();
- irqtp = hardirq_ctx[smp_processor_id()];
-
- if (curtp == irqtp) {
- /* We're already on the irq stack, just handle it */
- desc->handle_irq(irq, desc);
- return;
- }
-
- saved_sp_limit = current->thread.ksp_limit;
-
- irqtp->task = curtp->task;
- irqtp->flags = 0;
-
- /* Copy the softirq bits in preempt_count so that the
- * softirq checks work in the hardirq context. */
- irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
- (curtp->preempt_count & SOFTIRQ_MASK);
-
- current->thread.ksp_limit = (unsigned long)irqtp +
- _ALIGN_UP(sizeof(struct thread_info), 16);
-
- call_handle_irq(irq, desc, irqtp, desc->handle_irq);
- current->thread.ksp_limit = saved_sp_limit;
- irqtp->task = NULL;
-
- /* Set any flag that may have been set on the
- * alternate stack
- */
- if (irqtp->flags)
- set_bits(irqtp->flags, &curtp->flags);
-}
-
static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -501,9 +457,9 @@ static inline void check_stack_overflow(void)
#endif
}
-void do_IRQ(struct pt_regs *regs)
+void __do_irq(struct pt_regs *regs)
{
- struct pt_regs *old_regs = set_irq_regs(regs);
+ struct irq_desc *desc;
unsigned int irq;
irq_enter();
@@ -519,18 +475,56 @@ void do_IRQ(struct pt_regs *regs)
*/
irq = ppc_md.get_irq();
- /* We can hard enable interrupts now */
+ /* We can hard enable interrupts now to allow perf interrupts */
may_hard_irq_enable();
/* And finally process it */
- if (irq != NO_IRQ)
- handle_one_irq(irq);
- else
+ if (unlikely(irq == NO_IRQ))
__get_cpu_var(irq_stat).spurious_irqs++;
+ else {
+ desc = irq_to_desc(irq);
+ if (likely(desc))
+ desc->handle_irq(irq, desc);
+ }
trace_irq_exit(regs);
irq_exit();
+}
+
+void do_IRQ(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ struct thread_info *curtp, *irqtp;
+
+ /* Switch to the irq stack to handle this */
+ curtp = current_thread_info();
+ irqtp = hardirq_ctx[raw_smp_processor_id()];
+
+ /* Already there ? */
+ if (unlikely(curtp == irqtp)) {
+ __do_irq(regs);
+ set_irq_regs(old_regs);
+ return;
+ }
+
+ /* Prepare the thread_info in the irq stack */
+ irqtp->task = curtp->task;
+ irqtp->flags = 0;
+
+ /* Copy the preempt_count so that the [soft]irq checks work. */
+ irqtp->preempt_count = curtp->preempt_count;
+
+ /* Switch stack and call */
+ call_do_irq(regs, irqtp);
+
+ /* Restore stack limit */
+ irqtp->task = NULL;
+
+ /* Copy back updates to the thread_info */
+ if (irqtp->flags)
+ set_bits(irqtp->flags, &curtp->flags);
+
set_irq_regs(old_regs);
}
@@ -592,28 +586,22 @@ void irq_ctx_init(void)
memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
tp = softirq_ctx[i];
tp->cpu = i;
- tp->preempt_count = 0;
memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
tp = hardirq_ctx[i];
tp->cpu = i;
- tp->preempt_count = HARDIRQ_OFFSET;
}
}
static inline void do_softirq_onstack(void)
{
struct thread_info *curtp, *irqtp;
- unsigned long saved_sp_limit = current->thread.ksp_limit;
curtp = current_thread_info();
irqtp = softirq_ctx[smp_processor_id()];
irqtp->task = curtp->task;
irqtp->flags = 0;
- current->thread.ksp_limit = (unsigned long)irqtp +
- _ALIGN_UP(sizeof(struct thread_info), 16);
call_do_softirq(irqtp);
- current->thread.ksp_limit = saved_sp_limit;
irqtp->task = NULL;
/* Set any flag that may have been set on the
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 777d999f563b..2b0ad9845363 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -36,26 +36,41 @@
.text
+/*
+ * We store the saved ksp_limit in the unused part
+ * of the STACK_FRAME_OVERHEAD
+ */
_GLOBAL(call_do_softirq)
mflr r0
stw r0,4(r1)
+ lwz r10,THREAD+KSP_LIMIT(r2)
+ addi r11,r3,THREAD_INFO_GAP
stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
mr r1,r3
+ stw r10,8(r1)
+ stw r11,THREAD+KSP_LIMIT(r2)
bl __do_softirq
+ lwz r10,8(r1)
lwz r1,0(r1)
lwz r0,4(r1)
+ stw r10,THREAD+KSP_LIMIT(r2)
mtlr r0
blr
-_GLOBAL(call_handle_irq)
+_GLOBAL(call_do_irq)
mflr r0
stw r0,4(r1)
- mtctr r6
- stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
- mr r1,r5
- bctrl
+ lwz r10,THREAD+KSP_LIMIT(r2)
+ addi r11,r3,THREAD_INFO_GAP
+ stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+ mr r1,r4
+ stw r10,8(r1)
+ stw r11,THREAD+KSP_LIMIT(r2)
+ bl __do_irq
+ lwz r10,8(r1)
lwz r1,0(r1)
lwz r0,4(r1)
+ stw r10,THREAD+KSP_LIMIT(r2)
mtlr r0
blr
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 971d7e78aff2..e59caf874d05 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -40,14 +40,12 @@ _GLOBAL(call_do_softirq)
mtlr r0
blr
-_GLOBAL(call_handle_irq)
- ld r8,0(r6)
+_GLOBAL(call_do_irq)
mflr r0
std r0,16(r1)
- mtctr r8
- stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
- mr r1,r5
- bctrl
+ stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+ mr r1,r4
+ bl .__do_irq
ld r1,0(r1)
ld r0,16(r1)
mtlr r0
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 6f428da53e20..96d2fdf3aa9e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1000,9 +1000,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
kregs = (struct pt_regs *) sp;
sp -= STACK_FRAME_OVERHEAD;
p->thread.ksp = sp;
+#ifdef CONFIG_PPC32
p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
_ALIGN_UP(sizeof(struct thread_info), 16);
-
+#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
p->thread.ptrace_bps[0] = NULL;
#endif
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 12e656ffe60e..5fe2842e8bab 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -196,6 +196,8 @@ static int __initdata mem_reserve_cnt;
static cell_t __initdata regbuf[1024];
+static bool rtas_has_query_cpu_stopped;
+
/*
* Error results ... some OF calls will return "-1" on error, some
@@ -1574,6 +1576,11 @@ static void __init prom_instantiate_rtas(void)
prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
&val, sizeof(val));
+ /* Check if it supports "query-cpu-stopped-state" */
+ if (prom_getprop(rtas_node, "query-cpu-stopped-state",
+ &val, sizeof(val)) != PROM_ERROR)
+ rtas_has_query_cpu_stopped = true;
+
#if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
/* PowerVN takeover hack */
prom_rtas_data = base;
@@ -1815,6 +1822,18 @@ static void __init prom_hold_cpus(void)
= (void *) LOW_ADDR(__secondary_hold_acknowledge);
unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
+ /*
+ * On pseries, if RTAS supports "query-cpu-stopped-state",
+ * we skip this stage; the CPUs will be started by the
+ * kernel using RTAS.
+ */
+ if ((of_platform == PLATFORM_PSERIES ||
+ of_platform == PLATFORM_PSERIES_LPAR) &&
+ rtas_has_query_cpu_stopped) {
+ prom_printf("prom_hold_cpus: skipped\n");
+ return;
+ }
+
prom_debug("prom_hold_cpus: start...\n");
prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
@@ -3011,6 +3030,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
* On non-powermacs, put all CPUs in spin-loops.
*
* PowerMacs use a different mechanism to spin CPUs
+ *
+ * (This must be done after instantiating RTAS)
*/
if (of_platform != PLATFORM_POWERMAC &&
of_platform != PLATFORM_OPAL)
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index a7ee978fb860..b1faa1593c90 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1505,6 +1505,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
*/
if ((ra == 1) && !(regs->msr & MSR_PR) \
&& (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
+#ifdef CONFIG_PPC32
/*
* Check if we will touch kernel stack overflow
*/
@@ -1513,7 +1514,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
err = -EINVAL;
break;
}
-
+#endif /* CONFIG_PPC32 */
/*
* Check if we already set since that means we'll
* lose the previous value.
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 1c1771a40250..24f58cb0a543 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -233,18 +233,24 @@ static void __init smp_init_pseries(void)
alloc_bootmem_cpumask_var(&of_spin_mask);
- /* Mark threads which are still spinning in hold loops. */
- if (cpu_has_feature(CPU_FTR_SMT)) {
- for_each_present_cpu(i) {
- if (cpu_thread_in_core(i) == 0)
- cpumask_set_cpu(i, of_spin_mask);
- }
- } else {
- cpumask_copy(of_spin_mask, cpu_present_mask);
+ /*
+ * Mark threads which are still spinning in hold loops
+ *
+ * We know prom_init will not have started them if RTAS supports
+ * query-cpu-stopped-state.
+ */
+ if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) {
+ if (cpu_has_feature(CPU_FTR_SMT)) {
+ for_each_present_cpu(i) {
+ if (cpu_thread_in_core(i) == 0)
+ cpumask_set_cpu(i, of_spin_mask);
+ }
+ } else
+ cpumask_copy(of_spin_mask, cpu_present_mask);
+
+ cpumask_clear_cpu(boot_cpuid, of_spin_mask);
}
- cpumask_clear_cpu(boot_cpuid, of_spin_mask);
-
/* Non-lpar has additional take/give timebase */
if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
smp_ops->give_timebase = rtas_give_timebase;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index dcc6ac2d8026..7143793859fa 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -93,6 +93,7 @@ config S390
select ARCH_INLINE_WRITE_UNLOCK_IRQ
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
select ARCH_SAVE_PAGE_KEYS if HIBERNATION
+ select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS2
@@ -102,7 +103,6 @@ config S390
select GENERIC_TIME_VSYSCALL_OLD
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
- select HAVE_ARCH_MUTEX_CPU_RELAX
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
diff --git a/arch/s390/include/asm/mutex.h b/arch/s390/include/asm/mutex.h
index 688271f5f2e4..458c1f7fbc18 100644
--- a/arch/s390/include/asm/mutex.h
+++ b/arch/s390/include/asm/mutex.h
@@ -7,5 +7,3 @@
*/
#include <asm-generic/mutex-dec.h>
-
-#define arch_mutex_cpu_relax() barrier()
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 0eb37505cab1..ca7821f07260 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -198,6 +198,8 @@ static inline void cpu_relax(void)
barrier();
}
+#define arch_mutex_cpu_relax() barrier()
+
static inline void psw_set_key(unsigned int key)
{
asm volatile("spka 0(%0)" : : "d" (key));
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 701fe8c59e1f..83e5d216105e 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -44,6 +44,11 @@ extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
extern int arch_spin_trylock_retry(arch_spinlock_t *);
extern void arch_spin_relax(arch_spinlock_t *lock);
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.owner_cpu == 0;
+}
+
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
int old;
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 8a7cc663b3f8..d45a2c48f185 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -361,7 +361,7 @@ config CMDLINE_OVERRIDE
config VMALLOC_RESERVE
hex
- default 0x1000000
+ default 0x2000000
config HARDWALL
bool "Hardwall support to allow access to user dynamic network"
diff --git a/arch/tile/gxio/iorpc_mpipe.c b/arch/tile/gxio/iorpc_mpipe.c
index 4f8f3d619c4a..e19325c4c431 100644
--- a/arch/tile/gxio/iorpc_mpipe.c
+++ b/arch/tile/gxio/iorpc_mpipe.c
@@ -21,7 +21,7 @@ struct alloc_buffer_stacks_param {
unsigned int flags;
};
-int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t * context,
+int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context,
unsigned int count, unsigned int first,
unsigned int flags)
{
@@ -45,7 +45,7 @@ struct init_buffer_stack_aux_param {
unsigned int buffer_size_enum;
};
-int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t * context,
+int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t *context,
void *mem_va, size_t mem_size,
unsigned int mem_flags, unsigned int stack,
unsigned int buffer_size_enum)
@@ -80,7 +80,7 @@ struct alloc_notif_rings_param {
unsigned int flags;
};
-int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t * context,
+int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context,
unsigned int count, unsigned int first,
unsigned int flags)
{
@@ -102,7 +102,7 @@ struct init_notif_ring_aux_param {
unsigned int ring;
};
-int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t * context, void *mem_va,
+int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t *context, void *mem_va,
size_t mem_size, unsigned int mem_flags,
unsigned int ring)
{
@@ -133,7 +133,7 @@ struct request_notif_ring_interrupt_param {
unsigned int ring;
};
-int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t * context,
+int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t *context,
int inter_x, int inter_y,
int inter_ipi, int inter_event,
unsigned int ring)
@@ -158,7 +158,7 @@ struct enable_notif_ring_interrupt_param {
unsigned int ring;
};
-int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t * context,
+int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t *context,
unsigned int ring)
{
struct enable_notif_ring_interrupt_param temp;
@@ -179,7 +179,7 @@ struct alloc_notif_groups_param {
unsigned int flags;
};
-int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t * context,
+int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context,
unsigned int count, unsigned int first,
unsigned int flags)
{
@@ -201,7 +201,7 @@ struct init_notif_group_param {
gxio_mpipe_notif_group_bits_t bits;
};
-int gxio_mpipe_init_notif_group(gxio_mpipe_context_t * context,
+int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context,
unsigned int group,
gxio_mpipe_notif_group_bits_t bits)
{
@@ -223,7 +223,7 @@ struct alloc_buckets_param {
unsigned int flags;
};
-int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t * context, unsigned int count,
+int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context, unsigned int count,
unsigned int first, unsigned int flags)
{
struct alloc_buckets_param temp;
@@ -244,7 +244,7 @@ struct init_bucket_param {
MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info;
};
-int gxio_mpipe_init_bucket(gxio_mpipe_context_t * context, unsigned int bucket,
+int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket,
MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info)
{
struct init_bucket_param temp;
@@ -265,7 +265,7 @@ struct alloc_edma_rings_param {
unsigned int flags;
};
-int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t * context,
+int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
unsigned int count, unsigned int first,
unsigned int flags)
{
@@ -288,7 +288,7 @@ struct init_edma_ring_aux_param {
unsigned int channel;
};
-int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t * context, void *mem_va,
+int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t *context, void *mem_va,
size_t mem_size, unsigned int mem_flags,
unsigned int ring, unsigned int channel)
{
@@ -315,7 +315,7 @@ int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t * context, void *mem_va,
EXPORT_SYMBOL(gxio_mpipe_init_edma_ring_aux);
-int gxio_mpipe_commit_rules(gxio_mpipe_context_t * context, const void *blob,
+int gxio_mpipe_commit_rules(gxio_mpipe_context_t *context, const void *blob,
size_t blob_size)
{
const void *params = blob;
@@ -332,7 +332,7 @@ struct register_client_memory_param {
unsigned int flags;
};
-int gxio_mpipe_register_client_memory(gxio_mpipe_context_t * context,
+int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context,
unsigned int iotlb, HV_PTE pte,
unsigned int flags)
{
@@ -355,7 +355,7 @@ struct link_open_aux_param {
unsigned int flags;
};
-int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context,
+int gxio_mpipe_link_open_aux(gxio_mpipe_context_t *context,
_gxio_mpipe_link_name_t name, unsigned int flags)
{
struct link_open_aux_param temp;
@@ -374,7 +374,7 @@ struct link_close_aux_param {
int mac;
};
-int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac)
+int gxio_mpipe_link_close_aux(gxio_mpipe_context_t *context, int mac)
{
struct link_close_aux_param temp;
struct link_close_aux_param *params = &temp;
@@ -393,7 +393,7 @@ struct link_set_attr_aux_param {
int64_t val;
};
-int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac,
+int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t *context, int mac,
uint32_t attr, int64_t val)
{
struct link_set_attr_aux_param temp;
@@ -415,8 +415,8 @@ struct get_timestamp_aux_param {
uint64_t cycles;
};
-int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec,
- uint64_t * nsec, uint64_t * cycles)
+int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t *context, uint64_t *sec,
+ uint64_t *nsec, uint64_t *cycles)
{
int __result;
struct get_timestamp_aux_param temp;
@@ -440,7 +440,7 @@ struct set_timestamp_aux_param {
uint64_t cycles;
};
-int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec,
+int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t *context, uint64_t sec,
uint64_t nsec, uint64_t cycles)
{
struct set_timestamp_aux_param temp;
@@ -460,8 +460,7 @@ struct adjust_timestamp_aux_param {
int64_t nsec;
};
-int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,
- int64_t nsec)
+int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t *context, int64_t nsec)
{
struct adjust_timestamp_aux_param temp;
struct adjust_timestamp_aux_param *params = &temp;
@@ -475,25 +474,6 @@ int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,
EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_aux);
-struct adjust_timestamp_freq_param {
- int32_t ppb;
-};
-
-int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * context,
- int32_t ppb)
-{
- struct adjust_timestamp_freq_param temp;
- struct adjust_timestamp_freq_param *params = &temp;
-
- params->ppb = ppb;
-
- return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
- sizeof(*params),
- GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ);
-}
-
-EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_freq);
-
struct config_edma_ring_blks_param {
unsigned int ering;
unsigned int max_blks;
@@ -501,7 +481,7 @@ struct config_edma_ring_blks_param {
unsigned int db;
};
-int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t * context,
+int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t *context,
unsigned int ering, unsigned int max_blks,
unsigned int min_snf_blks, unsigned int db)
{
@@ -520,11 +500,29 @@ int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t * context,
EXPORT_SYMBOL(gxio_mpipe_config_edma_ring_blks);
+struct adjust_timestamp_freq_param {
+ int32_t ppb;
+};
+
+int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t *context, int32_t ppb)
+{
+ struct adjust_timestamp_freq_param temp;
+ struct adjust_timestamp_freq_param *params = &temp;
+
+ params->ppb = ppb;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params),
+ GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_freq);
+
struct arm_pollfd_param {
union iorpc_pollfd pollfd;
};
-int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie)
+int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie)
{
struct arm_pollfd_param temp;
struct arm_pollfd_param *params = &temp;
@@ -541,7 +539,7 @@ struct close_pollfd_param {
union iorpc_pollfd pollfd;
};
-int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie)
+int gxio_mpipe_close_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie)
{
struct close_pollfd_param temp;
struct close_pollfd_param *params = &temp;
@@ -558,7 +556,7 @@ struct get_mmio_base_param {
HV_PTE base;
};
-int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t * context, HV_PTE *base)
+int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t *context, HV_PTE *base)
{
int __result;
struct get_mmio_base_param temp;
@@ -579,7 +577,7 @@ struct check_mmio_offset_param {
unsigned long size;
};
-int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t * context,
+int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t *context,
unsigned long offset, unsigned long size)
{
struct check_mmio_offset_param temp;
diff --git a/arch/tile/gxio/iorpc_mpipe_info.c b/arch/tile/gxio/iorpc_mpipe_info.c
index 64883aabeb9c..77019c6e9b4a 100644
--- a/arch/tile/gxio/iorpc_mpipe_info.c
+++ b/arch/tile/gxio/iorpc_mpipe_info.c
@@ -15,12 +15,11 @@
/* This file is machine-generated; DO NOT EDIT! */
#include "gxio/iorpc_mpipe_info.h"
-
struct instance_aux_param {
_gxio_mpipe_link_name_t name;
};
-int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context,
+int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t *context,
_gxio_mpipe_link_name_t name)
{
struct instance_aux_param temp;
@@ -39,10 +38,10 @@ struct enumerate_aux_param {
_gxio_mpipe_link_mac_t mac;
};
-int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context,
+int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t *context,
unsigned int idx,
- _gxio_mpipe_link_name_t * name,
- _gxio_mpipe_link_mac_t * mac)
+ _gxio_mpipe_link_name_t *name,
+ _gxio_mpipe_link_mac_t *mac)
{
int __result;
struct enumerate_aux_param temp;
@@ -50,7 +49,7 @@ int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context,
__result =
hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
- (((uint64_t) idx << 32) |
+ (((uint64_t)idx << 32) |
GXIO_MPIPE_INFO_OP_ENUMERATE_AUX));
*name = params->name;
*mac = params->mac;
@@ -64,7 +63,7 @@ struct get_mmio_base_param {
HV_PTE base;
};
-int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t * context,
+int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t *context,
HV_PTE *base)
{
int __result;
@@ -86,7 +85,7 @@ struct check_mmio_offset_param {
unsigned long size;
};
-int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t * context,
+int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t *context,
unsigned long offset, unsigned long size)
{
struct check_mmio_offset_param temp;
diff --git a/arch/tile/gxio/iorpc_trio.c b/arch/tile/gxio/iorpc_trio.c
index da6e18e049c3..1d3cedb9aeb4 100644
--- a/arch/tile/gxio/iorpc_trio.c
+++ b/arch/tile/gxio/iorpc_trio.c
@@ -21,7 +21,7 @@ struct alloc_asids_param {
unsigned int flags;
};
-int gxio_trio_alloc_asids(gxio_trio_context_t * context, unsigned int count,
+int gxio_trio_alloc_asids(gxio_trio_context_t *context, unsigned int count,
unsigned int first, unsigned int flags)
{
struct alloc_asids_param temp;
@@ -44,7 +44,7 @@ struct alloc_memory_maps_param {
unsigned int flags;
};
-int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context,
+int gxio_trio_alloc_memory_maps(gxio_trio_context_t *context,
unsigned int count, unsigned int first,
unsigned int flags)
{
@@ -67,7 +67,7 @@ struct alloc_scatter_queues_param {
unsigned int flags;
};
-int gxio_trio_alloc_scatter_queues(gxio_trio_context_t * context,
+int gxio_trio_alloc_scatter_queues(gxio_trio_context_t *context,
unsigned int count, unsigned int first,
unsigned int flags)
{
@@ -91,7 +91,7 @@ struct alloc_pio_regions_param {
unsigned int flags;
};
-int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context,
+int gxio_trio_alloc_pio_regions(gxio_trio_context_t *context,
unsigned int count, unsigned int first,
unsigned int flags)
{
@@ -115,7 +115,7 @@ struct init_pio_region_aux_param {
unsigned int flags;
};
-int gxio_trio_init_pio_region_aux(gxio_trio_context_t * context,
+int gxio_trio_init_pio_region_aux(gxio_trio_context_t *context,
unsigned int pio_region, unsigned int mac,
uint32_t bus_address_hi, unsigned int flags)
{
@@ -145,7 +145,7 @@ struct init_memory_map_mmu_aux_param {
unsigned int order_mode;
};
-int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t * context,
+int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t *context,
unsigned int map, unsigned long va,
uint64_t size, unsigned int asid,
unsigned int mac, uint64_t bus_address,
@@ -175,7 +175,7 @@ struct get_port_property_param {
struct pcie_trio_ports_property trio_ports;
};
-int gxio_trio_get_port_property(gxio_trio_context_t * context,
+int gxio_trio_get_port_property(gxio_trio_context_t *context,
struct pcie_trio_ports_property *trio_ports)
{
int __result;
@@ -198,7 +198,7 @@ struct config_legacy_intr_param {
unsigned int intx;
};
-int gxio_trio_config_legacy_intr(gxio_trio_context_t * context, int inter_x,
+int gxio_trio_config_legacy_intr(gxio_trio_context_t *context, int inter_x,
int inter_y, int inter_ipi, int inter_event,
unsigned int mac, unsigned int intx)
{
@@ -227,7 +227,7 @@ struct config_msi_intr_param {
unsigned int asid;
};
-int gxio_trio_config_msi_intr(gxio_trio_context_t * context, int inter_x,
+int gxio_trio_config_msi_intr(gxio_trio_context_t *context, int inter_x,
int inter_y, int inter_ipi, int inter_event,
unsigned int mac, unsigned int mem_map,
uint64_t mem_map_base, uint64_t mem_map_limit,
@@ -259,7 +259,7 @@ struct set_mps_mrs_param {
unsigned int mac;
};
-int gxio_trio_set_mps_mrs(gxio_trio_context_t * context, uint16_t mps,
+int gxio_trio_set_mps_mrs(gxio_trio_context_t *context, uint16_t mps,
uint16_t mrs, unsigned int mac)
{
struct set_mps_mrs_param temp;
@@ -279,7 +279,7 @@ struct force_rc_link_up_param {
unsigned int mac;
};
-int gxio_trio_force_rc_link_up(gxio_trio_context_t * context, unsigned int mac)
+int gxio_trio_force_rc_link_up(gxio_trio_context_t *context, unsigned int mac)
{
struct force_rc_link_up_param temp;
struct force_rc_link_up_param *params = &temp;
@@ -296,7 +296,7 @@ struct force_ep_link_up_param {
unsigned int mac;
};
-int gxio_trio_force_ep_link_up(gxio_trio_context_t * context, unsigned int mac)
+int gxio_trio_force_ep_link_up(gxio_trio_context_t *context, unsigned int mac)
{
struct force_ep_link_up_param temp;
struct force_ep_link_up_param *params = &temp;
@@ -313,7 +313,7 @@ struct get_mmio_base_param {
HV_PTE base;
};
-int gxio_trio_get_mmio_base(gxio_trio_context_t * context, HV_PTE *base)
+int gxio_trio_get_mmio_base(gxio_trio_context_t *context, HV_PTE *base)
{
int __result;
struct get_mmio_base_param temp;
@@ -334,7 +334,7 @@ struct check_mmio_offset_param {
unsigned long size;
};
-int gxio_trio_check_mmio_offset(gxio_trio_context_t * context,
+int gxio_trio_check_mmio_offset(gxio_trio_context_t *context,
unsigned long offset, unsigned long size)
{
struct check_mmio_offset_param temp;
diff --git a/arch/tile/gxio/iorpc_usb_host.c b/arch/tile/gxio/iorpc_usb_host.c
index cf3c3cc12204..9c820073bfc0 100644
--- a/arch/tile/gxio/iorpc_usb_host.c
+++ b/arch/tile/gxio/iorpc_usb_host.c
@@ -19,7 +19,7 @@ struct cfg_interrupt_param {
union iorpc_interrupt interrupt;
};
-int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t * context, int inter_x,
+int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t *context, int inter_x,
int inter_y, int inter_ipi, int inter_event)
{
struct cfg_interrupt_param temp;
@@ -41,7 +41,7 @@ struct register_client_memory_param {
unsigned int flags;
};
-int gxio_usb_host_register_client_memory(gxio_usb_host_context_t * context,
+int gxio_usb_host_register_client_memory(gxio_usb_host_context_t *context,
HV_PTE pte, unsigned int flags)
{
struct register_client_memory_param temp;
@@ -61,7 +61,7 @@ struct get_mmio_base_param {
HV_PTE base;
};
-int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t * context, HV_PTE *base)
+int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t *context, HV_PTE *base)
{
int __result;
struct get_mmio_base_param temp;
@@ -82,7 +82,7 @@ struct check_mmio_offset_param {
unsigned long size;
};
-int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t * context,
+int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t *context,
unsigned long offset, unsigned long size)
{
struct check_mmio_offset_param temp;
diff --git a/arch/tile/gxio/usb_host.c b/arch/tile/gxio/usb_host.c
index 66b002f54ecc..785afad7922e 100644
--- a/arch/tile/gxio/usb_host.c
+++ b/arch/tile/gxio/usb_host.c
@@ -26,7 +26,7 @@
#include <gxio/kiorpc.h>
#include <gxio/usb_host.h>
-int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index,
+int gxio_usb_host_init(gxio_usb_host_context_t *context, int usb_index,
int is_ehci)
{
char file[32];
@@ -63,7 +63,7 @@ int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index,
EXPORT_SYMBOL_GPL(gxio_usb_host_init);
-int gxio_usb_host_destroy(gxio_usb_host_context_t * context)
+int gxio_usb_host_destroy(gxio_usb_host_context_t *context)
{
iounmap((void __force __iomem *)(context->mmio_base));
hv_dev_close(context->fd);
@@ -76,14 +76,14 @@ int gxio_usb_host_destroy(gxio_usb_host_context_t * context)
EXPORT_SYMBOL_GPL(gxio_usb_host_destroy);
-void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t * context)
+void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t *context)
{
return context->mmio_base;
}
EXPORT_SYMBOL_GPL(gxio_usb_host_get_reg_start);
-size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t * context)
+size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t *context)
{
return HV_USB_HOST_MMIO_SIZE;
}
diff --git a/arch/tile/include/arch/mpipe.h b/arch/tile/include/arch/mpipe.h
index 8a33912fd6cc..904538e754d8 100644
--- a/arch/tile/include/arch/mpipe.h
+++ b/arch/tile/include/arch/mpipe.h
@@ -176,7 +176,18 @@ typedef union
*/
uint_reg_t stack_idx : 5;
/* Reserved. */
- uint_reg_t __reserved_2 : 5;
+ uint_reg_t __reserved_2 : 3;
+ /*
+ * Instance ID. For devices that support automatic buffer return between
+ * mPIPE instances, this field indicates the buffer owner. If the INST
+ * field does not match the mPIPE's instance number when a packet is
+ * egressed, buffers with HWB set will be returned to the other mPIPE
+ * instance. Note that not all devices support multi-mPIPE buffer
+ * return. The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates
+ * whether the INST field in the buffer descriptor is populated by iDMA
+ * hardware. This field is ignored on writes.
+ */
+ uint_reg_t inst : 2;
/*
* Reads as one to indicate that this is a hardware managed buffer.
* Ignored on writes since all buffers on a given stack are the same size.
@@ -205,7 +216,8 @@ typedef union
uint_reg_t c : 2;
uint_reg_t size : 3;
uint_reg_t hwb : 1;
- uint_reg_t __reserved_2 : 5;
+ uint_reg_t inst : 2;
+ uint_reg_t __reserved_2 : 3;
uint_reg_t stack_idx : 5;
uint_reg_t __reserved_1 : 6;
int_reg_t va : 35;
@@ -231,9 +243,9 @@ typedef union
/* Reserved. */
uint_reg_t __reserved_0 : 3;
/* eDMA ring being accessed */
- uint_reg_t ring : 5;
+ uint_reg_t ring : 6;
/* Reserved. */
- uint_reg_t __reserved_1 : 18;
+ uint_reg_t __reserved_1 : 17;
/*
* This field of the address selects the region (address space) to be
* accessed. For the egress DMA post region, this field must be 5.
@@ -250,8 +262,8 @@ typedef union
uint_reg_t svc_dom : 5;
uint_reg_t __reserved_2 : 6;
uint_reg_t region : 3;
- uint_reg_t __reserved_1 : 18;
- uint_reg_t ring : 5;
+ uint_reg_t __reserved_1 : 17;
+ uint_reg_t ring : 6;
uint_reg_t __reserved_0 : 3;
#endif
};
diff --git a/arch/tile/include/arch/mpipe_constants.h b/arch/tile/include/arch/mpipe_constants.h
index 410a0400e055..84022ac5fe82 100644
--- a/arch/tile/include/arch/mpipe_constants.h
+++ b/arch/tile/include/arch/mpipe_constants.h
@@ -16,13 +16,13 @@
#ifndef __ARCH_MPIPE_CONSTANTS_H__
#define __ARCH_MPIPE_CONSTANTS_H__
-#define MPIPE_NUM_CLASSIFIERS 10
+#define MPIPE_NUM_CLASSIFIERS 16
#define MPIPE_CLS_MHZ 1200
-#define MPIPE_NUM_EDMA_RINGS 32
+#define MPIPE_NUM_EDMA_RINGS 64
#define MPIPE_NUM_SGMII_MACS 16
-#define MPIPE_NUM_XAUI_MACS 4
+#define MPIPE_NUM_XAUI_MACS 16
#define MPIPE_NUM_LOOPBACK_CHANNELS 4
#define MPIPE_NUM_NON_LB_CHANNELS 28
diff --git a/arch/tile/include/arch/mpipe_shm.h b/arch/tile/include/arch/mpipe_shm.h
index f2e9e122818d..13b3c4300e50 100644
--- a/arch/tile/include/arch/mpipe_shm.h
+++ b/arch/tile/include/arch/mpipe_shm.h
@@ -44,8 +44,14 @@ typedef union
* descriptors toggles each time the ring tail pointer wraps.
*/
uint_reg_t gen : 1;
+ /**
+ * For devices with EDMA reorder support, this field allows the
+ * descriptor to select the egress FIFO. The associated DMA ring must
+ * have ALLOW_EFIFO_SEL enabled.
+ */
+ uint_reg_t efifo_sel : 6;
/** Reserved. Must be zero. */
- uint_reg_t r0 : 7;
+ uint_reg_t r0 : 1;
/** Checksum generation enabled for this transfer. */
uint_reg_t csum : 1;
/**
@@ -110,7 +116,8 @@ typedef union
uint_reg_t notif : 1;
uint_reg_t ns : 1;
uint_reg_t csum : 1;
- uint_reg_t r0 : 7;
+ uint_reg_t r0 : 1;
+ uint_reg_t efifo_sel : 6;
uint_reg_t gen : 1;
#endif
@@ -126,14 +133,16 @@ typedef union
/** Reserved. */
uint_reg_t __reserved_1 : 3;
/**
- * Instance ID. For devices that support more than one mPIPE instance,
- * this field indicates the buffer owner. If the INST field does not
- * match the mPIPE's instance number when a packet is egressed, buffers
- * with HWB set will be returned to the other mPIPE instance.
+ * Instance ID. For devices that support automatic buffer return between
+ * mPIPE instances, this field indicates the buffer owner. If the INST
+ * field does not match the mPIPE's instance number when a packet is
+ * egressed, buffers with HWB set will be returned to the other mPIPE
+ * instance. Note that not all devices support multi-mPIPE buffer
+ * return. The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates
+ * whether the INST field in the buffer descriptor is populated by iDMA
+ * hardware.
*/
- uint_reg_t inst : 1;
- /** Reserved. */
- uint_reg_t __reserved_2 : 1;
+ uint_reg_t inst : 2;
/**
* Always set to one by hardware in iDMA packet descriptors. For eDMA,
* indicates whether the buffer will be released to the buffer stack
@@ -166,8 +175,7 @@ typedef union
uint_reg_t c : 2;
uint_reg_t size : 3;
uint_reg_t hwb : 1;
- uint_reg_t __reserved_2 : 1;
- uint_reg_t inst : 1;
+ uint_reg_t inst : 2;
uint_reg_t __reserved_1 : 3;
uint_reg_t stack_idx : 5;
uint_reg_t __reserved_0 : 6;
@@ -408,7 +416,10 @@ typedef union
/**
* Sequence number applied when packet is distributed. Classifier
* selects which sequence number is to be applied by writing the 13-bit
- * SQN-selector into this field.
+ * SQN-selector into this field. For devices that support EXT_SQN (as
+ * indicated in IDMA_INFO.EXT_SQN_SUPPORT), the GP_SQN can be extended to
+ * 32-bits via the IDMA_CTL.EXT_SQN register. In this case the
+ * PACKET_SQN will be reduced to 32 bits.
*/
uint_reg_t gp_sqn : 16;
/**
@@ -451,14 +462,16 @@ typedef union
/** Reserved. */
uint_reg_t __reserved_5 : 3;
/**
- * Instance ID. For devices that support more than one mPIPE instance,
- * this field indicates the buffer owner. If the INST field does not
- * match the mPIPE's instance number when a packet is egressed, buffers
- * with HWB set will be returned to the other mPIPE instance.
+ * Instance ID. For devices that support automatic buffer return between
+ * mPIPE instances, this field indicates the buffer owner. If the INST
+ * field does not match the mPIPE's instance number when a packet is
+ * egressed, buffers with HWB set will be returned to the other mPIPE
+ * instance. Note that not all devices support multi-mPIPE buffer
+ * return. The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates
+ * whether the INST field in the buffer descriptor is populated by iDMA
+ * hardware.
*/
- uint_reg_t inst : 1;
- /** Reserved. */
- uint_reg_t __reserved_6 : 1;
+ uint_reg_t inst : 2;
/**
* Always set to one by hardware in iDMA packet descriptors. For eDMA,
* indicates whether the buffer will be released to the buffer stack
@@ -491,8 +504,7 @@ typedef union
uint_reg_t c : 2;
uint_reg_t size : 3;
uint_reg_t hwb : 1;
- uint_reg_t __reserved_6 : 1;
- uint_reg_t inst : 1;
+ uint_reg_t inst : 2;
uint_reg_t __reserved_5 : 3;
uint_reg_t stack_idx : 5;
uint_reg_t __reserved_4 : 6;
diff --git a/arch/tile/include/arch/trio_constants.h b/arch/tile/include/arch/trio_constants.h
index 628b045436b8..85647e91a458 100644
--- a/arch/tile/include/arch/trio_constants.h
+++ b/arch/tile/include/arch/trio_constants.h
@@ -16,21 +16,21 @@
#ifndef __ARCH_TRIO_CONSTANTS_H__
#define __ARCH_TRIO_CONSTANTS_H__
-#define TRIO_NUM_ASIDS 16
+#define TRIO_NUM_ASIDS 32
#define TRIO_NUM_TLBS_PER_ASID 16
#define TRIO_NUM_TPIO_REGIONS 8
#define TRIO_LOG2_NUM_TPIO_REGIONS 3
-#define TRIO_NUM_MAP_MEM_REGIONS 16
-#define TRIO_LOG2_NUM_MAP_MEM_REGIONS 4
+#define TRIO_NUM_MAP_MEM_REGIONS 32
+#define TRIO_LOG2_NUM_MAP_MEM_REGIONS 5
#define TRIO_NUM_MAP_SQ_REGIONS 8
#define TRIO_LOG2_NUM_MAP_SQ_REGIONS 3
#define TRIO_LOG2_NUM_SQ_FIFO_ENTRIES 6
-#define TRIO_NUM_PUSH_DMA_RINGS 32
+#define TRIO_NUM_PUSH_DMA_RINGS 64
-#define TRIO_NUM_PULL_DMA_RINGS 32
+#define TRIO_NUM_PULL_DMA_RINGS 64
#endif /* __ARCH_TRIO_CONSTANTS_H__ */
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index 6346888f7bdc..672768008618 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -182,10 +182,9 @@ static inline __attribute_const__ int get_order(unsigned long size)
#define PAGE_OFFSET (-(_AC(1, UL) << (MAX_VA_WIDTH - 1)))
#define KERNEL_HIGH_VADDR _AC(0xfffffff800000000, UL) /* high 32GB */
-#define FIXADDR_BASE (KERNEL_HIGH_VADDR - 0x400000000) /* 4 GB */
-#define FIXADDR_TOP (KERNEL_HIGH_VADDR - 0x300000000) /* 4 GB */
+#define FIXADDR_BASE (KERNEL_HIGH_VADDR - 0x300000000) /* 4 GB */
+#define FIXADDR_TOP (KERNEL_HIGH_VADDR - 0x200000000) /* 4 GB */
#define _VMALLOC_START FIXADDR_TOP
-#define HUGE_VMAP_BASE (KERNEL_HIGH_VADDR - 0x200000000) /* 4 GB */
#define MEM_SV_START (KERNEL_HIGH_VADDR - 0x100000000) /* 256 MB */
#define MEM_MODULE_START (MEM_SV_START + (256*1024*1024)) /* 256 MB */
#define MEM_MODULE_END (MEM_MODULE_START + (256*1024*1024))
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h
index 63142ab3b3dd..d26a42279036 100644
--- a/arch/tile/include/asm/pgtable_32.h
+++ b/arch/tile/include/asm/pgtable_32.h
@@ -55,17 +55,9 @@
#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*LAST_PKMAP) & PGDIR_MASK)
#ifdef CONFIG_HIGHMEM
-# define __VMAPPING_END (PKMAP_BASE & ~(HPAGE_SIZE-1))
+# define _VMALLOC_END (PKMAP_BASE & ~(HPAGE_SIZE-1))
#else
-# define __VMAPPING_END (FIXADDR_START & ~(HPAGE_SIZE-1))
-#endif
-
-#ifdef CONFIG_HUGEVMAP
-#define HUGE_VMAP_END __VMAPPING_END
-#define HUGE_VMAP_BASE (HUGE_VMAP_END - CONFIG_NR_HUGE_VMAPS * HPAGE_SIZE)
-#define _VMALLOC_END HUGE_VMAP_BASE
-#else
-#define _VMALLOC_END __VMAPPING_END
+# define _VMALLOC_END (FIXADDR_START & ~(HPAGE_SIZE-1))
#endif
/*
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h
index 3421177f7370..2c8a9cd102d3 100644
--- a/arch/tile/include/asm/pgtable_64.h
+++ b/arch/tile/include/asm/pgtable_64.h
@@ -52,12 +52,10 @@
* memory allocation code). The vmalloc code puts in an internal
* guard page between each allocation.
*/
-#define _VMALLOC_END HUGE_VMAP_BASE
+#define _VMALLOC_END MEM_SV_START
#define VMALLOC_END _VMALLOC_END
#define VMALLOC_START _VMALLOC_START
-#define HUGE_VMAP_END (HUGE_VMAP_BASE + PGDIR_SIZE)
-
#ifndef __ASSEMBLY__
/* We have no pud since we are a three-level page table. */
diff --git a/arch/tile/include/gxio/iorpc_mpipe.h b/arch/tile/include/gxio/iorpc_mpipe.h
index fdd07f88cfd7..4cda03de734f 100644
--- a/arch/tile/include/gxio/iorpc_mpipe.h
+++ b/arch/tile/include/gxio/iorpc_mpipe.h
@@ -56,89 +56,89 @@
#define GXIO_MPIPE_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
#define GXIO_MPIPE_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
-int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t * context,
+int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context,
unsigned int count, unsigned int first,
unsigned int flags);
-int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t * context,
+int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t *context,
void *mem_va, size_t mem_size,
unsigned int mem_flags, unsigned int stack,
unsigned int buffer_size_enum);
-int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t * context,
+int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context,
unsigned int count, unsigned int first,
unsigned int flags);
-int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t * context, void *mem_va,
+int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t *context, void *mem_va,
size_t mem_size, unsigned int mem_flags,
unsigned int ring);
-int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t * context,
+int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t *context,
int inter_x, int inter_y,
int inter_ipi, int inter_event,
unsigned int ring);
-int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t * context,
+int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t *context,
unsigned int ring);
-int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t * context,
+int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context,
unsigned int count, unsigned int first,
unsigned int flags);
-int gxio_mpipe_init_notif_group(gxio_mpipe_context_t * context,
+int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context,
unsigned int group,
gxio_mpipe_notif_group_bits_t bits);
-int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t * context, unsigned int count,
+int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context, unsigned int count,
unsigned int first, unsigned int flags);
-int gxio_mpipe_init_bucket(gxio_mpipe_context_t * context, unsigned int bucket,
+int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket,
MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info);
-int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t * context,
+int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
unsigned int count, unsigned int first,
unsigned int flags);
-int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t * context, void *mem_va,
+int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t *context, void *mem_va,
size_t mem_size, unsigned int mem_flags,
unsigned int ring, unsigned int channel);
-int gxio_mpipe_commit_rules(gxio_mpipe_context_t * context, const void *blob,
+int gxio_mpipe_commit_rules(gxio_mpipe_context_t *context, const void *blob,
size_t blob_size);
-int gxio_mpipe_register_client_memory(gxio_mpipe_context_t * context,
+int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context,
unsigned int iotlb, HV_PTE pte,
unsigned int flags);
-int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context,
+int gxio_mpipe_link_open_aux(gxio_mpipe_context_t *context,
_gxio_mpipe_link_name_t name, unsigned int flags);
-int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac);
+int gxio_mpipe_link_close_aux(gxio_mpipe_context_t *context, int mac);
-int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac,
+int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t *context, int mac,
uint32_t attr, int64_t val);
-int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec,
- uint64_t * nsec, uint64_t * cycles);
+int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t *context, uint64_t *sec,
+ uint64_t *nsec, uint64_t *cycles);
-int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec,
+int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t *context, uint64_t sec,
uint64_t nsec, uint64_t cycles);
-int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,
+int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t *context,
int64_t nsec);
-int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * context,
+int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t *context,
int32_t ppb);
-int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie);
+int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie);
-int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie);
+int gxio_mpipe_close_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie);
-int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t * context, HV_PTE *base);
+int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t *context, HV_PTE *base);
-int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t * context,
+int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t *context,
unsigned long offset, unsigned long size);
#endif /* !__GXIO_MPIPE_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/iorpc_mpipe_info.h b/arch/tile/include/gxio/iorpc_mpipe_info.h
index 476c5e5ca22c..f0b04284468b 100644
--- a/arch/tile/include/gxio/iorpc_mpipe_info.h
+++ b/arch/tile/include/gxio/iorpc_mpipe_info.h
@@ -33,18 +33,18 @@
#define GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
-int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context,
+int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t *context,
_gxio_mpipe_link_name_t name);
-int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context,
+int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t *context,
unsigned int idx,
- _gxio_mpipe_link_name_t * name,
- _gxio_mpipe_link_mac_t * mac);
+ _gxio_mpipe_link_name_t *name,
+ _gxio_mpipe_link_mac_t *mac);
-int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t * context,
+int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t *context,
HV_PTE *base);
-int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t * context,
+int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t *context,
unsigned long offset, unsigned long size);
#endif /* !__GXIO_MPIPE_INFO_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/iorpc_trio.h b/arch/tile/include/gxio/iorpc_trio.h
index d95b96fd6c93..376a4f771167 100644
--- a/arch/tile/include/gxio/iorpc_trio.h
+++ b/arch/tile/include/gxio/iorpc_trio.h
@@ -46,59 +46,59 @@
#define GXIO_TRIO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
#define GXIO_TRIO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
-int gxio_trio_alloc_asids(gxio_trio_context_t * context, unsigned int count,
+int gxio_trio_alloc_asids(gxio_trio_context_t *context, unsigned int count,
unsigned int first, unsigned int flags);
-int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context,
+int gxio_trio_alloc_memory_maps(gxio_trio_context_t *context,
unsigned int count, unsigned int first,
unsigned int flags);
-int gxio_trio_alloc_scatter_queues(gxio_trio_context_t * context,
+int gxio_trio_alloc_scatter_queues(gxio_trio_context_t *context,
unsigned int count, unsigned int first,
unsigned int flags);
-int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context,
+int gxio_trio_alloc_pio_regions(gxio_trio_context_t *context,
unsigned int count, unsigned int first,
unsigned int flags);
-int gxio_trio_init_pio_region_aux(gxio_trio_context_t * context,
+int gxio_trio_init_pio_region_aux(gxio_trio_context_t *context,
unsigned int pio_region, unsigned int mac,
uint32_t bus_address_hi, unsigned int flags);
-int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t * context,
+int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t *context,
unsigned int map, unsigned long va,
uint64_t size, unsigned int asid,
unsigned int mac, uint64_t bus_address,
unsigned int node,
unsigned int order_mode);
-int gxio_trio_get_port_property(gxio_trio_context_t * context,
+int gxio_trio_get_port_property(gxio_trio_context_t *context,
struct pcie_trio_ports_property *trio_ports);
-int gxio_trio_config_legacy_intr(gxio_trio_context_t * context, int inter_x,
+int gxio_trio_config_legacy_intr(gxio_trio_context_t *context, int inter_x,
int inter_y, int inter_ipi, int inter_event,
unsigned int mac, unsigned int intx);
-int gxio_trio_config_msi_intr(gxio_trio_context_t * context, int inter_x,
+int gxio_trio_config_msi_intr(gxio_trio_context_t *context, int inter_x,
int inter_y, int inter_ipi, int inter_event,
unsigned int mac, unsigned int mem_map,
uint64_t mem_map_base, uint64_t mem_map_limit,
unsigned int asid);
-int gxio_trio_set_mps_mrs(gxio_trio_context_t * context, uint16_t mps,
+int gxio_trio_set_mps_mrs(gxio_trio_context_t *context, uint16_t mps,
uint16_t mrs, unsigned int mac);
-int gxio_trio_force_rc_link_up(gxio_trio_context_t * context, unsigned int mac);
+int gxio_trio_force_rc_link_up(gxio_trio_context_t *context, unsigned int mac);
-int gxio_trio_force_ep_link_up(gxio_trio_context_t * context, unsigned int mac);
+int gxio_trio_force_ep_link_up(gxio_trio_context_t *context, unsigned int mac);
-int gxio_trio_get_mmio_base(gxio_trio_context_t * context, HV_PTE *base);
+int gxio_trio_get_mmio_base(gxio_trio_context_t *context, HV_PTE *base);
-int gxio_trio_check_mmio_offset(gxio_trio_context_t * context,
+int gxio_trio_check_mmio_offset(gxio_trio_context_t *context,
unsigned long offset, unsigned long size);
#endif /* !__GXIO_TRIO_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/iorpc_usb_host.h b/arch/tile/include/gxio/iorpc_usb_host.h
index 8622e7d126ad..79962a97de8e 100644
--- a/arch/tile/include/gxio/iorpc_usb_host.h
+++ b/arch/tile/include/gxio/iorpc_usb_host.h
@@ -31,16 +31,16 @@
#define GXIO_USB_HOST_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
#define GXIO_USB_HOST_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
-int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t * context, int inter_x,
+int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t *context, int inter_x,
int inter_y, int inter_ipi, int inter_event);
-int gxio_usb_host_register_client_memory(gxio_usb_host_context_t * context,
+int gxio_usb_host_register_client_memory(gxio_usb_host_context_t *context,
HV_PTE pte, unsigned int flags);
-int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t * context,
+int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t *context,
HV_PTE *base);
-int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t * context,
+int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t *context,
unsigned long offset, unsigned long size);
#endif /* !__GXIO_USB_HOST_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/usb_host.h b/arch/tile/include/gxio/usb_host.h
index 5eedec0e988e..93c9636d2dd7 100644
--- a/arch/tile/include/gxio/usb_host.h
+++ b/arch/tile/include/gxio/usb_host.h
@@ -53,7 +53,7 @@ typedef struct {
* @return Zero if the context was successfully initialized, else a
* GXIO_ERR_xxx error code.
*/
-extern int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index,
+extern int gxio_usb_host_init(gxio_usb_host_context_t *context, int usb_index,
int is_ehci);
/* Destroy a USB context.
@@ -68,20 +68,20 @@ extern int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index,
* @return Zero if the context was successfully destroyed, else a
* GXIO_ERR_xxx error code.
*/
-extern int gxio_usb_host_destroy(gxio_usb_host_context_t * context);
+extern int gxio_usb_host_destroy(gxio_usb_host_context_t *context);
/* Retrieve the address of the shim's MMIO registers.
*
* @param context Pointer to a properly initialized gxio_usb_host_context_t.
* @return The address of the shim's MMIO registers.
*/
-extern void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t * context);
+extern void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t *context);
/* Retrieve the length of the shim's MMIO registers.
*
* @param context Pointer to a properly initialized gxio_usb_host_context_t.
* @return The length of the shim's MMIO registers.
*/
-extern size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t * context);
+extern size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t *context);
#endif /* _GXIO_USB_H_ */
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index ed378416b86a..49120843ff96 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -84,7 +84,7 @@ COMPAT_SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned int, offset_high,
{
return sys_llseek(fd, offset_high, offset_low, result, origin);
}
-
+
/* Provide the compat syscall number to call mapping. */
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
diff --git a/arch/tile/kernel/futex_64.S b/arch/tile/kernel/futex_64.S
deleted file mode 100644
index f465d1eda20f..000000000000
--- a/arch/tile/kernel/futex_64.S
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Atomically access user memory, but use MMU to avoid propagating
- * kernel exceptions.
- */
-
-#include <linux/linkage.h>
-#include <asm/errno.h>
-#include <asm/futex.h>
-#include <asm/page.h>
-#include <asm/processor.h>
-
-/*
- * Provide a set of atomic memory operations supporting <asm/futex.h>.
- *
- * r0: user address to manipulate
- * r1: new value to write, or for cmpxchg, old value to compare against
- * r2: (cmpxchg only) new value to write
- *
- * Return __get_user struct, r0 with value, r1 with error.
- */
-#define FUTEX_OP(name, ...) \
-STD_ENTRY(futex_##name) \
- __VA_ARGS__; \
- { \
- move r1, zero; \
- jrp lr \
- }; \
- STD_ENDPROC(futex_##name); \
- .pushsection __ex_table,"a"; \
- .quad 1b, get_user_fault; \
- .popsection
-
- .pushsection .fixup,"ax"
-get_user_fault:
- { movei r1, -EFAULT; jrp lr }
- ENDPROC(get_user_fault)
- .popsection
-
-FUTEX_OP(cmpxchg, mtspr CMPEXCH_VALUE, r1; 1: cmpexch4 r0, r0, r2)
-FUTEX_OP(set, 1: exch4 r0, r0, r1)
-FUTEX_OP(add, 1: fetchadd4 r0, r0, r1)
-FUTEX_OP(or, 1: fetchor4 r0, r0, r1)
-FUTEX_OP(andn, nor r1, r1, zero; 1: fetchand4 r0, r0, r1)
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 4c34caea9dd3..74c91729a62a 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1268,8 +1268,7 @@ static void __init validate_va(void)
if ((long)VMALLOC_START >= 0)
early_panic(
"Linux VMALLOC region below the 2GB line (%#lx)!\n"
- "Reconfigure the kernel with fewer NR_HUGE_VMAPS\n"
- "or smaller VMALLOC_RESERVE.\n",
+ "Reconfigure the kernel with smaller VMALLOC_RESERVE.\n",
VMALLOC_START);
#endif
}
diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c
index b425fb6a480d..b030b4e78845 100644
--- a/arch/tile/kernel/unaligned.c
+++ b/arch/tile/kernel/unaligned.c
@@ -551,8 +551,8 @@ static tilegx_bundle_bits jit_x1_bnezt(int ra, int broff)
/*
* This function generates unalign fixup JIT.
*
- * We fist find unalign load/store instruction's destination, source
- * reguisters: ra, rb and rd. and 3 scratch registers by calling
+ * We first find unalign load/store instruction's destination, source
+ * registers: ra, rb and rd. and 3 scratch registers by calling
* find_regs(...). 3 scratch clobbers should not alias with any register
* used in the fault bundle. Then analyze the fault bundle to determine
* if it's a load or store, operand width, branch or address increment etc.
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 4c288f199453..6c0571216a9d 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -149,8 +149,6 @@ static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
pmd_k = vmalloc_sync_one(pgd, address);
if (!pmd_k)
return -1;
- if (pmd_huge(*pmd_k))
- return 0; /* support TILE huge_vmap() API */
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
return -1;
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 4e316deb92fd..0fa1acfac79a 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -828,10 +828,6 @@ void __init mem_init(void)
printk(KERN_DEBUG " PKMAP %#lx - %#lx\n",
PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP) - 1);
#endif
-#ifdef CONFIG_HUGEVMAP
- printk(KERN_DEBUG " HUGEMAP %#lx - %#lx\n",
- HUGE_VMAP_BASE, HUGE_VMAP_END - 1);
-#endif
printk(KERN_DEBUG " VMALLOC %#lx - %#lx\n",
_VMALLOC_START, _VMALLOC_END - 1);
#ifdef __tilegx__
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 2deaddf3e01f..4fd9ec0b58ed 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -127,8 +127,7 @@ void shatter_huge_page(unsigned long addr)
}
/* Shatter the huge page into the preallocated L2 page table. */
- pmd_populate_kernel(&init_mm, pmd,
- get_prealloc_pte(pte_pfn(*(pte_t *)pmd)));
+ pmd_populate_kernel(&init_mm, pmd, get_prealloc_pte(pmd_pfn(*pmd)));
#ifdef __PAGETABLE_PMD_FOLDED
/* Walk every pgd on the system and update the pmd there. */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e241a1930c98..ee2fb9d37745 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -481,11 +481,12 @@ config X86_INTEL_LPSS
bool "Intel Low Power Subsystem Support"
depends on ACPI
select COMMON_CLK
+ select PINCTRL
---help---
Select to build support for Intel Low Power Subsystem such as
found on Intel Lynxpoint PCH. Selecting this option enables
- things like clock tree (common clock framework) which are needed
- by the LPSS peripheral drivers.
+ things like clock tree (common clock framework) and pincontrol
+ which are needed by the LPSS peripheral drivers.
config X86_RDC321X
bool "RDC R-321x SoC"
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 6aef9fbc09b7..b913915e8e63 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -79,30 +79,38 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
}
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
+static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
{
unsigned long pfn;
- int ret = 0;
+ int ret;
if (xen_feature(XENFEAT_auto_translated_physmap))
return mfn;
- if (unlikely(mfn >= machine_to_phys_nr)) {
- pfn = ~0;
- goto try_override;
- }
- pfn = 0;
+ if (unlikely(mfn >= machine_to_phys_nr))
+ return ~0;
+
/*
* The array access can fail (e.g., device space beyond end of RAM).
* In such cases it doesn't matter what we return (we return garbage),
* but we must handle the fault without crashing!
*/
ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-try_override:
- /* ret might be < 0 if there are no entries in the m2p for mfn */
if (ret < 0)
- pfn = ~0;
- else if (get_phys_to_machine(pfn) != mfn)
+ return ~0;
+
+ return pfn;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+ unsigned long pfn;
+
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return mfn;
+
+ pfn = mfn_to_pfn_no_overrides(mfn);
+ if (get_phys_to_machine(pfn) != mfn) {
/*
* If this appears to be a foreign mfn (because the pfn
* doesn't map back to the mfn), then check the local override
@@ -111,6 +119,7 @@ try_override:
* m2p_find_override_pfn returns ~0 if it doesn't find anything.
*/
pfn = m2p_find_override_pfn(mfn, ~0);
+ }
/*
* pfn is ~0 if there are no entries in the m2p for mfn or if the
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8355c84b9729..897783b3302a 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1506,7 +1506,7 @@ static int __init init_hw_perf_events(void)
err = amd_pmu_init();
break;
default:
- return 0;
+ err = -ENOTSUPP;
}
if (err != 0) {
pr_cont("no PMU driver, software events only.\n");
@@ -1883,9 +1883,9 @@ static struct pmu pmu = {
void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
{
- userpg->cap_usr_time = 0;
- userpg->cap_usr_time_zero = 0;
- userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
+ userpg->cap_user_time = 0;
+ userpg->cap_user_time_zero = 0;
+ userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
userpg->pmc_width = x86_pmu.cntval_bits;
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
@@ -1894,13 +1894,13 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
return;
- userpg->cap_usr_time = 1;
+ userpg->cap_user_time = 1;
userpg->time_mult = this_cpu_read(cyc2ns);
userpg->time_shift = CYC2NS_SCALE_FACTOR;
userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
if (sched_clock_stable && !check_tsc_disabled()) {
- userpg->cap_usr_time_zero = 1;
+ userpg->cap_user_time_zero = 1;
userpg->time_zero = this_cpu_read(cyc2ns_offset);
}
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index c62d88396ad5..f31a1655d1ff 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -899,8 +899,8 @@ static __initconst const u64 atom_hw_cache_event_ids
static struct extra_reg intel_slm_extra_regs[] __read_mostly =
{
/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
- INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffff, RSP_0),
- INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffff, RSP_1),
+ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
+ INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffffull, RSP_1),
EVENT_EXTRA_END
};
@@ -2325,6 +2325,7 @@ __init int intel_pmu_init(void)
break;
case 55: /* Atom 22nm "Silvermont" */
+ case 77: /* Avoton "Silvermont" */
memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 63438aad177f..ab3ba1c1b7dd 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -584,6 +584,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
EVENT_CONSTRAINT_END
};
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 8ed44589b0e4..4118f9f68315 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -2706,14 +2706,14 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
box->hrtimer.function = uncore_pmu_hrtimer;
}
-struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
+static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
{
struct intel_uncore_box *box;
int i, size;
size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
- box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
+ box = kzalloc_node(size, GFP_KERNEL, node);
if (!box)
return NULL;
@@ -3031,7 +3031,7 @@ static int uncore_validate_group(struct intel_uncore_pmu *pmu,
struct intel_uncore_box *fake_box;
int ret = -EINVAL, n;
- fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
+ fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
if (!fake_box)
return -ENOMEM;
@@ -3294,7 +3294,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
}
type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
- box = uncore_alloc_box(type, 0);
+ box = uncore_alloc_box(type, NUMA_NO_NODE);
if (!box)
return -ENOMEM;
@@ -3499,7 +3499,7 @@ static int uncore_cpu_prepare(int cpu, int phys_id)
if (pmu->func_id < 0)
pmu->func_id = j;
- box = uncore_alloc_box(type, cpu);
+ box = uncore_alloc_box(type, cpu_to_node(cpu));
if (!box)
return -ENOMEM;
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1b69951a81e2..b077f4cc225a 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -487,21 +487,6 @@ ENDPROC(native_usergs_sysret64)
TRACE_IRQS_OFF
.endm
-ENTRY(save_rest)
- PARTIAL_FRAME 1 (REST_SKIP+8)
- movq 5*8+16(%rsp), %r11 /* save return address */
- movq_cfi rbx, RBX+16
- movq_cfi rbp, RBP+16
- movq_cfi r12, R12+16
- movq_cfi r13, R13+16
- movq_cfi r14, R14+16
- movq_cfi r15, R15+16
- movq %r11, 8(%rsp) /* return address */
- FIXUP_TOP_OF_STACK %r11, 16
- ret
- CFI_ENDPROC
-END(save_rest)
-
/* save complete stack frame */
.pushsection .kprobes.text, "ax"
ENTRY(save_paranoid)
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 7123b5df479d..af99f71aeb7f 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -216,6 +216,7 @@ int apply_microcode_amd(int cpu)
/* need to apply patch? */
if (rev >= mc_amd->hdr.patch_id) {
c->microcode = rev;
+ uci->cpu_sig.rev = rev;
return 0;
}
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 563ed91e6faa..e643e744e4d8 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -352,12 +352,28 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
},
{ /* Handle problems with rebooting on the Precision M6600. */
.callback = set_pci_reboot,
- .ident = "Dell OptiPlex 990",
+ .ident = "Dell Precision M6600",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
},
},
+ { /* Handle problems with rebooting on the Dell PowerEdge C6100. */
+ .callback = set_pci_reboot,
+ .ident = "Dell PowerEdge C6100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+ },
+ },
+ { /* Some C6100 machines were shipped with vendor being 'Dell'. */
+ .callback = set_pci_reboot,
+ .ident = "Dell PowerEdge C6100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+ },
+ },
{ }
};
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index aecc98a93d1b..6cacab671f9b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -653,6 +653,7 @@ static void announce_cpu(int cpu, int apicid)
{
static int current_node = -1;
int node = early_cpu_to_node(cpu);
+ int max_cpu_present = find_last_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
if (system_state == SYSTEM_BOOTING) {
if (node != current_node) {
@@ -661,7 +662,7 @@ static void announce_cpu(int cpu, int apicid)
current_node = node;
pr_info("Booting Node %3d, Processors ", node);
}
- pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " OK\n" : "");
+ pr_cont(" #%4d%s", cpu, cpu == max_cpu_present ? " OK\n" : "");
return;
} else
pr_info("Booting Node %d Processor %d APIC 0x%x\n",
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 2bc1e81045b0..ddc3f3d2afdb 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2025,6 +2025,17 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
return rc;
}
+static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
+{
+ int rc;
+
+ rc = em_ret_far(ctxt);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+ rsp_increment(ctxt, ctxt->src.val);
+ return X86EMUL_CONTINUE;
+}
+
static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
/* Save real source value, then compare EAX against destination. */
@@ -3763,7 +3774,8 @@ static const struct opcode opcode_table[256] = {
G(ByteOp, group11), G(0, group11),
/* 0xC8 - 0xCF */
I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
- N, I(ImplicitOps | Stack, em_ret_far),
+ I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
+ I(ImplicitOps | Stack, em_ret_far),
D(ImplicitOps), DI(SrcImmByte, intn),
D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
/* 0xD0 - 0xD7 */
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 043330159179..ad75d77999d0 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -99,6 +99,7 @@ struct guest_walker {
pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
+ bool pte_writable[PT_MAX_FULL_LEVELS];
unsigned pt_access;
unsigned pte_access;
gfn_t gfn;
@@ -235,6 +236,22 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
if (pte == orig_pte)
continue;
+ /*
+ * If the slot is read-only, simply do not process the accessed
+ * and dirty bits. This is the correct thing to do if the slot
+ * is ROM, and page tables in read-as-ROM/write-as-MMIO slots
+ * are only supported if the accessed and dirty bits are already
+ * set in the ROM (so that MMIO writes are never needed).
+ *
+ * Note that NPT does not allow this at all and faults, since
+ * it always wants nested page table entries for the guest
+ * page tables to be writable. And EPT works but will simply
+ * overwrite the read-only memory to set the accessed and dirty
+ * bits.
+ */
+ if (unlikely(!walker->pte_writable[level - 1]))
+ continue;
+
ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
if (ret)
return ret;
@@ -309,7 +326,8 @@ retry_walk:
goto error;
real_gfn = gpa_to_gfn(real_gfn);
- host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
+ host_addr = gfn_to_hva_prot(vcpu->kvm, real_gfn,
+ &walker->pte_writable[walker->level - 1]);
if (unlikely(kvm_is_error_hva(host_addr)))
goto error;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1f1da43ff2a2..a1216de9ffda 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5339,6 +5339,15 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
return 0;
}
+ /*
+ * EPT violation happened while executing iret from NMI,
+ * "blocked by NMI" bit has to be set before next VM entry.
+ * There are errata that may cause this bit to not be set:
+ * AAK134, BY25.
+ */
+ if (exit_qualification & INTR_INFO_UNBLOCK_NMI)
+ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
+
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
trace_kvm_page_fault(gpa, exit_qualification);
@@ -7766,6 +7775,10 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
+ __clear_bit(VCPU_EXREG_PDPTR,
+ (unsigned long *)&vcpu->arch.regs_avail);
+ __clear_bit(VCPU_EXREG_PDPTR,
+ (unsigned long *)&vcpu->arch.regs_dirty);
}
kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 90f6ed127096..c7e22ab29a5a 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -912,10 +912,13 @@ void __init efi_enter_virtual_mode(void)
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
- if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
- md->type != EFI_BOOT_SERVICES_CODE &&
- md->type != EFI_BOOT_SERVICES_DATA)
- continue;
+ if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
+#ifdef CONFIG_X86_64
+ if (md->type != EFI_BOOT_SERVICES_CODE &&
+ md->type != EFI_BOOT_SERVICES_DATA)
+#endif
+ continue;
+ }
size = md->num_pages << EFI_PAGE_SHIFT;
end = md->phys_addr + size;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 8b901e8d782d..a61c7d5811be 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -879,7 +879,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
unsigned long uninitialized_var(address);
unsigned level;
pte_t *ptep = NULL;
- int ret = 0;
pfn = page_to_pfn(page);
if (!PageHighMem(page)) {
@@ -926,8 +925,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
* frontend pages while they are being shared with the backend,
* because mfn_to_pfn (that ends up being called by GUPF) will
* return the backend pfn rather than the frontend pfn. */
- ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
- if (ret == 0 && get_phys_to_machine(pfn) == mfn)
+ pfn = mfn_to_pfn_no_overrides(mfn);
+ if (get_phys_to_machine(pfn) == mfn)
set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
return 0;
@@ -942,7 +941,6 @@ int m2p_remove_override(struct page *page,
unsigned long uninitialized_var(address);
unsigned level;
pte_t *ptep = NULL;
- int ret = 0;
pfn = page_to_pfn(page);
mfn = get_phys_to_machine(pfn);
@@ -1029,8 +1027,8 @@ int m2p_remove_override(struct page *page,
* the original pfn causes mfn_to_pfn(mfn) to return the frontend
* pfn again. */
mfn &= ~FOREIGN_FRAME_BIT;
- ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
- if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
+ pfn = mfn_to_pfn_no_overrides(mfn);
+ if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
m2p_find_override(mfn) == NULL)
set_phys_to_machine(pfn, mfn);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 253f63fceea1..be6b86078957 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -259,6 +259,14 @@ void xen_uninit_lock_cpu(int cpu)
}
+/*
+ * Our init of PV spinlocks is split in two init functions due to us
+ * using paravirt patching and jump labels patching and having to do
+ * all of this before SMP code is invoked.
+ *
+ * The paravirt patching needs to be done _before_ the alternative asm code
+ * is started, otherwise we would not patch the core kernel code.
+ */
void __init xen_init_spinlocks(void)
{
@@ -267,12 +275,26 @@ void __init xen_init_spinlocks(void)
return;
}
- static_key_slow_inc(&paravirt_ticketlocks_enabled);
-
pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
pv_lock_ops.unlock_kick = xen_unlock_kick;
}
+/*
+ * The jump_label init code needs to happen _after_ the jump labels are
+ * enabled and before SMP is started. Hence we use pre-SMP initcall level
+ * init. We cannot do it in xen_init_spinlocks as that is done before
+ * jump labels are activated.
+ */
+static __init int xen_init_spinlocks_jump(void)
+{
+ if (!xen_pvspin)
+ return 0;
+
+ static_key_slow_inc(&paravirt_ticketlocks_enabled);
+ return 0;
+}
+early_initcall(xen_init_spinlocks_jump);
+
static __init int xen_parse_nopvspin(char *arg)
{
xen_pvspin = false;
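
The split-init comments above describe a general ordering constraint: the paravirt hook setup has to run before the alternatives code is started, while static-key flips only take effect once jump labels are initialized, which is why the increment moved into an early_initcall. A minimal, hypothetical sketch of that pattern follows (the my_feature_* names are invented for illustration and are not part of this patch):

#include <linux/jump_label.h>
#include <linux/init.h>

static bool my_feature_requested;                 /* set from early __init code */
struct static_key my_feature_enabled = STATIC_KEY_INIT_FALSE;

void __init my_feature_early_setup(void)
{
        /* Runs before alternatives/SMP: only record the decision here,
         * no live code patching yet. */
        my_feature_requested = true;
}

static __init int my_feature_enable_jump_label(void)
{
        /* Jump labels are functional by early_initcall time, so flipping
         * the static key here actually patches the call sites. */
        if (my_feature_requested)
                static_key_slow_inc(&my_feature_enabled);
        return 0;
}
early_initcall(my_feature_enable_jump_label);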
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index e90c7c164c83..4e491d9b5292 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -235,8 +235,13 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
blkg->online = true;
spin_unlock(&blkcg->lock);
- if (!ret)
+ if (!ret) {
+ if (blkcg == &blkcg_root) {
+ q->root_blkg = blkg;
+ q->root_rl.blkg = blkg;
+ }
return blkg;
+ }
/* @blkg failed fully initialized, use the usual release path */
blkg_put(blkg);
@@ -335,6 +340,15 @@ static void blkg_destroy(struct blkcg_gq *blkg)
rcu_assign_pointer(blkcg->blkg_hint, NULL);
/*
+ * If the root blkg is destroyed, just clear the pointer since root_rl
+ * does not take reference on root blkg.
+ */
+ if (blkcg == &blkcg_root) {
+ blkg->q->root_blkg = NULL;
+ blkg->q->root_rl.blkg = NULL;
+ }
+
+ /*
* Put the reference taken at the time of creation so that when all
* queues are gone, group can be destroyed.
*/
@@ -360,13 +374,6 @@ static void blkg_destroy_all(struct request_queue *q)
blkg_destroy(blkg);
spin_unlock(&blkcg->lock);
}
-
- /*
- * root blkg is destroyed. Just clear the pointer since
- * root_rl does not take reference on root blkg.
- */
- q->root_blkg = NULL;
- q->root_rl.blkg = NULL;
}
/*
@@ -970,8 +977,6 @@ int blkcg_activate_policy(struct request_queue *q,
ret = PTR_ERR(blkg);
goto out_unlock;
}
- q->root_blkg = blkg;
- q->root_rl.blkg = blkg;
list_for_each_entry(blkg, &q->blkg_list, q_node)
cnt++;
diff --git a/block/blk-core.c b/block/blk-core.c
index c04505358342..0a00e4ecf87c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1549,11 +1549,9 @@ get_rq:
if (plug) {
/*
* If this is the first request added after a plug, fire
- * of a plug trace. If others have been added before, check
- * if we have multiple devices in this plug. If so, make a
- * note to sort the list before dispatch.
+ * off a plug trace.
*/
- if (list_empty(&plug->list))
+ if (!request_count)
trace_block_plug(q);
else {
if (request_count >= BLK_MAX_REQUEST_COUNT) {
diff --git a/block/blk-exec.c b/block/blk-exec.c
index e70621396129..ae4f27d7944e 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -68,9 +68,9 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
spin_lock_irq(q->queue_lock);
if (unlikely(blk_queue_dying(q))) {
+ rq->cmd_flags |= REQ_QUIET;
rq->errors = -ENXIO;
- if (rq->end_io)
- rq->end_io(rq, rq->errors);
+ __blk_end_request_all(rq, rq->errors);
spin_unlock_irq(q->queue_lock);
return;
}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index dabb9d02cf9a..434944cbd761 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1803,7 +1803,7 @@ static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
if (samples) {
v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
- do_div(v, samples);
+ v = div64_u64(v, samples);
}
__blkg_prfill_u64(sf, pd, v);
return 0;
@@ -4358,7 +4358,7 @@ static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
if (!eq)
return -ENOMEM;
- cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
+ cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
if (!cfqd) {
kobject_put(&eq->kobj);
return -ENOMEM;
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 20614a332362..9ef66406c625 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -346,7 +346,7 @@ static int deadline_init_queue(struct request_queue *q, struct elevator_type *e)
if (!eq)
return -ENOMEM;
- dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
+ dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
if (!dd) {
kobject_put(&eq->kobj);
return -ENOMEM;
diff --git a/block/elevator.c b/block/elevator.c
index 668394d18588..2bcbd8cc14d4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -155,7 +155,7 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
{
struct elevator_queue *eq;
- eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
+ eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
if (unlikely(!eq))
goto err;
diff --git a/block/genhd.c b/block/genhd.c
index dadf42b454a3..791f41943132 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1252,8 +1252,7 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
{
struct gendisk *disk;
- disk = kmalloc_node(sizeof(struct gendisk),
- GFP_KERNEL | __GFP_ZERO, node_id);
+ disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
if (disk) {
if (!init_part_stats(&disk->part0)) {
kfree(disk);
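
The cfq-iosched, deadline-iosched, elevator and genhd hunks above are the same mechanical conversion: kzalloc_node(size, flags, node) is defined as kmalloc_node(size, flags | __GFP_ZERO, node), so the dedicated helper expresses the zeroed allocation without spelling out the gfp flag. A small illustrative sketch (struct foo and foo_alloc are hypothetical):

#include <linux/slab.h>

struct foo {
        int a;
        void *b;
};

static struct foo *foo_alloc(int node)
{
        struct foo *old, *new;

        /* Old spelling: zeroing requested through a raw gfp flag. */
        old = kmalloc_node(sizeof(*old), GFP_KERNEL | __GFP_ZERO, node);
        kfree(old);                     /* kfree(NULL) is a no-op */

        /* Equivalent, preferred spelling used by the hunks above. */
        new = kzalloc_node(sizeof(*new), GFP_KERNEL, node);
        return new;
}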
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 69ce573f1224..71f337aefa39 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -776,6 +776,22 @@ config CRYPTO_AES_ARM
See <http://csrc.nist.gov/encryption/aes/> for more information.
+config CRYPTO_AES_ARM_BS
+ tristate "Bit sliced AES using NEON instructions"
+ depends on ARM && KERNEL_MODE_NEON
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES_ARM
+ select CRYPTO_ABLK_HELPER
+ help
+ Use a faster and more secure NEON based implementation of AES in CBC,
+ CTR and XTS modes
+
+ Bit sliced AES gives around 45% speedup on Cortex-A15 for CTR mode
+ and for XTS mode encryption, CBC and XTS mode decryption speedup is
+ around 25%. (CBC encryption speed is not affected by this driver.)
+ This implementation does not rely on any lookup tables so it is
+ believed to be invulnerable to cache timing attacks.
+
config CRYPTO_ANUBIS
tristate "Anubis cipher algorithm"
select CRYPTO_ALGAPI
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
index f40acef80269..a6977e12d574 100644
--- a/drivers/acpi/acpi_ipmi.c
+++ b/drivers/acpi/acpi_ipmi.c
@@ -39,6 +39,7 @@
#include <linux/ipmi.h>
#include <linux/device.h>
#include <linux/pnp.h>
+#include <linux/spinlock.h>
MODULE_AUTHOR("Zhao Yakui");
MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
@@ -57,7 +58,7 @@ struct acpi_ipmi_device {
struct list_head head;
/* the IPMI request message list */
struct list_head tx_msg_list;
- struct mutex tx_msg_lock;
+ spinlock_t tx_msg_lock;
acpi_handle handle;
struct pnp_dev *pnp_dev;
ipmi_user_t user_interface;
@@ -147,6 +148,7 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
struct kernel_ipmi_msg *msg;
struct acpi_ipmi_buffer *buffer;
struct acpi_ipmi_device *device;
+ unsigned long flags;
msg = &tx_msg->tx_message;
/*
@@ -177,10 +179,10 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
/* Get the msgid */
device = tx_msg->device;
- mutex_lock(&device->tx_msg_lock);
+ spin_lock_irqsave(&device->tx_msg_lock, flags);
device->curr_msgid++;
tx_msg->tx_msgid = device->curr_msgid;
- mutex_unlock(&device->tx_msg_lock);
+ spin_unlock_irqrestore(&device->tx_msg_lock, flags);
}
static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
@@ -242,6 +244,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
int msg_found = 0;
struct acpi_ipmi_msg *tx_msg;
struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
+ unsigned long flags;
if (msg->user != ipmi_device->user_interface) {
dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
@@ -250,7 +253,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
ipmi_free_recv_msg(msg);
return;
}
- mutex_lock(&ipmi_device->tx_msg_lock);
+ spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
if (msg->msgid == tx_msg->tx_msgid) {
msg_found = 1;
@@ -258,7 +261,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
}
}
- mutex_unlock(&ipmi_device->tx_msg_lock);
+ spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
if (!msg_found) {
dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
"returned.\n", msg->msgid);
@@ -378,6 +381,7 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
struct acpi_ipmi_device *ipmi_device = handler_context;
int err, rem_time;
acpi_status status;
+ unsigned long flags;
/*
* IPMI opregion message.
* IPMI message is firstly written to the BMC and system software
@@ -395,9 +399,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
return AE_NO_MEMORY;
acpi_format_ipmi_msg(tx_msg, address, value);
- mutex_lock(&ipmi_device->tx_msg_lock);
+ spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
- mutex_unlock(&ipmi_device->tx_msg_lock);
+ spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
err = ipmi_request_settime(ipmi_device->user_interface,
&tx_msg->addr,
tx_msg->tx_msgid,
@@ -413,9 +417,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
status = AE_OK;
end_label:
- mutex_lock(&ipmi_device->tx_msg_lock);
+ spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
list_del(&tx_msg->head);
- mutex_unlock(&ipmi_device->tx_msg_lock);
+ spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
kfree(tx_msg);
return status;
}
@@ -457,7 +461,7 @@ static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
INIT_LIST_HEAD(&ipmi_device->head);
- mutex_init(&ipmi_device->tx_msg_lock);
+ spin_lock_init(&ipmi_device->tx_msg_lock);
INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
ipmi_install_space_handler(ipmi_device);
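
Note on the acpi_ipmi hunks above: tx_msg_lock is converted from a mutex to a spinlock taken with irqsave. The usual reason for such a conversion is that the lock also has to be taken from a context that may not sleep (here, presumably, the IPMI receive callback path); spin_lock_irqsave() is safe from both process and atomic context. A minimal kernel-style sketch of the pattern, with illustrative names (my_dev, tx_lock) rather than the driver's own:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct my_dev {                          /* illustrative, not the driver's struct */
            spinlock_t tx_lock;              /* protects tx_list; also taken in atomic context */
            struct list_head tx_list;
    };

    static void my_dev_init(struct my_dev *d)
    {
            spin_lock_init(&d->tx_lock);
            INIT_LIST_HEAD(&d->tx_list);
    }

    static void my_dev_queue(struct my_dev *d, struct list_head *item)
    {
            unsigned long flags;

            spin_lock_irqsave(&d->tx_lock, flags);   /* works in process and IRQ context */
            list_add_tail(item, &d->tx_list);
            spin_unlock_irqrestore(&d->tx_lock, flags);
    }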
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fbdb82e70d10..611ce9061dc5 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1121,7 +1121,7 @@ int acpi_bus_register_driver(struct acpi_driver *driver)
EXPORT_SYMBOL(acpi_bus_register_driver);
/**
- * acpi_bus_unregister_driver - unregisters a driver with the APIC bus
+ * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
* @driver: driver to unregister
*
* Unregisters a driver with the ACPI bus. Searches the namespace for all
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 958ba2a420c3..97f4acb54ad6 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -2,7 +2,7 @@
* sata_promise.c - Promise SATA
*
* Maintained by: Tejun Heo <tj@kernel.org>
- * Mikael Pettersson <mikpe@it.uu.se>
+ * Mikael Pettersson
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 449f6298dc89..8557adcd34ee 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2865,15 +2865,4 @@ static struct pci_driver he_driver = {
.id_table = he_pci_tbl,
};
-static int __init he_init(void)
-{
- return pci_register_driver(&he_driver);
-}
-
-static void __exit he_cleanup(void)
-{
- pci_unregister_driver(&he_driver);
-}
-
-module_init(he_init);
-module_exit(he_cleanup);
+module_pci_driver(he_driver);
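
module_pci_driver() (from <linux/pci.h>) is shorthand for exactly the init/exit boilerplate deleted above; it generates module_init/module_exit stubs that only register and unregister the driver. Roughly equivalent hand-written code, shown for a hypothetical my_driver:

    static int __init my_driver_init(void)
    {
            return pci_register_driver(&my_driver);
    }
    module_init(my_driver_init);

    static void __exit my_driver_exit(void)
    {
            pci_unregister_driver(&my_driver);
    }
    module_exit(my_driver_exit);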
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 409502a78e7e..5aca5f4c5458 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -778,7 +778,7 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
return error;
}
- if (mac[i] == NULL || mac_pton(mac[i], card->atmdev->esi)) {
+ if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) {
nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
card->atmdev->esi, 6);
if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) ==
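
mac_pton() returns true when it successfully parses an "xx:xx:xx:xx:xx:xx" string into the 6-byte buffer, so the old condition took the EEPROM fallback even when the module parameter had parsed correctly; the added negation limits the fallback to "no parameter, or unparsable parameter". A short sketch of the intended logic (read_mac_from_eeprom() is a hypothetical stand-in for the driver's EEPROM read):

    u8 esi[6];

    /* prefer a valid user-supplied MAC, otherwise fall back to the EEPROM */
    if (!mac || !mac_pton(mac, esi))
            read_mac_from_eeprom(esi);      /* hypothetical helper */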
diff --git a/drivers/base/core.c b/drivers/base/core.c
index c7cfadcf6752..34abf4d8a45f 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2017,7 +2017,7 @@ EXPORT_SYMBOL_GPL(device_move);
*/
void device_shutdown(void)
{
- struct device *dev;
+ struct device *dev, *parent;
spin_lock(&devices_kset->list_lock);
/*
@@ -2034,7 +2034,7 @@ void device_shutdown(void)
* prevent it from being freed because parent's
* lock is to be held
*/
- get_device(dev->parent);
+ parent = get_device(dev->parent);
get_device(dev);
/*
* Make sure the device is off the kset list, in the
@@ -2044,8 +2044,8 @@ void device_shutdown(void)
spin_unlock(&devices_kset->list_lock);
/* hold lock to avoid race with probe/release */
- if (dev->parent)
- device_lock(dev->parent);
+ if (parent)
+ device_lock(parent);
device_lock(dev);
/* Don't allow any more runtime suspends */
@@ -2063,11 +2063,11 @@ void device_shutdown(void)
}
device_unlock(dev);
- if (dev->parent)
- device_unlock(dev->parent);
+ if (parent)
+ device_unlock(parent);
put_device(dev);
- put_device(dev->parent);
+ put_device(parent);
spin_lock(&devices_kset->list_lock);
}
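
In the device_shutdown() change, the result of get_device(dev->parent) is snapshotted into a local variable and that pointer is used for every later lock, unlock and put, so the same (possibly NULL) parent is referenced consistently even if dev->parent changes in between; both get_device() and put_device() tolerate NULL. The resulting pattern, condensed:

    struct device *parent = get_device(dev->parent);    /* may be NULL */

    get_device(dev);
    if (parent)
            device_lock(parent);
    device_lock(dev);

    /* ... shut the device down ... */

    device_unlock(dev);
    if (parent)
            device_unlock(parent);
    put_device(dev);
    put_device(parent);                                  /* put_device(NULL) is a no-op */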
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index cd6b20fce680..37768401d113 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -269,6 +269,8 @@ static struct bcma_device *bcma_find_core_reverse(struct bcma_bus *bus, u16 core
return NULL;
}
+#define IS_ERR_VALUE_U32(x) ((x) >= (u32)-MAX_ERRNO)
+
static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
struct bcma_device_id *match, int core_num,
struct bcma_device *core)
@@ -351,11 +353,11 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
* the main register space for the core
*/
tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SLAVE, 0);
- if (tmp == 0 || IS_ERR_VALUE(tmp)) {
+ if (tmp == 0 || IS_ERR_VALUE_U32(tmp)) {
/* Try again to see if it is a bridge */
tmp = bcma_erom_get_addr_desc(bus, eromptr,
SCAN_ADDR_TYPE_BRIDGE, 0);
- if (tmp == 0 || IS_ERR_VALUE(tmp)) {
+ if (tmp == 0 || IS_ERR_VALUE_U32(tmp)) {
return -EILSEQ;
} else {
bcma_info(bus, "Bridge found\n");
@@ -369,7 +371,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
for (j = 0; ; j++) {
tmp = bcma_erom_get_addr_desc(bus, eromptr,
SCAN_ADDR_TYPE_SLAVE, i);
- if (IS_ERR_VALUE(tmp)) {
+ if (IS_ERR_VALUE_U32(tmp)) {
/* no more entries for port _i_ */
/* pr_debug("erom: slave port %d "
* "has %d descriptors\n", i, j); */
@@ -386,7 +388,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
for (j = 0; ; j++) {
tmp = bcma_erom_get_addr_desc(bus, eromptr,
SCAN_ADDR_TYPE_MWRAP, i);
- if (IS_ERR_VALUE(tmp)) {
+ if (IS_ERR_VALUE_U32(tmp)) {
/* no more entries for port _i_ */
/* pr_debug("erom: master wrapper %d "
* "has %d descriptors\n", i, j); */
@@ -404,7 +406,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
for (j = 0; ; j++) {
tmp = bcma_erom_get_addr_desc(bus, eromptr,
SCAN_ADDR_TYPE_SWRAP, i + hack);
- if (IS_ERR_VALUE(tmp)) {
+ if (IS_ERR_VALUE_U32(tmp)) {
/* no more entries for port _i_ */
/* pr_debug("erom: master wrapper %d "
* has %d descriptors\n", i, j); */
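
The generic IS_ERR_VALUE() compares against (unsigned long)-MAX_ERRNO, so when applied to a u32 on a 64-bit kernel the value is zero-extended and never looks like an error; the local IS_ERR_VALUE_U32() keeps the comparison in 32 bits. A small plain-C demonstration of the pitfall (compilable in userspace; macros simplified from the kernel definitions):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t u32;

    #define MAX_ERRNO           4095
    #define IS_ERR_VALUE(x)     ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
    #define IS_ERR_VALUE_U32(x) ((x) >= (u32)-MAX_ERRNO)

    int main(void)
    {
            u32 err = (u32)-84;     /* e.g. -EILSEQ held in a 32-bit value */

            /* On LP64, err zero-extends to 0xffffffac, far below
             * (unsigned long)-MAX_ERRNO, so the generic test misses it. */
            printf("IS_ERR_VALUE:     %d\n", IS_ERR_VALUE(err));       /* 0 on 64-bit */
            printf("IS_ERR_VALUE_U32: %d\n", IS_ERR_VALUE_U32(err));   /* 1 */
            return 0;
    }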
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index d2d95ff5353b..edfa2515bc86 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1189,6 +1189,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
int err;
u32 cp;
+ memset(&arg64, 0, sizeof(arg64));
err = 0;
err |=
copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 639d26b90b91..2b9440384536 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -1193,6 +1193,7 @@ out_passthru:
ida_pci_info_struct pciinfo;
if (!arg) return -EINVAL;
+ memset(&pciinfo, 0, sizeof(pciinfo));
pciinfo.bus = host->pci_dev->bus->number;
pciinfo.dev_fn = host->pci_dev->devfn;
pciinfo.board_id = host->board_id;
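
Both memset() additions (cciss and cpqarray) zero a structure that is later copied, in whole or in part, back to user space; without that, compiler-inserted padding and any fields the ioctl handler never writes would leak kernel stack contents. The pattern, for a hypothetical ioctl reply:

    struct my_info info;                    /* hypothetical reply struct */

    memset(&info, 0, sizeof(info));         /* clear padding and unset fields */
    info.bus   = pdev->bus->number;
    info.devfn = pdev->devfn;

    if (copy_to_user(argp, &info, sizeof(info)))
            return -EFAULT;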
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index b22a7d0fe5b7..cb1db2979d3d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -931,12 +931,14 @@ static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
u64 snap_id)
{
u32 which;
+ const char *snap_name;
which = rbd_dev_snap_index(rbd_dev, snap_id);
if (which == BAD_SNAP_INDEX)
- return NULL;
+ return ERR_PTR(-ENOENT);
- return _rbd_dev_v1_snap_name(rbd_dev, which);
+ snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
+ return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
@@ -2812,7 +2814,7 @@ out_err:
obj_request_done_set(obj_request);
}
-static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
+static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
{
struct rbd_obj_request *obj_request;
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
@@ -2827,16 +2829,17 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
if (!obj_request->osd_req)
goto out;
- obj_request->callback = rbd_obj_request_put;
osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
notify_id, 0, 0);
rbd_osd_req_format_read(obj_request);
ret = rbd_obj_request_submit(osdc, obj_request);
-out:
if (ret)
- rbd_obj_request_put(obj_request);
+ goto out;
+ ret = rbd_obj_request_wait(obj_request);
+out:
+ rbd_obj_request_put(obj_request);
return ret;
}
@@ -2856,7 +2859,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
if (ret)
rbd_warn(rbd_dev, "header refresh error (%d)\n", ret);
- rbd_obj_notify_ack(rbd_dev, notify_id);
+ rbd_obj_notify_ack_sync(rbd_dev, notify_id);
}
/*
@@ -3328,6 +3331,31 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev)
clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}
+static void rbd_dev_update_size(struct rbd_device *rbd_dev)
+{
+ sector_t size;
+ bool removing;
+
+ /*
+ * Don't hold the lock while doing disk operations,
+ * or lock ordering will conflict with the bdev mutex via:
+ * rbd_add() -> blkdev_get() -> rbd_open()
+ */
+ spin_lock_irq(&rbd_dev->lock);
+ removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
+ spin_unlock_irq(&rbd_dev->lock);
+ /*
+ * If the device is being removed, rbd_dev->disk has
+ * been destroyed, so don't try to update its size
+ */
+ if (!removing) {
+ size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
+ dout("setting size to %llu sectors", (unsigned long long)size);
+ set_capacity(rbd_dev->disk, size);
+ revalidate_disk(rbd_dev->disk);
+ }
+}
+
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
u64 mapping_size;
@@ -3347,12 +3375,7 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
up_write(&rbd_dev->header_rwsem);
if (mapping_size != rbd_dev->mapping.size) {
- sector_t size;
-
- size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
- dout("setting size to %llu sectors", (unsigned long long)size);
- set_capacity(rbd_dev->disk, size);
- revalidate_disk(rbd_dev->disk);
+ rbd_dev_update_size(rbd_dev);
}
return ret;
@@ -4061,8 +4084,13 @@ static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
snap_id = snapc->snaps[which];
snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
- if (IS_ERR(snap_name))
- break;
+ if (IS_ERR(snap_name)) {
+ /* ignore no-longer existing snapshots */
+ if (PTR_ERR(snap_name) == -ENOENT)
+ continue;
+ else
+ break;
+ }
found = !strcmp(name, snap_name);
kfree(snap_name);
}
@@ -4141,8 +4169,8 @@ static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
/* Look up the snapshot name, and make a copy */
snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
- if (!snap_name) {
- ret = -ENOMEM;
+ if (IS_ERR(snap_name)) {
+ ret = PTR_ERR(snap_name);
goto out_err;
}
@@ -5163,10 +5191,23 @@ static ssize_t rbd_remove(struct bus_type *bus,
if (ret < 0 || already)
return ret;
- rbd_bus_del_dev(rbd_dev);
ret = rbd_dev_header_watch_sync(rbd_dev, false);
if (ret)
rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
+
+ /*
+ * flush remaining watch callbacks - these must be complete
+ * before the osd_client is shut down
+ */
+ dout("%s: flushing notifies", __func__);
+ ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
+ /*
+ * Don't free anything from rbd_dev->disk until after all
+ * notifies are completely processed. Otherwise
+ * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
+ * in a potential use after free of rbd_dev->disk or rbd_dev.
+ */
+ rbd_bus_del_dev(rbd_dev);
rbd_dev_image_release(rbd_dev);
module_put(THIS_MODULE);
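
The rbd hunks above move the snapshot-name lookups from "NULL means failure" to the ERR_PTR convention from <linux/err.h>, so callers can tell -ENOENT (snapshot no longer exists, skip it) apart from -ENOMEM (hard failure). A minimal sketch of the convention, with a hypothetical lookup function and caller:

    /* hypothetical producer: encodes an errno inside the returned pointer */
    static const char *lookup_name(int id)
    {
            if (!entry_exists(id))                  /* hypothetical check */
                    return ERR_PTR(-ENOENT);
            if (!names[id])                         /* hypothetical table */
                    return ERR_PTR(-ENOMEM);
            return names[id];
    }

    /* caller side */
    const char *name = lookup_name(id);

    if (IS_ERR(name)) {
            if (PTR_ERR(name) == -ENOENT)
                    goto skip;                      /* tolerate vanished entries */
            return PTR_ERR(name);                   /* propagate real errors */
    }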
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 7a7929ba2658..06189e55b4e5 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -142,32 +142,6 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
return length;
}
-ssize_t tpm_show_locality(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
- struct tpm_private *priv = TPM_VPRIV(chip);
- u8 locality = priv->shr->locality;
-
- return sprintf(buf, "%d\n", locality);
-}
-
-ssize_t tpm_store_locality(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
- struct tpm_private *priv = TPM_VPRIV(chip);
- u8 val;
-
- int rv = kstrtou8(buf, 0, &val);
- if (rv)
- return rv;
-
- priv->shr->locality = val;
-
- return len;
-}
-
static const struct file_operations vtpm_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -188,8 +162,6 @@ static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
-static DEVICE_ATTR(locality, S_IRUGO | S_IWUSR, tpm_show_locality,
- tpm_store_locality);
static struct attribute *vtpm_attrs[] = {
&dev_attr_pubek.attr,
@@ -202,7 +174,6 @@ static struct attribute *vtpm_attrs[] = {
&dev_attr_cancel.attr,
&dev_attr_durations.attr,
&dev_attr_timeouts.attr,
- &dev_attr_locality.attr,
NULL,
};
@@ -210,8 +181,6 @@ static struct attribute_group vtpm_attr_grp = {
.attrs = vtpm_attrs,
};
-#define TPM_LONG_TIMEOUT (10 * 60 * HZ)
-
static const struct tpm_vendor_specific tpm_vtpm = {
.status = vtpm_status,
.recv = vtpm_recv,
@@ -224,11 +193,6 @@ static const struct tpm_vendor_specific tpm_vtpm = {
.miscdev = {
.fops = &vtpm_ops,
},
- .duration = {
- TPM_LONG_TIMEOUT,
- TPM_LONG_TIMEOUT,
- TPM_LONG_TIMEOUT,
- },
};
static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 41c69469ce20..971d796e071d 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -26,6 +26,7 @@ config DW_APB_TIMER_OF
config ARMADA_370_XP_TIMER
bool
+ select CLKSRC_OF
config ORION_TIMER
select CLKSRC_OF
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
index 37f5325bec95..b9ddd9e3a2f5 100644
--- a/drivers/clocksource/clksrc-of.c
+++ b/drivers/clocksource/clksrc-of.c
@@ -30,6 +30,9 @@ void __init clocksource_of_init(void)
clocksource_of_init_fn init_func;
for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
+ if (!of_device_is_available(np))
+ continue;
+
init_func = match->data;
init_func(np);
}
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index b9c81b7c3a3b..3a5909c12d42 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -301,7 +301,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
ced->name = dev_name(&p->pdev->dev);
ced->features = CLOCK_EVT_FEAT_ONESHOT;
ced->rating = 200;
- ced->cpumask = cpumask_of(0);
+ ced->cpumask = cpu_possible_mask;
ced->set_next_event = em_sti_clock_event_next;
ced->set_mode = em_sti_clock_event_mode;
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 5b34768f4d7c..62b0de6a1837 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -428,7 +428,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
evt->irq);
return -EIO;
}
- irq_set_affinity(evt->irq, cpumask_of(cpu));
} else {
enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
}
@@ -449,6 +448,7 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
struct mct_clock_event_device *mevt;
+ unsigned int cpu;
/*
* Grab cpu pointer in each case to avoid spurious
@@ -459,6 +459,12 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
mevt = this_cpu_ptr(&percpu_mct_tick);
exynos4_local_timer_setup(&mevt->evt);
break;
+ case CPU_ONLINE:
+ cpu = (unsigned long)hcpu;
+ if (mct_int_type == MCT_INT_SPI)
+ irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
+ cpumask_of(cpu));
+ break;
case CPU_DYING:
mevt = this_cpu_ptr(&percpu_mct_tick);
exynos4_local_timer_stop(&mevt->evt);
@@ -500,6 +506,8 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
&percpu_mct_tick);
WARN(err, "MCT: can't request IRQ %d (%d)\n",
mct_irqs[MCT_L0_IRQ], err);
+ } else {
+ irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
}
err = register_cpu_notifier(&exynos4_mct_cpu_nb);
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index a1260b4549db..d2c3253e015e 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -986,6 +986,10 @@ static int __init acpi_cpufreq_init(void)
{
int ret;
+ /* don't keep reloading if cpufreq_driver exists */
+ if (cpufreq_get_current_driver())
+ return 0;
+
if (acpi_disabled)
return 0;
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index cbfffa91ebdd..78c49d8e0f4a 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -12,6 +12,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
+#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/module.h>
@@ -177,7 +178,11 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
struct device_node *np;
int ret;
- cpu_dev = &pdev->dev;
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ pr_err("failed to get cpu0 device\n");
+ return -ENODEV;
+ }
np = of_node_get(cpu_dev->of_node);
if (!np) {
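
cpufreq-cpu0 (like the matching imx6q hunk further down) previously treated the platform device itself as "the CPU device"; the probe now asks for the device registered for CPU0 via get_cpu_device(0) from <linux/cpu.h>, which is presumably what the following of_node and OPP lookups actually need, and bails out with -ENODEV if no such device exists:

    struct device *cpu_dev;
    struct device_node *np;

    cpu_dev = get_cpu_device(0);            /* the device for CPU0, or NULL */
    if (!cpu_dev) {
            pr_err("failed to get cpu0 device\n");
            return -ENODEV;
    }

    np = of_node_get(cpu_dev->of_node);     /* CPU0's DT node, not the pdev's */
    if (!np)
            return -ENOENT;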
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 43c24aa756f6..04548f7023af 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -952,9 +952,20 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
if (cpu == policy->cpu)
return;
+ /*
+ * Take direct locks as lock_policy_rwsem_write wouldn't work here.
+ * Also, taking the lock for the last cpu is enough, as contention can
+ * only happen after policy->cpu has changed; other threads will then
+ * try to acquire the lock for the new cpu, and the policy is already
+ * updated by then.
+ */
+ down_write(&per_cpu(cpu_policy_rwsem, policy->cpu));
+
policy->last_cpu = policy->cpu;
policy->cpu = cpu;
+ up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));
+
#ifdef CONFIG_CPU_FREQ_TABLE
cpufreq_frequency_table_update_policy_cpu(policy);
#endif
@@ -1125,7 +1136,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
int ret;
/* first sibling now owns the new sysfs dir */
- cpu_dev = get_cpu_device(cpumask_first(policy->cpus));
+ cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
/* Don't touch sysfs files during light-weight tear-down */
if (frozen)
@@ -1189,12 +1200,9 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
policy->governor->name, CPUFREQ_NAME_LEN);
#endif
- WARN_ON(lock_policy_rwsem_write(cpu));
+ lock_policy_rwsem_read(cpu);
cpus = cpumask_weight(policy->cpus);
-
- if (cpus > 1)
- cpumask_clear_cpu(cpu, policy->cpus);
- unlock_policy_rwsem_write(cpu);
+ unlock_policy_rwsem_read(cpu);
if (cpu != policy->cpu) {
if (!frozen)
@@ -1203,9 +1211,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
if (new_cpu >= 0) {
- WARN_ON(lock_policy_rwsem_write(cpu));
update_policy_cpu(policy, new_cpu);
- unlock_policy_rwsem_write(cpu);
if (!frozen) {
pr_debug("%s: policy Kobject moved to cpu: %d "
@@ -1237,9 +1243,12 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
return -EINVAL;
}
- lock_policy_rwsem_read(cpu);
+ WARN_ON(lock_policy_rwsem_write(cpu));
cpus = cpumask_weight(policy->cpus);
- unlock_policy_rwsem_read(cpu);
+
+ if (cpus > 1)
+ cpumask_clear_cpu(cpu, policy->cpus);
+ unlock_policy_rwsem_write(cpu);
/* If cpu is last user of policy, free policy */
if (cpus == 1) {
@@ -1451,6 +1460,9 @@ unsigned int cpufreq_get(unsigned int cpu)
{
unsigned int ret_freq = 0;
+ if (cpufreq_disabled() || !cpufreq_driver)
+ return -ENOENT;
+
if (!down_read_trylock(&cpufreq_rwsem))
return 0;
@@ -2095,7 +2107,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
write_lock_irqsave(&cpufreq_driver_lock, flags);
if (cpufreq_driver) {
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return -EBUSY;
+ return -EEXIST;
}
cpufreq_driver = driver_data;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index d514c152fd1a..be5380ecdcd4 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -457,7 +457,7 @@ err_free_table:
opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
err_put_node:
of_node_put(np);
- dev_err(dvfs_info->dev, "%s: failed initialization\n", __func__);
+ dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
return ret;
}
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 3e396543aea4..c3fd2a101ca0 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -7,6 +7,7 @@
*/
#include <linux/clk.h>
+#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -202,7 +203,11 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
unsigned long min_volt, max_volt;
int num, ret;
- cpu_dev = &pdev->dev;
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ pr_err("failed to get cpu0 device\n");
+ return -ENODEV;
+ }
np = of_node_get(cpu_dev->of_node);
if (!np) {
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 796dbb212a41..8492b68e873c 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -177,7 +177,7 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
static inline void ast_open_key(struct ast_private *ast)
{
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xA1, 0xFF, 0x04);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
}
#define AST_VIDMEM_SIZE_8M 0x00800000
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index b4fb86d89850..224ff965bcf7 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -42,6 +42,10 @@
#include <drm/drmP.h>
+/******************************************************************/
+/** \name Context bitmap support */
+/*@{*/
+
/**
* Free a handle from the context bitmap.
*
@@ -52,48 +56,13 @@
* in drm_device::ctx_idr, while holding the drm_device::struct_mutex
* lock.
*/
-static void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
+void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
mutex_lock(&dev->struct_mutex);
idr_remove(&dev->ctx_idr, ctx_handle);
mutex_unlock(&dev->struct_mutex);
}
-/******************************************************************/
-/** \name Context bitmap support */
-/*@{*/
-
-void drm_legacy_ctxbitmap_release(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- mutex_lock(&dev->ctxlist_mutex);
- if (!list_empty(&dev->ctxlist)) {
- struct drm_ctx_list *pos, *n;
-
- list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
- if (pos->tag == file_priv &&
- pos->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor)
- dev->driver->context_dtor(dev,
- pos->handle);
-
- drm_ctxbitmap_free(dev, pos->handle);
-
- list_del(&pos->head);
- kfree(pos);
- --dev->ctx_count;
- }
- }
- }
- mutex_unlock(&dev->ctxlist_mutex);
-}
-
/**
* Context bitmap allocation.
*
@@ -121,12 +90,10 @@ static int drm_ctxbitmap_next(struct drm_device * dev)
*
* Initialise the drm_device::ctx_idr
*/
-void drm_legacy_ctxbitmap_init(struct drm_device * dev)
+int drm_ctxbitmap_init(struct drm_device * dev)
{
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
idr_init(&dev->ctx_idr);
+ return 0;
}
/**
@@ -137,7 +104,7 @@ void drm_legacy_ctxbitmap_init(struct drm_device * dev)
* Free all idr members using drm_ctx_sarea_free helper function
* while holding the drm_device::struct_mutex lock.
*/
-void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
+void drm_ctxbitmap_cleanup(struct drm_device * dev)
{
mutex_lock(&dev->struct_mutex);
idr_destroy(&dev->ctx_idr);
@@ -169,9 +136,6 @@ int drm_getsareactx(struct drm_device *dev, void *data,
struct drm_local_map *map;
struct drm_map_list *_entry;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
mutex_lock(&dev->struct_mutex);
map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -216,9 +180,6 @@ int drm_setsareactx(struct drm_device *dev, void *data,
struct drm_local_map *map = NULL;
struct drm_map_list *r_list = NULL;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
mutex_lock(&dev->struct_mutex);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map
@@ -319,9 +280,6 @@ int drm_resctx(struct drm_device *dev, void *data,
struct drm_ctx ctx;
int i;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
if (res->count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -352,9 +310,6 @@ int drm_addctx(struct drm_device *dev, void *data,
struct drm_ctx_list *ctx_entry;
struct drm_ctx *ctx = data;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
ctx->handle = drm_ctxbitmap_next(dev);
if (ctx->handle == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
@@ -398,9 +353,6 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
/* This is 0, because we don't handle any context flags */
ctx->flags = 0;
@@ -423,9 +375,6 @@ int drm_switchctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
DRM_DEBUG("%d\n", ctx->handle);
return drm_context_switch(dev, dev->last_context, ctx->handle);
}
@@ -446,9 +395,6 @@ int drm_newctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
DRM_DEBUG("%d\n", ctx->handle);
drm_context_switch_complete(dev, file_priv, ctx->handle);
@@ -471,9 +417,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 3d13ca6e257f..f6f6cc7fc133 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -416,6 +416,14 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
return;
/*
+ * fbdev->blank can be called from irq context in case of a panic.
+ * Since we already have our own special panic handler which will
+ * restore the fbdev console mode completely, just bail out early.
+ */
+ if (oops_in_progress)
+ return;
+
+ /*
* For each CRTC in this fb, turn the connectors on/off.
*/
drm_modeset_lock_all(dev);
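
oops_in_progress is the global flag set while the kernel is printing an oops/panic; as the added comment says, the fbdev blanking hook can then run from atomic context, so the helper now returns immediately instead of trying to take the modeset locks. The resulting shape of the function:

    if (oops_in_progress)
            return;         /* panic path: the panic handler restores the console */

    drm_modeset_lock_all(dev);
    /* ... walk the CRTCs and apply the DPMS state ... */
    drm_modeset_unlock_all(dev);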
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 4be8e09a32ef..3f84277d7036 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -439,7 +439,26 @@ int drm_release(struct inode *inode, struct file *filp)
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_release(dev, file_priv);
- drm_legacy_ctxbitmap_release(dev, file_priv);
+ mutex_lock(&dev->ctxlist_mutex);
+ if (!list_empty(&dev->ctxlist)) {
+ struct drm_ctx_list *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+ if (pos->tag == file_priv &&
+ pos->handle != DRM_KERNEL_CONTEXT) {
+ if (dev->driver->context_dtor)
+ dev->driver->context_dtor(dev,
+ pos->handle);
+
+ drm_ctxbitmap_free(dev, pos->handle);
+
+ list_del(&pos->head);
+ kfree(pos);
+ --dev->ctx_count;
+ }
+ }
+ }
+ mutex_unlock(&dev->ctxlist_mutex);
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index e7eb0276f7f1..39d864576be4 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -292,7 +292,13 @@ int drm_fill_in_dev(struct drm_device *dev,
goto error_out_unreg;
}
- drm_legacy_ctxbitmap_init(dev);
+
+
+ retcode = drm_ctxbitmap_init(dev);
+ if (retcode) {
+ DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+ goto error_out_unreg;
+ }
if (driver->driver_features & DRIVER_GEM) {
retcode = drm_gem_init(dev);
@@ -446,7 +452,7 @@ void drm_put_dev(struct drm_device *dev)
drm_rmmap(dev, r_list->map);
drm_ht_remove(&dev->map_hash);
- drm_legacy_ctxbitmap_cleanup(dev);
+ drm_ctxbitmap_cleanup(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_put_minor(&dev->control);
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 4752f223e5b2..45b6ef595965 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -56,7 +56,7 @@ config DRM_EXYNOS_IPP
config DRM_EXYNOS_FIMC
bool "Exynos DRM FIMC"
- depends on DRM_EXYNOS_IPP && MFD_SYSCON && OF
+ depends on DRM_EXYNOS_IPP && MFD_SYSCON
help
Choose this option if you want to use Exynos FIMC for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 3445a0f3a6b2..9c8088462c26 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -63,7 +63,8 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
return -ENOMEM;
}
- buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
+ buf->kvaddr = (void __iomem *)dma_alloc_attrs(dev->dev,
+ buf->size,
&buf->dma_addr, GFP_KERNEL,
&buf->dma_attrs);
if (!buf->kvaddr) {
@@ -90,9 +91,9 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
}
buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
- if (!buf->sgt) {
+ if (IS_ERR(buf->sgt)) {
DRM_ERROR("failed to get sg table.\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(buf->sgt);
goto err_free_attrs;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 78e868bcf1ec..e7c2f2d07f19 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -99,12 +99,13 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
if (is_drm_iommu_supported(dev)) {
unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
- buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
+ buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
+ nr_pages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
} else {
phys_addr_t dma_addr = buffer->dma_addr;
if (dma_addr)
- buffer->kvaddr = phys_to_virt(dma_addr);
+ buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr);
else
buffer->kvaddr = (void __iomem *)NULL;
}
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index b1f8fc69023f..60e84043aa34 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -707,8 +707,7 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
break;
case DRM_MODE_DPMS_OFF:
- /* disable audio and video ports */
- reg_write(encoder, REG_ENA_AP, 0x00);
+ /* disable video ports */
reg_write(encoder, REG_ENA_VP_0, 0x00);
reg_write(encoder, REG_ENA_VP_1, 0x00);
reg_write(encoder, REG_ENA_VP_2, 0x00);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8507c6d1e642..cdfb9da0e4ce 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1392,14 +1392,11 @@ out:
if (i915_terminally_wedged(&dev_priv->gpu_error))
return VM_FAULT_SIGBUS;
case -EAGAIN:
- /* Give the error handler a chance to run and move the
- * objects off the GPU active list. Next time we service the
- * fault, we should be able to transition the page into the
- * GTT without touching the GPU (and so avoid further
- * EIO/EGAIN). If the GPU is wedged, then there is no issue
- * with coherency, just lost writes.
+ /*
+ * EAGAIN means the gpu is hung and we'll wait for the error
+ * handler to reset everything when re-faulting in
+ * i915_mutex_lock_interruptible.
*/
- set_need_resched();
case 0:
case -ERESTARTSYS:
case -EINTR:
@@ -4803,10 +4800,10 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return SHRINK_STOP;
+ return 0;
if (dev_priv->mm.shrinker_no_lock_stealing)
- return SHRINK_STOP;
+ return 0;
unlock = false;
}
@@ -4904,10 +4901,10 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return 0;
+ return SHRINK_STOP;
if (dev_priv->mm.shrinker_no_lock_stealing)
- return 0;
+ return SHRINK_STOP;
unlock = false;
}
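
With the split count/scan shrinker API used here, the two callbacks signal "can't make progress right now" differently: the count callback should claim 0 freeable objects, while the scan callback should return SHRINK_STOP. The two i915 hunks swap the values that had ended up in the wrong callbacks. A sketch of the convention for a hypothetical shrinker guarded by a hypothetical my_lock mutex:

    static unsigned long my_count(struct shrinker *s, struct shrink_control *sc)
    {
            unsigned long count = 0;

            if (!mutex_trylock(&my_lock))
                    return 0;               /* nothing to report right now */
            /* ... count freeable objects into 'count' ... */
            mutex_unlock(&my_lock);
            return count;
    }

    static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
    {
            unsigned long freed = 0;

            if (!mutex_trylock(&my_lock))
                    return SHRINK_STOP;     /* tell reclaim to stop calling us */
            /* ... free up to sc->nr_to_scan objects, counting into 'freed' ... */
            mutex_unlock(&my_lock);
            return freed;
    }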
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index aba9d7498996..dae364f0028c 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -143,8 +143,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
/* Seek the first printf which is hits start position */
if (e->pos < e->start) {
- len = vsnprintf(NULL, 0, f, args);
- if (!__i915_error_seek(e, len))
+ va_list tmp;
+
+ va_copy(tmp, args);
+ if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
return;
}
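
The error-capture fix relies on the C rule that a va_list consumed by vsnprintf() cannot be reused: the length probe now runs on a va_copy'd duplicate so the original args remains valid for the real formatting later in the function. A compilable userspace illustration of the same pattern:

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    static char *format_alloc(const char *fmt, ...)
    {
            va_list args, tmp;
            char *buf;
            int len;

            va_start(args, fmt);
            va_copy(tmp, args);                     /* probe the length on a copy */
            len = vsnprintf(NULL, 0, fmt, tmp);
            va_end(tmp);

            buf = len < 0 ? NULL : malloc(len + 1);
            if (buf)
                    vsnprintf(buf, len + 1, fmt, args);   /* args is still usable */
            va_end(args);
            return buf;
    }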
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 83cce0cdb769..4b91228fd9bd 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1469,6 +1469,34 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
return ret;
}
+static void i915_error_wake_up(struct drm_i915_private *dev_priv,
+ bool reset_completed)
+{
+ struct intel_ring_buffer *ring;
+ int i;
+
+ /*
+ * Notify all waiters for GPU completion events that reset state has
+ * been changed, and that they need to restart their wait after
+ * checking for potential errors (and bail out to drop locks if there is
+ * a gpu reset pending so that i915_error_work_func can acquire them).
+ */
+
+ /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
+ for_each_ring(ring, dev_priv, i)
+ wake_up_all(&ring->irq_queue);
+
+ /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
+ wake_up_all(&dev_priv->pending_flip_queue);
+
+ /*
+ * Signal tasks blocked in i915_gem_wait_for_error that the pending
+ * reset state is cleared.
+ */
+ if (reset_completed)
+ wake_up_all(&dev_priv->gpu_error.reset_queue);
+}
+
/**
* i915_error_work_func - do process context error handling work
* @work: work struct
@@ -1483,11 +1511,10 @@ static void i915_error_work_func(struct work_struct *work)
drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
gpu_error);
struct drm_device *dev = dev_priv->dev;
- struct intel_ring_buffer *ring;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
- int i, ret;
+ int ret;
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
@@ -1506,8 +1533,16 @@ static void i915_error_work_func(struct work_struct *work)
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
reset_event);
+ /*
+ * All state reset _must_ be completed before we update the
+ * reset counter, for otherwise waiters might miss the reset
+ * pending state and not properly drop locks, resulting in
+ * deadlocks with the reset work.
+ */
ret = i915_reset(dev);
+ intel_display_handle_reset(dev);
+
if (ret == 0) {
/*
* After all the gem state is reset, increment the reset
@@ -1528,12 +1563,11 @@ static void i915_error_work_func(struct work_struct *work)
atomic_set(&error->reset_counter, I915_WEDGED);
}
- for_each_ring(ring, dev_priv, i)
- wake_up_all(&ring->irq_queue);
-
- intel_display_handle_reset(dev);
-
- wake_up_all(&dev_priv->gpu_error.reset_queue);
+ /*
+ * Note: The wake_up also serves as a memory barrier so that
+ * waiters see the updated value of the reset counter atomic_t.
+ */
+ i915_error_wake_up(dev_priv, true);
}
}
@@ -1642,8 +1676,6 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
void i915_handle_error(struct drm_device *dev, bool wedged)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- int i;
i915_capture_error_state(dev);
i915_report_and_clear_eir(dev);
@@ -1653,11 +1685,19 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
&dev_priv->gpu_error.reset_counter);
/*
- * Wakeup waiting processes so that the reset work item
- * doesn't deadlock trying to grab various locks.
+ * Wakeup waiting processes so that the reset work function
+ * i915_error_work_func doesn't deadlock trying to grab various
+ * locks. By bumping the reset counter first, the woken
+ * processes will see a reset in progress and back off,
+ * releasing their locks and then wait for the reset completion.
+ * We must do this for _all_ gpu waiters that might hold locks
+ * that the reset work needs to acquire.
+ *
+ * Note: The wake_up serves as the required memory barrier to
+ * ensure that the waiters see the updated value of the reset
+ * counter atomic_t.
*/
- for_each_ring(ring, dev_priv, i)
- wake_up_all(&ring->irq_queue);
+ i915_error_wake_up(dev_priv, false);
}
/*
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 63aca49d11a8..63de2701b974 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -778,7 +778,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
/* Can only use the always-on power well for eDP when
* not using the panel fitter, and when not using motion
* blur mitigation (which we don't support). */
- if (intel_crtc->config.pch_pfit.size)
+ if (intel_crtc->config.pch_pfit.enabled)
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2489d0b4c7d2..e5822e79f912 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2249,7 +2249,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
I915_WRITE(PIPESRC(intel_crtc->pipe),
((crtc->mode.hdisplay - 1) << 16) |
(crtc->mode.vdisplay - 1));
- if (!intel_crtc->config.pch_pfit.size &&
+ if (!intel_crtc->config.pch_pfit.enabled &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
@@ -3203,7 +3203,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
- if (crtc->config.pch_pfit.size) {
+ if (crtc->config.pch_pfit.enabled) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
@@ -3428,7 +3428,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc)
/* To avoid upsetting the power well on haswell only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
- if (crtc->config.pch_pfit.size) {
+ if (crtc->config.pch_pfit.enabled) {
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_POS(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
@@ -4775,6 +4775,10 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
pipeconf = 0;
+ if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
+ I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
+ pipeconf |= PIPECONF_ENABLE;
+
if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
/* Enable pixel doubling when the dot clock is > 90% of the (display)
* core speed.
@@ -4877,9 +4881,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
return -EINVAL;
}
- /* Ensure that the cursor is valid for the new mode before changing... */
- intel_crtc_update_cursor(crtc, true);
-
if (is_lvds && dev_priv->lvds_downclock_avail) {
/*
* Ensure we match the reduced clock's P to the target clock.
@@ -5768,9 +5769,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
intel_crtc->config.dpll.p2 = clock.p2;
}
- /* Ensure that the cursor is valid for the new mode before changing... */
- intel_crtc_update_cursor(crtc, true);
-
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
if (intel_crtc->config.has_pch_encoder) {
fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
@@ -5859,6 +5857,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
tmp = I915_READ(PF_CTL(crtc->pipe));
if (tmp & PF_ENABLE) {
+ pipe_config->pch_pfit.enabled = true;
pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
@@ -6236,7 +6235,7 @@ static void haswell_modeset_global_resources(struct drm_device *dev)
if (!crtc->base.enabled)
continue;
- if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.size ||
+ if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled ||
crtc->config.cpu_transcoder != TRANSCODER_EDP)
enable = true;
}
@@ -6259,9 +6258,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
if (!intel_ddi_pll_mode_set(crtc))
return -EINVAL;
- /* Ensure that the cursor is valid for the new mode before changing... */
- intel_crtc_update_cursor(crtc, true);
-
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
@@ -6494,15 +6490,15 @@ static void haswell_write_eld(struct drm_connector *connector,
/* Set ELD valid state */
tmp = I915_READ(aud_cntrl_st2);
- DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
+ DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
I915_WRITE(aud_cntrl_st2, tmp);
tmp = I915_READ(aud_cntrl_st2);
- DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
+ DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
/* Enable HDMI mode */
tmp = I915_READ(aud_config);
- DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
+ DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
/* clear N_programing_enable and N_value_index */
tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
I915_WRITE(aud_config, tmp);
@@ -6937,7 +6933,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
intel_crtc->cursor_width = width;
intel_crtc->cursor_height = height;
- intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
+ if (intel_crtc->active)
+ intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
return 0;
fail_unpin:
@@ -6956,7 +6953,8 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
intel_crtc->cursor_x = x;
intel_crtc->cursor_y = y;
- intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
+ if (intel_crtc->active)
+ intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
return 0;
}
@@ -8205,9 +8203,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->gmch_pfit.control,
pipe_config->gmch_pfit.pgm_ratios,
pipe_config->gmch_pfit.lvds_border_bits);
- DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x\n",
+ DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
pipe_config->pch_pfit.pos,
- pipe_config->pch_pfit.size);
+ pipe_config->pch_pfit.size,
+ pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
}
@@ -8603,8 +8602,11 @@ intel_pipe_config_compare(struct drm_device *dev,
if (INTEL_INFO(dev)->gen < 4)
PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
- PIPE_CONF_CHECK_I(pch_pfit.pos);
- PIPE_CONF_CHECK_I(pch_pfit.size);
+ PIPE_CONF_CHECK_I(pch_pfit.enabled);
+ if (current_config->pch_pfit.enabled) {
+ PIPE_CONF_CHECK_I(pch_pfit.pos);
+ PIPE_CONF_CHECK_I(pch_pfit.size);
+ }
PIPE_CONF_CHECK_I(ips_enabled);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2151d13772b8..79c14e298ba6 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -588,7 +588,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
DRM_DEBUG_KMS("aux_ch native nack\n");
return -EREMOTEIO;
case AUX_NATIVE_REPLY_DEFER:
- udelay(100);
+ /*
+ * For now, just give more slack to branch devices. We
+ * could check the DPCD for I2C bit rate capabilities,
+ * and if available, adjust the interval. We could also
+ * be more careful with DP-to-Legacy adapters where a
+ * long legacy cable may force very low I2C bit rates.
+ */
+ if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DWN_STRM_PORT_PRESENT)
+ usleep_range(500, 600);
+ else
+ usleep_range(300, 400);
continue;
default:
DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a47799e832c6..28cae80495e2 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -280,6 +280,7 @@ struct intel_crtc_config {
struct {
u32 pos;
u32 size;
+ bool enabled;
} pch_pfit;
/* FDI configuration, only valid if has_pch_encoder is set. */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 406303b509c1..7fa7df546c1e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -263,6 +263,8 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
C(vtotal);
C(clock);
#undef C
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
}
if (intel_dvo->dev.dev_ops->mode_fixup)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 42114ecbae0e..293564a2896a 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -112,6 +112,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
done:
pipe_config->pch_pfit.pos = (x << 16) | y;
pipe_config->pch_pfit.size = (width << 16) | height;
+ pipe_config->pch_pfit.enabled = pipe_config->pch_pfit.size != 0;
}
static void
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0c115cc4899f..dd176b7296c1 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2096,16 +2096,16 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t pixel_rate, pfit_size;
+ uint32_t pixel_rate;
pixel_rate = intel_crtc->config.adjusted_mode.clock;
/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
* adjust the pixel_rate here. */
- pfit_size = intel_crtc->config.pch_pfit.size;
- if (pfit_size) {
+ if (intel_crtc->config.pch_pfit.enabled) {
uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
+ uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
pipe_w = intel_crtc->config.requested_mode.hdisplay;
pipe_h = intel_crtc->config.requested_mode.vdisplay;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 85037b9d4934..49482fd5b76c 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -788,6 +788,8 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
uint16_t h_sync_offset, v_sync_offset;
int mode_clock;
+ memset(dtd, 0, sizeof(*dtd));
+
width = mode->hdisplay;
height = mode->vdisplay;
@@ -830,44 +832,51 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
- dtd->part2.sdvo_flags = 0;
dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
- dtd->part2.reserved = 0;
}
-static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
+static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode *pmode,
const struct intel_sdvo_dtd *dtd)
{
- mode->hdisplay = dtd->part1.h_active;
- mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
- mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
- mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
- mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
- mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
- mode->htotal = mode->hdisplay + dtd->part1.h_blank;
- mode->htotal += (dtd->part1.h_high & 0xf) << 8;
-
- mode->vdisplay = dtd->part1.v_active;
- mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
- mode->vsync_start = mode->vdisplay;
- mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
- mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
- mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
- mode->vsync_end = mode->vsync_start +
+ struct drm_display_mode mode = {};
+
+ mode.hdisplay = dtd->part1.h_active;
+ mode.hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
+ mode.hsync_start = mode.hdisplay + dtd->part2.h_sync_off;
+ mode.hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
+ mode.hsync_end = mode.hsync_start + dtd->part2.h_sync_width;
+ mode.hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
+ mode.htotal = mode.hdisplay + dtd->part1.h_blank;
+ mode.htotal += (dtd->part1.h_high & 0xf) << 8;
+
+ mode.vdisplay = dtd->part1.v_active;
+ mode.vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
+ mode.vsync_start = mode.vdisplay;
+ mode.vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
+ mode.vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
+ mode.vsync_start += dtd->part2.v_sync_off_high & 0xc0;
+ mode.vsync_end = mode.vsync_start +
(dtd->part2.v_sync_off_width & 0xf);
- mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
- mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
- mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
+ mode.vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
+ mode.vtotal = mode.vdisplay + dtd->part1.v_blank;
+ mode.vtotal += (dtd->part1.v_high & 0xf) << 8;
- mode->clock = dtd->part1.clock * 10;
+ mode.clock = dtd->part1.clock * 10;
- mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
- mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ mode.flags |= DRM_MODE_FLAG_INTERLACE;
if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
- mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ mode.flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ mode.flags |= DRM_MODE_FLAG_NHSYNC;
if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
- mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ mode.flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ mode.flags |= DRM_MODE_FLAG_NVSYNC;
+
+ drm_mode_set_crtcinfo(&mode, 0);
+
+ drm_mode_copy(pmode, &mode);
}
static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index f2c6d7909ae2..dd6f84bf6c22 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -916,6 +916,14 @@ intel_tv_compute_config(struct intel_encoder *encoder,
DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;
+ /* TV has its own notion of sync and other mode flags, so clear them. */
+ pipe_config->adjusted_mode.flags = 0;
+
+ /*
+ * FIXME: We don't check whether the input mode is actually what we want
+ * or whether userspace is doing something stupid.
+ */
+
return true;
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index a60584763b61..a0b9d8a95b16 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -124,6 +124,8 @@ void adreno_recover(struct msm_gpu *gpu)
/* reset completed fence seqno, just discard anything pending: */
adreno_gpu->memptrs->fence = gpu->submitted_fence;
+ adreno_gpu->memptrs->rptr = 0;
+ adreno_gpu->memptrs->wptr = 0;
gpu->funcs->pm_resume(gpu);
ret = gpu->funcs->hw_init(gpu);
@@ -229,7 +231,7 @@ void adreno_idle(struct msm_gpu *gpu)
return;
} while(time_before(jiffies, t));
- DRM_ERROR("timeout waiting for %s to drain ringbuffer!\n", gpu->name);
+ DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
/* TODO maybe we need to reset GPU here to recover from hang? */
}
@@ -256,11 +258,17 @@ void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t freedwords;
+ unsigned long t = jiffies + ADRENO_IDLE_TIMEOUT;
do {
uint32_t size = gpu->rb->size / 4;
uint32_t wptr = get_wptr(gpu->rb);
uint32_t rptr = adreno_gpu->memptrs->rptr;
freedwords = (rptr + (size - 1) - wptr) % size;
+
+ if (time_after(jiffies, t)) {
+ DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
+ break;
+ }
} while(freedwords < ndwords);
}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
index 5db5bbaedae2..bc7fd11ad8be 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
@@ -19,8 +19,6 @@
#include "msm_drv.h"
#include "mdp4_kms.h"
-#include <mach/iommu.h>
-
static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
static int mdp4_hw_init(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 864c9773636b..b3a2f1629041 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -18,8 +18,6 @@
#include "msm_drv.h"
#include "msm_gpu.h"
-#include <mach/iommu.h>
-
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -62,6 +60,8 @@ int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
int i, ret;
for (i = 0; i < cnt; i++) {
+ /* TODO maybe some day msm iommu won't require this hack: */
+ struct device *msm_iommu_get_ctx(const char *ctx_name);
struct device *ctx = msm_iommu_get_ctx(names[i]);
if (!ctx)
continue;
@@ -199,7 +199,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
* imx drm driver on iMX5
*/
dev_err(dev->dev, "failed to load kms\n");
- ret = PTR_ERR(priv->kms);
+ ret = PTR_ERR(kms);
goto fail;
}
@@ -499,25 +499,41 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
struct timespec *timeout)
{
struct msm_drm_private *priv = dev->dev_private;
- unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
- unsigned long start_jiffies = jiffies;
- unsigned long remaining_jiffies;
int ret;
- if (time_after(start_jiffies, timeout_jiffies))
- remaining_jiffies = 0;
- else
- remaining_jiffies = timeout_jiffies - start_jiffies;
-
- ret = wait_event_interruptible_timeout(priv->fence_event,
- priv->completed_fence >= fence,
- remaining_jiffies);
- if (ret == 0) {
- DBG("timeout waiting for fence: %u (completed: %u)",
- fence, priv->completed_fence);
- ret = -ETIMEDOUT;
- } else if (ret != -ERESTARTSYS) {
- ret = 0;
+ if (!priv->gpu)
+ return 0;
+
+ if (fence > priv->gpu->submitted_fence) {
+ DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
+ fence, priv->gpu->submitted_fence);
+ return -EINVAL;
+ }
+
+ if (!timeout) {
+ /* no-wait: */
+ ret = fence_completed(dev, fence) ? 0 : -EBUSY;
+ } else {
+ unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
+ unsigned long start_jiffies = jiffies;
+ unsigned long remaining_jiffies;
+
+ if (time_after(start_jiffies, timeout_jiffies))
+ remaining_jiffies = 0;
+ else
+ remaining_jiffies = timeout_jiffies - start_jiffies;
+
+ ret = wait_event_interruptible_timeout(priv->fence_event,
+ fence_completed(dev, fence),
+ remaining_jiffies);
+
+ if (ret == 0) {
+ DBG("timeout waiting for fence: %u (completed: %u)",
+ fence, priv->completed_fence);
+ ret = -ETIMEDOUT;
+ } else if (ret != -ERESTARTSYS) {
+ ret = 0;
+ }
}
return ret;
@@ -681,7 +697,7 @@ static struct drm_driver msm_driver = {
.gem_vm_ops = &vm_ops,
.dumb_create = msm_gem_dumb_create,
.dumb_map_offset = msm_gem_dumb_map_offset,
- .dumb_destroy = msm_gem_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = msm_debugfs_init,
.debugfs_cleanup = msm_debugfs_cleanup,
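
The rewritten msm fence wait distinguishes "no GPU", "fence was never submitted" (-EINVAL) and an explicit no-wait mode before falling through to wait_event_interruptible_timeout(), whose return value drives the error mapping: 0 means the timeout expired, a positive value means the condition became true with that many jiffies left, and -ERESTARTSYS means a signal arrived. The mapping in isolation (waitq, condition_is_true() and remaining_jiffies are placeholders):

    long ret;

    ret = wait_event_interruptible_timeout(waitq, condition_is_true(),
                                           remaining_jiffies);
    if (ret == 0)
            return -ETIMEDOUT;      /* timed out, condition still false */
    if (ret < 0)
            return ret;             /* -ERESTARTSYS: interrupted by a signal */
    return 0;                       /* condition satisfied */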
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 80d75094bf0a..df8f1d084bc1 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -153,7 +153,7 @@ void *msm_gem_vaddr(struct drm_gem_object *obj);
int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
struct work_struct *work);
void msm_gem_move_to_active(struct drm_gem_object *obj,
- struct msm_gpu *gpu, uint32_t fence);
+ struct msm_gpu *gpu, bool write, uint32_t fence);
void msm_gem_move_to_inactive(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
struct timespec *timeout);
@@ -191,6 +191,12 @@ u32 msm_readl(const void __iomem *addr);
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ return priv->completed_fence >= fence;
+}
+
static inline int align_pitch(int width, int bpp)
{
int bytespp = (bpp + 7) / 8;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 6b5a6c8c7658..2bae46c66a30 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -40,9 +40,9 @@ static struct page **get_pages(struct drm_gem_object *obj)
}
msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
- if (!msm_obj->sgt) {
+ if (IS_ERR(msm_obj->sgt)) {
dev_err(dev->dev, "failed to allocate sgt\n");
- return ERR_PTR(-ENOMEM);
+ return ERR_CAST(msm_obj->sgt);
}
msm_obj->pages = p;
@@ -159,7 +159,6 @@ out_unlock:
out:
switch (ret) {
case -EAGAIN:
- set_need_resched();
case 0:
case -ERESTARTSYS:
case -EINTR:
@@ -320,13 +319,6 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}
-int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- uint32_t handle)
-{
- /* No special work needed, drop the reference and see what falls out */
- return drm_gem_handle_delete(file, handle);
-}
-
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset)
{
@@ -393,11 +385,14 @@ int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
}
void msm_gem_move_to_active(struct drm_gem_object *obj,
- struct msm_gpu *gpu, uint32_t fence)
+ struct msm_gpu *gpu, bool write, uint32_t fence)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_obj->gpu = gpu;
- msm_obj->fence = fence;
+ if (write)
+ msm_obj->write_fence = fence;
+ else
+ msm_obj->read_fence = fence;
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}
@@ -411,7 +406,8 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
msm_obj->gpu = NULL;
- msm_obj->fence = 0;
+ msm_obj->read_fence = 0;
+ msm_obj->write_fence = 0;
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
@@ -433,8 +429,18 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0;
- if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC))
- ret = msm_wait_fence_interruptable(dev, msm_obj->fence, timeout);
+ if (is_active(msm_obj)) {
+ uint32_t fence = 0;
+
+ if (op & MSM_PREP_READ)
+ fence = msm_obj->write_fence;
+ if (op & MSM_PREP_WRITE)
+ fence = max(fence, msm_obj->read_fence);
+ if (op & MSM_PREP_NOSYNC)
+ timeout = NULL;
+
+ ret = msm_wait_fence_interruptable(dev, fence, timeout);
+ }
/* TODO cache maintenance */
@@ -455,9 +461,10 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
uint64_t off = drm_vma_node_start(&obj->vma_node);
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- seq_printf(m, "%08x: %c(%d) %2d (%2d) %08llx %p %d\n",
+ seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
- msm_obj->fence, obj->name, obj->refcount.refcount.counter,
+ msm_obj->read_fence, msm_obj->write_fence,
+ obj->name, obj->refcount.refcount.counter,
off, msm_obj->vaddr, obj->size);
}
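The read/write fence split above determines which fence a CPU access has to wait on; a compact sketch of that selection (the flag names are local stand-ins for the prep flags, not the real UAPI constants):

#include <stdint.h>

#define PREP_READ	0x1	/* CPU wants to read the buffer */
#define PREP_WRITE	0x2	/* CPU wants to write the buffer */

static uint32_t fence_to_wait_for(uint32_t read_fence, uint32_t write_fence,
				  uint32_t op)
{
	uint32_t fence = 0;

	if (op & PREP_READ)		/* a reader only races GPU writes */
		fence = write_fence;
	if (op & PREP_WRITE)		/* a writer also races GPU reads */
		fence = (read_fence > fence) ? read_fence : fence;

	return fence;			/* 0 means nothing to wait for */
}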
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index d746f13d283c..0676f32e2c6a 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -36,7 +36,7 @@ struct msm_gem_object {
*/
struct list_head mm_list;
struct msm_gpu *gpu; /* non-null if active */
- uint32_t fence;
+ uint32_t read_fence, write_fence;
/* Transiently in the process of submit ioctl, objects associated
* with the submit are on submit->bo_list.. this only lasts for
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 3e1ef3a00f60..5281d4bc37f7 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -78,7 +78,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
}
if (submit_bo.flags & BO_INVALID_FLAGS) {
- DBG("invalid flags: %x", submit_bo.flags);
+ DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
goto out_unlock;
}
@@ -92,7 +92,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
*/
obj = idr_find(&file->object_idr, submit_bo.handle);
if (!obj) {
- DBG("invalid handle %u at index %u", submit_bo.handle, i);
+ DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
ret = -EINVAL;
goto out_unlock;
}
@@ -100,7 +100,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
msm_obj = to_msm_bo(obj);
if (!list_empty(&msm_obj->submit_entry)) {
- DBG("handle %u at index %u already on submit list",
+ DRM_ERROR("handle %u at index %u already on submit list\n",
submit_bo.handle, i);
ret = -EINVAL;
goto out_unlock;
@@ -216,8 +216,9 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
struct msm_gem_object **obj, uint32_t *iova, bool *valid)
{
if (idx >= submit->nr_bos) {
- DBG("invalid buffer index: %u (out of %u)", idx, submit->nr_bos);
- return EINVAL;
+ DRM_ERROR("invalid buffer index: %u (out of %u)\n",
+ idx, submit->nr_bos);
+ return -EINVAL;
}
if (obj)
@@ -239,7 +240,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
int ret;
if (offset % 4) {
- DBG("non-aligned cmdstream buffer: %u", offset);
+ DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
return -EINVAL;
}
@@ -266,7 +267,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
return -EFAULT;
if (submit_reloc.submit_offset % 4) {
- DBG("non-aligned reloc offset: %u",
+ DRM_ERROR("non-aligned reloc offset: %u\n",
submit_reloc.submit_offset);
return -EINVAL;
}
@@ -276,7 +277,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
if ((off >= (obj->base.size / 4)) ||
(off < last_offset)) {
- DBG("invalid offset %u at reloc %u", off, i);
+ DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
return -EINVAL;
}
@@ -374,14 +375,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
if (submit_cmd.size % 4) {
- DBG("non-aligned cmdstream buffer size: %u",
+ DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
submit_cmd.size);
ret = -EINVAL;
goto out;
}
- if (submit_cmd.size >= msm_obj->base.size) {
- DBG("invalid cmdstream size: %u", submit_cmd.size);
+ if ((submit_cmd.size + submit_cmd.submit_offset) >=
+ msm_obj->base.size) {
+ DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
ret = -EINVAL;
goto out;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index e1e1ec9321ff..3bab937965d1 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -29,13 +29,14 @@
static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
{
struct drm_device *dev = gpu->dev;
- struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
+ struct kgsl_device_platform_data *pdata;
if (!pdev) {
dev_err(dev->dev, "could not find dtv pdata\n");
return;
}
+ pdata = pdev->dev.platform_data;
if (pdata->bus_scale_table) {
gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
DBG("bus scale client: %08x", gpu->bsc);
@@ -230,6 +231,8 @@ static void hangcheck_timer_reset(struct msm_gpu *gpu)
static void hangcheck_handler(unsigned long data)
{
struct msm_gpu *gpu = (struct msm_gpu *)data;
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
uint32_t fence = gpu->funcs->last_fence(gpu);
if (fence != gpu->hangcheck_fence) {
@@ -237,14 +240,22 @@ static void hangcheck_handler(unsigned long data)
gpu->hangcheck_fence = fence;
} else if (fence < gpu->submitted_fence) {
/* no progress and not done.. hung! */
- struct msm_drm_private *priv = gpu->dev->dev_private;
gpu->hangcheck_fence = fence;
+ dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
+ gpu->name);
+ dev_err(dev->dev, "%s: completed fence: %u\n",
+ gpu->name, fence);
+ dev_err(dev->dev, "%s: submitted fence: %u\n",
+ gpu->name, gpu->submitted_fence);
queue_work(priv->wq, &gpu->recover_work);
}
/* if still more pending work, reset the hangcheck timer: */
if (gpu->submitted_fence > gpu->hangcheck_fence)
hangcheck_timer_reset(gpu);
+
+ /* workaround for missing irq: */
+ queue_work(priv->wq, &gpu->retire_work);
}
/*
@@ -265,7 +276,8 @@ static void retire_worker(struct work_struct *work)
obj = list_first_entry(&gpu->active_list,
struct msm_gem_object, mm_list);
- if (obj->fence <= fence) {
+ if ((obj->read_fence <= fence) &&
+ (obj->write_fence <= fence)) {
/* move to inactive: */
msm_gem_move_to_inactive(&obj->base);
msm_gem_put_iova(&obj->base, gpu->id);
@@ -321,7 +333,11 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
submit->gpu->id, &iova);
}
- msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence);
+ if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
+ msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
+
+ if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
+ msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
}
hangcheck_timer_reset(gpu);
mutex_unlock(&dev->struct_mutex);
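The hangcheck handler's decision reduces to three cases; a self-contained sketch of that logic (enum and names are illustrative):

#include <stdint.h>

enum hang_verdict { HANG_PROGRESS, HANG_LOCKUP, HANG_IDLE };

static enum hang_verdict hangcheck_sketch(uint32_t completed,
					  uint32_t submitted,
					  uint32_t *last_seen)
{
	if (completed != *last_seen) {
		*last_seen = completed;	/* fences moved: still making progress */
		return HANG_PROGRESS;
	}
	if (completed < submitted)	/* stalled with work outstanding: hung */
		return HANG_LOCKUP;
	return HANG_IDLE;		/* nothing pending, nothing to do */
}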
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 2e11ea02cf87..57cda2a1437b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -579,8 +579,22 @@ static void
init_reserved(struct nvbios_init *init)
{
u8 opcode = nv_ro08(init->bios, init->offset);
- trace("RESERVED\t0x%02x\n", opcode);
- init->offset += 1;
+ u8 length, i;
+
+ switch (opcode) {
+ case 0xaa:
+ length = 4;
+ break;
+ default:
+ length = 1;
+ break;
+ }
+
+ trace("RESERVED 0x%02x\t", opcode);
+ for (i = 1; i < length; i++)
+ cont(" 0x%02x", nv_ro08(init->bios, init->offset + i));
+ cont("\n");
+ init->offset += length;
}
/**
@@ -1437,7 +1451,7 @@ init_configure_mem(struct nvbios_init *init)
data = init_rdvgai(init, 0x03c4, 0x01);
init_wrvgai(init, 0x03c4, 0x01, data | 0x20);
- while ((addr = nv_ro32(bios, sdata)) != 0xffffffff) {
+ for (; (addr = nv_ro32(bios, sdata)) != 0xffffffff; sdata += 4) {
switch (addr) {
case 0x10021c: /* CKE_NORMAL */
case 0x1002d0: /* CMD_REFRESH */
@@ -2135,6 +2149,7 @@ static struct nvbios_init_opcode {
[0x99] = { init_zm_auxch },
[0x9a] = { init_i2c_long_if },
[0xa9] = { init_gpio_ne },
+ [0xaa] = { init_reserved },
};
#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index d2712e6e5d31..7848590f5568 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -278,7 +278,6 @@ nouveau_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_display *disp;
- u32 pclass = dev->pdev->class >> 8;
int ret, gen;
disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
@@ -340,29 +339,25 @@ nouveau_display_create(struct drm_device *dev)
drm_kms_helper_poll_init(dev);
drm_kms_helper_poll_disable(dev);
- if (nouveau_modeset == 1 ||
- (nouveau_modeset < 0 && pclass == PCI_CLASS_DISPLAY_VGA)) {
- if (drm->vbios.dcb.entries) {
- if (nv_device(drm->device)->card_type < NV_50)
- ret = nv04_display_create(dev);
- else
- ret = nv50_display_create(dev);
- } else {
- ret = 0;
- }
-
- if (ret)
- goto disp_create_err;
+ if (drm->vbios.dcb.entries) {
+ if (nv_device(drm->device)->card_type < NV_50)
+ ret = nv04_display_create(dev);
+ else
+ ret = nv50_display_create(dev);
+ } else {
+ ret = 0;
+ }
- if (dev->mode_config.num_crtc) {
- ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
- if (ret)
- goto vblank_err;
- }
+ if (ret)
+ goto disp_create_err;
- nouveau_backlight_init(dev);
+ if (dev->mode_config.num_crtc) {
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (ret)
+ goto vblank_err;
}
+ nouveau_backlight_init(dev);
return 0;
vblank_err:
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 8f6d63d7edd3..a86ecf65c164 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -454,7 +454,8 @@ nouveau_fbcon_init(struct drm_device *dev)
int preferred_bpp;
int ret;
- if (!dev->mode_config.num_crtc)
+ if (!dev->mode_config.num_crtc ||
+ (dev->pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
return 0;
fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index ca5492ac2da5..0843ebc910d4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -104,9 +104,7 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
else
nvbe->ttm.ttm.func = &nv50_sgdma_backend;
- if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
- kfree(nvbe);
+ if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page))
return NULL;
- }
return &nvbe->ttm.ttm;
}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index dfac7965ea28..32923d2f6002 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -707,8 +707,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
- if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
+ (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ (radeon_connector->audio == RADEON_AUDIO_AUTO)))
return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
@@ -718,8 +719,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
- if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
+ (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ (radeon_connector->audio == RADEON_AUDIO_AUTO)))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
@@ -732,8 +734,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
+ (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ (radeon_connector->audio == RADEON_AUDIO_AUTO)))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
@@ -1647,8 +1650,12 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- /* some early dce3.2 boards have a bug in their transmitter control table */
- if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
+ /* some dce3.x boards have a bug in their transmitter control table.
+ * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
+ * does the same thing and more.
+ */
+ if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
+ (rdev->family != CHIP_RS880))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 084e69414fd1..b162e98a2953 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -1168,6 +1168,23 @@ static const struct radeon_blacklist_clocks btc_blacklist_clocks[] =
{ 25000, 30000, RADEON_SCLK_UP }
};
+void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
+ u32 *max_clock)
+{
+ u32 i, clock = 0;
+
+ if ((table == NULL) || (table->count == 0)) {
+ *max_clock = clock;
+ return;
+ }
+
+ for (i = 0; i < table->count; i++) {
+ if (clock < table->entries[i].clk)
+ clock = table->entries[i].clk;
+ }
+ *max_clock = clock;
+}
+
void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
u32 clock, u16 max_voltage, u16 *voltage)
{
@@ -2080,6 +2097,7 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
bool disable_mclk_switching;
u32 mclk, sclk;
u16 vddc, vddci;
+ u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
btc_dpm_vblank_too_short(rdev))
@@ -2121,6 +2139,39 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
ps->low.vddci = max_limits->vddci;
}
+ /* limit clocks to max supported clocks based on voltage dependency tables */
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+ &max_sclk_vddc);
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ &max_mclk_vddci);
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ &max_mclk_vddc);
+
+ if (max_sclk_vddc) {
+ if (ps->low.sclk > max_sclk_vddc)
+ ps->low.sclk = max_sclk_vddc;
+ if (ps->medium.sclk > max_sclk_vddc)
+ ps->medium.sclk = max_sclk_vddc;
+ if (ps->high.sclk > max_sclk_vddc)
+ ps->high.sclk = max_sclk_vddc;
+ }
+ if (max_mclk_vddci) {
+ if (ps->low.mclk > max_mclk_vddci)
+ ps->low.mclk = max_mclk_vddci;
+ if (ps->medium.mclk > max_mclk_vddci)
+ ps->medium.mclk = max_mclk_vddci;
+ if (ps->high.mclk > max_mclk_vddci)
+ ps->high.mclk = max_mclk_vddci;
+ }
+ if (max_mclk_vddc) {
+ if (ps->low.mclk > max_mclk_vddc)
+ ps->low.mclk = max_mclk_vddc;
+ if (ps->medium.mclk > max_mclk_vddc)
+ ps->medium.mclk = max_mclk_vddc;
+ if (ps->high.mclk > max_mclk_vddc)
+ ps->high.mclk = max_mclk_vddc;
+ }
+
/* XXX validate the min clocks required for display */
if (disable_mclk_switching) {
@@ -2340,12 +2391,6 @@ int btc_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
- ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
- if (ret) {
- DRM_ERROR("rv770_dpm_force_performance_level failed\n");
- return ret;
- }
-
return 0;
}
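The clamping rule added here (and repeated for the CI and NI paths below) is: take the highest clock any entry of a voltage dependency table allows, then cap each state's clock at it, treating an empty table (max of 0) as "no cap". A standalone sketch with simplified types:

#include <stddef.h>
#include <stdint.h>

struct dep_entry {
	uint32_t clk;		/* clock supported at this voltage */
	uint16_t v;		/* voltage, unused by the sketch */
};

static uint32_t max_supported_clock(const struct dep_entry *e, size_t count)
{
	uint32_t max = 0;
	size_t i;

	for (i = 0; i < count; i++)
		if (e[i].clk > max)
			max = e[i].clk;
	return max;		/* 0 when the table is missing or empty */
}

static uint32_t clamp_clock(uint32_t clk, uint32_t max)
{
	return (max && clk > max) ? max : clk;
}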
diff --git a/drivers/gpu/drm/radeon/btc_dpm.h b/drivers/gpu/drm/radeon/btc_dpm.h
index 1a15e0e41950..3b6f12b7760b 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.h
+++ b/drivers/gpu/drm/radeon/btc_dpm.h
@@ -46,6 +46,8 @@ void btc_adjust_clock_combinations(struct radeon_device *rdev,
struct rv7xx_pl *pl);
void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
u32 clock, u16 max_voltage, u16 *voltage);
+void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
+ u32 *max_clock);
void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
u16 max_vddc, u16 max_vddci,
u16 *vddc, u16 *vddci);
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 3cce533397c6..51e947a97edf 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -146,6 +146,8 @@ static const struct ci_pt_config_reg didt_config_ci[] =
};
extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
+extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
+ u32 *max_clock);
extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
u32 arb_freq_src, u32 arb_freq_dest);
extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
@@ -712,6 +714,7 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
struct radeon_clock_and_voltage_limits *max_limits;
bool disable_mclk_switching;
u32 sclk, mclk;
+ u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
int i;
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -739,6 +742,29 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
}
}
+ /* limit clocks to max supported clocks based on voltage dependency tables */
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+ &max_sclk_vddc);
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ &max_mclk_vddci);
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ &max_mclk_vddc);
+
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (max_sclk_vddc) {
+ if (ps->performance_levels[i].sclk > max_sclk_vddc)
+ ps->performance_levels[i].sclk = max_sclk_vddc;
+ }
+ if (max_mclk_vddci) {
+ if (ps->performance_levels[i].mclk > max_mclk_vddci)
+ ps->performance_levels[i].mclk = max_mclk_vddci;
+ }
+ if (max_mclk_vddc) {
+ if (ps->performance_levels[i].mclk > max_mclk_vddc)
+ ps->performance_levels[i].mclk = max_mclk_vddc;
+ }
+ }
+
/* XXX validate the min clocks required for display */
if (disable_mclk_switching) {
@@ -4748,12 +4774,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
if (pi->pcie_performance_request)
ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
- ret = ci_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
- if (ret) {
- DRM_ERROR("ci_dpm_force_performance_level failed\n");
- return ret;
- }
-
cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
RADEON_CG_BLOCK_MC |
RADEON_CG_BLOCK_SDMA |
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 53b43dd3cf1e..252e10a41cf5 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -47,10 +47,11 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
u32 smc_start_address,
const u8 *src, u32 byte_count, u32 limit)
{
+ unsigned long flags;
u32 data, original_data;
u32 addr;
u32 extra_shift;
- int ret;
+ int ret = 0;
if (smc_start_address & 3)
return -EINVAL;
@@ -59,13 +60,14 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
addr = smc_start_address;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
while (byte_count >= 4) {
/* SMC address space is BE */
data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
ret = ci_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
WREG32(SMC_IND_DATA_0, data);
@@ -80,7 +82,7 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
ret = ci_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
original_data = RREG32(SMC_IND_DATA_0);
@@ -97,11 +99,15 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
ret = ci_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
WREG32(SMC_IND_DATA_0, data);
}
- return 0;
+
+done:
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+ return ret;
}
void ci_start_smc(struct radeon_device *rdev)
@@ -197,6 +203,7 @@ PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
{
+ unsigned long flags;
u32 ucode_start_address;
u32 ucode_size;
const u8 *src;
@@ -219,6 +226,7 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
return -EINVAL;
src = (const u8 *)rdev->smc_fw->data;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(SMC_IND_INDEX_0, ucode_start_address);
WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
while (ucode_size >= 4) {
@@ -231,6 +239,7 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
ucode_size -= 4;
}
WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
return 0;
}
@@ -238,25 +247,29 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
int ci_read_smc_sram_dword(struct radeon_device *rdev,
u32 smc_address, u32 *value, u32 limit)
{
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = ci_set_smc_sram_address(rdev, smc_address, limit);
- if (ret)
- return ret;
+ if (ret == 0)
+ *value = RREG32(SMC_IND_DATA_0);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
- *value = RREG32(SMC_IND_DATA_0);
- return 0;
+ return ret;
}
int ci_write_smc_sram_dword(struct radeon_device *rdev,
u32 smc_address, u32 value, u32 limit)
{
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = ci_set_smc_sram_address(rdev, smc_address, limit);
- if (ret)
- return ret;
+ if (ret == 0)
+ WREG32(SMC_IND_DATA_0, value);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
- WREG32(SMC_IND_DATA_0, value);
- return 0;
+ return ret;
}
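The recurring change in the SMC, PLL, MC, PCIE and endpoint accessors is the same fix: an index/data register pair is one shared cursor, so the index write and the data access must sit in a single critical section. A userspace analogue of the pattern (pthread mutex standing in for spin_lock_irqsave, an array standing in for the data window):

#include <pthread.h>
#include <stdint.h>

struct idx_regs {
	pthread_mutex_t lock;	/* plays the role of smc_idx_lock */
	uint32_t index;		/* plays the role of SMC_IND_INDEX_0 */
	uint32_t data[256];	/* window reached through SMC_IND_DATA_0 */
};

static uint32_t idx_read(struct idx_regs *r, uint32_t reg)
{
	uint32_t v;

	pthread_mutex_lock(&r->lock);
	r->index = reg;			/* WREG32(INDEX, reg) */
	v = r->data[r->index & 0xff];	/* RREG32(DATA) */
	pthread_mutex_unlock(&r->lock);
	return v;
}

static void idx_write(struct idx_regs *r, uint32_t reg, uint32_t val)
{
	pthread_mutex_lock(&r->lock);
	r->index = reg;			/* WREG32(INDEX, reg) */
	r->data[r->index & 0xff] = val;	/* WREG32(DATA, val) */
	pthread_mutex_unlock(&r->lock);
}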
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index a3bba0587276..d02fd1c045d5 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -77,6 +77,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev);
static void cik_program_aspm(struct radeon_device *rdev);
static void cik_init_pg(struct radeon_device *rdev);
static void cik_init_cg(struct radeon_device *rdev);
+static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
+ bool enable);
/* get temperature in millidegrees */
int ci_get_temp(struct radeon_device *rdev)
@@ -120,20 +122,27 @@ int kv_get_temp(struct radeon_device *rdev)
*/
u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
WREG32(PCIE_INDEX, reg);
(void)RREG32(PCIE_INDEX);
r = RREG32(PCIE_DATA);
+ spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
return r;
}
void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
WREG32(PCIE_INDEX, reg);
(void)RREG32(PCIE_INDEX);
WREG32(PCIE_DATA, v);
(void)RREG32(PCIE_DATA);
+ spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
}
static const u32 spectre_rlc_save_restore_register_list[] =
@@ -2722,7 +2731,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
} else if ((rdev->pdev->device == 0x1309) ||
(rdev->pdev->device == 0x130A) ||
(rdev->pdev->device == 0x130D) ||
- (rdev->pdev->device == 0x1313)) {
+ (rdev->pdev->device == 0x1313) ||
+ (rdev->pdev->device == 0x131D)) {
rdev->config.cik.max_cu_per_sh = 6;
rdev->config.cik.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x1306) ||
@@ -2835,10 +2845,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
rdev->config.cik.tile_config |= (3 << 0);
break;
}
- if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
- rdev->config.cik.tile_config |= 1 << 4;
- else
- rdev->config.cik.tile_config |= 0 << 4;
+ rdev->config.cik.tile_config |=
+ ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
rdev->config.cik.tile_config |=
((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
rdev->config.cik.tile_config |=
@@ -4013,6 +4021,8 @@ static int cik_cp_resume(struct radeon_device *rdev)
{
int r;
+ cik_enable_gui_idle_interrupt(rdev, false);
+
r = cik_cp_load_microcode(rdev);
if (r)
return r;
@@ -4024,6 +4034,8 @@ static int cik_cp_resume(struct radeon_device *rdev)
if (r)
return r;
+ cik_enable_gui_idle_interrupt(rdev, true);
+
return 0;
}
@@ -4442,8 +4454,8 @@ static int cik_mc_init(struct radeon_device *rdev)
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* size in MB on si */
- rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
- rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
si_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
@@ -4721,12 +4733,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
- char *block = (char *)&mc_client;
+ char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
+ (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
- printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
+ printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
protections, vmid, addr,
(status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
- block, mc_id);
+ block, mc_client, mc_id);
}
/**
@@ -5376,7 +5389,9 @@ static void cik_enable_hdp_ls(struct radeon_device *rdev,
void cik_update_cg(struct radeon_device *rdev,
u32 block, bool enable)
{
+
if (block & RADEON_CG_BLOCK_GFX) {
+ cik_enable_gui_idle_interrupt(rdev, false);
/* order matters! */
if (enable) {
cik_enable_mgcg(rdev, true);
@@ -5385,6 +5400,7 @@ void cik_update_cg(struct radeon_device *rdev,
cik_enable_cgcg(rdev, false);
cik_enable_mgcg(rdev, false);
}
+ cik_enable_gui_idle_interrupt(rdev, true);
}
if (block & RADEON_CG_BLOCK_MC) {
@@ -5541,7 +5557,7 @@ static void cik_enable_gfx_cgpg(struct radeon_device *rdev,
{
u32 data, orig;
- if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
orig = data = RREG32(RLC_PG_CNTL);
data |= GFX_PG_ENABLE;
if (orig != data)
@@ -5805,7 +5821,7 @@ static void cik_init_pg(struct radeon_device *rdev)
if (rdev->pg_flags) {
cik_enable_sck_slowdown_on_pu(rdev, true);
cik_enable_sck_slowdown_on_pd(rdev, true);
- if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
cik_init_gfx_cgpg(rdev);
cik_enable_cp_pg(rdev, true);
cik_enable_gds_pg(rdev, true);
@@ -5819,7 +5835,7 @@ static void cik_fini_pg(struct radeon_device *rdev)
{
if (rdev->pg_flags) {
cik_update_gfx_pg(rdev, false);
- if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
cik_enable_cp_pg(rdev, false);
cik_enable_gds_pg(rdev, false);
}
@@ -5895,7 +5911,9 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
u32 tmp;
/* gfx ring */
- WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ tmp = RREG32(CP_INT_CNTL_RING0) &
+ (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ WREG32(CP_INT_CNTL_RING0, tmp);
/* sdma */
tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp);
@@ -6036,8 +6054,7 @@ static int cik_irq_init(struct radeon_device *rdev)
*/
int cik_irq_set(struct radeon_device *rdev)
{
- u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE |
- PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
+ u32 cp_int_cntl;
u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
@@ -6058,6 +6075,10 @@ int cik_irq_set(struct radeon_device *rdev)
return 0;
}
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
+ (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
+
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 95a66db08d9b..91bb470de0a3 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -2014,12 +2014,6 @@ int cypress_dpm_set_power_state(struct radeon_device *rdev)
if (eg_pi->pcie_performance_request)
cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
- ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
- if (ret) {
- DRM_ERROR("rv770_dpm_force_performance_level failed\n");
- return ret;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 8953255e894b..85a69d2ea3d2 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -28,22 +28,30 @@
static u32 dce6_endpoint_rreg(struct radeon_device *rdev,
u32 block_offset, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->end_idx_lock, flags);
WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset);
+ spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
+
return r;
}
static void dce6_endpoint_wreg(struct radeon_device *rdev,
u32 block_offset, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->end_idx_lock, flags);
if (ASIC_IS_DCE8(rdev))
WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
else
WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset,
AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg));
WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v);
+ spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
}
#define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg))
@@ -86,12 +94,12 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 offset = dig->afmt->offset;
- u32 id = dig->afmt->pin->id;
if (!dig->afmt->pin)
return;
- WREG32(AFMT_AUDIO_SRC_CONTROL + offset, AFMT_AUDIO_SRC_SELECT(id));
+ WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
+ AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
}
void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index ecd60809db4e..71399065db04 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -40,6 +40,7 @@ static int kv_calculate_dpm_settings(struct radeon_device *rdev);
static void kv_enable_new_levels(struct radeon_device *rdev);
static void kv_program_nbps_index_settings(struct radeon_device *rdev,
struct radeon_ps *new_rps);
+static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
static int kv_set_enabled_levels(struct radeon_device *rdev);
static int kv_force_dpm_highest(struct radeon_device *rdev);
static int kv_force_dpm_lowest(struct radeon_device *rdev);
@@ -519,7 +520,7 @@ static int kv_set_dpm_boot_state(struct radeon_device *rdev)
static void kv_program_vc(struct radeon_device *rdev)
{
- WREG32_SMC(CG_FTV_0, 0x3FFFC000);
+ WREG32_SMC(CG_FTV_0, 0x3FFFC100);
}
static void kv_clear_vc(struct radeon_device *rdev)
@@ -638,7 +639,10 @@ static int kv_force_lowest_valid(struct radeon_device *rdev)
static int kv_unforce_levels(struct radeon_device *rdev)
{
- return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
+ if (rdev->family == CHIP_KABINI)
+ return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
+ else
+ return kv_set_enabled_levels(rdev);
}
static int kv_update_sclk_t(struct radeon_device *rdev)
@@ -667,9 +671,8 @@ static int kv_program_bootup_state(struct radeon_device *rdev)
&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
if (table && table->count) {
- for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
- if ((table->entries[i].clk == pi->boot_pl.sclk) ||
- (i == 0))
+ for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+ if (table->entries[i].clk == pi->boot_pl.sclk)
break;
}
@@ -682,9 +685,8 @@ static int kv_program_bootup_state(struct radeon_device *rdev)
if (table->num_max_dpm_entries == 0)
return -EINVAL;
- for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
- if ((table->entries[i].sclk_frequency == pi->boot_pl.sclk) ||
- (i == 0))
+ for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+ if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
break;
}
@@ -1078,6 +1080,13 @@ static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}
+static void kv_reset_acp_boot_level(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->acp_boot_level = 0xff;
+}
+
static void kv_update_current_ps(struct radeon_device *rdev,
struct radeon_ps *rps)
{
@@ -1100,6 +1109,18 @@ static void kv_update_requested_ps(struct radeon_device *rdev,
pi->requested_rps.ps_priv = &pi->requested_ps;
}
+void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ if (pi->bapm_enable) {
+ ret = kv_smc_bapm_enable(rdev, enable);
+ if (ret)
+ DRM_ERROR("kv_smc_bapm_enable failed\n");
+ }
+}
+
int kv_dpm_enable(struct radeon_device *rdev)
{
struct kv_power_info *pi = kv_get_pi(rdev);
@@ -1192,6 +1213,8 @@ int kv_dpm_enable(struct radeon_device *rdev)
return ret;
}
+ kv_reset_acp_boot_level(rdev);
+
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
@@ -1203,6 +1226,12 @@ int kv_dpm_enable(struct radeon_device *rdev)
radeon_irq_set(rdev);
}
+ ret = kv_smc_bapm_enable(rdev, false);
+ if (ret) {
+ DRM_ERROR("kv_smc_bapm_enable failed\n");
+ return ret;
+ }
+
/* powerdown unused blocks for now */
kv_dpm_powergate_acp(rdev, true);
kv_dpm_powergate_samu(rdev, true);
@@ -1226,6 +1255,8 @@ void kv_dpm_disable(struct radeon_device *rdev)
RADEON_CG_BLOCK_BIF |
RADEON_CG_BLOCK_HDP), false);
+ kv_smc_bapm_enable(rdev, false);
+
/* powerup blocks */
kv_dpm_powergate_acp(rdev, false);
kv_dpm_powergate_samu(rdev, false);
@@ -1450,6 +1481,39 @@ static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
return kv_enable_samu_dpm(rdev, !gate);
}
+static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
+{
+ u8 i;
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+
+ for (i = 0; i < table->count; i++) {
+ if (table->entries[i].clk >= 0) /* XXX */
+ break;
+ }
+
+ if (i >= table->count)
+ i = table->count - 1;
+
+ return i;
+}
+
+static void kv_update_acp_boot_level(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u8 acp_boot_level;
+
+ if (!pi->caps_stable_p_state) {
+ acp_boot_level = kv_get_acp_boot_level(rdev);
+ if (acp_boot_level != pi->acp_boot_level) {
+ pi->acp_boot_level = acp_boot_level;
+ kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_ACPDPM_SetEnabledMask,
+ (1 << pi->acp_boot_level));
+ }
+ }
+}
+
static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
struct kv_power_info *pi = kv_get_pi(rdev);
@@ -1461,7 +1525,7 @@ static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
if (pi->caps_stable_p_state)
pi->acp_boot_level = table->count - 1;
else
- pi->acp_boot_level = 0;
+ pi->acp_boot_level = kv_get_acp_boot_level(rdev);
ret = kv_copy_bytes_to_smc(rdev,
pi->dpm_table_start +
@@ -1588,13 +1652,11 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev,
}
}
- for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
- if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels -1].sclk) ||
- (i == 0)) {
- pi->highest_valid = i;
+ for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+ if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
break;
- }
}
+ pi->highest_valid = i;
if (pi->lowest_valid > pi->highest_valid) {
if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
@@ -1615,14 +1677,12 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev,
}
}
- for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
+ for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
if (table->entries[i].sclk_frequency <=
- new_ps->levels[new_ps->num_levels - 1].sclk ||
- i == 0) {
- pi->highest_valid = i;
+ new_ps->levels[new_ps->num_levels - 1].sclk)
break;
- }
}
+ pi->highest_valid = i;
if (pi->lowest_valid > pi->highest_valid) {
if ((new_ps->levels[0].sclk -
@@ -1724,6 +1784,14 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
RADEON_CG_BLOCK_BIF |
RADEON_CG_BLOCK_HDP), false);
+ if (pi->bapm_enable) {
+ ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
+ if (ret) {
+ DRM_ERROR("kv_smc_bapm_enable failed\n");
+ return ret;
+ }
+ }
+
if (rdev->family == CHIP_KABINI) {
if (pi->enable_dpm) {
kv_set_valid_clock_range(rdev, new_ps);
@@ -1775,6 +1843,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
#endif
+ kv_update_acp_boot_level(rdev);
kv_update_sclk_t(rdev);
kv_enable_nb_dpm(rdev);
}
@@ -1785,7 +1854,6 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
RADEON_CG_BLOCK_BIF |
RADEON_CG_BLOCK_HDP), true);
- rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
return 0;
}
@@ -1806,12 +1874,23 @@ void kv_dpm_setup_asic(struct radeon_device *rdev)
void kv_dpm_reset_asic(struct radeon_device *rdev)
{
- kv_force_lowest_valid(rdev);
- kv_init_graphics_levels(rdev);
- kv_program_bootup_state(rdev);
- kv_upload_dpm_settings(rdev);
- kv_force_lowest_valid(rdev);
- kv_unforce_levels(rdev);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (rdev->family == CHIP_KABINI) {
+ kv_force_lowest_valid(rdev);
+ kv_init_graphics_levels(rdev);
+ kv_program_bootup_state(rdev);
+ kv_upload_dpm_settings(rdev);
+ kv_force_lowest_valid(rdev);
+ kv_unforce_levels(rdev);
+ } else {
+ kv_init_graphics_levels(rdev);
+ kv_program_bootup_state(rdev);
+ kv_freeze_sclk_dpm(rdev, true);
+ kv_upload_dpm_settings(rdev);
+ kv_freeze_sclk_dpm(rdev, false);
+ kv_set_enabled_level(rdev, pi->graphics_boot_level);
+ }
}
//XXX use sumo_dpm_display_configuration_changed
@@ -1871,12 +1950,15 @@ static int kv_force_dpm_highest(struct radeon_device *rdev)
if (ret)
return ret;
- for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i >= 0; i--) {
+ for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
if (enable_mask & (1 << i))
break;
}
- return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+ if (rdev->family == CHIP_KABINI)
+ return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+ else
+ return kv_set_enabled_level(rdev, i);
}
static int kv_force_dpm_lowest(struct radeon_device *rdev)
@@ -1893,7 +1975,10 @@ static int kv_force_dpm_lowest(struct radeon_device *rdev)
break;
}
- return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+ if (rdev->family == CHIP_KABINI)
+ return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+ else
+ return kv_set_enabled_level(rdev, i);
}
static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
@@ -1911,9 +1996,9 @@ static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
if (!pi->caps_sclk_ds)
return 0;
- for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i <= 0; i--) {
+ for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
temp = sclk / sumo_get_sleep_divider_from_id(i);
- if ((temp >= min) || (i == 0))
+ if (temp >= min)
break;
}
@@ -2039,12 +2124,12 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
ps->dpmx_nb_ps_lo = 0x1;
ps->dpmx_nb_ps_hi = 0x0;
} else {
- ps->dpm0_pg_nb_ps_lo = 0x1;
+ ps->dpm0_pg_nb_ps_lo = 0x3;
ps->dpm0_pg_nb_ps_hi = 0x0;
- ps->dpmx_nb_ps_lo = 0x2;
- ps->dpmx_nb_ps_hi = 0x1;
+ ps->dpmx_nb_ps_lo = 0x3;
+ ps->dpmx_nb_ps_hi = 0x0;
- if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
+ if (pi->sys_info.nb_dpm_enable) {
force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
pi->disable_nb_ps3_in_battery;
@@ -2210,6 +2295,15 @@ static void kv_enable_new_levels(struct radeon_device *rdev)
}
}
+static int kv_set_enabled_level(struct radeon_device *rdev, u32 level)
+{
+ u32 new_mask = (1 << level);
+
+ return kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ new_mask);
+}
+
static int kv_set_enabled_levels(struct radeon_device *rdev)
{
struct kv_power_info *pi = kv_get_pi(rdev);
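Several kv_dpm loops above are rewritten from "scan down and break on match or on i == 0" to "scan down to 1 and let index 0 fall out as the default". A minimal sketch of the equivalent search (array and limit names are illustrative):

#include <stddef.h>
#include <stdint.h>

/* Returns the highest index whose clock does not exceed limit;
 * falls back to 0 when only entry [0] (or nothing) qualifies. */
static size_t highest_entry_not_above(const uint32_t *clk, size_t count,
				      uint32_t limit)
{
	size_t i;

	if (count == 0)
		return 0;

	for (i = count - 1; i > 0; i--)
		if (clk[i] <= limit)
			break;
	return i;
}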
diff --git a/drivers/gpu/drm/radeon/kv_dpm.h b/drivers/gpu/drm/radeon/kv_dpm.h
index 32bb079572d7..8cef7525d7a8 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.h
+++ b/drivers/gpu/drm/radeon/kv_dpm.h
@@ -192,6 +192,7 @@ int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
u32 *value, u32 limit);
int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable);
+int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable);
int kv_copy_bytes_to_smc(struct radeon_device *rdev,
u32 smc_start_address,
const u8 *src, u32 byte_count, u32 limit);
diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c
index 34a226d7e34a..0000b59a6d05 100644
--- a/drivers/gpu/drm/radeon/kv_smc.c
+++ b/drivers/gpu/drm/radeon/kv_smc.c
@@ -107,6 +107,14 @@ int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable)
return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable);
}
+int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable)
+{
+ if (enable)
+ return kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
+ else
+ return kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
+}
+
int kv_copy_bytes_to_smc(struct radeon_device *rdev,
u32 smc_start_address,
const u8 *src, u32 byte_count, u32 limit)
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index f7b625c9e0e9..f26339028154 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -787,6 +787,7 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
bool disable_mclk_switching;
u32 mclk, sclk;
u16 vddc, vddci;
+ u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
int i;
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -813,6 +814,29 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
}
}
+ /* limit clocks to max supported clocks based on voltage dependency tables */
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+ &max_sclk_vddc);
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ &max_mclk_vddci);
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ &max_mclk_vddc);
+
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (max_sclk_vddc) {
+ if (ps->performance_levels[i].sclk > max_sclk_vddc)
+ ps->performance_levels[i].sclk = max_sclk_vddc;
+ }
+ if (max_mclk_vddci) {
+ if (ps->performance_levels[i].mclk > max_mclk_vddci)
+ ps->performance_levels[i].mclk = max_mclk_vddci;
+ }
+ if (max_mclk_vddc) {
+ if (ps->performance_levels[i].mclk > max_mclk_vddc)
+ ps->performance_levels[i].mclk = max_mclk_vddc;
+ }
+ }
+
/* XXX validate the min clocks required for display */
if (disable_mclk_switching) {
@@ -3865,12 +3889,6 @@ int ni_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
- ret = ni_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
- if (ret) {
- DRM_ERROR("ni_dpm_force_performance_level failed\n");
- return ret;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
index 682842804bce..5670b8291285 100644
--- a/drivers/gpu/drm/radeon/ppsmc.h
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -163,6 +163,8 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f)
#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e)
+#define PPSMC_MSG_EnableBAPM ((uint32_t) 0x120)
+#define PPSMC_MSG_DisableBAPM ((uint32_t) 0x121)
#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 9fc61dd68bc0..d71333033b2b 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2853,21 +2853,28 @@ static void r100_pll_errata_after_data(struct radeon_device *rdev)
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
uint32_t data;
+ spin_lock_irqsave(&rdev->pll_idx_lock, flags);
WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
r100_pll_errata_after_index(rdev);
data = RREG32(RADEON_CLOCK_CNTL_DATA);
r100_pll_errata_after_data(rdev);
+ spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
return data;
}
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->pll_idx_lock, flags);
WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
r100_pll_errata_after_index(rdev);
WREG32(RADEON_CLOCK_CNTL_DATA, v);
r100_pll_errata_after_data(rdev);
+ spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
}
static void r100_set_safe_registers(struct radeon_device *rdev)
@@ -2926,9 +2933,11 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
- for (j = 0; j <= count; j++) {
- i = (rdp + j) & ring->ptr_mask;
- seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+ if (ring->ready) {
+ for (j = 0; j <= count; j++) {
+ i = (rdp + j) & ring->ptr_mask;
+ seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+ }
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 4e796ecf9ea4..6edf2b3a52b4 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -160,18 +160,25 @@ void r420_pipes_init(struct radeon_device *rdev)
u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
r = RREG32(R_0001FC_MC_IND_DATA);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
}
void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
S_0001F8_MC_IND_WR_EN(1));
WREG32(R_0001FC_MC_IND_DATA, v);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
static void r420_debugfs(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index ea4d3734e6d9..2a1b1876b431 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -119,6 +119,11 @@ u32 r600_get_xclk(struct radeon_device *rdev)
return rdev->clock.spll.reference_freq;
}
+int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+ return 0;
+}
+
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
@@ -1045,20 +1050,27 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
uint32_t r;
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
r = RREG32(R_0028FC_MC_DATA);
WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
}
void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
S_0028F8_MC_IND_WR_EN(1));
WREG32(R_0028FC_MC_DATA, v);
WREG32(R_0028F8_MC_INDEX, 0x7F);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
static void r600_mc_program(struct radeon_device *rdev)
@@ -2092,20 +2104,27 @@ static void r600_gpu_init(struct radeon_device *rdev)
*/
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
(void)RREG32(PCIE_PORT_INDEX);
r = RREG32(PCIE_PORT_DATA);
+ spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
return r;
}
void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
(void)RREG32(PCIE_PORT_INDEX);
WREG32(PCIE_PORT_DATA, (v));
(void)RREG32(PCIE_PORT_DATA);
+ spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
}
/*
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index fa0de46fcc0d..5513d8f06252 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -1084,7 +1084,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
- le16_to_cpu(limits->entries[i].usVoltage);
+ le16_to_cpu(entry->usVoltage);
entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
}
@@ -1219,30 +1219,20 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
void r600_free_extended_power_table(struct radeon_device *rdev)
{
- if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries)
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
- if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries)
- kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
- if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries)
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
- if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries)
- kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries);
- if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries)
- kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
- if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
- kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries);
- if (rdev->pm.dpm.dyn_state.ppm_table)
- kfree(rdev->pm.dpm.dyn_state.ppm_table);
- if (rdev->pm.dpm.dyn_state.cac_tdp_table)
- kfree(rdev->pm.dpm.dyn_state.cac_tdp_table);
- if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
- kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries);
- if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
- kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries);
- if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
- kfree(rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries);
- if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
- kfree(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries);
+ struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;
+
+ kfree(dyn_state->vddc_dependency_on_sclk.entries);
+ kfree(dyn_state->vddci_dependency_on_mclk.entries);
+ kfree(dyn_state->vddc_dependency_on_mclk.entries);
+ kfree(dyn_state->mvdd_dependency_on_mclk.entries);
+ kfree(dyn_state->cac_leakage_table.entries);
+ kfree(dyn_state->phase_shedding_limits_table.entries);
+ kfree(dyn_state->ppm_table);
+ kfree(dyn_state->cac_tdp_table);
+ kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
+ kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
+ kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
+ kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
}
enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
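The cleanup rewrite above leans on kfree(NULL) being a defined no-op, which is why every "if (ptr) kfree(ptr)" pair collapses to a bare kfree(). Userspace free() gives the same guarantee, as in this sketch:

#include <stdlib.h>

struct tables {
	int *a;
	int *b;
};

static void free_tables(struct tables *t)
{
	free(t->a);	/* safe even when the pointer was never allocated */
	free(t->b);
	t->a = NULL;	/* avoid dangling pointers after cleanup */
	t->b = NULL;
}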
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f443010ce90b..b0fa6002af3e 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -257,10 +257,7 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
*/
- if (ASIC_IS_DCE3(rdev)) {
- /* according to the reg specs, this should DCE3.2 only, but in
- * practice it seems to cover DCE3.0 as well.
- */
+ if (ASIC_IS_DCE32(rdev)) {
if (dig->dig_encoder == 0) {
dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
@@ -276,8 +273,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
}
+ } else if (ASIC_IS_DCE3(rdev)) {
+ /* according to the reg specs, this should be DCE3.2 only, but in
+ * practice it seems to cover DCE3.0/3.1 as well.
+ */
+ if (dig->dig_encoder == 0) {
+ WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+ } else {
+ WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
+ WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+ }
} else {
- /* according to the reg specs, this should be DCE2.0 and DCE3.0 */
+ /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
AUDIO_DTO_MODULE(clock / 10));
}
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 454f90a849e4..e673fe26ea84 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1040,7 +1040,7 @@
# define HDMI0_AVI_INFO_CONT (1 << 1)
# define HDMI0_AUDIO_INFO_SEND (1 << 4)
# define HDMI0_AUDIO_INFO_CONT (1 << 5)
-# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */
+# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
# define HDMI0_AUDIO_INFO_UPDATE (1 << 7)
# define HDMI0_MPEG_INFO_SEND (1 << 8)
# define HDMI0_MPEG_INFO_CONT (1 << 9)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ff8b564ce2b2..a400ac1c4147 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -181,7 +181,7 @@ extern int radeon_aspm;
#define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16)
/* PG flags */
-#define RADEON_PG_SUPPORT_GFX_CG (1 << 0)
+#define RADEON_PG_SUPPORT_GFX_PG (1 << 0)
#define RADEON_PG_SUPPORT_GFX_SMG (1 << 1)
#define RADEON_PG_SUPPORT_GFX_DMG (1 << 2)
#define RADEON_PG_SUPPORT_UVD (1 << 3)
@@ -1778,6 +1778,7 @@ struct radeon_asic {
int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
bool (*vblank_too_short)(struct radeon_device *rdev);
void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
+ void (*enable_bapm)(struct radeon_device *rdev, bool enable);
} dpm;
/* pageflipping */
struct {
@@ -2110,6 +2111,28 @@ struct radeon_device {
resource_size_t rmmio_size;
/* protects concurrent MM_INDEX/DATA based register access */
spinlock_t mmio_idx_lock;
+ /* protects concurrent SMC based register access */
+ spinlock_t smc_idx_lock;
+ /* protects concurrent PLL register access */
+ spinlock_t pll_idx_lock;
+ /* protects concurrent MC register access */
+ spinlock_t mc_idx_lock;
+ /* protects concurrent PCIE register access */
+ spinlock_t pcie_idx_lock;
+ /* protects concurrent PCIE_PORT register access */
+ spinlock_t pciep_idx_lock;
+ /* protects concurrent PIF register access */
+ spinlock_t pif_idx_lock;
+ /* protects concurrent CG register access */
+ spinlock_t cg_idx_lock;
+ /* protects concurrent UVD register access */
+ spinlock_t uvd_idx_lock;
+ /* protects concurrent RCU register access */
+ spinlock_t rcu_idx_lock;
+ /* protects concurrent DIDT register access */
+ spinlock_t didt_idx_lock;
+ /* protects concurrent ENDPOINT (audio) register access */
+ spinlock_t end_idx_lock;
void __iomem *rmmio;
radeon_rreg_t mc_rreg;
radeon_wreg_t mc_wreg;
@@ -2277,123 +2300,179 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
*/
static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
uint32_t r;
+ spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
r = RREG32(RADEON_PCIE_DATA);
+ spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
return r;
}
static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
WREG32(RADEON_PCIE_DATA, (v));
+ spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
}
static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(TN_SMC_IND_INDEX_0, (reg));
r = RREG32(TN_SMC_IND_DATA_0);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
return r;
}
static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(TN_SMC_IND_INDEX_0, (reg));
WREG32(TN_SMC_IND_DATA_0, (v));
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
}
static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
r = RREG32(R600_RCU_DATA);
+ spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
return r;
}
static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
WREG32(R600_RCU_DATA, (v));
+ spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
}
static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->cg_idx_lock, flags);
WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
r = RREG32(EVERGREEN_CG_IND_DATA);
+ spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
return r;
}
static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->cg_idx_lock, flags);
WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
WREG32(EVERGREEN_CG_IND_DATA, (v));
+ spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
}
static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->pif_idx_lock, flags);
WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
r = RREG32(EVERGREEN_PIF_PHY0_DATA);
+ spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
return r;
}
static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->pif_idx_lock, flags);
WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
+ spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
}
static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->pif_idx_lock, flags);
WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
r = RREG32(EVERGREEN_PIF_PHY1_DATA);
+ spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
return r;
}
static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->pif_idx_lock, flags);
WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
+ spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
}
static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
r = RREG32(R600_UVD_CTX_DATA);
+ spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
return r;
}
static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
WREG32(R600_UVD_CTX_DATA, (v));
+ spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
}
static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
{
+ unsigned long flags;
u32 r;
+ spin_lock_irqsave(&rdev->didt_idx_lock, flags);
WREG32(CIK_DIDT_IND_INDEX, (reg));
r = RREG32(CIK_DIDT_IND_DATA);
+ spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
return r;
}
static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->didt_idx_lock, flags);
WREG32(CIK_DIDT_IND_INDEX, (reg));
WREG32(CIK_DIDT_IND_DATA, (v));
+ spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
}
void r100_pll_errata_after_index(struct radeon_device *rdev);
@@ -2569,6 +2648,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
+#define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e))
/* Common functions */
/* AGP */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 630853b96841..8f7e04538fd6 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1004,6 +1004,8 @@ static struct radeon_asic rv6xx_asic = {
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &r600_hdmi_enable,
+ .hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_cpdma,
@@ -1037,6 +1039,7 @@ static struct radeon_asic rv6xx_asic = {
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
.get_temperature = &rv6xx_get_temp,
+ .set_uvd_clocks = &r600_set_uvd_clocks,
},
.dpm = {
.init = &rv6xx_dpm_init,
@@ -1126,6 +1129,7 @@ static struct radeon_asic rs780_asic = {
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.get_temperature = &rv6xx_get_temp,
+ .set_uvd_clocks = &r600_set_uvd_clocks,
},
.dpm = {
.init = &rs780_dpm_init,
@@ -1141,6 +1145,7 @@ static struct radeon_asic rs780_asic = {
.get_mclk = &rs780_dpm_get_mclk,
.print_power_state = &rs780_dpm_print_power_state,
.debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level,
+ .force_performance_level = &rs780_dpm_force_performance_level,
},
.pflip = {
.pre_page_flip = &rs600_pre_page_flip,
@@ -1791,6 +1796,7 @@ static struct radeon_asic trinity_asic = {
.print_power_state = &trinity_dpm_print_power_state,
.debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level,
.force_performance_level = &trinity_dpm_force_performance_level,
+ .enable_bapm = &trinity_dpm_enable_bapm,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -2166,6 +2172,7 @@ static struct radeon_asic kv_asic = {
.debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
.force_performance_level = &kv_dpm_force_performance_level,
.powergate_uvd = &kv_dpm_powergate_uvd,
+ .enable_bapm = &kv_dpm_enable_bapm,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -2390,7 +2397,7 @@ int radeon_asic_init(struct radeon_device *rdev)
RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0 |
- /*RADEON_PG_SUPPORT_GFX_CG | */
+ /*RADEON_PG_SUPPORT_GFX_PG | */
RADEON_PG_SUPPORT_SDMA;
break;
case CHIP_OLAND:
@@ -2479,7 +2486,7 @@ int radeon_asic_init(struct radeon_device *rdev)
RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0;
- /*RADEON_PG_SUPPORT_GFX_CG |
+ /*RADEON_PG_SUPPORT_GFX_PG |
RADEON_PG_SUPPORT_GFX_SMG |
RADEON_PG_SUPPORT_GFX_DMG |
RADEON_PG_SUPPORT_UVD |
@@ -2507,7 +2514,7 @@ int radeon_asic_init(struct radeon_device *rdev)
RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0;
- /*RADEON_PG_SUPPORT_GFX_CG |
+ /*RADEON_PG_SUPPORT_GFX_PG |
RADEON_PG_SUPPORT_GFX_SMG |
RADEON_PG_SUPPORT_UVD |
RADEON_PG_SUPPORT_VCE |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 818bbe6b884b..70c29d5e080d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -389,6 +389,7 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
u32 r600_get_xclk(struct radeon_device *rdev);
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
int rv6xx_get_temp(struct radeon_device *rdev);
+int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
void r600_dpm_post_set_power_state(struct radeon_device *rdev);
/* r600 dma */
@@ -428,6 +429,8 @@ void rs780_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *ps);
void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
+int rs780_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level);
/*
* rv770,rv730,rv710,rv740
@@ -625,6 +628,7 @@ void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *r
struct seq_file *m);
int trinity_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
+void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
/* DCE6 - SI */
void dce6_bandwidth_update(struct radeon_device *rdev);
@@ -781,6 +785,7 @@ void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
int kv_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
/* uvd v1.0 */
uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 404e25d285ba..f79ee184ffd5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1367,6 +1367,7 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
uint16_t data_offset, size;
struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
+ struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *ss_assign;
uint8_t frev, crev;
int i, num_indices;
@@ -1378,18 +1379,21 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
-
+ ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
+ ((u8 *)&ss_info->asSS_Info[0]);
for (i = 0; i < num_indices; i++) {
- if (ss_info->asSS_Info[i].ucSS_Id == id) {
+ if (ss_assign->ucSS_Id == id) {
ss->percentage =
- le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
- ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
- ss->step = ss_info->asSS_Info[i].ucSS_Step;
- ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
- ss->range = ss_info->asSS_Info[i].ucSS_Range;
- ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+ le16_to_cpu(ss_assign->usSpreadSpectrumPercentage);
+ ss->type = ss_assign->ucSpreadSpectrumType;
+ ss->step = ss_assign->ucSS_Step;
+ ss->delay = ss_assign->ucSS_Delay;
+ ss->range = ss_assign->ucSS_Range;
+ ss->refdiv = ss_assign->ucRecommendedRef_Div;
return true;
}
+ ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
+ ((u8 *)ss_assign + sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT));
}
}
return false;
@@ -1477,6 +1481,12 @@ union asic_ss_info {
struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
};
+union asic_ss_assignment {
+ struct _ATOM_ASIC_SS_ASSIGNMENT v1;
+ struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2;
+ struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3;
+};
+
bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
struct radeon_atom_ss *ss,
int id, u32 clock)
@@ -1485,6 +1495,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
uint16_t data_offset, size;
union asic_ss_info *ss_info;
+ union asic_ss_assignment *ss_assign;
uint8_t frev, crev;
int i, num_indices;
@@ -1509,45 +1520,52 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT);
+ ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]);
for (i = 0; i < num_indices; i++) {
- if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
- (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
+ if ((ss_assign->v1.ucClockIndication == id) &&
+ (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) {
ss->percentage =
- le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
- ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
- ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz);
+ le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
+ ss->type = ss_assign->v1.ucSpreadSpectrumMode;
+ ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
return true;
}
+ ss_assign = (union asic_ss_assignment *)
+ ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT));
}
break;
case 2:
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+ ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]);
for (i = 0; i < num_indices; i++) {
- if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
- (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
+ if ((ss_assign->v2.ucClockIndication == id) &&
+ (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) {
ss->percentage =
- le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
- ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
- ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+ le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
+ ss->type = ss_assign->v2.ucSpreadSpectrumMode;
+ ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
if ((crev == 2) &&
((id == ASIC_INTERNAL_ENGINE_SS) ||
(id == ASIC_INTERNAL_MEMORY_SS)))
ss->rate /= 100;
return true;
}
+ ss_assign = (union asic_ss_assignment *)
+ ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2));
}
break;
case 3:
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+ ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]);
for (i = 0; i < num_indices; i++) {
- if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
- (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
+ if ((ss_assign->v3.ucClockIndication == id) &&
+ (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) {
ss->percentage =
- le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
- ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
- ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+ le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
+ ss->type = ss_assign->v3.ucSpreadSpectrumMode;
+ ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
if ((id == ASIC_INTERNAL_ENGINE_SS) ||
(id == ASIC_INTERNAL_MEMORY_SS))
ss->rate /= 100;
@@ -1555,6 +1573,8 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
return true;
}
+ ss_assign = (union asic_ss_assignment *)
+ ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3));
}
break;
default:
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2399f25ec037..79159b5da05b 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -396,6 +396,21 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
}
}
+ if (property == rdev->mode_info.audio_property) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ /* need to find digital encoder on connector */
+ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+ if (!encoder)
+ return 0;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_connector->audio != val) {
+ radeon_connector->audio = val;
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+ }
+
if (property == rdev->mode_info.underscan_property) {
/* need to find digital encoder on connector */
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
@@ -1420,7 +1435,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
} else {
- /* try non-aux ddc (DP to DVI/HMDI/etc. adapter) */
+ /* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
if (radeon_ddc_probe(radeon_connector, false))
ret = connector_status_connected;
}
@@ -1489,6 +1504,24 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
.force = radeon_dvi_force,
};
+static const struct drm_connector_funcs radeon_edp_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = radeon_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = radeon_lvds_set_property,
+ .destroy = radeon_dp_connector_destroy,
+ .force = radeon_dvi_force,
+};
+
+static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = radeon_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = radeon_lvds_set_property,
+ .destroy = radeon_dp_connector_destroy,
+ .force = radeon_dvi_force,
+};
+
void
radeon_add_atom_connector(struct drm_device *dev,
uint32_t connector_id,
@@ -1580,8 +1613,6 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
- drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
- drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
/* add DP i2c bus */
if (connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -1598,6 +1629,10 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_DVIA:
default:
+ drm_connector_init(dev, &radeon_connector->base,
+ &radeon_dp_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base,
+ &radeon_dp_connector_helper_funcs);
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
radeon_connector->dac_load_detect = true;
@@ -1610,6 +1645,10 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
case DRM_MODE_CONNECTOR_DisplayPort:
+ drm_connector_init(dev, &radeon_connector->base,
+ &radeon_dp_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base,
+ &radeon_dp_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
@@ -1619,6 +1658,9 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.audio_property,
+ RADEON_AUDIO_DISABLE);
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1634,6 +1676,10 @@ radeon_add_atom_connector(struct drm_device *dev,
break;
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
+ drm_connector_init(dev, &radeon_connector->base,
+ &radeon_lvds_bridge_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base,
+ &radeon_dp_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
@@ -1708,6 +1754,11 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.underscan_vborder_property,
0);
}
+ if (ASIC_IS_DCE2(rdev)) {
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.audio_property,
+ RADEON_AUDIO_DISABLE);
+ }
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
@@ -1748,6 +1799,11 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.underscan_vborder_property,
0);
}
+ if (ASIC_IS_DCE2(rdev)) {
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.audio_property,
+ RADEON_AUDIO_DISABLE);
+ }
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1787,6 +1843,11 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.underscan_vborder_property,
0);
}
+ if (ASIC_IS_DCE2(rdev)) {
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.audio_property,
+ RADEON_AUDIO_DISABLE);
+ }
connector->interlace_allowed = true;
/* in theory with a DP to VGA converter... */
connector->doublescan_allowed = false;
@@ -1797,7 +1858,7 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
- drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+ drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
/* add DP i2c bus */
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index a56084410372..66c222836631 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -28,6 +28,7 @@
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
+#include "radeon_trace.h"
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
@@ -80,10 +81,13 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->relocs[i].lobj.bo = p->relocs[i].robj;
p->relocs[i].lobj.written = !!r->write_domain;
- /* the first reloc of an UVD job is the
- msg and that must be in VRAM */
- if (p->ring == R600_RING_TYPE_UVD_INDEX && i == 0) {
- /* TODO: is this still needed for NI+ ? */
+ /* the first reloc of an UVD job is the msg and that must be in
+ VRAM, also put everything into VRAM on AGP cards to avoid
+ image corruptions */
+ if (p->ring == R600_RING_TYPE_UVD_INDEX &&
+ p->rdev->family < CHIP_PALM &&
+ (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
+
p->relocs[i].lobj.domain =
RADEON_GEM_DOMAIN_VRAM;
@@ -559,6 +563,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return r;
}
+ trace_radeon_cs(&parser);
+
r = radeon_cs_ib_chunk(rdev, &parser);
if (r) {
goto out;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 16cb8792b1e6..841d0e09be3e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1249,6 +1249,17 @@ int radeon_device_init(struct radeon_device *rdev,
/* Registers mapping */
/* TODO: block userspace mapping of io register */
spin_lock_init(&rdev->mmio_idx_lock);
+ spin_lock_init(&rdev->smc_idx_lock);
+ spin_lock_init(&rdev->pll_idx_lock);
+ spin_lock_init(&rdev->mc_idx_lock);
+ spin_lock_init(&rdev->pcie_idx_lock);
+ spin_lock_init(&rdev->pciep_idx_lock);
+ spin_lock_init(&rdev->pif_idx_lock);
+ spin_lock_init(&rdev->cg_idx_lock);
+ spin_lock_init(&rdev->uvd_idx_lock);
+ spin_lock_init(&rdev->rcu_idx_lock);
+ spin_lock_init(&rdev->didt_idx_lock);
+ spin_lock_init(&rdev->end_idx_lock);
if (rdev->family >= CHIP_BONAIRE) {
rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
@@ -1309,13 +1320,22 @@ int radeon_device_init(struct radeon_device *rdev,
return r;
}
if ((radeon_testing & 1)) {
- radeon_test_moves(rdev);
+ if (rdev->accel_working)
+ radeon_test_moves(rdev);
+ else
+ DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
}
if ((radeon_testing & 2)) {
- radeon_test_syncing(rdev);
+ if (rdev->accel_working)
+ radeon_test_syncing(rdev);
+ else
+ DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
}
if (radeon_benchmarking) {
- radeon_benchmark(rdev, radeon_benchmarking);
+ if (rdev->accel_working)
+ radeon_benchmark(rdev, radeon_benchmarking);
+ else
+ DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b055bddaa94c..0d1aa050d41d 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1172,6 +1172,12 @@ static struct drm_prop_enum_list radeon_underscan_enum_list[] =
{ UNDERSCAN_AUTO, "auto" },
};
+static struct drm_prop_enum_list radeon_audio_enum_list[] =
+{ { RADEON_AUDIO_DISABLE, "off" },
+ { RADEON_AUDIO_ENABLE, "on" },
+ { RADEON_AUDIO_AUTO, "auto" },
+};
+
static int radeon_modeset_create_props(struct radeon_device *rdev)
{
int sz;
@@ -1222,6 +1228,12 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
if (!rdev->mode_info.underscan_vborder_property)
return -ENOMEM;
+ sz = ARRAY_SIZE(radeon_audio_enum_list);
+ rdev->mode_info.audio_property =
+ drm_property_create_enum(rdev->ddev, 0,
+ "audio",
+ radeon_audio_enum_list, sz);
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cb4445f55a96..cdd12dcd988b 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -153,7 +153,7 @@ int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
-int radeon_audio = 0;
+int radeon_audio = 1;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = -1;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index d908d8d68f6b..ef63d3f00b2f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -247,6 +247,8 @@ struct radeon_mode_info {
struct drm_property *underscan_property;
struct drm_property *underscan_hborder_property;
struct drm_property *underscan_vborder_property;
+ /* audio */
+ struct drm_property *audio_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size;
@@ -471,6 +473,12 @@ struct radeon_router {
u8 cd_mux_state;
};
+enum radeon_connector_audio {
+ RADEON_AUDIO_DISABLE = 0,
+ RADEON_AUDIO_ENABLE = 1,
+ RADEON_AUDIO_AUTO = 2
+};
+
struct radeon_connector {
struct drm_connector base;
uint32_t connector_id;
@@ -489,6 +497,7 @@ struct radeon_connector {
struct radeon_hpd hpd;
struct radeon_router router;
struct radeon_i2c_chan *router_bus;
+ enum radeon_connector_audio audio;
};
struct radeon_framebuffer {
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index d7555369a3e5..ac07ad1d4f8c 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -67,7 +67,16 @@ int radeon_pm_get_type_index(struct radeon_device *rdev,
void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
- if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+ mutex_lock(&rdev->pm.mutex);
+ if (power_supply_is_system_supplied() > 0)
+ rdev->pm.dpm.ac_power = true;
+ else
+ rdev->pm.dpm.ac_power = false;
+ if (rdev->asic->dpm.enable_bapm)
+ radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
+ mutex_unlock(&rdev->pm.mutex);
+ } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
mutex_lock(&rdev->pm.mutex);
radeon_pm_update_profile(rdev);
@@ -333,7 +342,7 @@ static ssize_t radeon_get_pm_profile(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
int cp = rdev->pm.profile;
@@ -349,7 +358,7 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
mutex_lock(&rdev->pm.mutex);
@@ -383,7 +392,7 @@ static ssize_t radeon_get_pm_method(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
int pm = rdev->pm.pm_method;
@@ -397,7 +406,7 @@ static ssize_t radeon_set_pm_method(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
/* we don't support the legacy modes with dpm */
@@ -433,7 +442,7 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
@@ -447,7 +456,7 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
mutex_lock(&rdev->pm.mutex);
@@ -472,7 +481,7 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
@@ -486,7 +495,7 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
enum radeon_dpm_forced_level level;
int ret = 0;
@@ -524,7 +533,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
int temp;
@@ -536,6 +545,23 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
+static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct radeon_device *rdev = ddev->dev_private;
+ int hyst = to_sensor_dev_attr(attr)->index;
+ int temp;
+
+ if (hyst)
+ temp = rdev->pm.dpm.thermal.min_temp;
+ else
+ temp = rdev->pm.dpm.thermal.max_temp;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+}
+
static ssize_t radeon_hwmon_show_name(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -544,16 +570,37 @@ static ssize_t radeon_hwmon_show_name(struct device *dev,
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
&sensor_dev_attr_name.dev_attr.attr,
NULL
};
+static umode_t hwmon_attributes_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct radeon_device *rdev = ddev->dev_private;
+
+ /* Skip limit attributes if DPM is not enabled */
+ if (rdev->pm.pm_method != PM_METHOD_DPM &&
+ (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
+ return 0;
+
+ return attr->mode;
+}
+
static const struct attribute_group hwmon_attrgroup = {
.attrs = hwmon_attributes,
+ .is_visible = hwmon_attributes_visible,
};
static int radeon_hwmon_init(struct radeon_device *rdev)
@@ -870,10 +917,13 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
radeon_dpm_post_set_power_state(rdev);
- /* force low perf level for thermal */
- if (rdev->pm.dpm.thermal_active &&
- rdev->asic->dpm.force_performance_level) {
- radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
+ if (rdev->asic->dpm.force_performance_level) {
+ if (rdev->pm.dpm.thermal_active)
+ /* force low perf level for thermal */
+ radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
+ else
+ /* otherwise, enable auto */
+ radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
}
done:
@@ -952,7 +1002,7 @@ static void radeon_pm_resume_old(struct radeon_device *rdev)
{
/* set up the default clocks if the MC ucode is loaded */
if ((rdev->family >= CHIP_BARTS) &&
- (rdev->family <= CHIP_HAINAN) &&
+ (rdev->family <= CHIP_CAYMAN) &&
rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -996,7 +1046,7 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev)
if (ret) {
DRM_ERROR("radeon: dpm resume failed\n");
if ((rdev->family >= CHIP_BARTS) &&
- (rdev->family <= CHIP_HAINAN) &&
+ (rdev->family <= CHIP_CAYMAN) &&
rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1047,7 +1097,7 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
radeon_pm_init_profile(rdev);
/* set up the default clocks if the MC ucode is loaded */
if ((rdev->family >= CHIP_BARTS) &&
- (rdev->family <= CHIP_HAINAN) &&
+ (rdev->family <= CHIP_CAYMAN) &&
rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1102,9 +1152,10 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
{
int ret;
- /* default to performance state */
+ /* default to balanced state */
rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
+ rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
rdev->pm.default_sclk = rdev->clock.default_sclk;
rdev->pm.default_mclk = rdev->clock.default_mclk;
rdev->pm.current_sclk = rdev->clock.default_sclk;
@@ -1132,7 +1183,7 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
if (ret) {
rdev->pm.dpm_enabled = false;
if ((rdev->family >= CHIP_BARTS) &&
- (rdev->family <= CHIP_HAINAN) &&
+ (rdev->family <= CHIP_CAYMAN) &&
rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 46a25f037b84..18254e1c3e71 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -839,9 +839,11 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
* packet that is the root issue
*/
i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
- for (j = 0; j <= (count + 32); j++) {
- seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
- i = (i + 1) & ring->ptr_mask;
+ if (ring->ready) {
+ for (j = 0; j <= (count + 32); j++) {
+ seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
+ i = (i + 1) & ring->ptr_mask;
+ }
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index eafd8160a155..f7e367815964 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -27,6 +27,26 @@ TRACE_EVENT(radeon_bo_create,
TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
);
+TRACE_EVENT(radeon_cs,
+ TP_PROTO(struct radeon_cs_parser *p),
+ TP_ARGS(p),
+ TP_STRUCT__entry(
+ __field(u32, ring)
+ __field(u32, dw)
+ __field(u32, fences)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = p->ring;
+ __entry->dw = p->chunks[p->chunk_ib_idx].length_dw;
+ __entry->fences = radeon_fence_count_emitted(
+ p->rdev, p->ring);
+ ),
+ TP_printk("ring=%u, dw=%u, fences=%u",
+ __entry->ring, __entry->dw,
+ __entry->fences)
+);
+
DECLARE_EVENT_CLASS(radeon_fence_request,
TP_PROTO(struct drm_device *dev, u32 seqno),
@@ -53,13 +73,6 @@ DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
TP_ARGS(dev, seqno)
);
-DEFINE_EVENT(radeon_fence_request, radeon_fence_retire,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
-
- TP_ARGS(dev, seqno)
-);
-
DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
TP_PROTO(struct drm_device *dev, u32 seqno),
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 1a01bbff9bfa..a0f11856ddde 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -476,8 +476,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
return -EINVAL;
}
- /* TODO: is this still necessary on NI+ ? */
- if ((cmd == 0 || cmd == 0x3) &&
+ if (p->rdev->family < CHIP_PALM && (cmd == 0 || cmd == 0x3) &&
(start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
start, end);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index b8074a8ec75a..9566b5940a5a 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -274,19 +274,26 @@ static void rs400_mc_init(struct radeon_device *rdev)
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
uint32_t r;
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(RS480_NB_MC_INDEX, reg & 0xff);
r = RREG32(RS480_NB_MC_DATA);
WREG32(RS480_NB_MC_INDEX, 0xff);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
}
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
WREG32(RS480_NB_MC_DATA, (v));
WREG32(RS480_NB_MC_INDEX, 0xff);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 670b555d2ca2..6acba8017b9a 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -847,16 +847,26 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
S_000070_MC_IND_CITF_ARB0(1));
- return RREG32(R_000074_MC_IND_DATA);
+ r = RREG32(R_000074_MC_IND_DATA);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+ return r;
}
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
WREG32(R_000074_MC_IND_DATA, v);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
static void rs600_debugfs(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index d8ddfb34545d..1447d794c22a 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -631,20 +631,27 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
uint32_t r;
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
r = RREG32(R_00007C_MC_DATA);
WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
}
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
S_000078_MC_IND_WR_EN(1));
WREG32(R_00007C_MC_DATA, v);
WREG32(R_000078_MC_INDEX, 0x7F);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
static void rs690_mc_program(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index d1a1ce73bd45..6af8505cf4d2 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -62,9 +62,7 @@ static void rs780_get_pm_mode_parameters(struct radeon_device *rdev)
radeon_crtc = to_radeon_crtc(crtc);
pi->crtc_id = radeon_crtc->crtc_id;
if (crtc->mode.htotal && crtc->mode.vtotal)
- pi->refresh_rate =
- (crtc->mode.clock * 1000) /
- (crtc->mode.htotal * crtc->mode.vtotal);
+ pi->refresh_rate = drm_mode_vrefresh(&crtc->mode);
break;
}
}
@@ -376,9 +374,8 @@ static void rs780_disable_vbios_powersaving(struct radeon_device *rdev)
WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000);
}
-static void rs780_force_voltage_to_high(struct radeon_device *rdev)
+static void rs780_force_voltage(struct radeon_device *rdev, u16 voltage)
{
- struct igp_power_info *pi = rs780_get_pi(rdev);
struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps);
if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) &&
@@ -390,7 +387,7 @@ static void rs780_force_voltage_to_high(struct radeon_device *rdev)
udelay(1);
WREG32_P(FVTHROT_PWM_CTRL_REG0,
- STARTING_PWM_HIGHTIME(pi->max_voltage),
+ STARTING_PWM_HIGHTIME(voltage),
~STARTING_PWM_HIGHTIME_MASK);
WREG32_P(FVTHROT_PWM_CTRL_REG0,
@@ -404,6 +401,26 @@ static void rs780_force_voltage_to_high(struct radeon_device *rdev)
WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
}
+static void rs780_force_fbdiv(struct radeon_device *rdev, u32 fb_div)
+{
+ struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps);
+
+ if (current_state->sclk_low == current_state->sclk_high)
+ return;
+
+ WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);
+
+ WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(fb_div),
+ ~FORCED_FEEDBACK_DIV_MASK);
+ WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(fb_div),
+ ~STARTING_FEEDBACK_DIV_MASK);
+ WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);
+
+ udelay(100);
+
+ WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
+}
+
static int rs780_set_engine_clock_scaling(struct radeon_device *rdev,
struct radeon_ps *new_ps,
struct radeon_ps *old_ps)
@@ -432,17 +449,13 @@ static int rs780_set_engine_clock_scaling(struct radeon_device *rdev,
if (ret)
return ret;
- WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);
-
- WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(max_dividers.fb_div),
- ~FORCED_FEEDBACK_DIV_MASK);
- WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(max_dividers.fb_div),
- ~STARTING_FEEDBACK_DIV_MASK);
- WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);
-
- udelay(100);
+ if ((min_dividers.ref_div != max_dividers.ref_div) ||
+ (min_dividers.post_div != max_dividers.post_div) ||
+ (max_dividers.ref_div != current_max_dividers.ref_div) ||
+ (max_dividers.post_div != current_max_dividers.post_div))
+ return -EINVAL;
- WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
+ rs780_force_fbdiv(rdev, max_dividers.fb_div);
if (max_dividers.fb_div > min_dividers.fb_div) {
WREG32_P(FVTHROT_FBDIV_REG0,
@@ -486,6 +499,9 @@ static void rs780_activate_engine_clk_scaling(struct radeon_device *rdev,
(new_state->sclk_low == old_state->sclk_low))
return;
+ if (new_state->sclk_high == new_state->sclk_low)
+ return;
+
rs780_clk_scaling_enable(rdev, true);
}
@@ -649,7 +665,7 @@ int rs780_dpm_set_power_state(struct radeon_device *rdev)
rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
if (pi->voltage_control) {
- rs780_force_voltage_to_high(rdev);
+ rs780_force_voltage(rdev, pi->max_voltage);
mdelay(5);
}
@@ -717,14 +733,18 @@ static void rs780_parse_pplib_non_clock_info(struct radeon_device *rdev,
if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
- } else if (r600_is_uvd_state(rps->class, rps->class2)) {
- rps->vclk = RS780_DEFAULT_VCLK_FREQ;
- rps->dclk = RS780_DEFAULT_DCLK_FREQ;
} else {
rps->vclk = 0;
rps->dclk = 0;
}
+ if (r600_is_uvd_state(rps->class, rps->class2)) {
+ if ((rps->vclk == 0) || (rps->dclk == 0)) {
+ rps->vclk = RS780_DEFAULT_VCLK_FREQ;
+ rps->dclk = RS780_DEFAULT_DCLK_FREQ;
+ }
+ }
+
if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
rdev->pm.dpm.boot_ps = rps;
if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
@@ -986,3 +1006,55 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n",
ps->sclk_high, ps->max_voltage);
}
+
+int rs780_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level)
+{
+ struct igp_power_info *pi = rs780_get_pi(rdev);
+ struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct igp_ps *ps = rs780_get_ps(rps);
+ struct atom_clock_dividers dividers;
+ int ret;
+
+ rs780_clk_scaling_enable(rdev, false);
+ rs780_voltage_scaling_enable(rdev, false);
+
+ if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+ if (pi->voltage_control)
+ rs780_force_voltage(rdev, pi->max_voltage);
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ ps->sclk_high, false, &dividers);
+ if (ret)
+ return ret;
+
+ rs780_force_fbdiv(rdev, dividers.fb_div);
+ } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ ps->sclk_low, false, &dividers);
+ if (ret)
+ return ret;
+
+ rs780_force_fbdiv(rdev, dividers.fb_div);
+
+ if (pi->voltage_control)
+ rs780_force_voltage(rdev, pi->min_voltage);
+ } else {
+ if (pi->voltage_control)
+ rs780_force_voltage(rdev, pi->max_voltage);
+
+ if (ps->sclk_high != ps->sclk_low) {
+ WREG32_P(FVTHROT_FBDIV_REG1, 0, ~FORCE_FEEDBACK_DIV);
+ rs780_clk_scaling_enable(rdev, true);
+ }
+
+ if (pi->voltage_control) {
+ rs780_voltage_scaling_enable(rdev, true);
+ rs780_enable_voltage_scaling(rdev, rps);
+ }
+ }
+
+ rdev->pm.dpm.forced_level = level;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 8ea1573ae820..873eb4b193b4 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -209,19 +209,27 @@ static void rv515_mc_init(struct radeon_device *rdev)
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
+ unsigned long flags;
uint32_t r;
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
r = RREG32(MC_IND_DATA);
WREG32(MC_IND_INDEX, 0);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
+
return r;
}
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
WREG32(MC_IND_DATA, (v));
WREG32(MC_IND_INDEX, 0);
+ spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index ab1f2016f21e..5811d277a36a 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -1758,8 +1758,6 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
- rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 8cbb85dae5aa..913b025ae9b3 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2064,12 +2064,6 @@ int rv770_dpm_set_power_state(struct radeon_device *rdev)
rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps);
rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
- ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
- if (ret) {
- DRM_ERROR("rv770_dpm_force_performance_level failed\n");
- return ret;
- }
-
return 0;
}
@@ -2147,14 +2141,18 @@ static void rv7xx_parse_pplib_non_clock_info(struct radeon_device *rdev,
if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
- } else if (r600_is_uvd_state(rps->class, rps->class2)) {
- rps->vclk = RV770_DEFAULT_VCLK_FREQ;
- rps->dclk = RV770_DEFAULT_DCLK_FREQ;
} else {
rps->vclk = 0;
rps->dclk = 0;
}
+ if (r600_is_uvd_state(rps->class, rps->class2)) {
+ if ((rps->vclk == 0) || (rps->dclk == 0)) {
+ rps->vclk = RV770_DEFAULT_VCLK_FREQ;
+ rps->dclk = RV770_DEFAULT_DCLK_FREQ;
+ }
+ }
+
if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
rdev->pm.dpm.boot_ps = rps;
if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
diff --git a/drivers/gpu/drm/radeon/rv770_smc.c b/drivers/gpu/drm/radeon/rv770_smc.c
index ab95da570215..b2a224407365 100644
--- a/drivers/gpu/drm/radeon/rv770_smc.c
+++ b/drivers/gpu/drm/radeon/rv770_smc.c
@@ -274,8 +274,8 @@ static const u8 cayman_smc_int_vectors[] =
0x08, 0x72, 0x08, 0x72
};
-int rv770_set_smc_sram_address(struct radeon_device *rdev,
- u16 smc_address, u16 limit)
+static int rv770_set_smc_sram_address(struct radeon_device *rdev,
+ u16 smc_address, u16 limit)
{
u32 addr;
@@ -296,9 +296,10 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
u16 smc_start_address, const u8 *src,
u16 byte_count, u16 limit)
{
+ unsigned long flags;
u32 data, original_data, extra_shift;
u16 addr;
- int ret;
+ int ret = 0;
if (smc_start_address & 3)
return -EINVAL;
@@ -307,13 +308,14 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
addr = smc_start_address;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
while (byte_count >= 4) {
/* SMC address space is BE */
data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
ret = rv770_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
WREG32(SMC_SRAM_DATA, data);
@@ -328,7 +330,7 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
ret = rv770_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
original_data = RREG32(SMC_SRAM_DATA);
@@ -346,12 +348,15 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
ret = rv770_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
WREG32(SMC_SRAM_DATA, data);
}
- return 0;
+done:
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+ return ret;
}
static int rv770_program_interrupt_vectors(struct radeon_device *rdev,
@@ -461,12 +466,15 @@ PPSMC_Result rv770_wait_for_smc_inactive(struct radeon_device *rdev)
static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit)
{
+ unsigned long flags;
u16 i;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
for (i = 0; i < limit; i += 4) {
rv770_set_smc_sram_address(rdev, i, limit);
WREG32(SMC_SRAM_DATA, 0);
}
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
}
int rv770_load_smc_ucode(struct radeon_device *rdev,
@@ -595,27 +603,29 @@ int rv770_load_smc_ucode(struct radeon_device *rdev,
int rv770_read_smc_sram_dword(struct radeon_device *rdev,
u16 smc_address, u32 *value, u16 limit)
{
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
- if (ret)
- return ret;
-
- *value = RREG32(SMC_SRAM_DATA);
+ if (ret == 0)
+ *value = RREG32(SMC_SRAM_DATA);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
- return 0;
+ return ret;
}
int rv770_write_smc_sram_dword(struct radeon_device *rdev,
u16 smc_address, u32 value, u16 limit)
{
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
- if (ret)
- return ret;
+ if (ret == 0)
+ WREG32(SMC_SRAM_DATA, value);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
- WREG32(SMC_SRAM_DATA, value);
-
- return 0;
+ return ret;
}
diff --git a/drivers/gpu/drm/radeon/rv770_smc.h b/drivers/gpu/drm/radeon/rv770_smc.h
index f78d92a4b325..3b2c963c4880 100644
--- a/drivers/gpu/drm/radeon/rv770_smc.h
+++ b/drivers/gpu/drm/radeon/rv770_smc.h
@@ -187,8 +187,6 @@ typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
#define RV770_SMC_SOFT_REGISTER_uvd_enabled 0x9C
#define RV770_SMC_SOFT_REGISTER_is_asic_lombok 0xA0
-int rv770_set_smc_sram_address(struct radeon_device *rdev,
- u16 smc_address, u16 limit);
int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
u16 smc_start_address, const u8 *src,
u16 byte_count, u16 limit);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 9fe60e542922..1ae277152cc7 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -852,7 +852,7 @@
#define AFMT_VBI_PACKET_CONTROL 0x7608
# define AFMT_GENERIC0_UPDATE (1 << 2)
#define AFMT_INFOFRAME_CONTROL0 0x760c
-# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */
+# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
# define AFMT_AUDIO_INFO_UPDATE (1 << 7)
# define AFMT_MPEG_INFO_UPDATE (1 << 10)
#define AFMT_GENERIC0_7 0x7610
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 3e23b757dcfa..c354c1094967 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -83,6 +83,8 @@ extern void si_dma_vm_set_page(struct radeon_device *rdev,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
+static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
+ bool enable);
static const u32 verde_rlc_save_restore_register_list[] =
{
@@ -3386,6 +3388,8 @@ static int si_cp_resume(struct radeon_device *rdev)
u32 rb_bufsz;
int r;
+ si_enable_gui_idle_interrupt(rdev, false);
+
WREG32(CP_SEM_WAIT_TIMER, 0x0);
WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
@@ -3501,6 +3505,8 @@ static int si_cp_resume(struct radeon_device *rdev)
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
}
+ si_enable_gui_idle_interrupt(rdev, true);
+
return 0;
}
@@ -4888,7 +4894,7 @@ static void si_enable_gfx_cgpg(struct radeon_device *rdev,
{
u32 tmp;
- if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
WREG32(RLC_TTOP_D, tmp);
@@ -5250,6 +5256,7 @@ void si_update_cg(struct radeon_device *rdev,
u32 block, bool enable)
{
if (block & RADEON_CG_BLOCK_GFX) {
+ si_enable_gui_idle_interrupt(rdev, false);
/* order matters! */
if (enable) {
si_enable_mgcg(rdev, true);
@@ -5258,6 +5265,7 @@ void si_update_cg(struct radeon_device *rdev,
si_enable_cgcg(rdev, false);
si_enable_mgcg(rdev, false);
}
+ si_enable_gui_idle_interrupt(rdev, true);
}
if (block & RADEON_CG_BLOCK_MC) {
@@ -5408,7 +5416,7 @@ static void si_init_pg(struct radeon_device *rdev)
si_init_dma_pg(rdev);
}
si_init_ao_cu_mask(rdev);
- if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
si_init_gfx_cgpg(rdev);
}
si_enable_dma_pg(rdev, true);
@@ -5560,7 +5568,9 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
{
u32 tmp;
- WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ tmp = RREG32(CP_INT_CNTL_RING0) &
+ (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ WREG32(CP_INT_CNTL_RING0, tmp);
WREG32(CP_INT_CNTL_RING1, 0);
WREG32(CP_INT_CNTL_RING2, 0);
tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
@@ -5685,7 +5695,7 @@ static int si_irq_init(struct radeon_device *rdev)
int si_irq_set(struct radeon_device *rdev)
{
- u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+ u32 cp_int_cntl;
u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
@@ -5706,6 +5716,9 @@ int si_irq_set(struct radeon_device *rdev)
return 0;
}
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
+ (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+
if (!ASIC_IS_NODCE(rdev)) {
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 5be9b4e72350..9ace28702c76 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2910,6 +2910,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
bool disable_sclk_switching = false;
u32 mclk, sclk;
u16 vddc, vddci;
+ u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
int i;
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -2943,6 +2944,29 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
}
}
+ /* limit clocks to max supported clocks based on voltage dependency tables */
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+ &max_sclk_vddc);
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ &max_mclk_vddci);
+ btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ &max_mclk_vddc);
+
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (max_sclk_vddc) {
+ if (ps->performance_levels[i].sclk > max_sclk_vddc)
+ ps->performance_levels[i].sclk = max_sclk_vddc;
+ }
+ if (max_mclk_vddci) {
+ if (ps->performance_levels[i].mclk > max_mclk_vddci)
+ ps->performance_levels[i].mclk = max_mclk_vddci;
+ }
+ if (max_mclk_vddc) {
+ if (ps->performance_levels[i].mclk > max_mclk_vddc)
+ ps->performance_levels[i].mclk = max_mclk_vddc;
+ }
+ }
+
/* XXX validate the min clocks required for display */
if (disable_mclk_switching) {
@@ -6075,12 +6099,6 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
- ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
- if (ret) {
- DRM_ERROR("si_dpm_force_performance_level failed\n");
- return ret;
- }
-
si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
RADEON_CG_BLOCK_MC |
RADEON_CG_BLOCK_SDMA |
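
The si_apply_state_adjust_rules() hunk above caps each performance level's engine and memory clock at the maxima returned by the voltage dependency tables, where a returned value of 0 means no cap. A simplified sketch of that clamp, with a hypothetical level struct standing in for the radeon power-state types:

#include <linux/types.h>

struct level {
	u32 sclk;
	u32 mclk;
};

static void clamp_levels(struct level *levels, unsigned int count,
			 u32 max_sclk, u32 max_mclk)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		/* a maximum of 0 means the table imposes no cap */
		if (max_sclk && levels[i].sclk > max_sclk)
			levels[i].sclk = max_sclk;
		if (max_mclk && levels[i].mclk > max_mclk)
			levels[i].mclk = max_mclk;
	}
}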
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
index 5f524c0a541e..d422a1cbf727 100644
--- a/drivers/gpu/drm/radeon/si_smc.c
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -29,8 +29,8 @@
#include "ppsmc.h"
#include "radeon_ucode.h"
-int si_set_smc_sram_address(struct radeon_device *rdev,
- u32 smc_address, u32 limit)
+static int si_set_smc_sram_address(struct radeon_device *rdev,
+ u32 smc_address, u32 limit)
{
if (smc_address & 3)
return -EINVAL;
@@ -47,7 +47,8 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
u32 smc_start_address,
const u8 *src, u32 byte_count, u32 limit)
{
- int ret;
+ unsigned long flags;
+ int ret = 0;
u32 data, original_data, addr, extra_shift;
if (smc_start_address & 3)
@@ -57,13 +58,14 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
addr = smc_start_address;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
while (byte_count >= 4) {
/* SMC address space is BE */
data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
ret = si_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
WREG32(SMC_IND_DATA_0, data);
@@ -78,7 +80,7 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
ret = si_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
original_data = RREG32(SMC_IND_DATA_0);
@@ -96,11 +98,15 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
ret = si_set_smc_sram_address(rdev, addr, limit);
if (ret)
- return ret;
+ goto done;
WREG32(SMC_IND_DATA_0, data);
}
- return 0;
+
+done:
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+ return ret;
}
void si_start_smc(struct radeon_device *rdev)
@@ -203,6 +209,7 @@ PPSMC_Result si_wait_for_smc_inactive(struct radeon_device *rdev)
int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
{
+ unsigned long flags;
u32 ucode_start_address;
u32 ucode_size;
const u8 *src;
@@ -241,6 +248,7 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
return -EINVAL;
src = (const u8 *)rdev->smc_fw->data;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(SMC_IND_INDEX_0, ucode_start_address);
WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
while (ucode_size >= 4) {
@@ -253,6 +261,7 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
ucode_size -= 4;
}
WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
return 0;
}
@@ -260,25 +269,29 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
u32 *value, u32 limit)
{
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = si_set_smc_sram_address(rdev, smc_address, limit);
- if (ret)
- return ret;
+ if (ret == 0)
+ *value = RREG32(SMC_IND_DATA_0);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
- *value = RREG32(SMC_IND_DATA_0);
- return 0;
+ return ret;
}
int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
u32 value, u32 limit)
{
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&rdev->smc_idx_lock, flags);
ret = si_set_smc_sram_address(rdev, smc_address, limit);
- if (ret)
- return ret;
+ if (ret == 0)
+ WREG32(SMC_IND_DATA_0, value);
+ spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
- WREG32(SMC_IND_DATA_0, value);
- return 0;
+ return ret;
}
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 864761c0120e..96ea6db8bf57 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1319,8 +1319,6 @@ int sumo_dpm_set_power_state(struct radeon_device *rdev)
if (pi->enable_dpm)
sumo_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
- rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index b07b7b8f1aff..7f998bf1cc9d 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1068,6 +1068,17 @@ static void trinity_update_requested_ps(struct radeon_device *rdev,
pi->requested_rps.ps_priv = &pi->requested_ps;
}
+void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
+{
+ struct trinity_power_info *pi = trinity_get_pi(rdev);
+
+ if (pi->enable_bapm) {
+ trinity_acquire_mutex(rdev);
+ trinity_dpm_bapm_enable(rdev, enable);
+ trinity_release_mutex(rdev);
+ }
+}
+
int trinity_dpm_enable(struct radeon_device *rdev)
{
struct trinity_power_info *pi = trinity_get_pi(rdev);
@@ -1091,6 +1102,7 @@ int trinity_dpm_enable(struct radeon_device *rdev)
trinity_program_sclk_dpm(rdev);
trinity_start_dpm(rdev);
trinity_wait_for_dpm_enabled(rdev);
+ trinity_dpm_bapm_enable(rdev, false);
trinity_release_mutex(rdev);
if (rdev->irq.installed &&
@@ -1116,6 +1128,7 @@ void trinity_dpm_disable(struct radeon_device *rdev)
trinity_release_mutex(rdev);
return;
}
+ trinity_dpm_bapm_enable(rdev, false);
trinity_disable_clock_power_gating(rdev);
sumo_clear_vc(rdev);
trinity_wait_for_level_0(rdev);
@@ -1212,6 +1225,8 @@ int trinity_dpm_set_power_state(struct radeon_device *rdev)
trinity_acquire_mutex(rdev);
if (pi->enable_dpm) {
+ if (pi->enable_bapm)
+ trinity_dpm_bapm_enable(rdev, rdev->pm.dpm.ac_power);
trinity_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
trinity_enable_power_level_0(rdev);
trinity_force_level_0(rdev);
@@ -1221,7 +1236,6 @@ int trinity_dpm_set_power_state(struct radeon_device *rdev)
trinity_force_level_0(rdev);
trinity_unforce_levels(rdev);
trinity_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
- rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
}
trinity_release_mutex(rdev);
@@ -1854,6 +1868,7 @@ int trinity_dpm_init(struct radeon_device *rdev)
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT;
+ pi->enable_bapm = true;
pi->enable_nbps_policy = true;
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.h b/drivers/gpu/drm/radeon/trinity_dpm.h
index e82df071f8b3..c261657750ca 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.h
+++ b/drivers/gpu/drm/radeon/trinity_dpm.h
@@ -108,6 +108,7 @@ struct trinity_power_info {
bool enable_auto_thermal_throttling;
bool enable_dpm;
bool enable_sclk_ds;
+ bool enable_bapm;
bool uvd_dpm;
struct radeon_ps current_rps;
struct trinity_ps current_ps;
@@ -118,6 +119,7 @@ struct trinity_power_info {
#define TRINITY_AT_DFLT 30
/* trinity_smc.c */
+int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable);
int trinity_dpm_config(struct radeon_device *rdev, bool enable);
int trinity_uvd_dpm_config(struct radeon_device *rdev);
int trinity_dpm_force_state(struct radeon_device *rdev, u32 n);
diff --git a/drivers/gpu/drm/radeon/trinity_smc.c b/drivers/gpu/drm/radeon/trinity_smc.c
index a42d89f1830c..9672bcbc7312 100644
--- a/drivers/gpu/drm/radeon/trinity_smc.c
+++ b/drivers/gpu/drm/radeon/trinity_smc.c
@@ -56,6 +56,14 @@ static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id)
return 0;
}
+int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable)
+{
+ if (enable)
+ return trinity_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
+ else
+ return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
+}
+
int trinity_dpm_config(struct radeon_device *rdev, bool enable)
{
if (enable)
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 7266805d9786..3100fa9cb52f 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -212,8 +212,8 @@ int uvd_v1_0_start(struct radeon_device *rdev)
/* enable VCPU clock */
WREG32(UVD_VCPU_CNTL, 1 << 9);
- /* enable UMC */
- WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+ /* enable UMC and NC0 */
+ WREG32_P(UVD_LMI_CTRL2, 1 << 13, ~((1 << 8) | (1 << 13)));
/* boot up the VCPU */
WREG32(UVD_SOFT_RESET, 0);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 58a5f3261c0b..a868176c258a 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -218,7 +218,7 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
uint32_t key)
{
struct ttm_object_device *tdev = tfile->tdev;
- struct ttm_base_object *base;
+ struct ttm_base_object *uninitialized_var(base);
struct drm_hash_item *hash;
int ret;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 5e93a52d4f2c..210d50365162 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -170,7 +170,7 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
ttm_tt_unbind(ttm);
}
- if (likely(ttm->pages != NULL)) {
+ if (ttm->state == tt_unbound) {
ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8dbe9d0ae9a7..8bf646183bac 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -97,7 +97,6 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
switch (ret) {
case -EAGAIN:
- set_need_resched();
case 0:
case -ERESTARTSYS:
return VM_FAULT_NOPAGE;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index ae88a97f976e..b8470b1a10fe 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -94,7 +94,6 @@ EXPORT_SYMBOL_GPL(hid_register_report);
static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values)
{
struct hid_field *field;
- int i;
if (report->maxfield == HID_MAX_FIELDS) {
hid_err(report->device, "too many fields in report\n");
@@ -113,9 +112,6 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
field->value = (s32 *)(field->usage + usages);
field->report = report;
- for (i = 0; i < usages; i++)
- field->usage[i].usage_index = i;
-
return field;
}
@@ -226,9 +222,9 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
{
struct hid_report *report;
struct hid_field *field;
- int usages;
+ unsigned usages;
unsigned offset;
- int i;
+ unsigned i;
report = hid_register_report(parser->device, report_type, parser->global.report_id);
if (!report) {
@@ -255,7 +251,8 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
if (!parser->local.usage_index) /* Ignore padding fields */
return 0;
- usages = max_t(int, parser->local.usage_index, parser->global.report_count);
+ usages = max_t(unsigned, parser->local.usage_index,
+ parser->global.report_count);
field = hid_register_field(report, usages, parser->global.report_count);
if (!field)
@@ -266,13 +263,14 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
field->application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
for (i = 0; i < usages; i++) {
- int j = i;
+ unsigned j = i;
/* Duplicate the last usage we parsed if we have excess values */
if (i >= parser->local.usage_index)
j = parser->local.usage_index - 1;
field->usage[i].hid = parser->local.usage[j];
field->usage[i].collection_index =
parser->local.collection_index[j];
+ field->usage[i].usage_index = i;
}
field->maxusage = usages;
@@ -801,6 +799,64 @@ int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size)
}
EXPORT_SYMBOL_GPL(hid_parse_report);
+static const char * const hid_report_names[] = {
+ "HID_INPUT_REPORT",
+ "HID_OUTPUT_REPORT",
+ "HID_FEATURE_REPORT",
+};
+/**
+ * hid_validate_values - validate existing device report's value indexes
+ *
+ * @hid: hid device
+ * @type: which report type to examine
+ * @id: which report ID to examine (0 for first)
+ * @field_index: which report field to examine
+ * @report_counts: expected number of values
+ *
+ * Validate the number of values in a given field of a given report, after
+ * parsing.
+ */
+struct hid_report *hid_validate_values(struct hid_device *hid,
+ unsigned int type, unsigned int id,
+ unsigned int field_index,
+ unsigned int report_counts)
+{
+ struct hid_report *report;
+
+ if (type > HID_FEATURE_REPORT) {
+ hid_err(hid, "invalid HID report type %u\n", type);
+ return NULL;
+ }
+
+ if (id >= HID_MAX_IDS) {
+ hid_err(hid, "invalid HID report id %u\n", id);
+ return NULL;
+ }
+
+ /*
+ * Explicitly not using hid_get_report() here since it depends on
+ * ->numbered being checked, which may not always be the case when
+ * drivers go to access report values.
+ */
+ report = hid->report_enum[type].report_id_hash[id];
+ if (!report) {
+ hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
+ return NULL;
+ }
+ if (report->maxfield <= field_index) {
+ hid_err(hid, "not enough fields in %s %u\n",
+ hid_report_names[type], id);
+ return NULL;
+ }
+ if (report->field[field_index]->report_count < report_counts) {
+ hid_err(hid, "not enough values in %s %u field %u\n",
+ hid_report_names[type], id, field_index);
+ return NULL;
+ }
+ return report;
+}
+EXPORT_SYMBOL_GPL(hid_validate_values);
+
/**
* hid_open_report - open a driver-specific device report
*
@@ -1296,7 +1352,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
goto out;
}
- if (hid->claimed != HID_CLAIMED_HIDRAW) {
+ if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
for (a = 0; a < report->maxfield; a++)
hid_input_field(hid, report->field[a], cdata, interrupt);
hdrv = hid->driver;
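
hid_validate_values() added above is the helper the lg*ff, zpff, sony, steelseries and logitech-dj hunks below switch to: the caller names a report type, report ID, field index and minimum value count, and gets the report back only if all of them exist. A usage sketch of a driver-side caller, assuming the matching declaration in include/linux/hid.h that the same series adds:

#include <linux/hid.h>
#include <linux/errno.h>

/* Reject the device unless output report 0, field 0 carries at least
 * seven values -- the same check the converted lg*ff init paths use. */
static int example_ff_init(struct hid_device *hid)
{
	struct hid_report *report;

	report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
	if (!report)
		return -ENODEV;

	/* report->field[0]->value[0..6] is now known to be addressable */
	return 0;
}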
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index b420f4a0fd28..8741d953dcc8 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -485,6 +485,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
if (field->flags & HID_MAIN_ITEM_CONSTANT)
goto ignore;
+ /* Ignore if report count is out of bounds. */
+ if (field->report_count < 1)
+ goto ignore;
+
/* only LED usages are supported in output fields */
if (field->report_type == HID_OUTPUT_REPORT &&
(usage->hid & HID_USAGE_PAGE) != HID_UP_LED) {
@@ -1236,7 +1240,11 @@ static void report_features(struct hid_device *hid)
rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
list_for_each_entry(rep, &rep_enum->report_list, list)
- for (i = 0; i < rep->maxfield; i++)
+ for (i = 0; i < rep->maxfield; i++) {
+ /* Ignore if report count is out of bounds. */
+ if (rep->field[i]->report_count < 1)
+ continue;
+
for (j = 0; j < rep->field[i]->maxusage; j++) {
/* Verify if Battery Strength feature is available */
hidinput_setup_battery(hid, HID_FEATURE_REPORT, rep->field[i]);
@@ -1245,6 +1253,7 @@ static void report_features(struct hid_device *hid)
drv->feature_mapping(hid, rep->field[i],
rep->field[i]->usage + j);
}
+ }
}
static struct hid_input *hidinput_allocate(struct hid_device *hid)
diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c
index 07837f5a4eb8..31cf29a6ba17 100644
--- a/drivers/hid/hid-lenovo-tpkbd.c
+++ b/drivers/hid/hid-lenovo-tpkbd.c
@@ -339,7 +339,15 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
struct tpkbd_data_pointer *data_pointer;
size_t name_sz = strlen(dev_name(dev)) + 16;
char *name_mute, *name_micmute;
- int ret;
+ int i, ret;
+
+ /* Validate required reports. */
+ for (i = 0; i < 4; i++) {
+ if (!hid_validate_values(hdev, HID_FEATURE_REPORT, 4, i, 1))
+ return -ENODEV;
+ }
+ if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 3, 0, 2))
+ return -ENODEV;
if (sysfs_create_group(&hdev->dev.kobj,
&tpkbd_attr_group_pointer)) {
@@ -406,22 +414,27 @@ static int tpkbd_probe(struct hid_device *hdev,
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "hid_parse failed\n");
- goto err_free;
+ goto err;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "hid_hw_start failed\n");
- goto err_free;
+ goto err;
}
uhdev = (struct usbhid_device *) hdev->driver_data;
- if (uhdev->ifnum == 1)
- return tpkbd_probe_tp(hdev);
+ if (uhdev->ifnum == 1) {
+ ret = tpkbd_probe_tp(hdev);
+ if (ret)
+ goto err_hid;
+ }
return 0;
-err_free:
+err_hid:
+ hid_hw_stop(hdev);
+err:
return ret;
}
diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
index b3cd1507dda2..1a42eaa6ca02 100644
--- a/drivers/hid/hid-lg2ff.c
+++ b/drivers/hid/hid-lg2ff.c
@@ -64,26 +64,13 @@ int lg2ff_init(struct hid_device *hid)
struct hid_report *report;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
- struct list_head *report_list =
- &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
int error;
- if (list_empty(report_list)) {
- hid_err(hid, "no output report found\n");
+ /* Check that the report looks ok */
+ report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
+ if (!report)
return -ENODEV;
- }
-
- report = list_entry(report_list->next, struct hid_report, list);
-
- if (report->maxfield < 1) {
- hid_err(hid, "output report is empty\n");
- return -ENODEV;
- }
- if (report->field[0]->report_count < 7) {
- hid_err(hid, "not enough values in the field\n");
- return -ENODEV;
- }
lg2ff = kmalloc(sizeof(struct lg2ff_device), GFP_KERNEL);
if (!lg2ff)
diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c
index e52f181f6aa1..8c2da183d3bc 100644
--- a/drivers/hid/hid-lg3ff.c
+++ b/drivers/hid/hid-lg3ff.c
@@ -66,10 +66,11 @@ static int hid_lg3ff_play(struct input_dev *dev, void *data,
int x, y;
/*
- * Maxusage should always be 63 (maximum fields)
- * likely a better way to ensure this data is clean
+ * Available values in the field should always be 63, but we only use up to
+ * 35. Instead, clear the entire area, however big it is.
*/
- memset(report->field[0]->value, 0, sizeof(__s32)*report->field[0]->maxusage);
+ memset(report->field[0]->value, 0,
+ sizeof(__s32) * report->field[0]->report_count);
switch (effect->type) {
case FF_CONSTANT:
@@ -129,32 +130,14 @@ static const signed short ff3_joystick_ac[] = {
int lg3ff_init(struct hid_device *hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
- struct hid_report *report;
- struct hid_field *field;
const signed short *ff_bits = ff3_joystick_ac;
int error;
int i;
- /* Find the report to use */
- if (list_empty(report_list)) {
- hid_err(hid, "No output report found\n");
- return -1;
- }
-
/* Check that the report looks ok */
- report = list_entry(report_list->next, struct hid_report, list);
- if (!report) {
- hid_err(hid, "NULL output report\n");
- return -1;
- }
-
- field = report->field[0];
- if (!field) {
- hid_err(hid, "NULL field\n");
- return -1;
- }
+ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35))
+ return -ENODEV;
/* Assume single fixed device G940 */
for (i = 0; ff_bits[i] >= 0; i++)
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 0ddae2a00d59..8782fe1aaa07 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -484,34 +484,16 @@ static enum led_brightness lg4ff_led_get_brightness(struct led_classdev *led_cde
int lg4ff_init(struct hid_device *hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
- struct hid_report *report;
- struct hid_field *field;
struct lg4ff_device_entry *entry;
struct lg_drv_data *drv_data;
struct usb_device_descriptor *udesc;
int error, i, j;
__u16 bcdDevice, rev_maj, rev_min;
- /* Find the report to use */
- if (list_empty(report_list)) {
- hid_err(hid, "No output report found\n");
- return -1;
- }
-
/* Check that the report looks ok */
- report = list_entry(report_list->next, struct hid_report, list);
- if (!report) {
- hid_err(hid, "NULL output report\n");
+ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
return -1;
- }
-
- field = report->field[0];
- if (!field) {
- hid_err(hid, "NULL field\n");
- return -1;
- }
/* Check what wheel has been connected */
for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
index d7ea8c845b40..e1394af0ae7b 100644
--- a/drivers/hid/hid-lgff.c
+++ b/drivers/hid/hid-lgff.c
@@ -128,27 +128,14 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
int lgff_init(struct hid_device* hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
- struct hid_report *report;
- struct hid_field *field;
const signed short *ff_bits = ff_joystick;
int error;
int i;
- /* Find the report to use */
- if (list_empty(report_list)) {
- hid_err(hid, "No output report found\n");
- return -1;
- }
-
/* Check that the report looks ok */
- report = list_entry(report_list->next, struct hid_report, list);
- field = report->field[0];
- if (!field) {
- hid_err(hid, "NULL field\n");
- return -1;
- }
+ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
+ return -ENODEV;
for (i = 0; i < ARRAY_SIZE(devices); i++) {
if (dev->id.vendor == devices[i].idVendor &&
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 7800b1410562..2e5302462efb 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -461,7 +461,7 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
struct hid_report *report;
struct hid_report_enum *output_report_enum;
u8 *data = (u8 *)(&dj_report->device_index);
- int i;
+ unsigned int i;
output_report_enum = &hdev->report_enum[HID_OUTPUT_REPORT];
report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
@@ -471,7 +471,7 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
return -ENODEV;
}
- for (i = 0; i < report->field[0]->report_count; i++)
+ for (i = 0; i < DJREPORT_SHORT_LENGTH - 1; i++)
report->field[0]->value[i] = data[i];
hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
@@ -791,6 +791,12 @@ static int logi_dj_probe(struct hid_device *hdev,
goto hid_parse_fail;
}
+ if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, REPORT_ID_DJ_SHORT,
+ 0, DJREPORT_SHORT_LENGTH - 1)) {
+ retval = -ENODEV;
+ goto hid_parse_fail;
+ }
+
/* Starts the usb device and connects to upper interfaces hiddev and
* hidraw */
retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index ac28f08c3866..5e5fe1b8eebb 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -101,9 +101,9 @@ struct mt_device {
unsigned last_slot_field; /* the last field of a slot */
unsigned mt_report_id; /* the report ID of the multitouch device */
unsigned pen_report_id; /* the report ID of the pen device */
- __s8 inputmode; /* InputMode HID feature, -1 if non-existent */
- __s8 inputmode_index; /* InputMode HID feature index in the report */
- __s8 maxcontact_report_id; /* Maximum Contact Number HID feature,
+ __s16 inputmode; /* InputMode HID feature, -1 if non-existent */
+ __s16 inputmode_index; /* InputMode HID feature index in the report */
+ __s16 maxcontact_report_id; /* Maximum Contact Number HID feature,
-1 if non-existent */
__u8 num_received; /* how many contacts we received */
__u8 num_expected; /* expected last contact index */
@@ -312,20 +312,18 @@ static void mt_feature_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage)
{
struct mt_device *td = hid_get_drvdata(hdev);
- int i;
switch (usage->hid) {
case HID_DG_INPUTMODE:
- td->inputmode = field->report->id;
- td->inputmode_index = 0; /* has to be updated below */
-
- for (i=0; i < field->maxusage; i++) {
- if (field->usage[i].hid == usage->hid) {
- td->inputmode_index = i;
- break;
- }
+ /* Ignore if value index is out of bounds. */
+ if (usage->usage_index >= field->report_count) {
+ dev_err(&hdev->dev, "HID_DG_INPUTMODE out of range\n");
+ break;
}
+ td->inputmode = field->report->id;
+ td->inputmode_index = usage->usage_index;
+
break;
case HID_DG_CONTACTMAX:
td->maxcontact_report_id = field->report->id;
@@ -511,6 +509,10 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
mt_store_field(usage, td, hi);
return 1;
case HID_DG_CONTACTCOUNT:
+ /* Ignore if indexes are out of bounds. */
+ if (field->index >= field->report->maxfield ||
+ usage->usage_index >= field->report_count)
+ return 1;
td->cc_index = field->index;
td->cc_value_index = usage->usage_index;
return 1;
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 30dbb6b40bbf..b18320db5f7d 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -537,6 +537,10 @@ static int buzz_init(struct hid_device *hdev)
drv_data = hid_get_drvdata(hdev);
BUG_ON(!(drv_data->quirks & BUZZ_CONTROLLER));
+ /* Validate expected report characteristics. */
+ if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 7))
+ return -ENODEV;
+
buzz = kzalloc(sizeof(*buzz), GFP_KERNEL);
if (!buzz) {
hid_err(hdev, "Insufficient memory, cannot allocate driver data\n");
diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
index d16491192112..29f328f411fb 100644
--- a/drivers/hid/hid-steelseries.c
+++ b/drivers/hid/hid-steelseries.c
@@ -249,6 +249,11 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
goto err_free;
}
+ if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 16)) {
+ ret = -ENODEV;
+ goto err_free;
+ }
+
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "hw start failed\n");
diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
index 6ec28a37c146..a29756c6ca02 100644
--- a/drivers/hid/hid-zpff.c
+++ b/drivers/hid/hid-zpff.c
@@ -68,21 +68,13 @@ static int zpff_init(struct hid_device *hid)
struct hid_report *report;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
- struct list_head *report_list =
- &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
- int error;
+ int i, error;
- if (list_empty(report_list)) {
- hid_err(hid, "no output report found\n");
- return -ENODEV;
- }
-
- report = list_entry(report_list->next, struct hid_report, list);
-
- if (report->maxfield < 4) {
- hid_err(hid, "not enough fields in report\n");
- return -ENODEV;
+ for (i = 0; i < 4; i++) {
+ report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1);
+ if (!report)
+ return -ENODEV;
}
zpff = kzalloc(sizeof(struct zpff_device), GFP_KERNEL);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 8f4743ab5fb2..936093e0271e 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -195,7 +195,7 @@ int vmbus_connect(void)
do {
ret = vmbus_negotiate_version(msginfo, version);
- if (ret)
+ if (ret == -ETIMEDOUT)
goto cleanup;
if (vmbus_connection.conn_state == CONNECTED)
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 28b03325b872..09988b289622 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -32,13 +32,17 @@
/*
* Pre win8 version numbers used in ws2008 and ws 2008 r2 (win7)
*/
+#define WS2008_SRV_MAJOR 1
+#define WS2008_SRV_MINOR 0
+#define WS2008_SRV_VERSION (WS2008_SRV_MAJOR << 16 | WS2008_SRV_MINOR)
+
#define WIN7_SRV_MAJOR 3
#define WIN7_SRV_MINOR 0
-#define WIN7_SRV_MAJOR_MINOR (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR)
+#define WIN7_SRV_VERSION (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR)
#define WIN8_SRV_MAJOR 4
#define WIN8_SRV_MINOR 0
-#define WIN8_SRV_MAJOR_MINOR (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
+#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
/*
* Global state maintained for transaction that is being processed.
@@ -587,6 +591,8 @@ void hv_kvp_onchannelcallback(void *context)
struct icmsg_hdr *icmsghdrp;
struct icmsg_negotiate *negop = NULL;
+ int util_fw_version;
+ int kvp_srv_version;
if (kvp_transaction.active) {
/*
@@ -606,17 +612,26 @@ void hv_kvp_onchannelcallback(void *context)
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
/*
- * We start with win8 version and if the host cannot
- * support that we use the previous version.
+ * Based on the host, select appropriate
+ * framework and service versions we will
+ * negotiate.
*/
- if (vmbus_prep_negotiate_resp(icmsghdrp, negop,
- recv_buffer, UTIL_FW_MAJOR_MINOR,
- WIN8_SRV_MAJOR_MINOR))
- goto done;
-
+ switch (vmbus_proto_version) {
+ case (VERSION_WS2008):
+ util_fw_version = UTIL_WS2K8_FW_VERSION;
+ kvp_srv_version = WS2008_SRV_VERSION;
+ break;
+ case (VERSION_WIN7):
+ util_fw_version = UTIL_FW_VERSION;
+ kvp_srv_version = WIN7_SRV_VERSION;
+ break;
+ default:
+ util_fw_version = UTIL_FW_VERSION;
+ kvp_srv_version = WIN8_SRV_VERSION;
+ }
vmbus_prep_negotiate_resp(icmsghdrp, negop,
- recv_buffer, UTIL_FW_MAJOR_MINOR,
- WIN7_SRV_MAJOR_MINOR);
+ recv_buffer, util_fw_version,
+ kvp_srv_version);
} else {
kvp_msg = (struct hv_kvp_msg *)&recv_buffer[
@@ -649,7 +664,6 @@ void hv_kvp_onchannelcallback(void *context)
return;
}
-done:
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
| ICMSGHDRFLAG_RESPONSE;
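
Both the hv_kvp hunk above and the hv_util changes below pick the framework and service versions to negotiate from vmbus_proto_version instead of always offering the Win8 numbers first. A sketch of that selection, taking vmbus_proto_version, the VERSION_* identifiers and the UTIL_*/WS2008/WIN7/WIN8 version macros as given from the hv headers and this diff:

/* Illustrative helper only; not part of the driver. */
static void kvp_pick_versions(int *fw_version, int *srv_version)
{
	switch (vmbus_proto_version) {
	case VERSION_WS2008:
		*fw_version = UTIL_WS2K8_FW_VERSION;
		*srv_version = WS2008_SRV_VERSION;
		break;
	case VERSION_WIN7:
		*fw_version = UTIL_FW_VERSION;
		*srv_version = WIN7_SRV_VERSION;
		break;
	default:				/* Win8 and newer hosts */
		*fw_version = UTIL_FW_VERSION;
		*srv_version = WIN8_SRV_VERSION;
	}
}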
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index e4572f3f2834..0c3546224376 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -26,7 +26,7 @@
#define VSS_MAJOR 5
#define VSS_MINOR 0
-#define VSS_MAJOR_MINOR (VSS_MAJOR << 16 | VSS_MINOR)
+#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
@@ -190,8 +190,8 @@ void hv_vss_onchannelcallback(void *context)
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
vmbus_prep_negotiate_resp(icmsghdrp, negop,
- recv_buffer, UTIL_FW_MAJOR_MINOR,
- VSS_MAJOR_MINOR);
+ recv_buffer, UTIL_FW_VERSION,
+ VSS_VERSION);
} else {
vss_msg = (struct hv_vss_msg *)&recv_buffer[
sizeof(struct vmbuspipe_hdr) +
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index cb82233541b1..273e3ddb3a20 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -28,17 +28,32 @@
#include <linux/reboot.h>
#include <linux/hyperv.h>
-#define SHUTDOWN_MAJOR 3
-#define SHUTDOWN_MINOR 0
-#define SHUTDOWN_MAJOR_MINOR (SHUTDOWN_MAJOR << 16 | SHUTDOWN_MINOR)
-#define TIMESYNCH_MAJOR 3
-#define TIMESYNCH_MINOR 0
-#define TIMESYNCH_MAJOR_MINOR (TIMESYNCH_MAJOR << 16 | TIMESYNCH_MINOR)
+#define SD_MAJOR 3
+#define SD_MINOR 0
+#define SD_VERSION (SD_MAJOR << 16 | SD_MINOR)
-#define HEARTBEAT_MAJOR 3
-#define HEARTBEAT_MINOR 0
-#define HEARTBEAT_MAJOR_MINOR (HEARTBEAT_MAJOR << 16 | HEARTBEAT_MINOR)
+#define SD_WS2008_MAJOR 1
+#define SD_WS2008_VERSION (SD_WS2008_MAJOR << 16 | SD_MINOR)
+
+#define TS_MAJOR 3
+#define TS_MINOR 0
+#define TS_VERSION (TS_MAJOR << 16 | TS_MINOR)
+
+#define TS_WS2008_MAJOR 1
+#define TS_WS2008_VERSION (TS_WS2008_MAJOR << 16 | TS_MINOR)
+
+#define HB_MAJOR 3
+#define HB_MINOR 0
+#define HB_VERSION (HB_MAJOR << 16 | HB_MINOR)
+
+#define HB_WS2008_MAJOR 1
+#define HB_WS2008_VERSION (HB_WS2008_MAJOR << 16 | HB_MINOR)
+
+static int sd_srv_version;
+static int ts_srv_version;
+static int hb_srv_version;
+static int util_fw_version;
static void shutdown_onchannelcallback(void *context);
static struct hv_util_service util_shutdown = {
@@ -99,8 +114,8 @@ static void shutdown_onchannelcallback(void *context)
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
vmbus_prep_negotiate_resp(icmsghdrp, negop,
- shut_txf_buf, UTIL_FW_MAJOR_MINOR,
- SHUTDOWN_MAJOR_MINOR);
+ shut_txf_buf, util_fw_version,
+ sd_srv_version);
} else {
shutdown_msg =
(struct shutdown_msg_data *)&shut_txf_buf[
@@ -216,6 +231,7 @@ static void timesync_onchannelcallback(void *context)
struct icmsg_hdr *icmsghdrp;
struct ictimesync_data *timedatap;
u8 *time_txf_buf = util_timesynch.recv_buffer;
+ struct icmsg_negotiate *negop = NULL;
vmbus_recvpacket(channel, time_txf_buf,
PAGE_SIZE, &recvlen, &requestid);
@@ -225,9 +241,10 @@ static void timesync_onchannelcallback(void *context)
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- vmbus_prep_negotiate_resp(icmsghdrp, NULL, time_txf_buf,
- UTIL_FW_MAJOR_MINOR,
- TIMESYNCH_MAJOR_MINOR);
+ vmbus_prep_negotiate_resp(icmsghdrp, negop,
+ time_txf_buf,
+ util_fw_version,
+ ts_srv_version);
} else {
timedatap = (struct ictimesync_data *)&time_txf_buf[
sizeof(struct vmbuspipe_hdr) +
@@ -257,6 +274,7 @@ static void heartbeat_onchannelcallback(void *context)
struct icmsg_hdr *icmsghdrp;
struct heartbeat_msg_data *heartbeat_msg;
u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
+ struct icmsg_negotiate *negop = NULL;
vmbus_recvpacket(channel, hbeat_txf_buf,
PAGE_SIZE, &recvlen, &requestid);
@@ -266,9 +284,9 @@ static void heartbeat_onchannelcallback(void *context)
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
- vmbus_prep_negotiate_resp(icmsghdrp, NULL,
- hbeat_txf_buf, UTIL_FW_MAJOR_MINOR,
- HEARTBEAT_MAJOR_MINOR);
+ vmbus_prep_negotiate_resp(icmsghdrp, negop,
+ hbeat_txf_buf, util_fw_version,
+ hb_srv_version);
} else {
heartbeat_msg =
(struct heartbeat_msg_data *)&hbeat_txf_buf[
@@ -321,6 +339,25 @@ static int util_probe(struct hv_device *dev,
goto error;
hv_set_drvdata(dev, srv);
+ /*
+ * Based on the host, initialize the framework and
+ * service version numbers we will negotiate.
+ */
+ switch (vmbus_proto_version) {
+ case (VERSION_WS2008):
+ util_fw_version = UTIL_WS2K8_FW_VERSION;
+ sd_srv_version = SD_WS2008_VERSION;
+ ts_srv_version = TS_WS2008_VERSION;
+ hb_srv_version = HB_WS2008_VERSION;
+ break;
+
+ default:
+ util_fw_version = UTIL_FW_VERSION;
+ sd_srv_version = SD_VERSION;
+ ts_srv_version = TS_VERSION;
+ hb_srv_version = HB_VERSION;
+ }
+
return 0;
error:
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 62c2e32e25ef..98814d12a604 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -525,16 +525,25 @@ static int applesmc_init_smcreg_try(void)
{
struct applesmc_registers *s = &smcreg;
bool left_light_sensor, right_light_sensor;
+ unsigned int count;
u8 tmp[1];
int ret;
if (s->init_complete)
return 0;
- ret = read_register_count(&s->key_count);
+ ret = read_register_count(&count);
if (ret)
return ret;
+ if (s->cache && s->key_count != count) {
+ pr_warn("key count changed from %d to %d\n",
+ s->key_count, count);
+ kfree(s->cache);
+ s->cache = NULL;
+ }
+ s->key_count = count;
+
if (!s->cache)
s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL);
if (!s->cache)
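
The applesmc hunk above re-reads the SMC key count on every init attempt and drops the cached key table if the count changed since it was allocated. A small sketch of that invalidate-and-reallocate step, with a generic entry type standing in for the driver's cache entries:

#include <linux/slab.h>
#include <linux/errno.h>

struct entry { char key[5]; };	/* stand-in for the driver's cache entry type */

static int refresh_cache(struct entry **cache, unsigned int *cached_count,
			 unsigned int new_count)
{
	if (*cache && *cached_count != new_count) {
		/* key count changed: the old cache is sized wrongly, drop it */
		kfree(*cache);
		*cache = NULL;
	}
	*cached_count = new_count;

	if (!*cache)
		*cache = kcalloc(new_count, sizeof(**cache), GFP_KERNEL);

	return *cache ? 0 : -ENOMEM;
}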
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index dbecf08399f8..5888feef1ac5 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -98,6 +98,8 @@
#define DW_IC_ERR_TX_ABRT 0x1
+#define DW_IC_TAR_10BITADDR_MASTER BIT(12)
+
/*
* status codes
*/
@@ -388,22 +390,34 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
struct i2c_msg *msgs = dev->msgs;
- u32 ic_con;
+ u32 ic_con, ic_tar = 0;
/* Disable the adapter */
__i2c_dw_enable(dev, false);
- /* set the slave (target) address */
- dw_writel(dev, msgs[dev->msg_write_idx].addr, DW_IC_TAR);
-
/* if the slave address is ten bit address, enable 10BITADDR */
ic_con = dw_readl(dev, DW_IC_CON);
- if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
+ if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
ic_con |= DW_IC_CON_10BITADDR_MASTER;
- else
+ /*
+ * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
+ * mode has to be enabled via bit 12 of IC_TAR register.
+ * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
+ * detected from registers.
+ */
+ ic_tar = DW_IC_TAR_10BITADDR_MASTER;
+ } else {
ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
+ }
+
dw_writel(dev, ic_con, DW_IC_CON);
+ /*
+ * Set the slave (target) address and enable 10-bit addressing mode
+ * if applicable.
+ */
+ dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
+
/* Enable the adapter */
__i2c_dw_enable(dev, true);
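
The i2c-designware hunk above defers the DW_IC_TAR write until the 10-bit flag is known, so a single register write programs both the slave address and, for 10-bit targets, bit 12 (DW_IC_TAR_10BITADDR_MASTER). A tiny illustrative sketch of how that target value is composed:

#include <linux/types.h>

#define EX_TAR_10BITADDR_MASTER (1U << 12)	/* mirrors DW_IC_TAR_10BITADDR_MASTER */

static u32 compose_tar(u16 addr, bool ten_bit)
{
	u32 ic_tar = addr;

	/* always set bit 12 for 10-bit targets: harmless without
	 * I2C_DYNAMIC_TAR_UPDATE, required with it */
	if (ten_bit)
		ic_tar |= EX_TAR_10BITADDR_MASTER;

	return ic_tar;
}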
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 8ed79a086f85..1672effbcebb 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -393,6 +393,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
desc = &priv->hw[priv->head];
+ /* Initialize the DMA buffer */
+ memset(priv->dma_buffer, 0, sizeof(priv->dma_buffer));
+
/* Initialize the descriptor */
memset(desc, 0, sizeof(struct ismt_desc));
desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 7f3a47443494..d3e9cc3153a9 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -234,9 +234,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR |
(msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT;
- writel_relaxed(data_reg_lo,
+ writel(data_reg_lo,
drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO);
- writel_relaxed(data_reg_hi,
+ writel(data_reg_hi,
drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI);
} else {
@@ -697,6 +697,7 @@ static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
#ifdef CONFIG_OF
+#ifdef CONFIG_HAVE_CLK
static int
mv64xxx_calc_freq(const int tclk, const int n, const int m)
{
@@ -726,16 +727,12 @@ mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n,
return false;
return true;
}
+#endif /* CONFIG_HAVE_CLK */
static int
mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
struct device *dev)
{
- const struct of_device_id *device;
- struct device_node *np = dev->of_node;
- u32 bus_freq, tclk;
- int rc = 0;
-
/* CLK is mandatory when using DT to describe the i2c bus. We
* need to know tclk in order to calculate bus clock
* factors.
@@ -744,6 +741,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
/* Have OF but no CLK */
return -ENODEV;
#else
+ const struct of_device_id *device;
+ struct device_node *np = dev->of_node;
+ u32 bus_freq, tclk;
+ int rc = 0;
+
if (IS_ERR(drv_data->clk)) {
rc = -ENODEV;
goto out;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 3535f3c0f7b4..3747b9bf67d6 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1178,8 +1178,6 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c->adap);
- clk_disable_unprepare(i2c->clk);
-
if (pdev->dev.of_node && IS_ERR(i2c->pctrl))
s3c24xx_i2c_dt_gpio_free(i2c);
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 12e32e6b4103..81e3dc260993 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -620,7 +620,7 @@ static int bma180_remove(struct i2c_client *client)
#ifdef CONFIG_PM_SLEEP
static int bma180_suspend(struct device *dev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct bma180_data *data = iio_priv(indio_dev);
int ret;
@@ -633,7 +633,7 @@ static int bma180_suspend(struct device *dev)
static int bma180_resume(struct device *dev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct bma180_data *data = iio_priv(indio_dev);
int ret;
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 84be63bdf038..0f16b553e063 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -556,7 +556,7 @@ static const struct iio_info at91_adc_info = {
static int at91_adc_probe(struct platform_device *pdev)
{
- unsigned int prsc, mstrclk, ticks, adc_clk, shtim;
+ unsigned int prsc, mstrclk, ticks, adc_clk, adc_clk_khz, shtim;
int ret;
struct iio_dev *idev;
struct at91_adc_state *st;
@@ -649,6 +649,7 @@ static int at91_adc_probe(struct platform_device *pdev)
*/
mstrclk = clk_get_rate(st->clk);
adc_clk = clk_get_rate(st->adc_clk);
+ adc_clk_khz = adc_clk / 1000;
prsc = (mstrclk / (2 * adc_clk)) - 1;
if (!st->startup_time) {
@@ -662,15 +663,15 @@ static int at91_adc_probe(struct platform_device *pdev)
* defined in the electrical characteristics of the board, divided by 8.
* The formula thus is : Startup Time = (ticks + 1) * 8 / ADC Clock
*/
- ticks = round_up((st->startup_time * adc_clk /
- 1000000) - 1, 8) / 8;
+ ticks = round_up((st->startup_time * adc_clk_khz /
+ 1000) - 1, 8) / 8;
/*
* a minimal Sample and Hold Time is necessary for the ADC to guarantee
* the best converted final value between two channels selection
* The formula thus is : Sample and Hold Time = (shtim + 1) / ADCClock
*/
- shtim = round_up((st->sample_hold_time * adc_clk /
- 1000000) - 1, 1);
+ shtim = round_up((st->sample_hold_time * adc_clk_khz /
+ 1000) - 1, 1);
reg = AT91_ADC_PRESCAL_(prsc) & st->registers->mr_prescal_mask;
reg |= AT91_ADC_STARTUP_(ticks) & st->registers->mr_startup_mask;
diff --git a/drivers/iio/buffer_cb.c b/drivers/iio/buffer_cb.c
index 9d19ba74f22b..415f3c6efd72 100644
--- a/drivers/iio/buffer_cb.c
+++ b/drivers/iio/buffer_cb.c
@@ -41,6 +41,8 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
goto error_ret;
}
+ iio_buffer_init(&cb_buff->buffer);
+
cb_buff->private = private;
cb_buff->cb = cb;
cb_buff->buffer.access = &iio_cb_access;
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 1f4a48e6a82c..1397b6e0e414 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -37,21 +37,21 @@ struct mcp4725_data {
static int mcp4725_suspend(struct device *dev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct mcp4725_data *data = iio_priv(indio_dev);
+ struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
u8 outbuf[2];
outbuf[0] = (data->powerdown_mode + 1) << 4;
outbuf[1] = 0;
data->powerdown = true;
- return i2c_master_send(to_i2c_client(dev), outbuf, 2);
+ return i2c_master_send(data->client, outbuf, 2);
}
static int mcp4725_resume(struct device *dev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct mcp4725_data *data = iio_priv(indio_dev);
+ struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
u8 outbuf[2];
/* restore previous DAC value */
@@ -59,7 +59,7 @@ static int mcp4725_resume(struct device *dev)
outbuf[1] = data->dac_value & 0xff;
data->powerdown = false;
- return i2c_master_send(to_i2c_client(dev), outbuf, 2);
+ return i2c_master_send(data->client, outbuf, 2);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
index 05c1b74502a3..9b32253b824b 100644
--- a/drivers/iio/iio_core.h
+++ b/drivers/iio/iio_core.h
@@ -49,11 +49,15 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
#define iio_buffer_poll_addr (&iio_buffer_poll)
#define iio_buffer_read_first_n_outer_addr (&iio_buffer_read_first_n_outer)
+void iio_disable_all_buffers(struct iio_dev *indio_dev);
+
#else
#define iio_buffer_poll_addr NULL
#define iio_buffer_read_first_n_outer_addr NULL
+static inline void iio_disable_all_buffers(struct iio_dev *indio_dev) {}
+
#endif
int iio_device_register_eventset(struct iio_dev *indio_dev);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index e73033f3839a..2710f7245c3b 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -460,6 +460,25 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
return bytes;
}
+void iio_disable_all_buffers(struct iio_dev *indio_dev)
+{
+ struct iio_buffer *buffer, *_buffer;
+
+ if (list_empty(&indio_dev->buffer_list))
+ return;
+
+ if (indio_dev->setup_ops->predisable)
+ indio_dev->setup_ops->predisable(indio_dev);
+
+ list_for_each_entry_safe(buffer, _buffer,
+ &indio_dev->buffer_list, buffer_list)
+ list_del_init(&buffer->buffer_list);
+
+ indio_dev->currentmode = INDIO_DIRECT_MODE;
+ if (indio_dev->setup_ops->postdisable)
+ indio_dev->setup_ops->postdisable(indio_dev);
+}
+
int iio_update_buffers(struct iio_dev *indio_dev,
struct iio_buffer *insert_buffer,
struct iio_buffer *remove_buffer)
@@ -528,8 +547,15 @@ int iio_update_buffers(struct iio_dev *indio_dev,
* Note can only occur when adding a buffer.
*/
list_del(&insert_buffer->buffer_list);
- indio_dev->active_scan_mask = old_mask;
- success = -EINVAL;
+ if (old_mask) {
+ indio_dev->active_scan_mask = old_mask;
+ success = -EINVAL;
+ }
+ else {
+ kfree(compound_mask);
+ ret = -EINVAL;
+ goto error_ret;
+ }
}
} else {
indio_dev->active_scan_mask = compound_mask;
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 97f0297b120f..8e84cd522e49 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -848,8 +848,6 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
static void iio_dev_release(struct device *device)
{
struct iio_dev *indio_dev = dev_to_iio_dev(device);
- if (indio_dev->chrdev.dev)
- cdev_del(&indio_dev->chrdev);
if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
iio_device_unregister_trigger_consumer(indio_dev);
iio_device_unregister_eventset(indio_dev);
@@ -970,6 +968,8 @@ static int iio_chrdev_open(struct inode *inode, struct file *filp)
if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags))
return -EBUSY;
+ iio_device_get(indio_dev);
+
filp->private_data = indio_dev;
return 0;
@@ -983,6 +983,8 @@ static int iio_chrdev_release(struct inode *inode, struct file *filp)
struct iio_dev *indio_dev = container_of(inode->i_cdev,
struct iio_dev, chrdev);
clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags);
+ iio_device_put(indio_dev);
+
return 0;
}
@@ -1052,18 +1054,20 @@ int iio_device_register(struct iio_dev *indio_dev)
indio_dev->setup_ops == NULL)
indio_dev->setup_ops = &noop_ring_setup_ops;
- ret = device_add(&indio_dev->dev);
- if (ret < 0)
- goto error_unreg_eventset;
cdev_init(&indio_dev->chrdev, &iio_buffer_fileops);
indio_dev->chrdev.owner = indio_dev->info->driver_module;
+ indio_dev->chrdev.kobj.parent = &indio_dev->dev.kobj;
ret = cdev_add(&indio_dev->chrdev, indio_dev->dev.devt, 1);
if (ret < 0)
- goto error_del_device;
- return 0;
+ goto error_unreg_eventset;
-error_del_device:
- device_del(&indio_dev->dev);
+ ret = device_add(&indio_dev->dev);
+ if (ret < 0)
+ goto error_cdev_del;
+
+ return 0;
+error_cdev_del:
+ cdev_del(&indio_dev->chrdev);
error_unreg_eventset:
iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
@@ -1078,9 +1082,16 @@ EXPORT_SYMBOL(iio_device_register);
void iio_device_unregister(struct iio_dev *indio_dev)
{
mutex_lock(&indio_dev->info_exist_lock);
+
+ device_del(&indio_dev->dev);
+
+ if (indio_dev->chrdev.dev)
+ cdev_del(&indio_dev->chrdev);
+
+ iio_disable_all_buffers(indio_dev);
+
indio_dev->info = NULL;
mutex_unlock(&indio_dev->info_exist_lock);
- device_del(&indio_dev->dev);
}
EXPORT_SYMBOL(iio_device_unregister);
subsys_initcall(iio_init);
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 10aa9ef86cec..6be65ef5faa9 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -72,7 +72,8 @@ EXPORT_SYMBOL(iio_push_event);
static unsigned int iio_event_poll(struct file *filep,
struct poll_table_struct *wait)
{
- struct iio_event_interface *ev_int = filep->private_data;
+ struct iio_dev *indio_dev = filep->private_data;
+ struct iio_event_interface *ev_int = indio_dev->event_interface;
unsigned int events = 0;
poll_wait(filep, &ev_int->wait, wait);
@@ -90,7 +91,8 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
size_t count,
loff_t *f_ps)
{
- struct iio_event_interface *ev_int = filep->private_data;
+ struct iio_dev *indio_dev = filep->private_data;
+ struct iio_event_interface *ev_int = indio_dev->event_interface;
unsigned int copied;
int ret;
@@ -121,7 +123,8 @@ error_unlock:
static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
- struct iio_event_interface *ev_int = filep->private_data;
+ struct iio_dev *indio_dev = filep->private_data;
+ struct iio_event_interface *ev_int = indio_dev->event_interface;
spin_lock_irq(&ev_int->wait.lock);
__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
@@ -133,6 +136,8 @@ static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
kfifo_reset_out(&ev_int->det_events);
spin_unlock_irq(&ev_int->wait.lock);
+ iio_device_put(indio_dev);
+
return 0;
}
@@ -158,12 +163,15 @@ int iio_event_getfd(struct iio_dev *indio_dev)
return -EBUSY;
}
spin_unlock_irq(&ev_int->wait.lock);
- fd = anon_inode_getfd("iio:event",
- &iio_event_chrdev_fileops, ev_int, O_RDONLY);
+ iio_device_get(indio_dev);
+
+ fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
+ indio_dev, O_RDONLY);
if (fd < 0) {
spin_lock_irq(&ev_int->wait.lock);
__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
spin_unlock_irq(&ev_int->wait.lock);
+ iio_device_put(indio_dev);
}
return fd;
}
@@ -276,7 +284,7 @@ static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
goto error_ret;
}
if (chan->modified)
- mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
+ mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel2,
i/IIO_EV_DIR_MAX,
i%IIO_EV_DIR_MAX);
else if (chan->differential)
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index 64ccde3f1f7a..6d63883da1ab 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -255,12 +255,14 @@ static int tmp006_remove(struct i2c_client *client)
#ifdef CONFIG_PM_SLEEP
static int tmp006_suspend(struct device *dev)
{
- return tmp006_powerdown(iio_priv(dev_to_iio_dev(dev)));
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ return tmp006_powerdown(iio_priv(indio_dev));
}
static int tmp006_resume(struct device *dev)
{
- struct tmp006_data *data = iio_priv(dev_to_iio_dev(dev));
+ struct tmp006_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
return i2c_smbus_write_word_swapped(data->client, TMP006_CONFIG,
data->config | TMP006_CONFIG_MOD_MASK);
}
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 7f910c76ca0a..3c92780bda09 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2295,8 +2295,8 @@ _hfcpci_softirq(struct device *dev, void *arg)
static void
hfcpci_softirq(void *arg)
{
- (void) driver_for_each_device(&hfc_driver.driver, NULL, arg,
- _hfcpci_softirq);
+ WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, arg,
+ _hfcpci_softirq) != 0);
/* if next event would be in the past ... */
if ((s32)(hfc_jiffies + tics - jiffies) <= 0)
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
index 1063babe1d3a..36817e0a0b94 100644
--- a/drivers/isdn/hisax/amd7930_fn.c
+++ b/drivers/isdn/hisax/amd7930_fn.c
@@ -314,7 +314,7 @@ Amd7930_empty_Dfifo(struct IsdnCardState *cs, int flag)
t += sprintf(t, "Amd7930: empty_Dfifo cnt: %d |", cs->rcvidx);
QuickHex(t, cs->rcvbuf, cs->rcvidx);
- debugl1(cs, cs->dlog);
+ debugl1(cs, "%s", cs->dlog);
}
/* moves received data in sk-buffer */
memcpy(skb_put(skb, cs->rcvidx), cs->rcvbuf, cs->rcvidx);
@@ -406,7 +406,7 @@ Amd7930_fill_Dfifo(struct IsdnCardState *cs)
t += sprintf(t, "Amd7930: fill_Dfifo cnt: %d |", count);
QuickHex(t, deb_ptr, count);
- debugl1(cs, cs->dlog);
+ debugl1(cs, "%s", cs->dlog);
}
/* AMD interrupts on */
AmdIrqOn(cs);
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index ee9b9a03cffa..d1427bd6452d 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -285,7 +285,7 @@ hdlc_empty_fifo(struct BCState *bcs, int count)
t += sprintf(t, "hdlc_empty_fifo %c cnt %d",
bcs->channel ? 'B' : 'A', count);
QuickHex(t, p, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
@@ -345,7 +345,7 @@ hdlc_fill_fifo(struct BCState *bcs)
t += sprintf(t, "hdlc_fill_fifo %c cnt %d",
bcs->channel ? 'B' : 'A', count);
QuickHex(t, p, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index bf04d2a3cf4a..b33f53b3ca93 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1896,7 +1896,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
ptr--;
*ptr++ = '\n';
*ptr = 0;
- HiSax_putstatus(cs, NULL, cs->dlog);
+ HiSax_putstatus(cs, NULL, "%s", cs->dlog);
} else
HiSax_putstatus(cs, "LogEcho: ",
"warning Frame too big (%d)",
diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c
index 8d0cf6e4dc00..4fc90de68d18 100644
--- a/drivers/isdn/hisax/diva.c
+++ b/drivers/isdn/hisax/diva.c
@@ -427,7 +427,7 @@ Memhscx_empty_fifo(struct BCState *bcs, int count)
t += sprintf(t, "hscx_empty_fifo %c cnt %d",
bcs->hw.hscx.hscx ? 'B' : 'A', count);
QuickHex(t, ptr, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
@@ -469,7 +469,7 @@ Memhscx_fill_fifo(struct BCState *bcs)
t += sprintf(t, "hscx_fill_fifo %c cnt %d",
bcs->hw.hscx.hscx ? 'B' : 'A', count);
QuickHex(t, ptr, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c
index 1df6f9a56ca2..2be1c8a3bb5f 100644
--- a/drivers/isdn/hisax/elsa.c
+++ b/drivers/isdn/hisax/elsa.c
@@ -535,7 +535,7 @@ check_arcofi(struct IsdnCardState *cs)
t = tmp;
t += sprintf(tmp, "Arcofi data");
QuickHex(t, p, cs->dc.isac.mon_rxp);
- debugl1(cs, tmp);
+ debugl1(cs, "%s", tmp);
if ((cs->dc.isac.mon_rxp == 2) && (cs->dc.isac.mon_rx[0] == 0xa0)) {
switch (cs->dc.isac.mon_rx[1]) {
case 0x80:
diff --git a/drivers/isdn/hisax/elsa_ser.c b/drivers/isdn/hisax/elsa_ser.c
index d4c98d330bfe..3f84dd8f1757 100644
--- a/drivers/isdn/hisax/elsa_ser.c
+++ b/drivers/isdn/hisax/elsa_ser.c
@@ -344,7 +344,7 @@ static inline void receive_chars(struct IsdnCardState *cs,
t += sprintf(t, "modem read cnt %d", cs->hw.elsa.rcvcnt);
QuickHex(t, cs->hw.elsa.rcvbuf, cs->hw.elsa.rcvcnt);
- debugl1(cs, tmp);
+ debugl1(cs, "%s", tmp);
}
cs->hw.elsa.rcvcnt = 0;
}
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 3ccd724ff8c2..497bd026c237 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -901,7 +901,7 @@ Begin:
ptr--;
*ptr++ = '\n';
*ptr = 0;
- HiSax_putstatus(cs, NULL, cs->dlog);
+ HiSax_putstatus(cs, NULL, "%s", cs->dlog);
} else
HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3);
}
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index dc4574f735ef..fa1fefd711cd 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -674,7 +674,7 @@ receive_emsg(struct IsdnCardState *cs)
ptr--;
*ptr++ = '\n';
*ptr = 0;
- HiSax_putstatus(cs, NULL, cs->dlog);
+ HiSax_putstatus(cs, NULL, "%s", cs->dlog);
} else
HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len);
}
diff --git a/drivers/isdn/hisax/hscx_irq.c b/drivers/isdn/hisax/hscx_irq.c
index f398d4838937..a8d6188402c6 100644
--- a/drivers/isdn/hisax/hscx_irq.c
+++ b/drivers/isdn/hisax/hscx_irq.c
@@ -75,7 +75,7 @@ hscx_empty_fifo(struct BCState *bcs, int count)
t += sprintf(t, "hscx_empty_fifo %c cnt %d",
bcs->hw.hscx.hscx ? 'B' : 'A', count);
QuickHex(t, ptr, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
@@ -115,7 +115,7 @@ hscx_fill_fifo(struct BCState *bcs)
t += sprintf(t, "hscx_fill_fifo %c cnt %d",
bcs->hw.hscx.hscx ? 'B' : 'A', count);
QuickHex(t, ptr, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c
index db5321f6379b..51dae9167238 100644
--- a/drivers/isdn/hisax/icc.c
+++ b/drivers/isdn/hisax/icc.c
@@ -134,7 +134,7 @@ icc_empty_fifo(struct IsdnCardState *cs, int count)
t += sprintf(t, "icc_empty_fifo cnt %d", count);
QuickHex(t, ptr, count);
- debugl1(cs, cs->dlog);
+ debugl1(cs, "%s", cs->dlog);
}
}
@@ -176,7 +176,7 @@ icc_fill_fifo(struct IsdnCardState *cs)
t += sprintf(t, "icc_fill_fifo cnt %d", count);
QuickHex(t, ptr, count);
- debugl1(cs, cs->dlog);
+ debugl1(cs, "%s", cs->dlog);
}
}
diff --git a/drivers/isdn/hisax/ipacx.c b/drivers/isdn/hisax/ipacx.c
index 74feb5c83067..5faa5de24305 100644
--- a/drivers/isdn/hisax/ipacx.c
+++ b/drivers/isdn/hisax/ipacx.c
@@ -260,7 +260,7 @@ dch_empty_fifo(struct IsdnCardState *cs, int count)
t += sprintf(t, "dch_empty_fifo() cnt %d", count);
QuickHex(t, ptr, count);
- debugl1(cs, cs->dlog);
+ debugl1(cs, "%s", cs->dlog);
}
}
@@ -307,7 +307,7 @@ dch_fill_fifo(struct IsdnCardState *cs)
t += sprintf(t, "dch_fill_fifo() cnt %d", count);
QuickHex(t, ptr, count);
- debugl1(cs, cs->dlog);
+ debugl1(cs, "%s", cs->dlog);
}
}
@@ -539,7 +539,7 @@ bch_empty_fifo(struct BCState *bcs, int count)
t += sprintf(t, "bch_empty_fifo() B-%d cnt %d", hscx, count);
QuickHex(t, ptr, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
@@ -582,7 +582,7 @@ bch_fill_fifo(struct BCState *bcs)
t += sprintf(t, "chb_fill_fifo() B-%d cnt %d", hscx, count);
QuickHex(t, ptr, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
index a365ccc1c99c..7fdf78f46433 100644
--- a/drivers/isdn/hisax/isac.c
+++ b/drivers/isdn/hisax/isac.c
@@ -137,7 +137,7 @@ isac_empty_fifo(struct IsdnCardState *cs, int count)
t += sprintf(t, "isac_empty_fifo cnt %d", count);
QuickHex(t, ptr, count);
- debugl1(cs, cs->dlog);
+ debugl1(cs, "%s", cs->dlog);
}
}
@@ -179,7 +179,7 @@ isac_fill_fifo(struct IsdnCardState *cs)
t += sprintf(t, "isac_fill_fifo cnt %d", count);
QuickHex(t, ptr, count);
- debugl1(cs, cs->dlog);
+ debugl1(cs, "%s", cs->dlog);
}
}
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index 7fdf34704fe5..f4956c73aa11 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -74,7 +74,7 @@ sendmsg(struct IsdnCardState *cs, u_char his, u_char creg, u_char len,
t = tmp;
t += sprintf(t, "sendmbox cnt %d", len);
QuickHex(t, &msg[len-i], (i > 64) ? 64 : i);
- debugl1(cs, tmp);
+ debugl1(cs, "%s", tmp);
i -= 64;
}
}
@@ -105,7 +105,7 @@ rcv_mbox(struct IsdnCardState *cs, struct isar_reg *ireg, u_char *msg)
t = tmp;
t += sprintf(t, "rcv_mbox cnt %d", ireg->clsb);
QuickHex(t, &msg[ireg->clsb - i], (i > 64) ? 64 : i);
- debugl1(cs, tmp);
+ debugl1(cs, "%s", tmp);
i -= 64;
}
}
@@ -1248,7 +1248,7 @@ isar_int_main(struct IsdnCardState *cs)
tp += sprintf(debbuf, "msg iis(%x) msb(%x)",
ireg->iis, ireg->cmsb);
QuickHex(tp, (u_char *)ireg->par, ireg->clsb);
- debugl1(cs, debbuf);
+ debugl1(cs, "%s", debbuf);
}
break;
case ISAR_IIS_INVMSG:
diff --git a/drivers/isdn/hisax/jade.c b/drivers/isdn/hisax/jade.c
index f946c58d8ab1..e2ae7871a209 100644
--- a/drivers/isdn/hisax/jade.c
+++ b/drivers/isdn/hisax/jade.c
@@ -81,10 +81,7 @@ modejade(struct BCState *bcs, int mode, int bc)
int jade = bcs->hw.hscx.hscx;
if (cs->debug & L1_DEB_HSCX) {
- char tmp[40];
- sprintf(tmp, "jade %c mode %d ichan %d",
- 'A' + jade, mode, bc);
- debugl1(cs, tmp);
+ debugl1(cs, "jade %c mode %d ichan %d", 'A' + jade, mode, bc);
}
bcs->mode = mode;
bcs->channel = bc;
@@ -257,23 +254,18 @@ void
clear_pending_jade_ints(struct IsdnCardState *cs)
{
int val;
- char tmp[64];
cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0x00);
cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0x00);
val = cs->BC_Read_Reg(cs, 1, jade_HDLC_ISR);
- sprintf(tmp, "jade B ISTA %x", val);
- debugl1(cs, tmp);
+ debugl1(cs, "jade B ISTA %x", val);
val = cs->BC_Read_Reg(cs, 0, jade_HDLC_ISR);
- sprintf(tmp, "jade A ISTA %x", val);
- debugl1(cs, tmp);
+ debugl1(cs, "jade A ISTA %x", val);
val = cs->BC_Read_Reg(cs, 1, jade_HDLC_STAR);
- sprintf(tmp, "jade B STAR %x", val);
- debugl1(cs, tmp);
+ debugl1(cs, "jade B STAR %x", val);
val = cs->BC_Read_Reg(cs, 0, jade_HDLC_STAR);
- sprintf(tmp, "jade A STAR %x", val);
- debugl1(cs, tmp);
+ debugl1(cs, "jade A STAR %x", val);
/* Unmask ints */
cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0xF8);
cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0xF8);
diff --git a/drivers/isdn/hisax/jade_irq.c b/drivers/isdn/hisax/jade_irq.c
index f521fc83dc76..b930da9b5aa6 100644
--- a/drivers/isdn/hisax/jade_irq.c
+++ b/drivers/isdn/hisax/jade_irq.c
@@ -65,7 +65,7 @@ jade_empty_fifo(struct BCState *bcs, int count)
t += sprintf(t, "jade_empty_fifo %c cnt %d",
bcs->hw.hscx.hscx ? 'B' : 'A', count);
QuickHex(t, ptr, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
@@ -105,7 +105,7 @@ jade_fill_fifo(struct BCState *bcs)
t += sprintf(t, "jade_fill_fifo %c cnt %d",
bcs->hw.hscx.hscx ? 'B' : 'A', count);
QuickHex(t, ptr, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
diff --git a/drivers/isdn/hisax/l3_1tr6.c b/drivers/isdn/hisax/l3_1tr6.c
index 4c1bca5caa1d..875402e76d0a 100644
--- a/drivers/isdn/hisax/l3_1tr6.c
+++ b/drivers/isdn/hisax/l3_1tr6.c
@@ -63,7 +63,7 @@ l3_1tr6_error(struct l3_process *pc, u_char *msg, struct sk_buff *skb)
{
dev_kfree_skb(skb);
if (pc->st->l3.debug & L3_DEB_WARN)
- l3_debug(pc->st, msg);
+ l3_debug(pc->st, "%s", msg);
l3_1tr6_release_req(pc, 0, NULL);
}
@@ -161,7 +161,6 @@ l3_1tr6_setup(struct l3_process *pc, u_char pr, void *arg)
{
u_char *p;
int bcfound = 0;
- char tmp[80];
struct sk_buff *skb = arg;
/* Channel Identification */
@@ -214,10 +213,9 @@ l3_1tr6_setup(struct l3_process *pc, u_char pr, void *arg)
/* Signal all services, linklevel takes care of Service-Indicator */
if (bcfound) {
if ((pc->para.setup.si1 != 7) && (pc->st->l3.debug & L3_DEB_WARN)) {
- sprintf(tmp, "non-digital call: %s -> %s",
+ l3_debug(pc->st, "non-digital call: %s -> %s",
pc->para.setup.phone,
pc->para.setup.eazmsn);
- l3_debug(pc->st, tmp);
}
newl3state(pc, 6);
pc->st->l3.l3l4(pc->st, CC_SETUP | INDICATION, pc);
@@ -301,7 +299,7 @@ l3_1tr6_info(struct l3_process *pc, u_char pr, void *arg)
{
u_char *p;
int i, tmpcharge = 0;
- char a_charge[8], tmp[32];
+ char a_charge[8];
struct sk_buff *skb = arg;
p = skb->data;
@@ -316,8 +314,8 @@ l3_1tr6_info(struct l3_process *pc, u_char pr, void *arg)
pc->st->l3.l3l4(pc->st, CC_CHARGE | INDICATION, pc);
}
if (pc->st->l3.debug & L3_DEB_CHARGE) {
- sprintf(tmp, "charging info %d", pc->para.chargeinfo);
- l3_debug(pc->st, tmp);
+ l3_debug(pc->st, "charging info %d",
+ pc->para.chargeinfo);
}
} else if (pc->st->l3.debug & L3_DEB_CHARGE)
l3_debug(pc->st, "charging info not found");
@@ -399,7 +397,7 @@ l3_1tr6_disc(struct l3_process *pc, u_char pr, void *arg)
struct sk_buff *skb = arg;
u_char *p;
int i, tmpcharge = 0;
- char a_charge[8], tmp[32];
+ char a_charge[8];
StopAllL3Timer(pc);
p = skb->data;
@@ -414,8 +412,8 @@ l3_1tr6_disc(struct l3_process *pc, u_char pr, void *arg)
pc->st->l3.l3l4(pc->st, CC_CHARGE | INDICATION, pc);
}
if (pc->st->l3.debug & L3_DEB_CHARGE) {
- sprintf(tmp, "charging info %d", pc->para.chargeinfo);
- l3_debug(pc->st, tmp);
+ l3_debug(pc->st, "charging info %d",
+ pc->para.chargeinfo);
}
} else if (pc->st->l3.debug & L3_DEB_CHARGE)
l3_debug(pc->st, "charging info not found");
@@ -746,7 +744,6 @@ up1tr6(struct PStack *st, int pr, void *arg)
int i, mt, cr;
struct l3_process *proc;
struct sk_buff *skb = arg;
- char tmp[80];
switch (pr) {
case (DL_DATA | INDICATION):
@@ -762,26 +759,23 @@ up1tr6(struct PStack *st, int pr, void *arg)
}
if (skb->len < 4) {
if (st->l3.debug & L3_DEB_PROTERR) {
- sprintf(tmp, "up1tr6 len only %d", skb->len);
- l3_debug(st, tmp);
+ l3_debug(st, "up1tr6 len only %d", skb->len);
}
dev_kfree_skb(skb);
return;
}
if ((skb->data[0] & 0xfe) != PROTO_DIS_N0) {
if (st->l3.debug & L3_DEB_PROTERR) {
- sprintf(tmp, "up1tr6%sunexpected discriminator %x message len %d",
+ l3_debug(st, "up1tr6%sunexpected discriminator %x message len %d",
(pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
skb->data[0], skb->len);
- l3_debug(st, tmp);
}
dev_kfree_skb(skb);
return;
}
if (skb->data[1] != 1) {
if (st->l3.debug & L3_DEB_PROTERR) {
- sprintf(tmp, "up1tr6 CR len not 1");
- l3_debug(st, tmp);
+ l3_debug(st, "up1tr6 CR len not 1");
}
dev_kfree_skb(skb);
return;
@@ -791,9 +785,8 @@ up1tr6(struct PStack *st, int pr, void *arg)
if (skb->data[0] == PROTO_DIS_N0) {
dev_kfree_skb(skb);
if (st->l3.debug & L3_DEB_STATE) {
- sprintf(tmp, "up1tr6%s N0 mt %x unhandled",
+ l3_debug(st, "up1tr6%s N0 mt %x unhandled",
(pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", mt);
- l3_debug(st, tmp);
}
} else if (skb->data[0] == PROTO_DIS_N1) {
if (!(proc = getl3proc(st, cr))) {
@@ -801,8 +794,7 @@ up1tr6(struct PStack *st, int pr, void *arg)
if (cr < 128) {
if (!(proc = new_l3_process(st, cr))) {
if (st->l3.debug & L3_DEB_PROTERR) {
- sprintf(tmp, "up1tr6 no roc mem");
- l3_debug(st, tmp);
+ l3_debug(st, "up1tr6 no roc mem");
}
dev_kfree_skb(skb);
return;
@@ -821,8 +813,7 @@ up1tr6(struct PStack *st, int pr, void *arg)
} else {
if (!(proc = new_l3_process(st, cr))) {
if (st->l3.debug & L3_DEB_PROTERR) {
- sprintf(tmp, "up1tr6 no roc mem");
- l3_debug(st, tmp);
+ l3_debug(st, "up1tr6 no roc mem");
}
dev_kfree_skb(skb);
return;
@@ -837,18 +828,16 @@ up1tr6(struct PStack *st, int pr, void *arg)
if (i == ARRAY_SIZE(datastln1)) {
dev_kfree_skb(skb);
if (st->l3.debug & L3_DEB_STATE) {
- sprintf(tmp, "up1tr6%sstate %d mt %x unhandled",
+ l3_debug(st, "up1tr6%sstate %d mt %x unhandled",
(pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
proc->state, mt);
- l3_debug(st, tmp);
}
return;
} else {
if (st->l3.debug & L3_DEB_STATE) {
- sprintf(tmp, "up1tr6%sstate %d mt %x",
+ l3_debug(st, "up1tr6%sstate %d mt %x",
(pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
proc->state, mt);
- l3_debug(st, tmp);
}
datastln1[i].rout(proc, pr, skb);
}
@@ -861,7 +850,6 @@ down1tr6(struct PStack *st, int pr, void *arg)
int i, cr;
struct l3_process *proc;
struct Channel *chan;
- char tmp[80];
if ((DL_ESTABLISH | REQUEST) == pr) {
l3_msg(st, pr, NULL);
@@ -888,15 +876,13 @@ down1tr6(struct PStack *st, int pr, void *arg)
break;
if (i == ARRAY_SIZE(downstl)) {
if (st->l3.debug & L3_DEB_STATE) {
- sprintf(tmp, "down1tr6 state %d prim %d unhandled",
+ l3_debug(st, "down1tr6 state %d prim %d unhandled",
proc->state, pr);
- l3_debug(st, tmp);
}
} else {
if (st->l3.debug & L3_DEB_STATE) {
- sprintf(tmp, "down1tr6 state %d prim %d",
+ l3_debug(st, "down1tr6 state %d prim %d",
proc->state, pr);
- l3_debug(st, tmp);
}
downstl[i].rout(proc, pr, arg);
}
diff --git a/drivers/isdn/hisax/netjet.c b/drivers/isdn/hisax/netjet.c
index b646eed379df..233e432e06f6 100644
--- a/drivers/isdn/hisax/netjet.c
+++ b/drivers/isdn/hisax/netjet.c
@@ -176,7 +176,7 @@ static void printframe(struct IsdnCardState *cs, u_char *buf, int count, char *s
else
j = i;
QuickHex(t, p, j);
- debugl1(cs, tmp);
+ debugl1(cs, "%s", tmp);
p += j;
i -= j;
t = tmp;
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index 041bf52d9d0a..af1b020a81f1 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -1179,7 +1179,7 @@ LogFrame(struct IsdnCardState *cs, u_char *buf, int size)
dp--;
*dp++ = '\n';
*dp = 0;
- HiSax_putstatus(cs, NULL, cs->dlog);
+ HiSax_putstatus(cs, NULL, "%s", cs->dlog);
} else
HiSax_putstatus(cs, "LogFrame: ", "warning Frame too big (%d)", size);
}
@@ -1246,7 +1246,7 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
}
if (finish) {
*dp = 0;
- HiSax_putstatus(cs, NULL, cs->dlog);
+ HiSax_putstatus(cs, NULL, "%s", cs->dlog);
return;
}
if ((0xfe & buf[0]) == PROTO_DIS_N0) { /* 1TR6 */
@@ -1509,5 +1509,5 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
dp += sprintf(dp, "Unknown protocol %x!", buf[0]);
}
*dp = 0;
- HiSax_putstatus(cs, NULL, cs->dlog);
+ HiSax_putstatus(cs, NULL, "%s", cs->dlog);
}
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c
index d8cac6935818..a85895585d90 100644
--- a/drivers/isdn/hisax/w6692.c
+++ b/drivers/isdn/hisax/w6692.c
@@ -154,7 +154,7 @@ W6692_empty_fifo(struct IsdnCardState *cs, int count)
t += sprintf(t, "W6692_empty_fifo cnt %d", count);
QuickHex(t, ptr, count);
- debugl1(cs, cs->dlog);
+ debugl1(cs, "%s", cs->dlog);
}
}
@@ -196,7 +196,7 @@ W6692_fill_fifo(struct IsdnCardState *cs)
t += sprintf(t, "W6692_fill_fifo cnt %d", count);
QuickHex(t, ptr, count);
- debugl1(cs, cs->dlog);
+ debugl1(cs, "%s", cs->dlog);
}
}
@@ -226,7 +226,7 @@ W6692B_empty_fifo(struct BCState *bcs, int count)
t += sprintf(t, "W6692B_empty_fifo %c cnt %d",
bcs->channel + '1', count);
QuickHex(t, ptr, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
@@ -264,7 +264,7 @@ W6692B_fill_fifo(struct BCState *bcs)
t += sprintf(t, "W6692B_fill_fifo %c cnt %d",
bcs->channel + '1', count);
QuickHex(t, ptr, count);
- debugl1(cs, bcs->blog);
+ debugl1(cs, "%s", bcs->blog);
}
}
diff --git a/drivers/mailbox/mailbox-omap2.c b/drivers/mailbox/mailbox-omap2.c
index eba380d7b17f..42d2b893ea67 100644
--- a/drivers/mailbox/mailbox-omap2.c
+++ b/drivers/mailbox/mailbox-omap2.c
@@ -325,7 +325,6 @@ static int omap2_mbox_remove(struct platform_device *pdev)
kfree(privblk);
kfree(mboxblk);
kfree(list);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index b39f6f0b45f2..0f12382aa35d 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -498,7 +498,7 @@ struct cached_dev {
*/
atomic_t has_dirty;
- struct ratelimit writeback_rate;
+ struct bch_ratelimit writeback_rate;
struct delayed_work writeback_rate_update;
/*
@@ -507,10 +507,9 @@ struct cached_dev {
*/
sector_t last_read;
- /* Number of writeback bios in flight */
- atomic_t in_flight;
+ /* Limit number of writeback bios in flight */
+ struct semaphore in_flight;
struct closure_with_timer writeback;
- struct closure_waitlist writeback_wait;
struct keybuf writeback_keys;
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 8010eed06a51..22d1ae72c282 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -926,28 +926,45 @@ struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
/* Mergesort */
+static void sort_key_next(struct btree_iter *iter,
+ struct btree_iter_set *i)
+{
+ i->k = bkey_next(i->k);
+
+ if (i->k == i->end)
+ *i = iter->data[--iter->used];
+}
+
static void btree_sort_fixup(struct btree_iter *iter)
{
while (iter->used > 1) {
struct btree_iter_set *top = iter->data, *i = top + 1;
- struct bkey *k;
if (iter->used > 2 &&
btree_iter_cmp(i[0], i[1]))
i++;
- for (k = i->k;
- k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0;
- k = bkey_next(k))
- if (top->k > i->k)
- __bch_cut_front(top->k, k);
- else if (KEY_SIZE(k))
- bch_cut_back(&START_KEY(k), top->k);
-
- if (top->k < i->k || k == i->k)
+ if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
break;
- heap_sift(iter, i - top, btree_iter_cmp);
+ if (!KEY_SIZE(i->k)) {
+ sort_key_next(iter, i);
+ heap_sift(iter, i - top, btree_iter_cmp);
+ continue;
+ }
+
+ if (top->k > i->k) {
+ if (bkey_cmp(top->k, i->k) >= 0)
+ sort_key_next(iter, i);
+ else
+ bch_cut_front(top->k, i->k);
+
+ heap_sift(iter, i - top, btree_iter_cmp);
+ } else {
+ /* can't happen because of comparison func */
+ BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
+ bch_cut_back(&START_KEY(i->k), top->k);
+ }
}
}
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index f9764e61978b..f42fc7ed9cd6 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -255,7 +255,7 @@ void bch_btree_node_read(struct btree *b)
return;
err:
- bch_cache_set_error(b->c, "io error reading bucket %lu",
+ bch_cache_set_error(b->c, "io error reading bucket %zu",
PTR_BUCKET_NR(b->c, &b->key, 0));
}
@@ -612,7 +612,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
return SHRINK_STOP;
/* Return -1 if we can't do anything right now */
- if (sc->gfp_mask & __GFP_WAIT)
+ if (sc->gfp_mask & __GFP_IO)
mutex_lock(&c->bucket_lock);
else if (!mutex_trylock(&c->bucket_lock))
return -1;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ba95ab84b2be..8435f81e5d85 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -153,7 +153,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
pr_debug("%u journal buckets", ca->sb.njournal_buckets);
- /* Read journal buckets ordered by golden ratio hash to quickly
+ /*
+ * Read journal buckets ordered by golden ratio hash to quickly
* find a sequence of buckets with valid journal entries
*/
for (i = 0; i < ca->sb.njournal_buckets; i++) {
@@ -166,18 +167,20 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
goto bsearch;
}
- /* If that fails, check all the buckets we haven't checked
+ /*
+ * If that fails, check all the buckets we haven't checked
* already
*/
pr_debug("falling back to linear search");
- for (l = 0; l < ca->sb.njournal_buckets; l++) {
- if (test_bit(l, bitmap))
- continue;
-
+ for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
+ l < ca->sb.njournal_buckets;
+ l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
if (read_bucket(l))
goto bsearch;
- }
+
+ if (list_empty(list))
+ continue;
bsearch:
/* Binary search */
m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
@@ -197,10 +200,12 @@ bsearch:
r = m;
}
- /* Read buckets in reverse order until we stop finding more
+ /*
+ * Read buckets in reverse order until we stop finding more
* journal entries
*/
- pr_debug("finishing up");
+ pr_debug("finishing up: m %u njournal_buckets %u",
+ m, ca->sb.njournal_buckets);
l = m;
while (1) {
@@ -228,9 +233,10 @@ bsearch:
}
}
- c->journal.seq = list_entry(list->prev,
- struct journal_replay,
- list)->j.seq;
+ if (!list_empty(list))
+ c->journal.seq = list_entry(list->prev,
+ struct journal_replay,
+ list)->j.seq;
return 0;
#undef read_bucket
@@ -428,7 +434,7 @@ static void do_journal_discard(struct cache *ca)
return;
}
- switch (atomic_read(&ja->discard_in_flight) == DISCARD_IN_FLIGHT) {
+ switch (atomic_read(&ja->discard_in_flight)) {
case DISCARD_IN_FLIGHT:
return;
@@ -689,6 +695,7 @@ void bch_journal_meta(struct cache_set *c, struct closure *cl)
if (cl)
BUG_ON(!closure_wait(&w->wait, cl));
+ closure_flush(&c->journal.io);
__journal_try_write(c, true);
}
}
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 786a1a4f74d8..71eb233b9ace 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -997,14 +997,17 @@ static void request_write(struct cached_dev *dc, struct search *s)
} else {
bch_writeback_add(dc);
- if (s->op.flush_journal) {
+ if (bio->bi_rw & REQ_FLUSH) {
/* Also need to send a flush to the backing device */
- s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
- dc->disk.bio_split);
+ struct bio *flush = bio_alloc_bioset(0, GFP_NOIO,
+ dc->disk.bio_split);
- bio->bi_size = 0;
- bio->bi_vcnt = 0;
- closure_bio_submit(bio, cl, s->d);
+ flush->bi_rw = WRITE_FLUSH;
+ flush->bi_bdev = bio->bi_bdev;
+ flush->bi_end_io = request_endio;
+ flush->bi_private = cl;
+
+ closure_bio_submit(flush, cl, s->d);
} else {
s->op.cache_bio = bio;
}
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 4fe6ab2fbe2e..924dcfdae111 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -223,8 +223,13 @@ STORE(__cached_dev)
}
if (attr == &sysfs_label) {
- /* note: endlines are preserved */
- memcpy(dc->sb.label, buf, SB_LABEL_SIZE);
+ if (size > SB_LABEL_SIZE)
+ return -EINVAL;
+ memcpy(dc->sb.label, buf, size);
+ if (size < SB_LABEL_SIZE)
+ dc->sb.label[size] = '\0';
+ if (size && dc->sb.label[size - 1] == '\n')
+ dc->sb.label[size - 1] = '\0';
bch_write_bdev_super(dc, NULL);
if (dc->disk.c) {
memcpy(dc->disk.c->uuids[dc->disk.id].label,
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 98eb81159a22..420dad545c7d 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -190,7 +190,16 @@ void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
stats->last = now ?: 1;
}
-unsigned bch_next_delay(struct ratelimit *d, uint64_t done)
+/**
+ * bch_next_delay() - increment @d by the amount of work done, and return how
+ * long to delay until the next time to do some work.
+ *
+ * @d - the struct bch_ratelimit to update
+ * @done - the amount of work done, in arbitrary units
+ *
+ * Returns the amount of time to delay by, in jiffies
+ */
+uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
{
uint64_t now = local_clock();
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 1ae2a73ad85f..ea345c6896f4 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -450,17 +450,23 @@ read_attribute(name ## _last_ ## frequency_units)
(ewma) >> factor; \
})
-struct ratelimit {
+struct bch_ratelimit {
+ /* Next time we want to do some work, in nanoseconds */
uint64_t next;
+
+ /*
+ * Rate at which we want to do work, in units per nanosecond
+ * The units here correspond to the units passed to bch_next_delay()
+ */
unsigned rate;
};
-static inline void ratelimit_reset(struct ratelimit *d)
+static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
{
d->next = local_clock();
}
-unsigned bch_next_delay(struct ratelimit *d, uint64_t done);
+uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done);
#define __DIV_SAFE(n, d, zero) \
({ \
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 22cbff551628..ba3ee48320f2 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -94,11 +94,15 @@ static void update_writeback_rate(struct work_struct *work)
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
+ uint64_t ret;
+
if (atomic_read(&dc->disk.detaching) ||
!dc->writeback_percent)
return 0;
- return bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
+ ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
+
+ return min_t(uint64_t, ret, HZ);
}
/* Background writeback */
@@ -208,7 +212,7 @@ normal_refill:
up_write(&dc->writeback_lock);
- ratelimit_reset(&dc->writeback_rate);
+ bch_ratelimit_reset(&dc->writeback_rate);
/* Punt to workqueue only so we don't recurse and blow the stack */
continue_at(cl, read_dirty, dirty_wq);
@@ -318,9 +322,7 @@ static void write_dirty_finish(struct closure *cl)
}
bch_keybuf_del(&dc->writeback_keys, w);
- atomic_dec_bug(&dc->in_flight);
-
- closure_wake_up(&dc->writeback_wait);
+ up(&dc->in_flight);
closure_return_with_destructor(cl, dirty_io_destructor);
}
@@ -349,7 +351,7 @@ static void write_dirty(struct closure *cl)
closure_bio_submit(&io->bio, cl, &io->dc->disk);
- continue_at(cl, write_dirty_finish, dirty_wq);
+ continue_at(cl, write_dirty_finish, system_wq);
}
static void read_dirty_endio(struct bio *bio, int error)
@@ -369,7 +371,7 @@ static void read_dirty_submit(struct closure *cl)
closure_bio_submit(&io->bio, cl, &io->dc->disk);
- continue_at(cl, write_dirty, dirty_wq);
+ continue_at(cl, write_dirty, system_wq);
}
static void read_dirty(struct closure *cl)
@@ -394,12 +396,8 @@ static void read_dirty(struct closure *cl)
if (delay > 0 &&
(KEY_START(&w->key) != dc->last_read ||
- jiffies_to_msecs(delay) > 50)) {
- w->private = NULL;
-
- closure_delay(&dc->writeback, delay);
- continue_at(cl, read_dirty, dirty_wq);
- }
+ jiffies_to_msecs(delay) > 50))
+ delay = schedule_timeout_uninterruptible(delay);
dc->last_read = KEY_OFFSET(&w->key);
@@ -424,15 +422,10 @@ static void read_dirty(struct closure *cl)
trace_bcache_writeback(&w->key);
- closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl);
+ down(&dc->in_flight);
+ closure_call(&io->cl, read_dirty_submit, NULL, cl);
delay = writeback_delay(dc, KEY_SIZE(&w->key));
-
- atomic_inc(&dc->in_flight);
-
- if (!closure_wait_event(&dc->writeback_wait, cl,
- atomic_read(&dc->in_flight) < 64))
- continue_at(cl, read_dirty, dirty_wq);
}
if (0) {
@@ -442,7 +435,11 @@ err:
bch_keybuf_del(&dc->writeback_keys, w);
}
- refill_dirty(cl);
+ /*
+ * Wait for outstanding writeback IOs to finish (and keybuf slots to be
+ * freed) before refilling again
+ */
+ continue_at(cl, refill_dirty, dirty_wq);
}
/* Init */
@@ -484,6 +481,7 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
+ sema_init(&dc->in_flight, 64);
closure_init_unlocked(&dc->writeback);
init_rwsem(&dc->writeback_lock);
@@ -513,7 +511,7 @@ void bch_writeback_exit(void)
int __init bch_writeback_init(void)
{
- dirty_wq = create_singlethread_workqueue("bcache_writeback");
+ dirty_wq = create_workqueue("bcache_writeback");
if (!dirty_wq)
return -ENOMEM;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index ea49834377c8..2a20986a2fec 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -19,8 +19,6 @@
#define DM_MSG_PREFIX "io"
#define DM_IO_MAX_REGIONS BITS_PER_LONG
-#define MIN_IOS 16
-#define MIN_BIOS 16
struct dm_io_client {
mempool_t *pool;
@@ -50,16 +48,17 @@ static struct kmem_cache *_dm_io_cache;
struct dm_io_client *dm_io_client_create(void)
{
struct dm_io_client *client;
+ unsigned min_ios = dm_get_reserved_bio_based_ios();
client = kmalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return ERR_PTR(-ENOMEM);
- client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
+ client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
if (!client->pool)
goto bad;
- client->bios = bioset_create(MIN_BIOS, 0);
+ client->bios = bioset_create(min_ios, 0);
if (!client->bios)
goto bad;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index b759a127f9c3..de570a558764 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -7,6 +7,7 @@
#include <linux/device-mapper.h>
+#include "dm.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"
@@ -116,8 +117,6 @@ struct dm_mpath_io {
typedef int (*action_fn) (struct pgpath *pgpath);
-#define MIN_IOS 256 /* Mempool size */
-
static struct kmem_cache *_mpio_cache;
static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
@@ -190,6 +189,7 @@ static void free_priority_group(struct priority_group *pg,
static struct multipath *alloc_multipath(struct dm_target *ti)
{
struct multipath *m;
+ unsigned min_ios = dm_get_reserved_rq_based_ios();
m = kzalloc(sizeof(*m), GFP_KERNEL);
if (m) {
@@ -202,7 +202,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
INIT_WORK(&m->trigger_event, trigger_event);
init_waitqueue_head(&m->pg_init_wait);
mutex_init(&m->work_mutex);
- m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
+ m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
if (!m->mpio_pool) {
kfree(m);
return NULL;
@@ -1268,6 +1268,7 @@ static int noretry_error(int error)
case -EREMOTEIO:
case -EILSEQ:
case -ENODATA:
+ case -ENOSPC:
return 1;
}
@@ -1298,8 +1299,17 @@ static int do_end_io(struct multipath *m, struct request *clone,
if (!error && !clone->errors)
return 0; /* I/O complete */
- if (noretry_error(error))
+ if (noretry_error(error)) {
+ if ((clone->cmd_flags & REQ_WRITE_SAME) &&
+ !clone->q->limits.max_write_same_sectors) {
+ struct queue_limits *limits;
+
+ /* device doesn't really support WRITE SAME, disable it */
+ limits = dm_get_queue_limits(dm_table_get_md(m->ti->table));
+ limits->max_write_same_sectors = 0;
+ }
return error;
+ }
if (mpio->pgpath)
fail_path(mpio->pgpath);
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 3ac415675b6c..4caa8e6d59d7 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -256,7 +256,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
*/
INIT_WORK_ONSTACK(&req.work, do_metadata);
queue_work(ps->metadata_wq, &req.work);
- flush_work(&req.work);
+ flush_workqueue(ps->metadata_wq);
return req.result;
}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index c434e5aab2df..aec57d76db5d 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -725,17 +725,16 @@ static int calc_max_buckets(void)
*/
static int init_hash_tables(struct dm_snapshot *s)
{
- sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;
+ sector_t hash_size, cow_dev_size, max_buckets;
/*
* Calculate based on the size of the original volume or
* the COW volume...
*/
cow_dev_size = get_dev_size(s->cow->bdev);
- origin_dev_size = get_dev_size(s->origin->bdev);
max_buckets = calc_max_buckets();
- hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
+ hash_size = cow_dev_size >> s->store->chunk_shift;
hash_size = min(hash_size, max_buckets);
if (hash_size < 64)
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 8ae31e8d3d64..3d404c1371ed 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -451,19 +451,26 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
struct dm_stat_percpu *p;
/*
- * For strict correctness we should use local_irq_disable/enable
+ * For strict correctness we should use local_irq_save/restore
* instead of preempt_disable/enable.
*
- * This is racy if the driver finishes bios from non-interrupt
- * context as well as from interrupt context or from more different
- * interrupts.
+ * preempt_disable/enable is racy if the driver finishes bios
+ * from non-interrupt context as well as from interrupt context
+ * or from more different interrupts.
*
- * However, the race only results in not counting some events,
- * so it is acceptable.
+ * On 64-bit architectures the race only results in not counting some
+ * events, so it is acceptable. On 32-bit architectures the race could
+ * cause the counter going off by 2^32, so we need to do proper locking
+ * there.
*
* part_stat_lock()/part_stat_unlock() have this race too.
*/
+#if BITS_PER_LONG == 32
+ unsigned long flags;
+ local_irq_save(flags);
+#else
preempt_disable();
+#endif
p = &s->stat_percpu[smp_processor_id()][entry];
if (!end) {
@@ -478,7 +485,11 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
p->ticks[idx] += duration;
}
+#if BITS_PER_LONG == 32
+ local_irq_restore(flags);
+#else
preempt_enable();
+#endif
}
static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index ed063427d676..2c0cf511ec23 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2095,6 +2095,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
* them down to the data device. The thin device's discard
* processing will cause mappings to be removed from the btree.
*/
+ ti->discard_zeroes_data_unsupported = true;
if (pf.discard_enabled && pf.discard_passdown) {
ti->num_discard_bios = 1;
@@ -2104,7 +2105,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
* thin devices' discard limits consistent).
*/
ti->discards_supported = true;
- ti->discard_zeroes_data_unsupported = true;
}
ti->private = pt;
@@ -2689,8 +2689,16 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
* They get transferred to the live pool in bind_control_target()
* called from pool_preresume().
*/
- if (!pt->adjusted_pf.discard_enabled)
+ if (!pt->adjusted_pf.discard_enabled) {
+ /*
+ * Must explicitly disallow stacking discard limits otherwise the
+ * block layer will stack them if pool's data device has support.
+ * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
+ * user to see that, so make sure to set all discard limits to 0.
+ */
+ limits->discard_granularity = 0;
return;
+ }
disable_passdown_if_not_supported(pt);
@@ -2826,10 +2834,10 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
/* In case the pool supports discards, pass them on. */
+ ti->discard_zeroes_data_unsupported = true;
if (tc->pool->pf.discard_enabled) {
ti->discards_supported = true;
ti->num_discard_bios = 1;
- ti->discard_zeroes_data_unsupported = true;
/* Discard bios must be split on a block boundary */
ti->split_discard_bios = true;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6a5e9ed2fcc3..b3e26c7d1417 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -211,10 +211,55 @@ struct dm_md_mempools {
struct bio_set *bs;
};
-#define MIN_IOS 256
+#define RESERVED_BIO_BASED_IOS 16
+#define RESERVED_REQUEST_BASED_IOS 256
+#define RESERVED_MAX_IOS 1024
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
+/*
+ * Bio-based DM's mempools' reserved IOs set by the user.
+ */
+static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
+
+/*
+ * Request-based DM's mempools' reserved IOs set by the user.
+ */
+static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
+
+static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
+ unsigned def, unsigned max)
+{
+ unsigned ios = ACCESS_ONCE(*reserved_ios);
+ unsigned modified_ios = 0;
+
+ if (!ios)
+ modified_ios = def;
+ else if (ios > max)
+ modified_ios = max;
+
+ if (modified_ios) {
+ (void)cmpxchg(reserved_ios, ios, modified_ios);
+ ios = modified_ios;
+ }
+
+ return ios;
+}
+
+unsigned dm_get_reserved_bio_based_ios(void)
+{
+ return __dm_get_reserved_ios(&reserved_bio_based_ios,
+ RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
+}
+EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
+
+unsigned dm_get_reserved_rq_based_ios(void)
+{
+ return __dm_get_reserved_ios(&reserved_rq_based_ios,
+ RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
+}
+EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
+
static int __init local_init(void)
{
int r = -ENOMEM;
@@ -2278,6 +2323,17 @@ struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
}
/*
+ * The queue_limits are only valid as long as you have a reference
+ * count on 'md'.
+ */
+struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
+{
+ BUG_ON(!atomic_read(&md->holders));
+ return &md->queue->limits;
+}
+EXPORT_SYMBOL_GPL(dm_get_queue_limits);
+
+/*
* Fully initialize a request-based queue (->elevator, ->request_fn, etc).
*/
static int dm_init_request_based_queue(struct mapped_device *md)
@@ -2862,18 +2918,18 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
if (type == DM_TYPE_BIO_BASED) {
cachep = _io_cache;
- pool_size = 16;
+ pool_size = dm_get_reserved_bio_based_ios();
front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
} else if (type == DM_TYPE_REQUEST_BASED) {
cachep = _rq_tio_cache;
- pool_size = MIN_IOS;
+ pool_size = dm_get_reserved_rq_based_ios();
front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
/* per_bio_data_size is not used. See __bind_mempools(). */
WARN_ON(per_bio_data_size != 0);
} else
goto out;
- pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep);
+ pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
if (!pools->io_pool)
goto out;
@@ -2924,6 +2980,13 @@ module_exit(dm_exit);
module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
+
+module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
+
+module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
+
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 5e604cc7b4aa..1d1ad7b7e527 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -184,6 +184,9 @@ void dm_free_md_mempools(struct dm_md_mempools *pools);
/*
* Helpers that are used by DM core
*/
+unsigned dm_get_reserved_bio_based_ios(void);
+unsigned dm_get_reserved_rq_based_ios(void);
+
static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
return !maxlen || strlen(result) + 1 >= maxlen;
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index d0fdc134068a..f6ff711aa5bb 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -57,6 +57,7 @@ void mei_amthif_reset_params(struct mei_device *dev)
dev->iamthif_ioctl = false;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
dev->iamthif_timer = 0;
+ dev->iamthif_stall_timer = 0;
}
/**
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 6d0282c08a06..cd2033cd7120 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -297,10 +297,13 @@ int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
if (cl->reading_state != MEI_READ_COMPLETE &&
!waitqueue_active(&cl->rx_wait)) {
+
mutex_unlock(&dev->device_lock);
if (wait_event_interruptible(cl->rx_wait,
- (MEI_READ_COMPLETE == cl->reading_state))) {
+ cl->reading_state == MEI_READ_COMPLETE ||
+ mei_cl_is_transitioning(cl))) {
+
if (signal_pending(current))
return -EINTR;
return -ERESTARTSYS;
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 9eb031e92070..892cc4207fa2 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -90,6 +90,12 @@ static inline bool mei_cl_is_connected(struct mei_cl *cl)
cl->dev->dev_state == MEI_DEV_ENABLED &&
cl->state == MEI_FILE_CONNECTED);
}
+static inline bool mei_cl_is_transitioning(struct mei_cl *cl)
+{
+ return (MEI_FILE_INITIALIZING == cl->state ||
+ MEI_FILE_DISCONNECTED == cl->state ||
+ MEI_FILE_DISCONNECTING == cl->state);
+}
bool mei_cl_is_other_connecting(struct mei_cl *cl);
int mei_cl_disconnect(struct mei_cl *cl);
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 6127ab64bb39..0a0448326e9d 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -35,11 +35,15 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev)
struct mei_me_client *clients;
int b;
+ dev->me_clients_num = 0;
+ dev->me_client_presentation_num = 0;
+ dev->me_client_index = 0;
+
/* count how many ME clients we have */
for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
dev->me_clients_num++;
- if (dev->me_clients_num <= 0)
+ if (dev->me_clients_num == 0)
return;
kfree(dev->me_clients);
@@ -221,7 +225,7 @@ static int mei_hbm_prop_req(struct mei_device *dev)
struct hbm_props_request *prop_req;
const size_t len = sizeof(struct hbm_props_request);
unsigned long next_client_index;
- u8 client_num;
+ unsigned long client_num;
client_num = dev->me_client_presentation_num;
@@ -677,8 +681,6 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
dev->hbm_state == MEI_HBM_ENUM_CLIENTS) {
dev->init_clients_timer = 0;
- dev->me_client_presentation_num = 0;
- dev->me_client_index = 0;
mei_hbm_me_cl_allocate(dev);
dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES;
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 92c73118b13c..6197018e2f16 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -175,6 +175,9 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
}
+ /* we're already in reset, cancel the init timer */
+ dev->init_clients_timer = 0;
+
dev->me_clients_num = 0;
dev->rd_msg_hdr = 0;
dev->wd_pending = false;
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 173ff095be0d..cabeddd66c1f 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -249,19 +249,16 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
mutex_unlock(&dev->device_lock);
if (wait_event_interruptible(cl->rx_wait,
- (MEI_READ_COMPLETE == cl->reading_state ||
- MEI_FILE_INITIALIZING == cl->state ||
- MEI_FILE_DISCONNECTED == cl->state ||
- MEI_FILE_DISCONNECTING == cl->state))) {
+ MEI_READ_COMPLETE == cl->reading_state ||
+ mei_cl_is_transitioning(cl))) {
+
if (signal_pending(current))
return -EINTR;
return -ERESTARTSYS;
}
mutex_lock(&dev->device_lock);
- if (MEI_FILE_INITIALIZING == cl->state ||
- MEI_FILE_DISCONNECTED == cl->state ||
- MEI_FILE_DISCONNECTING == cl->state) {
+ if (mei_cl_is_transitioning(cl)) {
rets = -EBUSY;
goto out;
}
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 7b918b2fb894..456b322013e2 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -396,9 +396,9 @@ struct mei_device {
struct mei_me_client *me_clients; /* Note: memory has to be allocated */
DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);
- u8 me_clients_num;
- u8 me_client_presentation_num;
- u8 me_client_index;
+ unsigned long me_clients_num;
+ unsigned long me_client_presentation_num;
+ unsigned long me_client_index;
struct mei_cl wd_cl;
enum mei_wd_states wd_state;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 5db900d917f9..dd03dfdfb0d6 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1236,7 +1236,6 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
static struct of_device_id pxa3xx_nand_dt_ids[] = {
{
.compatible = "marvell,pxa3xx-nand",
@@ -1284,12 +1283,6 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
return 0;
}
-#else
-static inline int pxa3xx_nand_probe_dt(struct platform_device *pdev)
-{
- return 0;
-}
-#endif
static int pxa3xx_nand_probe(struct platform_device *pdev)
{
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 91f179d5135c..f428ef574372 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1472,7 +1472,7 @@ void bond_alb_monitor(struct work_struct *work)
bond_info->lp_counter++;
/* send learning packets */
- if (bond_info->lp_counter >= BOND_ALB_LP_TICKS) {
+ if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
/* change of curr_active_slave involves swapping of mac addresses.
* in order to avoid this swapping from happening while
* sending the learning packets, the curr_slave_lock must be held for
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 28d8e4c7dc06..c5eff5dafdfe 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -36,14 +36,15 @@ struct slave;
* Used for division - never set
* to zero !!!
*/
-#define BOND_ALB_LP_INTERVAL 1 /* In seconds, periodic send of
- * learning packets to the switch
- */
+#define BOND_ALB_DEFAULT_LP_INTERVAL 1
+#define BOND_ALB_LP_INTERVAL(bond) (bond->params.lp_interval) /* In seconds, periodic send of
+ * learning packets to the switch
+ */
#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \
* ALB_TIMER_TICKS_PER_SEC)
-#define BOND_ALB_LP_TICKS (BOND_ALB_LP_INTERVAL \
+#define BOND_ALB_LP_TICKS(bond) (BOND_ALB_LP_INTERVAL(bond) \
* ALB_TIMER_TICKS_PER_SEC)
#define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 72df399c4ab3..55bbb8b8200c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4416,6 +4416,7 @@ static int bond_check_params(struct bond_params *params)
params->all_slaves_active = all_slaves_active;
params->resend_igmp = resend_igmp;
params->min_links = min_links;
+ params->lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
if (primary) {
strncpy(params->primary, primary, IFNAMSIZ);
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index eeab40b01b7a..c29b836749b6 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1699,6 +1699,44 @@ out:
static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR,
bonding_show_resend_igmp, bonding_store_resend_igmp);
+
+static ssize_t bonding_show_lp_interval(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bonding *bond = to_bond(d);
+ return sprintf(buf, "%d\n", bond->params.lp_interval);
+}
+
+static ssize_t bonding_store_lp_interval(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bonding *bond = to_bond(d);
+ int new_value, ret = count;
+
+ if (sscanf(buf, "%d", &new_value) != 1) {
+ pr_err("%s: no lp interval value specified.\n",
+ bond->dev->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (new_value <= 0) {
+ pr_err("%s: lp_interval must be between 1 and %d\n",
+ bond->dev->name, INT_MAX);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ bond->params.lp_interval = new_value;
+out:
+ return ret;
+}
+
+static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
+ bonding_show_lp_interval, bonding_store_lp_interval);
+
static struct attribute *per_bond_attrs[] = {
&dev_attr_slaves.attr,
&dev_attr_mode.attr,
@@ -1729,6 +1767,7 @@ static struct attribute *per_bond_attrs[] = {
&dev_attr_all_slaves_active.attr,
&dev_attr_resend_igmp.attr,
&dev_attr_min_links.attr,
+ &dev_attr_lp_interval.attr,
NULL,
};
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 7ad8bd5cc947..03cf3fd14490 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -176,6 +176,7 @@ struct bond_params {
int tx_queues;
int all_slaves_active;
int resend_igmp;
+ int lp_interval;
};
struct bond_parm_tbl {
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index e66684a438f5..75fb1d20d6fd 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -530,7 +530,7 @@ static int bfin_mac_ethtool_setwol(struct net_device *dev,
if (lp->wol && !lp->irq_wake_requested) {
/* register wake irq handler */
rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt,
- IRQF_DISABLED, "EMAC_WAKE", dev);
+ 0, "EMAC_WAKE", dev);
if (rc)
return rc;
lp->irq_wake_requested = true;
@@ -1686,7 +1686,7 @@ static int bfin_mac_probe(struct platform_device *pdev)
/* now, enable interrupts */
/* register irq handler */
rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
- IRQF_DISABLED, "EMAC_RX", ndev);
+ 0, "EMAC_RX", ndev);
if (rc) {
dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
rc = -EBUSY;
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index d6b20296b8e4..3d8c6b2cdea4 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -358,7 +358,7 @@ static int __init lance_probe( struct net_device *dev)
REGA(CSR0) = CSR0_STOP;
- if (request_irq(LANCE_IRQ, lance_interrupt, IRQF_DISABLED, "SUN3 Lance", dev) < 0) {
+ if (request_irq(LANCE_IRQ, lance_interrupt, 0, "SUN3 Lance", dev) < 0) {
#ifdef CONFIG_SUN3
iounmap((void __iomem *)ioaddr);
#endif
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 027398ebbba6..fc95b235e210 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1188,7 +1188,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct alx_priv *alx;
struct alx_hw *hw;
bool phy_configured;
- int bars, pm_cap, err;
+ int bars, err;
err = pci_enable_device_mem(pdev);
if (err)
@@ -1225,18 +1225,13 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
- pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (pm_cap == 0) {
+ if (!pdev->pm_cap) {
dev_err(&pdev->dev,
"Can't find power management capability, aborting\n");
err = -EIO;
goto out_pci_release;
}
- err = pci_set_power_state(pdev, PCI_D0);
- if (err)
- goto out_pci_release;
-
netdev = alloc_etherdev(sizeof(*alx));
if (!netdev) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index eec0af45b859..249468f95365 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -157,6 +157,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
if (++ring->end >= BGMAC_TX_RING_SLOTS)
ring->end = 0;
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
+ ring->index_base +
ring->end * sizeof(struct bgmac_dma_desc));
/* Always keep one slot free to allow detecting bugged calls. */
@@ -181,6 +182,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
/* The last slot that hardware didn't consume yet */
empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
empty_slot &= BGMAC_DMA_TX_STATDPTR;
+ empty_slot -= ring->index_base;
+ empty_slot &= BGMAC_DMA_TX_STATDPTR;
empty_slot /= sizeof(struct bgmac_dma_desc);
while (ring->start != empty_slot) {
@@ -274,6 +277,8 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
end_slot &= BGMAC_DMA_RX_STATDPTR;
+ end_slot -= ring->index_base;
+ end_slot &= BGMAC_DMA_RX_STATDPTR;
end_slot /= sizeof(struct bgmac_dma_desc);
ring->end = end_slot;
@@ -418,9 +423,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
ring = &bgmac->tx_ring[i];
ring->num_slots = BGMAC_TX_RING_SLOTS;
ring->mmio_base = ring_base[i];
- if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
- bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
- ring->mmio_base);
/* Alloc ring of descriptors */
size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -435,6 +437,13 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
if (ring->dma_base & 0xC0000000)
bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
+ ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
+ BGMAC_DMA_RING_TX);
+ if (ring->unaligned)
+ ring->index_base = lower_32_bits(ring->dma_base);
+ else
+ ring->index_base = 0;
+
/* No need to alloc TX slots yet */
}
@@ -444,9 +453,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
ring = &bgmac->rx_ring[i];
ring->num_slots = BGMAC_RX_RING_SLOTS;
ring->mmio_base = ring_base[i];
- if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
- bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
- ring->mmio_base);
/* Alloc ring of descriptors */
size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -462,6 +468,13 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
if (ring->dma_base & 0xC0000000)
bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
+ ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
+ BGMAC_DMA_RING_RX);
+ if (ring->unaligned)
+ ring->index_base = lower_32_bits(ring->dma_base);
+ else
+ ring->index_base = 0;
+
/* Alloc RX slots */
for (j = 0; j < ring->num_slots; j++) {
err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
@@ -489,12 +502,14 @@ static void bgmac_dma_init(struct bgmac *bgmac)
for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
ring = &bgmac->tx_ring[i];
- /* We don't implement unaligned addressing, so enable first */
- bgmac_dma_tx_enable(bgmac, ring);
+ if (!ring->unaligned)
+ bgmac_dma_tx_enable(bgmac, ring);
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
lower_32_bits(ring->dma_base));
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
upper_32_bits(ring->dma_base));
+ if (ring->unaligned)
+ bgmac_dma_tx_enable(bgmac, ring);
ring->start = 0;
ring->end = 0; /* Points the slot that should *not* be read */
@@ -505,12 +520,14 @@ static void bgmac_dma_init(struct bgmac *bgmac)
ring = &bgmac->rx_ring[i];
- /* We don't implement unaligned addressing, so enable first */
- bgmac_dma_rx_enable(bgmac, ring);
+ if (!ring->unaligned)
+ bgmac_dma_rx_enable(bgmac, ring);
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
lower_32_bits(ring->dma_base));
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
upper_32_bits(ring->dma_base));
+ if (ring->unaligned)
+ bgmac_dma_rx_enable(bgmac, ring);
for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
j++, dma_desc++) {
@@ -531,6 +548,7 @@ static void bgmac_dma_init(struct bgmac *bgmac)
}
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
+ ring->index_base +
ring->num_slots * sizeof(struct bgmac_dma_desc));
ring->start = 0;
@@ -908,10 +926,10 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
u8 et_swtype = 0;
u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
- BGMAC_CHIPCTL_1_IF_TYPE_RMII;
- char buf[2];
+ BGMAC_CHIPCTL_1_IF_TYPE_MII;
+ char buf[4];
- if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) {
+ if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
if (kstrtou8(buf, 0, &et_swtype))
bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
buf);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 98d4b5fcc070..66c8afbdc8c7 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -333,7 +333,7 @@
#define BGMAC_CHIPCTL_1_IF_TYPE_MASK 0x00000030
#define BGMAC_CHIPCTL_1_IF_TYPE_RMII 0x00000000
-#define BGMAC_CHIPCTL_1_IF_TYPE_MI 0x00000010
+#define BGMAC_CHIPCTL_1_IF_TYPE_MII 0x00000010
#define BGMAC_CHIPCTL_1_IF_TYPE_RGMII 0x00000020
#define BGMAC_CHIPCTL_1_SW_TYPE_MASK 0x000000C0
#define BGMAC_CHIPCTL_1_SW_TYPE_EPHY 0x00000000
@@ -384,6 +384,8 @@ struct bgmac_dma_ring {
u16 mmio_base;
struct bgmac_dma_desc *cpu_base;
dma_addr_t dma_base;
+ u32 index_base; /* Used for unaligned rings only, otherwise 0 */
+ bool unaligned;
struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 0c338026ce01..97b3d32a98bd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -246,8 +246,37 @@ enum {
BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
};
-#define BNX2X_CNIC_START_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) *\
+/* use a value high enough to be above all the PFs, which has least significant
+ * nibble as 8, so when cnic needs to come up with a CID for UIO to use to
+ * calculate doorbell address according to old doorbell configuration scheme
+ * (db_msg_sz 1 << 7 * cid + 0x40 DPM offset) it can come up with a valid number.
+ * We must avoid coming up with cid 8 for iscsi since according to this method
+ * the designated UIO cid will come out 0 and it has a special handling for that
+ * case which doesn't suit us. Therefore we will round up to the closest cid which
+ * has least significant nibble 8, and if it is 8 we will move forward to 0x18.
+ */
+
+#define BNX2X_1st_NON_L2_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * \
(bp)->max_cos)
+/* number of cids traversed by UIO's DPM addition to the doorbell */
+#define UIO_DPM 8
+/* round up to a multiple of UIO_DPM */
+#define UIO_ROUNDUP(bp) (roundup(BNX2X_1st_NON_L2_ETH_CID(bp), \
+ UIO_DPM))
+/* offset to nearest value which has lsb nibble matching DPM */
+#define UIO_CID_OFFSET(bp) ((UIO_ROUNDUP(bp) + UIO_DPM) % \
+ (UIO_DPM * 2))
+/* add offset to rounded-up cid to get a value which could be used with UIO */
+#define UIO_DPM_ALIGN(bp) (UIO_ROUNDUP(bp) + UIO_CID_OFFSET(bp))
+/* but wait - avoid UIO special case for cid 0 */
+#define UIO_DPM_CID0_OFFSET(bp) ((UIO_DPM * 2) * \
+ (UIO_DPM_ALIGN(bp) == UIO_DPM))
+/* Properly DPM-aligned CID, adjusted for the cid 0 special case */
+#define BNX2X_CNIC_START_ETH_CID(bp) (UIO_DPM_ALIGN(bp) + \
+ (UIO_DPM_CID0_OFFSET(bp)))
+/* how many cids were wasted - need this value for cid allocation */
+#define UIO_CID_PAD(bp) (BNX2X_CNIC_START_ETH_CID(bp) - \
+ BNX2X_1st_NON_L2_ETH_CID(bp))
/* iSCSI L2 */
#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp))
/* FCoE L2 */
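The macro chain above is easier to follow as plain arithmetic. The following is a minimal standalone sketch of the same computation (the helper names, the test loop and the printf are illustrative assumptions, not driver code):

#include <stdio.h>

#define UIO_DPM 8	/* cids traversed by UIO's DPM doorbell addition */

static unsigned int roundup_to(unsigned int x, unsigned int step)
{
	return ((x + step - 1) / step) * step;
}

/* Mirrors BNX2X_CNIC_START_ETH_CID(): round the first non-L2 cid up to the
 * closest value whose least significant nibble is 8, skipping 8 itself
 * (which would map to UIO cid 0) by jumping forward to 0x18.
 */
static unsigned int cnic_start_eth_cid(unsigned int first_non_l2_cid)
{
	unsigned int up = roundup_to(first_non_l2_cid, UIO_DPM);
	unsigned int off = (up + UIO_DPM) % (UIO_DPM * 2);
	unsigned int aligned = up + off;

	if (aligned == UIO_DPM)
		aligned += UIO_DPM * 2;
	return aligned;
}

int main(void)
{
	unsigned int first;

	for (first = 1; first <= 24; first++)
		printf("first non-L2 cid %2u -> CNIC start cid 0x%02x\n",
		       first, cnic_start_eth_cid(first));
	return 0;
}

For example, a first non-L2 cid of 12 rounds up to 16 and then lands on 0x18, while a first cid of 4 would align to 8 and so is pushed forward to 0x18, as the comment describes.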
@@ -1542,7 +1571,6 @@ struct bnx2x {
*/
bool fcoe_init;
- int pm_cap;
int mrrs;
struct delayed_work sp_task;
@@ -1681,10 +1709,11 @@ struct bnx2x {
* Maximum CID count that might be required by the bnx2x:
* Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
*/
+
#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
- + 2 * CNIC_SUPPORT(bp))
+ + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
- + 2 * CNIC_SUPPORT(bp))
+ + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
ILT_PAGE_CIDS))
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 90045c920d09..61726af1de6e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3008,16 +3008,16 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
u16 pmcsr;
/* If there is no power capability, silently succeed */
- if (!bp->pm_cap) {
+ if (!bp->pdev->pm_cap) {
BNX2X_DEV_INFO("No power capability. Breaking.\n");
return 0;
}
- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+ pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
switch (state) {
case PCI_D0:
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+ pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
PCI_PM_CTRL_PME_STATUS));
@@ -3041,7 +3041,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
if (bp->wol)
pmcsr |= PCI_PM_CTRL_PME_ENABLE;
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+ pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
pmcsr);
/* No more memory access after this point until
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 2612e3c715d4..324de5f05332 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1387,9 +1387,9 @@ static bool bnx2x_is_nvm_accessible(struct bnx2x *bp)
u16 pm = 0;
struct net_device *dev = pci_get_drvdata(bp->pdev);
- if (bp->pm_cap)
+ if (bp->pdev->pm_cap)
rc = pci_read_config_word(bp->pdev,
- bp->pm_cap + PCI_PM_CTRL, &pm);
+ bp->pdev->pm_cap + PCI_PM_CTRL, &pm);
if ((rc && !netif_running(dev)) ||
(!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0)))
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2f8dbbbd7a86..a6704b555042 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8652,6 +8652,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
else if (bp->wol) {
u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
u8 *mac_addr = bp->dev->dev_addr;
+ struct pci_dev *pdev = bp->pdev;
u32 val;
u16 pmc;
@@ -8668,9 +8669,9 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
/* Enable the PME and clear the status */
- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
@@ -10399,7 +10400,7 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
break;
}
- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
+ pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
BNX2X_DEV_INFO("%sWoL capable\n",
@@ -12141,8 +12142,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
}
if (IS_PF(bp)) {
- bp->pm_cap = pdev->pm_cap;
- if (bp->pm_cap == 0) {
+ if (!pdev->pm_cap) {
dev_err(&bp->pdev->dev,
"Cannot find power management capability, aborting\n");
rc = -EIO;
@@ -13632,6 +13632,10 @@ void bnx2x_setup_cnic_info(struct bnx2x *bp)
cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
+ DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
+ BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
+ cp->iscsi_l2_cid);
+
if (NO_ISCSI_OOO(bp))
cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
}
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 8142480d9770..99394bd49a13 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -3135,6 +3135,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
{
struct cnic_dev *dev = (struct cnic_dev *) data;
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
u32 status_idx, new_status_idx;
if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
@@ -3146,7 +3147,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
CNIC_WR16(dev, cp->kcq1.io_addr,
cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
- if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE) {
+ if (!CNIC_SUPPORTS_FCOE(bp)) {
cp->arm_int(dev, status_idx);
break;
}
@@ -5217,7 +5218,8 @@ static void cnic_init_rings(struct cnic_dev *dev)
"iSCSI CLIENT_SETUP did not complete\n");
cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
cnic_ring_ctl(dev, cid, cli, 1);
- *cid_ptr = cid;
+ *cid_ptr = cid >> 4;
+ *(cid_ptr + 1) = cid * bp->db_size;
}
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 5701f3d1a169..12d961c4ebca 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -3034,6 +3034,7 @@ static bool tg3_phy_led_bug(struct tg3 *tp)
{
switch (tg3_asic_rev(tp)) {
case ASIC_REV_5719:
+ case ASIC_REV_5720:
if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
!tp->pci_fn)
return true;
@@ -16192,12 +16193,12 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
* So explicitly force the chip into D0 here.
*/
pci_read_config_dword(tp->pdev,
- tp->pm_cap + PCI_PM_CTRL,
+ tp->pdev->pm_cap + PCI_PM_CTRL,
&pm_reg);
pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
pci_write_config_dword(tp->pdev,
- tp->pm_cap + PCI_PM_CTRL,
+ tp->pdev->pm_cap + PCI_PM_CTRL,
pm_reg);
/* Also, force SERR#/PERR# in PCI command. */
@@ -17346,7 +17347,6 @@ static int tg3_init_one(struct pci_dev *pdev,
tp = netdev_priv(dev);
tp->pdev = pdev;
tp->dev = dev;
- tp->pm_cap = pdev->pm_cap;
tp->rx_mode = TG3_DEF_RX_MODE;
tp->tx_mode = TG3_DEF_TX_MODE;
tp->irq_sync = 1;
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index ddb8be1298ea..70257808aa37 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -3234,7 +3234,6 @@ struct tg3 {
u8 pci_lat_timer;
int pci_fn;
- int pm_cap;
int msi_cap;
int pcix_cap;
int pcie_readrq;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0d0665ca6f19..c73cabdbd4c0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6149,8 +6149,10 @@ static int __init cxgb4_init_module(void)
pr_warn("could not create debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4_driver);
- if (ret < 0)
+ if (ret < 0) {
debugfs_remove(cxgb4_debugfs_root);
+ destroy_workqueue(workq);
+ }
register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 2db6c573cec7..263b92c00cbf 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -1321,7 +1321,7 @@ de4x5_open(struct net_device *dev)
if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
lp->adapter_name, dev)) {
printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
- if (request_irq(dev->irq, de4x5_interrupt, IRQF_DISABLED | IRQF_SHARED,
+ if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
lp->adapter_name, dev)) {
printk("\n Cannot get IRQ- reconfigure your hardware.\n");
disable_ast(dev);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3224d28cdad4..100b528b9bd0 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2802,7 +2802,7 @@ static int be_vfs_if_create(struct be_adapter *adapter)
struct be_resources res = {0};
struct be_vf_cfg *vf_cfg;
u32 cap_flags, en_flags, vf;
- int status;
+ int status = 0;
cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f9aacf5d8523..b2793b91cc55 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2199,7 +2199,7 @@ fec_probe(struct platform_device *pdev)
goto failed_irq;
}
ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
- IRQF_DISABLED, pdev->name, ndev);
+ 0, pdev->name, ndev);
if (ret)
goto failed_irq;
}
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index e3c7c697fc45..91227d03274e 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1097,7 +1097,7 @@ static int hp100_open(struct net_device *dev)
/* New: if bus is PCI or EISA, interrupts might be shared interrupts */
if (request_irq(dev->irq, hp100_interrupt,
lp->bus == HP100_BUS_PCI || lp->bus ==
- HP100_BUS_EISA ? IRQF_SHARED : IRQF_DISABLED,
+ HP100_BUS_EISA ? IRQF_SHARED : 0,
"hp100", dev)) {
printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
return -EAGAIN;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 35853b43d66e..2d1c6bdd3618 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -102,6 +102,19 @@ static int ehea_probe_adapter(struct platform_device *dev);
static int ehea_remove(struct platform_device *dev);
+static struct of_device_id ehea_module_device_table[] = {
+ {
+ .name = "lhea",
+ .compatible = "IBM,lhea",
+ },
+ {
+ .type = "network",
+ .compatible = "IBM,lhea-ethernet",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ehea_module_device_table);
+
static struct of_device_id ehea_device_table[] = {
{
.name = "lhea",
@@ -109,7 +122,6 @@ static struct of_device_id ehea_device_table[] = {
},
{},
};
-MODULE_DEVICE_TABLE(of, ehea_device_table);
static struct platform_driver ehea_driver = {
.driver = {
@@ -1285,7 +1297,7 @@ static int ehea_reg_interrupts(struct net_device *dev)
ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
ehea_qp_aff_irq_handler,
- IRQF_DISABLED, port->int_aff_name, port);
+ 0, port->int_aff_name, port);
if (ret) {
netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
port->qp_eq->attr.ist1);
@@ -1303,8 +1315,7 @@ static int ehea_reg_interrupts(struct net_device *dev)
"%s-queue%d", dev->name, i);
ret = ibmebus_request_irq(pr->eq->attr.ist1,
ehea_recv_irq_handler,
- IRQF_DISABLED, pr->int_send_name,
- pr);
+ 0, pr->int_send_name, pr);
if (ret) {
netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
i, pr->eq->attr.ist1);
@@ -3320,7 +3331,7 @@ static int ehea_probe_adapter(struct platform_device *dev)
}
ret = ibmebus_request_irq(adapter->neq->attr.ist1,
- ehea_interrupt_neq, IRQF_DISABLED,
+ ehea_interrupt_neq, 0,
"ehea_neq", adapter);
if (ret) {
dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index a8633b8f0ac5..d14c8f53384c 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -922,6 +922,14 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
else
mask &= ~(1 << 30);
}
+ if (mac->type == e1000_pch2lan) {
+ /* SHRAH[0,1,2] different than previous */
+ if (i == 7)
+ mask &= 0xFFF4FFFF;
+ /* SHRAH[3] different than SHRAH[0,1,2] */
+ if (i == 10)
+ mask |= (1 << 30);
+ }
REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask,
0xFFFFFFFF);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index af08188d7e62..42f0f6717511 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1371,7 +1371,10 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
return;
}
- if (index < hw->mac.rar_entry_count) {
+ /* RAR[1-6] are owned by manageability. Skip those and program the
+ * next address into the SHRA register array.
+ */
+ if (index < (u32)(hw->mac.rar_entry_count - 6)) {
s32 ret_val;
ret_val = e1000_acquire_swflag_ich8lan(hw);
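A small standalone illustration of the new bound, assuming (as the comment above suggests) that driver index 1 maps to SHRA[0]; the printed mapping is an assumption for illustration, not driver code. With a rar_entry_count of 11 (see the E1000_PCH2_RAR_ENTRIES change below), the number of driver-programmable slots stays at five, matching the old define:

#include <stdio.h>

int main(void)
{
	const unsigned int rar_entry_count = 11;	/* E1000_PCH2_RAR_ENTRIES */
	unsigned int index;

	for (index = 0; index < rar_entry_count; index++) {
		if (index == 0)
			printf("index %u -> RAR[0]\n", index);
		else if (index < rar_entry_count - 6)
			printf("index %u -> SHRA[%u] (assumed mapping)\n",
			       index, index - 1);
		else
			printf("index %u -> not programmable by the driver\n",
			       index);
	}
	return 0;
}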
@@ -1962,8 +1965,8 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
if (ret_val)
goto release;
- /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
- for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+ /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
+ for (i = 0; i < (hw->mac.rar_entry_count); i++) {
mac_reg = er32(RAL(i));
hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
(u16)(mac_reg & 0xFFFF));
@@ -2007,10 +2010,10 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
return ret_val;
if (enable) {
- /* Write Rx addresses (rar_entry_count for RAL/H, +4 for
+ /* Write Rx addresses (rar_entry_count for RAL/H, and
* SHRAL/H) and initial CRC values to the MAC
*/
- for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+ for (i = 0; i < hw->mac.rar_entry_count; i++) {
u8 mac_addr[ETH_ALEN] = { 0 };
u32 addr_high, addr_low;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 59865695b282..217090df33e7 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -98,7 +98,7 @@
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
-#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
+#define E1000_PCH2_RAR_ENTRIES 11 /* RAR[0-6], SHRA[0-3] */
#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
#define PHY_PAGE_SHIFT 5
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e87e9b01f404..4ef786775acb 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4868,7 +4868,7 @@ static void e1000_watchdog_task(struct work_struct *work)
*/
if ((hw->phy.type == e1000_phy_igp_3 ||
hw->phy.type == e1000_phy_bm) &&
- (hw->mac.autoneg == true) &&
+ hw->mac.autoneg &&
(adapter->link_speed == SPEED_10 ||
adapter->link_speed == SPEED_100) &&
(adapter->link_duplex == HALF_DUPLEX)) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 79b58353d849..47c2d10df826 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -719,6 +719,10 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
u32 ctrl_ext;
u32 mdic;
+ /* Extra read required for some PHYs on i354 */
+ if (hw->mac.type == e1000_i354)
+ igb_get_phy_id(hw);
+
/* For SGMII PHYs, we try the list of possible addresses until
* we find one that works. For non-SGMII PHYs
* (e.g. integrated copper PHYs), an address of 1 should
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index f0dfd41dd4bd..298f0ed50670 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -712,6 +712,7 @@ static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
static s32 igb_set_default_fc(struct e1000_hw *hw)
{
s32 ret_val = 0;
+ u16 lan_offset;
u16 nvm_data;
/* Read and store word 0x0F of the EEPROM. This word contains bits
@@ -722,7 +723,14 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)
* control setting, then the variable hw->fc will
* be initialized based on a value in the EEPROM.
*/
- ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
+ if (hw->mac.type == e1000_i350) {
+ lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG
+ + lan_offset, 1, &nvm_data);
+ } else {
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG,
+ 1, &nvm_data);
+ }
if (ret_val) {
hw_dbg("NVM Read Error\n");
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 0e1b973659b0..e8649abf97c0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -160,6 +160,13 @@ static int ixgbe_get_settings(struct net_device *netdev,
bool autoneg = false;
bool link_up;
+ /* SFP type is needed for get_link_capabilities */
+ if (hw->phy.media_type & (ixgbe_media_type_fiber |
+ ixgbe_media_type_fiber_qsfp)) {
+ if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+ hw->phy.ops.identify_sfp(hw);
+ }
+
hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
/* set the supported link speeds */
@@ -186,6 +193,11 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->advertising |= ADVERTISED_1000baseT_Full;
if (supported_link & IXGBE_LINK_SPEED_100_FULL)
ecmd->advertising |= ADVERTISED_100baseT_Full;
+
+ if (hw->phy.multispeed_fiber && !autoneg) {
+ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ }
}
if (autoneg) {
@@ -314,6 +326,14 @@ static int ixgbe_set_settings(struct net_device *netdev,
if (ecmd->advertising & ~ecmd->supported)
return -EINVAL;
+ /* only allow one speed at a time if no autoneg */
+ if (!ecmd->autoneg && hw->phy.multispeed_fiber) {
+ if (ecmd->advertising ==
+ (ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full))
+ return -EINVAL;
+ }
+
old = hw->phy.autoneg_advertised;
advertised = 0;
if (ecmd->advertising & ADVERTISED_10000baseT_Full)
@@ -1805,6 +1825,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
unsigned int size = 1024;
netdev_tx_t tx_ret_val;
struct sk_buff *skb;
+ u32 flags_orig = adapter->flags;
+
+ /* DCB can modify the frames on Tx */
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
/* allocate test skb */
skb = alloc_skb(size, GFP_KERNEL);
@@ -1857,6 +1881,7 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
/* free the original skb */
kfree_skb(skb);
+ adapter->flags = flags_orig;
return ret_val;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7aba452833e5..0ade0cd5ef53 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3571,7 +3571,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int i;
- u32 rxctrl;
+ u32 rxctrl, rfctl;
/* disable receives while setting up the descriptors */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -3580,6 +3580,13 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
ixgbe_setup_psrtype(adapter);
ixgbe_setup_rdrxctl(adapter);
+ /* RSC Setup */
+ rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
+ rfctl &= ~IXGBE_RFCTL_RSC_DIS;
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+ rfctl |= IXGBE_RFCTL_RSC_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
+
/* Program registers for the distribution of queues */
ixgbe_setup_mrqc(adapter);
@@ -5993,8 +6000,16 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
speed = hw->phy.autoneg_advertised;
- if ((!speed) && (hw->mac.ops.get_link_capabilities))
+ if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
+
+ /* setup the highest link when no autoneg */
+ if (!autoneg) {
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ speed = IXGBE_LINK_SPEED_10GB_FULL;
+ }
+ }
+
if (hw->mac.ops.setup_link)
hw->mac.ops.setup_link(hw, speed, true);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 6442cf8f9dce..10775cb9b6d8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1861,6 +1861,7 @@ enum {
#define IXGBE_RFCTL_ISCSI_DIS 0x00000001
#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E
#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1
+#define IXGBE_RFCTL_RSC_DIS 0x00000020
#define IXGBE_RFCTL_NFSW_DIS 0x00000040
#define IXGBE_RFCTL_NFSR_DIS 0x00000080
#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index bfdb06860397..6a6c1f76d8e0 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -282,8 +282,7 @@ ltq_etop_hw_init(struct net_device *dev)
if (IS_TX(i)) {
ltq_dma_alloc_tx(&ch->dma);
- request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
- "etop_tx", priv);
+ request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
} else if (IS_RX(i)) {
ltq_dma_alloc_rx(&ch->dma);
for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
@@ -291,8 +290,7 @@ ltq_etop_hw_init(struct net_device *dev)
if (ltq_etop_alloc_skb(ch))
return -ENOMEM;
ch->dma.desc = 0;
- request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
- "etop_rx", priv);
+ request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
}
ch->dma.irq = irq;
}
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 4ae0c7426010..fff62460185c 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1123,8 +1123,7 @@ static int pxa168_eth_open(struct net_device *dev)
struct pxa168_eth_private *pep = netdev_priv(dev);
int err;
- err = request_irq(dev->irq, pxa168_eth_int_handler,
- IRQF_DISABLED, dev->name, dev);
+ err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev);
if (err) {
dev_err(&dev->dev, "can't assign irq\n");
return -EAGAIN;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index ef94a591f9e5..1a9c4f6269ea 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3092,6 +3092,9 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
if (!nskb)
goto resubmit;
+ skb = e->skb;
+ prefetch(skb->data);
+
if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
dev_kfree_skb(nskb);
goto resubmit;
@@ -3101,8 +3104,6 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
dma_unmap_addr(e, mapaddr),
dma_unmap_len(e, maplen),
PCI_DMA_FROMDEVICE);
- skb = e->skb;
- prefetch(skb->data);
}
skb_put(skb, len);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index a28cd801a236..0c750985f47e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -53,9 +53,11 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
for (i = 0; i < priv->tx_ring_num; i++) {
priv->tx_cq[i].moder_cnt = priv->tx_frames;
priv->tx_cq[i].moder_time = priv->tx_usecs;
- err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
- if (err)
- return err;
+ if (priv->port_up) {
+ err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
+ if (err)
+ return err;
+ }
}
if (priv->adaptive_rx_coal)
@@ -65,9 +67,11 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
priv->rx_cq[i].moder_cnt = priv->rx_frames;
priv->rx_cq[i].moder_time = priv->rx_usecs;
priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
- err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
- if (err)
- return err;
+ if (priv->port_up) {
+ err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
+ if (err)
+ return err;
+ }
}
return err;
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 0fba1532d326..075f4e21d33d 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -915,7 +915,7 @@ static int ks_net_open(struct net_device *netdev)
struct ks_net *ks = netdev_priv(netdev);
int err;
-#define KS_INT_FLAGS (IRQF_DISABLED|IRQF_TRIGGER_LOW)
+#define KS_INT_FLAGS IRQF_TRIGGER_LOW
/* lock the card, even if we may not actually do anything
* else at the moment.
*/
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index c20766c2f65b..79257f71c5d9 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -83,8 +83,7 @@ static int jazzsonic_open(struct net_device* dev)
{
int retval;
- retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED,
- "sonic", dev);
+ retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
if (retval) {
printk(KERN_ERR "%s: unable to get IRQ %d.\n",
dev->name, dev->irq);
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index c2e0256fe3df..4da172ac5599 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -95,8 +95,7 @@ static int xtsonic_open(struct net_device *dev)
{
int retval;
- retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED,
- "sonic", dev);
+ retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
if (retval) {
printk(KERN_ERR "%s: unable to get IRQ %d.\n",
dev->name, dev->irq);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index c498181a9aa8..5b65356e7568 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1219,7 +1219,7 @@ static int pasemi_mac_open(struct net_device *dev)
snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx",
dev->name);
- ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, IRQF_DISABLED,
+ ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, 0,
mac->tx_irq_name, mac->tx);
if (ret) {
dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
@@ -1230,7 +1230,7 @@ static int pasemi_mac_open(struct net_device *dev)
snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx",
dev->name);
- ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, IRQF_DISABLED,
+ ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, 0,
mac->rx_irq_name, mac->rx);
if (ret) {
dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 652cc13c5023..392b9bd12b4f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -1561,6 +1561,7 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
{
int err;
+ adapter->need_fw_reset = 0;
qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
qlcnic_83xx_enable_mbx_interrupt(adapter);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 6f87f2cde647..3397cee89777 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4231,6 +4231,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_23:
case RTL_GIGA_MAC_VER_24:
case RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_35:
RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_40:
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 8b7152565c5e..088921294448 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -7,7 +7,7 @@ config SFC
select I2C_ALGOBIT
select PTP_1588_CLOCK
---help---
- This driver supports 10-gigabit Ethernet cards based on
+ This driver supports 10/40-gigabit Ethernet cards based on
the Solarflare SFC4000, SFC9000-family and SFC9100-family
controllers.
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 5f42313b4965..9f18ae984f9e 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -94,7 +94,7 @@ static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
}
-static int efx_ef10_init_capabilities(struct efx_nic *efx)
+static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -107,16 +107,27 @@ static int efx_ef10_init_capabilities(struct efx_nic *efx)
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
+ if (outlen < sizeof(outbuf)) {
+ netif_err(efx, drv, efx->net_dev,
+ "unable to read datapath firmware capabilities\n");
+ return -EIO;
+ }
- if (outlen >= sizeof(outbuf)) {
- nic_data->datapath_caps =
- MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
- if (!(nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
- netif_err(efx, drv, efx->net_dev,
- "Capabilities don't indicate TSO support.\n");
- return -ENODEV;
- }
+ nic_data->datapath_caps =
+ MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
+
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
+ netif_err(efx, drv, efx->net_dev,
+ "current firmware does not support TSO\n");
+ return -ENODEV;
+ }
+
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
+ netif_err(efx, probe, efx->net_dev,
+ "current firmware does not support an RX prefix\n");
+ return -ENODEV;
}
return 0;
@@ -217,21 +228,13 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail3;
- rc = efx_ef10_init_capabilities(efx);
+ rc = efx_ef10_init_datapath_caps(efx);
if (rc < 0)
goto fail3;
efx->rx_packet_len_offset =
ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
- if (!(nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
- netif_err(efx, probe, efx->net_dev,
- "current firmware does not support an RX prefix\n");
- rc = -ENODEV;
- goto fail3;
- }
-
rc = efx_mcdi_port_get_number(efx);
if (rc < 0)
goto fail3;
@@ -260,8 +263,6 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail3;
- efx_ptp_probe(efx);
-
return 0;
fail3:
@@ -342,6 +343,13 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
+ if (nic_data->must_check_datapath_caps) {
+ rc = efx_ef10_init_datapath_caps(efx);
+ if (rc)
+ return rc;
+ nic_data->must_check_datapath_caps = false;
+ }
+
if (nic_data->must_realloc_vis) {
/* We cannot let the number of VIs change now */
rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
@@ -710,6 +718,14 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
nic_data->must_restore_filters = true;
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+ /* The datapath firmware might have been changed */
+ nic_data->must_check_datapath_caps = true;
+
+ /* MAC statistics have been cleared on the NIC; clear the local
+ * statistic that we update with efx_update_diff_stat().
+ */
+ nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;
+
return -EIO;
}
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 8d33da6697fb..7b6be61d549f 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -556,6 +556,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
+ case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
default: return -EINVAL;
}
} else {
@@ -841,6 +842,7 @@ static unsigned int efx_mcdi_event_link_speed[] = {
[MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
[MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
[MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
};
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 4b1e188f7a2f..fda29d39032f 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -400,6 +400,8 @@ enum {
* @rx_rss_context: Firmware handle for our RSS context
* @stats: Hardware statistics
* @workaround_35388: Flag: firmware supports workaround for bug 35388
+ * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
+ * after MC reboot
* @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
* %MC_CMD_GET_CAPABILITIES response)
*/
@@ -413,6 +415,7 @@ struct efx_ef10_nic_data {
u32 rx_rss_context;
u64 stats[EF10_STAT_COUNT];
bool workaround_35388;
+ bool must_check_datapath_caps;
u32 datapath_caps;
};
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 370e13dde115..5730fe2445a6 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -271,7 +271,7 @@ static inline void mcf_outsw(void *a, unsigned char *p, int l)
#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l)
#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l)
-#define SMC_IRQ_FLAGS (IRQF_DISABLED)
+#define SMC_IRQ_FLAGS 0
#else
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index ffa5c4ad1210..5f9e79f7f2df 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1356,8 +1356,7 @@ static int smsc9420_open(struct net_device *dev)
smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF);
smsc9420_pci_flush_write(pd);
- result = request_irq(irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED,
- DRV_NAME, pd);
+ result = request_irq(irq, smsc9420_isr, IRQF_SHARED, DRV_NAME, pd);
if (result) {
smsc_warn(IFUP, "Unable to use IRQ = %d", irq);
result = -ENODEV;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 9c805e0c0cae..f7f2ef49c0c1 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1726,7 +1726,7 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
goto fail_alloc_irq;
}
result = request_irq(card->irq, gelic_card_interrupt,
- IRQF_DISABLED, netdev->name, card);
+ 0, netdev->name, card);
if (result) {
dev_info(ctodev(card), "%s:request_irq failed (%d)\n",
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index f07c340990da..3f138ca88670 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -191,8 +191,8 @@ static inline int mcs_setup_transceiver_vishay(struct mcs_cb *mcs)
goto error;
ret = 0;
- error:
- return ret;
+error:
+ return ret;
}
/* Setup a communication between mcs7780 and agilent chip. */
@@ -501,8 +501,11 @@ static inline int mcs_setup_urbs(struct mcs_cb *mcs)
return 0;
mcs->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!mcs->rx_urb)
+ if (!mcs->rx_urb) {
+ usb_free_urb(mcs->tx_urb);
+ mcs->tx_urb = NULL;
return 0;
+ }
return 1;
}
@@ -643,9 +646,9 @@ static int mcs_speed_change(struct mcs_cb *mcs)
ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
mcs->speed = mcs->new_speed;
- error:
- mcs->new_speed = 0;
- return ret;
+error:
+ mcs->new_speed = 0;
+ return ret;
}
/* Ioctl calls not supported at this time. Can be an area of future work. */
@@ -738,17 +741,20 @@ static int mcs_net_open(struct net_device *netdev)
ret = mcs_receive_start(mcs);
if (ret)
- goto error3;
+ goto error4;
netif_start_queue(netdev);
return 0;
- error3:
- irlap_close(mcs->irlap);
- error2:
- kfree_skb(mcs->rx_buff.skb);
- error1:
- return ret;
+error4:
+ usb_free_urb(mcs->rx_urb);
+ usb_free_urb(mcs->tx_urb);
+error3:
+ irlap_close(mcs->irlap);
+error2:
+ kfree_skb(mcs->rx_buff.skb);
+error1:
+ return ret;
}
/* Receive callback function. */
@@ -946,11 +952,11 @@ static int mcs_probe(struct usb_interface *intf,
usb_set_intfdata(intf, mcs);
return 0;
- error2:
- free_netdev(ndev);
+error2:
+ free_netdev(ndev);
- error1:
- return ret;
+error1:
+ return ret;
}
/* The current device is removed, the USB layer tells us to shut down. */
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index fcbf680c3e62..a17d85a331f1 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -146,6 +146,7 @@ static int loopback_dev_init(struct net_device *dev)
static void loopback_dev_free(struct net_device *dev)
{
+ dev_net(dev)->loopback_dev = NULL;
free_percpu(dev->lstats);
free_netdev(dev);
}
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index dcb21347c670..adeee615dd19 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -684,15 +684,12 @@ restart:
case NETDEV_RELEASE:
case NETDEV_JOIN:
case NETDEV_UNREGISTER:
- /*
- * rtnl_lock already held
+ /* rtnl_lock already held
* we might sleep in __netpoll_cleanup()
*/
spin_unlock_irqrestore(&target_list_lock, flags);
- mutex_lock(&nt->mutex);
__netpoll_cleanup(&nt->np);
- mutex_unlock(&nt->mutex);
spin_lock_irqsave(&target_list_lock, flags);
dev_put(nt->np.dev);
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index db472ffb6e89..313a0377f68f 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -30,9 +30,9 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/* Cicada Extended Control Register 1 */
#define MII_CIS8201_EXT_CON1 0x17
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 6fa5ae00039f..01805319e1e0 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
nf_reset(skb);
skb->ip_summed = CHECKSUM_NONE;
- ip_select_ident(iph, &rt->dst, NULL);
+ ip_select_ident(skb, &rt->dst, NULL);
ip_send_check(iph);
ip_local_out(skb);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a639de8401f8..807815fc9968 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1641,11 +1641,11 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
INIT_LIST_HEAD(&tun->disabled);
err = tun_attach(tun, file, false);
if (err < 0)
- goto err_free_dev;
+ goto err_free_flow;
err = register_netdevice(tun->dev);
if (err < 0)
- goto err_free_dev;
+ goto err_detach;
if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
device_create_file(&tun->dev->dev, &dev_attr_owner) ||
@@ -1689,7 +1689,12 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
strcpy(ifr->ifr_name, tun->dev->name);
return 0;
- err_free_dev:
+err_detach:
+ tun_detach_all(dev);
+err_free_flow:
+ tun_flow_uninit(tun);
+ security_tun_dev_free_security(tun->security);
+err_free_dev:
free_netdev(dev);
return err;
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 03ad4dc293aa..2023f3ea891e 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -33,7 +33,7 @@
#include <linux/usb/usbnet.h>
-#if defined(CONFIG_USB_NET_RNDIS_HOST) || defined(CONFIG_USB_NET_RNDIS_HOST_MODULE)
+#if IS_ENABLED(CONFIG_USB_NET_RNDIS_HOST)
static int is_rndis(struct usb_interface_descriptor *desc)
{
@@ -69,8 +69,7 @@ static const u8 mbm_guid[16] = {
0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a,
};
-/*
- * probes control interface, claims data interface, collects the bulk
+/* probes control interface, claims data interface, collects the bulk
* endpoints, activates data interface (if needed), maybe sets MTU.
* all pure cdc, except for certain firmware workarounds, and knowing
* that rndis uses one different rule.
@@ -88,7 +87,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
struct usb_cdc_mdlm_desc *desc = NULL;
struct usb_cdc_mdlm_detail_desc *detail = NULL;
- if (sizeof dev->data < sizeof *info)
+ if (sizeof(dev->data) < sizeof(*info))
return -EDOM;
/* expect strict spec conformance for the descriptors, but
@@ -126,10 +125,10 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
is_activesync(&intf->cur_altsetting->desc) ||
is_wireless_rndis(&intf->cur_altsetting->desc));
- memset(info, 0, sizeof *info);
+ memset(info, 0, sizeof(*info));
info->control = intf;
while (len > 3) {
- if (buf [1] != USB_DT_CS_INTERFACE)
+ if (buf[1] != USB_DT_CS_INTERFACE)
goto next_desc;
/* use bDescriptorSubType to identify the CDC descriptors.
@@ -139,14 +138,14 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
* in favor of a complicated OID-based RPC scheme doing what
* CDC Ethernet achieves with a simple descriptor.
*/
- switch (buf [2]) {
+ switch (buf[2]) {
case USB_CDC_HEADER_TYPE:
if (info->header) {
dev_dbg(&intf->dev, "extra CDC header\n");
goto bad_desc;
}
info->header = (void *) buf;
- if (info->header->bLength != sizeof *info->header) {
+ if (info->header->bLength != sizeof(*info->header)) {
dev_dbg(&intf->dev, "CDC header len %u\n",
info->header->bLength);
goto bad_desc;
@@ -175,7 +174,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
goto bad_desc;
}
info->u = (void *) buf;
- if (info->u->bLength != sizeof *info->u) {
+ if (info->u->bLength != sizeof(*info->u)) {
dev_dbg(&intf->dev, "CDC union len %u\n",
info->u->bLength);
goto bad_desc;
@@ -233,7 +232,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
goto bad_desc;
}
info->ether = (void *) buf;
- if (info->ether->bLength != sizeof *info->ether) {
+ if (info->ether->bLength != sizeof(*info->ether)) {
dev_dbg(&intf->dev, "CDC ether len %u\n",
info->ether->bLength);
goto bad_desc;
@@ -274,8 +273,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
break;
}
next_desc:
- len -= buf [0]; /* bLength */
- buf += buf [0];
+ len -= buf[0]; /* bLength */
+ buf += buf[0];
}
/* Microsoft ActiveSync based and some regular RNDIS devices lack the
@@ -379,9 +378,7 @@ void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf)
}
EXPORT_SYMBOL_GPL(usbnet_cdc_unbind);
-/*-------------------------------------------------------------------------
- *
- * Communications Device Class, Ethernet Control model
+/* Communications Device Class, Ethernet Control model
*
* Takes two interfaces. The DATA interface is inactive till an altsetting
* is selected. Configuration data includes class descriptors. There's
@@ -389,8 +386,7 @@ EXPORT_SYMBOL_GPL(usbnet_cdc_unbind);
*
* This should interop with whatever the 2.4 "CDCEther.c" driver
* (by Brad Hards) talked with, with more functionality.
- *
- *-------------------------------------------------------------------------*/
+ */
static void dumpspeed(struct usbnet *dev, __le32 *speeds)
{
@@ -404,7 +400,7 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
{
struct usb_cdc_notification *event;
- if (urb->actual_length < sizeof *event)
+ if (urb->actual_length < sizeof(*event))
return;
/* SPEED_CHANGE can get split into two 8-byte packets */
@@ -423,7 +419,7 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */
netif_dbg(dev, timer, dev->net, "CDC: speed change (len %d)\n",
urb->actual_length);
- if (urb->actual_length != (sizeof *event + 8))
+ if (urb->actual_length != (sizeof(*event) + 8))
set_bit(EVENT_STS_SPLIT, &dev->flags);
else
dumpspeed(dev, (__le32 *) &event[1]);
@@ -469,7 +465,6 @@ EXPORT_SYMBOL_GPL(usbnet_cdc_bind);
static const struct driver_info cdc_info = {
.description = "CDC Ethernet Device",
.flags = FLAG_ETHER | FLAG_POINTTOPOINT,
- // .check_connect = cdc_check_connect,
.bind = usbnet_cdc_bind,
.unbind = usbnet_cdc_unbind,
.status = usbnet_cdc_status,
@@ -493,9 +488,8 @@ static const struct driver_info wwan_info = {
#define DELL_VENDOR_ID 0x413C
#define REALTEK_VENDOR_ID 0x0bda
-static const struct usb_device_id products [] = {
-/*
- * BLACKLIST !!
+static const struct usb_device_id products[] = {
+/* BLACKLIST !!
*
* First blacklist any products that are egregiously nonconformant
* with the CDC Ethernet specs. Minor braindamage we cope with; when
@@ -542,7 +536,7 @@ static const struct usb_device_id products [] = {
.driver_info = 0,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
- | USB_DEVICE_ID_MATCH_DEVICE,
+ | USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x8007, /* C-700 */
ZAURUS_MASTER_INTERFACE,
@@ -659,8 +653,7 @@ static const struct usb_device_id products [] = {
.driver_info = 0,
},
-/*
- * WHITELIST!!!
+/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
* We match the main interface, ignoring the optional device
@@ -672,60 +665,40 @@ static const struct usb_device_id products [] = {
*/
{
/* ZTE (Vodafone) K3805-Z */
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR
- | USB_DEVICE_ID_MATCH_PRODUCT
- | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = ZTE_VENDOR_ID,
- .idProduct = 0x1003,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1003, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
/* ZTE (Vodafone) K3806-Z */
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR
- | USB_DEVICE_ID_MATCH_PRODUCT
- | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = ZTE_VENDOR_ID,
- .idProduct = 0x1015,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1015, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
/* ZTE (Vodafone) K4510-Z */
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR
- | USB_DEVICE_ID_MATCH_PRODUCT
- | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = ZTE_VENDOR_ID,
- .idProduct = 0x1173,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1173, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
/* ZTE (Vodafone) K3770-Z */
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR
- | USB_DEVICE_ID_MATCH_PRODUCT
- | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = ZTE_VENDOR_ID,
- .idProduct = 0x1177,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1177, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
/* ZTE (Vodafone) K3772-Z */
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR
- | USB_DEVICE_ID_MATCH_PRODUCT
- | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = ZTE_VENDOR_ID,
- .idProduct = 0x1181,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1181, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
+ /* Telit modules */
+ USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = (kernel_ulong_t) &wwan_info,
+}, {
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
@@ -736,15 +709,11 @@ static const struct usb_device_id products [] = {
}, {
/* Various Huawei modems with a network port like the UMG1831 */
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR
- | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = HUAWEI_VENDOR_ID,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bInterfaceProtocol = 255,
+ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, 255),
.driver_info = (unsigned long)&wwan_info,
},
- { }, // END
+ { }, /* END */
};
MODULE_DEVICE_TABLE(usb, products);
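The entries above now use helper macros instead of open-coded match_flags. As a sketch (paraphrasing, not quoting, the definition in <linux/usb.h>), USB_DEVICE_AND_INTERFACE_INFO() expands to roughly the initializer it replaces:

#define USB_DEVICE_AND_INTERFACE_INFO_SKETCH(vend, prod, cl, sc, pr)	\
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE			\
		       | USB_DEVICE_ID_MATCH_INT_INFO,			\
	.idVendor = (vend),						\
	.idProduct = (prod),						\
	.bInterfaceClass = (cl),					\
	.bInterfaceSubClass = (sc),					\
	.bInterfaceProtocol = (pr)

USB_VENDOR_AND_INTERFACE_INFO(), used for the Telit and Huawei entries, is the same idea minus the idProduct match.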
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index bf64b4191dcc..d1292fe746bc 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -564,7 +564,7 @@ static void vxlan_notify_add_rx_port(struct sock *sk)
struct net_device *dev;
struct net *net = sock_net(sk);
sa_family_t sa_family = sk->sk_family;
- u16 port = htons(inet_sk(sk)->inet_sport);
+ __be16 port = inet_sk(sk)->inet_sport;
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
@@ -581,7 +581,7 @@ static void vxlan_notify_del_rx_port(struct sock *sk)
struct net_device *dev;
struct net *net = sock_net(sk);
sa_family_t sa_family = sk->sk_family;
- u16 port = htons(inet_sk(sk)->inet_sport);
+ __be16 port = inet_sk(sk)->inet_sport;
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
@@ -2021,7 +2021,8 @@ static struct device_type vxlan_type = {
};
/* Calls the ndo_add_vxlan_port of the caller in order to
- * supply the listening VXLAN udp ports.
+ * supply the listening VXLAN UDP ports. Callers are expected
+ * to implement ndo_add_vxlan_port.
*/
void vxlan_get_rx_port(struct net_device *dev)
{
@@ -2029,16 +2030,13 @@ void vxlan_get_rx_port(struct net_device *dev)
struct net *net = dev_net(dev);
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
sa_family_t sa_family;
- u16 port;
- int i;
-
- if (!dev || !dev->netdev_ops || !dev->netdev_ops->ndo_add_vxlan_port)
- return;
+ __be16 port;
+ unsigned int i;
spin_lock(&vn->sock_lock);
for (i = 0; i < PORT_HASH_SIZE; ++i) {
- hlist_for_each_entry_rcu(vs, vs_head(net, i), hlist) {
- port = htons(inet_sk(vs->sock->sk)->inet_sport);
+ hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
+ port = inet_sk(vs->sock->sk)->inet_sport;
sa_family = vs->sock->sk->sk_family;
dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
port);
@@ -2492,15 +2490,19 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);
- /* create an fdb entry for default destination */
- err = vxlan_fdb_create(vxlan, all_zeros_mac,
- &vxlan->default_dst.remote_ip,
- NUD_REACHABLE|NUD_PERMANENT,
- NLM_F_EXCL|NLM_F_CREATE,
- vxlan->dst_port, vxlan->default_dst.remote_vni,
- vxlan->default_dst.remote_ifindex, NTF_SELF);
- if (err)
- return err;
+ /* create an fdb entry for a valid default destination */
+ if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+ err = vxlan_fdb_create(vxlan, all_zeros_mac,
+ &vxlan->default_dst.remote_ip,
+ NUD_REACHABLE|NUD_PERMANENT,
+ NLM_F_EXCL|NLM_F_CREATE,
+ vxlan->dst_port,
+ vxlan->default_dst.remote_vni,
+ vxlan->default_dst.remote_ifindex,
+ NTF_SELF);
+ if (err)
+ return err;
+ }
err = register_netdevice(dev);
if (err) {
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index fc8a0fa6d3b2..b00a7e92225f 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -28,7 +28,7 @@ config BRCMFMAC
config BRCMFMAC_SDIO
bool "SDIO bus interface support for FullMAC driver"
- depends on MMC
+ depends on (MMC = y || MMC = BRCMFMAC)
depends on BRCMFMAC
select FW_LOADER
default y
@@ -39,7 +39,7 @@ config BRCMFMAC_SDIO
config BRCMFMAC_USB
bool "USB bus interface support for FullMAC driver"
- depends on USB
+ depends on (USB = y || USB = BRCMFMAC)
depends on BRCMFMAC
select FW_LOADER
---help---
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index d06376014bcd..f5e6b489ed32 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -40,7 +40,9 @@ struct hwbus_priv {
struct cw1200_common *core;
const struct cw1200_platform_data_spi *pdata;
spinlock_t lock; /* Serialize all bus operations */
+ wait_queue_head_t wq;
int claimed;
+ int irq_disabled;
};
#define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2)
@@ -197,8 +199,11 @@ static void cw1200_spi_lock(struct hwbus_priv *self)
{
unsigned long flags;
+ DECLARE_WAITQUEUE(wait, current);
+
might_sleep();
+ add_wait_queue(&self->wq, &wait);
spin_lock_irqsave(&self->lock, flags);
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -211,6 +216,7 @@ static void cw1200_spi_lock(struct hwbus_priv *self)
set_current_state(TASK_RUNNING);
self->claimed = 1;
spin_unlock_irqrestore(&self->lock, flags);
+ remove_wait_queue(&self->wq, &wait);
return;
}
@@ -222,6 +228,8 @@ static void cw1200_spi_unlock(struct hwbus_priv *self)
spin_lock_irqsave(&self->lock, flags);
self->claimed = 0;
spin_unlock_irqrestore(&self->lock, flags);
+ wake_up(&self->wq);
+
return;
}
@@ -230,6 +238,8 @@ static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id)
struct hwbus_priv *self = dev_id;
if (self->core) {
+ disable_irq_nosync(self->func->irq);
+ self->irq_disabled = 1;
cw1200_irq_handler(self->core);
return IRQ_HANDLED;
} else {
@@ -263,13 +273,22 @@ exit:
static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self)
{
- int ret = 0;
-
pr_debug("SW IRQ unsubscribe\n");
disable_irq_wake(self->func->irq);
free_irq(self->func->irq, self);
- return ret;
+ return 0;
+}
+
+static int cw1200_spi_irq_enable(struct hwbus_priv *self, int enable)
+{
+ /* Disables are handled by the interrupt handler */
+ if (enable && self->irq_disabled) {
+ enable_irq(self->func->irq);
+ self->irq_disabled = 0;
+ }
+
+ return 0;
}
static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
@@ -349,6 +368,7 @@ static struct hwbus_ops cw1200_spi_hwbus_ops = {
.unlock = cw1200_spi_unlock,
.align_size = cw1200_spi_align_size,
.power_mgmt = cw1200_spi_pm,
+ .irq_enable = cw1200_spi_irq_enable,
};
/* Probe Function to be called by SPI stack when device is discovered */
@@ -400,6 +420,8 @@ static int cw1200_spi_probe(struct spi_device *func)
spi_set_drvdata(func, self);
+ init_waitqueue_head(&self->wq);
+
status = cw1200_spi_irq_subscribe(self);
status = cw1200_core_probe(&cw1200_spi_hwbus_ops,
diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c
index acdff0f7f952..0b2061bbc68b 100644
--- a/drivers/net/wireless/cw1200/fwio.c
+++ b/drivers/net/wireless/cw1200/fwio.c
@@ -485,7 +485,7 @@ int cw1200_load_firmware(struct cw1200_common *priv)
/* Enable interrupt signalling */
priv->hwbus_ops->lock(priv->hwbus_priv);
- ret = __cw1200_irq_enable(priv, 1);
+ ret = __cw1200_irq_enable(priv, 2);
priv->hwbus_ops->unlock(priv->hwbus_priv);
if (ret < 0)
goto unsubscribe;
diff --git a/drivers/net/wireless/cw1200/hwbus.h b/drivers/net/wireless/cw1200/hwbus.h
index 8b2fc831c3de..51dfb3a90735 100644
--- a/drivers/net/wireless/cw1200/hwbus.h
+++ b/drivers/net/wireless/cw1200/hwbus.h
@@ -28,6 +28,7 @@ struct hwbus_ops {
void (*unlock)(struct hwbus_priv *self);
size_t (*align_size)(struct hwbus_priv *self, size_t size);
int (*power_mgmt)(struct hwbus_priv *self, bool suspend);
+ int (*irq_enable)(struct hwbus_priv *self, int enable);
};
#endif /* CW1200_HWBUS_H */
diff --git a/drivers/net/wireless/cw1200/hwio.c b/drivers/net/wireless/cw1200/hwio.c
index ff230b7aeedd..41bd7615ccaa 100644
--- a/drivers/net/wireless/cw1200/hwio.c
+++ b/drivers/net/wireless/cw1200/hwio.c
@@ -273,6 +273,21 @@ int __cw1200_irq_enable(struct cw1200_common *priv, int enable)
u16 val16;
int ret;
+ /* We need to do this hack because the SPI layer can sleep on I/O
+  * and the general path involves I/O to the device in interrupt
+  * context.
+  *
+  * However, the initial enable call needs to go to the hardware.
+  *
+  * We don't worry about shutdown because we do a full reset, which
+  * clears the interrupt-enable bits.
+  */
+ if (priv->hwbus_ops->irq_enable) {
+ ret = priv->hwbus_ops->irq_enable(priv->hwbus_priv, enable);
+ if (ret || enable < 2)
+ return ret;
+ }
+
if (HIF_8601_SILICON == priv->hw_type) {
ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
if (ret < 0) {
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 95e6e61c3de0..88ce656f96cd 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -6659,19 +6659,20 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_init_registers(rt2x00dev)))
return -EIO;
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev)))
+ return -EIO;
+
/*
* Send signal to firmware during boot time.
*/
rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
- if (rt2x00_is_usb(rt2x00dev)) {
+ if (rt2x00_is_usb(rt2x00dev))
rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0);
- rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
- }
+ rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
msleep(1);
- if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) ||
- rt2800_wait_bbp_ready(rt2x00dev)))
+ if (unlikely(rt2800_wait_bbp_ready(rt2x00dev)))
return -EIO;
rt2800_init_bbp(rt2x00dev);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 841fb9dfc9da..9a6edb0c014e 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -438,17 +438,16 @@ static int rtl8187_init_urbs(struct ieee80211_hw *dev)
skb_queue_tail(&priv->rx_queue, skb);
usb_anchor_urb(entry, &priv->anchored);
ret = usb_submit_urb(entry, GFP_KERNEL);
+ usb_put_urb(entry);
if (ret) {
skb_unlink(skb, &priv->rx_queue);
usb_unanchor_urb(entry);
goto err;
}
- usb_free_urb(entry);
}
return ret;
err:
- usb_free_urb(entry);
kfree_skb(skb);
usb_kill_anchored_urbs(&priv->anchored);
return ret;
@@ -956,8 +955,12 @@ static int rtl8187_start(struct ieee80211_hw *dev)
(RETRY_COUNT << 8 /* short retry limit */) |
(RETRY_COUNT << 0 /* long retry limit */) |
(7 << 21 /* MAX TX DMA */));
- rtl8187_init_urbs(dev);
- rtl8187b_init_status_urb(dev);
+ ret = rtl8187_init_urbs(dev);
+ if (ret)
+ goto rtl8187_start_exit;
+ ret = rtl8187b_init_status_urb(dev);
+ if (ret)
+ usb_kill_anchored_urbs(&priv->anchored);
goto rtl8187_start_exit;
}
@@ -966,7 +969,9 @@ static int rtl8187_start(struct ieee80211_hw *dev)
rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0);
rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0);
- rtl8187_init_urbs(dev);
+ ret = rtl8187_init_urbs(dev);
+ if (ret)
+ goto rtl8187_start_exit;
reg = RTL818X_RX_CONF_ONLYERLPKT |
RTL818X_RX_CONF_RX_AUTORESETPHY |
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index a1977430ddfb..5715318d6bab 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -184,6 +184,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
unsigned long rx_ring_ref, unsigned int tx_evtchn,
unsigned int rx_evtchn);
void xenvif_disconnect(struct xenvif *vif);
+void xenvif_free(struct xenvif *vif);
int xenvif_xenbus_init(void);
void xenvif_xenbus_fini(void);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 625c6f49cfba..01bb854c7f62 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -353,6 +353,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
}
netdev_dbg(dev, "Successfully created xenvif\n");
+
+ __module_get(THIS_MODULE);
+
return vif;
}
@@ -366,8 +369,6 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
if (vif->tx_irq)
return 0;
- __module_get(THIS_MODULE);
-
err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
if (err < 0)
goto err;
@@ -406,7 +407,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
init_waitqueue_head(&vif->wq);
vif->task = kthread_create(xenvif_kthread,
- (void *)vif, vif->dev->name);
+ (void *)vif, "%s", vif->dev->name);
if (IS_ERR(vif->task)) {
pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
err = PTR_ERR(vif->task);
@@ -452,12 +453,6 @@ void xenvif_carrier_off(struct xenvif *vif)
void xenvif_disconnect(struct xenvif *vif)
{
- /* Disconnect funtion might get called by generic framework
- * even before vif connects, so we need to check if we really
- * need to do a module_put.
- */
- int need_module_put = 0;
-
if (netif_carrier_ok(vif->dev))
xenvif_carrier_off(vif);
@@ -468,23 +463,22 @@ void xenvif_disconnect(struct xenvif *vif)
unbind_from_irqhandler(vif->tx_irq, vif);
unbind_from_irqhandler(vif->rx_irq, vif);
}
- /* vif->irq is valid, we had a module_get in
- * xenvif_connect.
- */
- need_module_put = 1;
+ vif->tx_irq = 0;
}
if (vif->task)
kthread_stop(vif->task);
+ xenvif_unmap_frontend_rings(vif);
+}
+
+void xenvif_free(struct xenvif *vif)
+{
netif_napi_del(&vif->napi);
unregister_netdev(vif->dev);
- xenvif_unmap_frontend_rings(vif);
-
free_netdev(vif->dev);
- if (need_module_put)
- module_put(THIS_MODULE);
+ module_put(THIS_MODULE);
}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 956130c70036..f3e591c611de 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -212,6 +212,49 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
return false;
}
+struct xenvif_count_slot_state {
+ unsigned long copy_off;
+ bool head;
+};
+
+unsigned int xenvif_count_frag_slots(struct xenvif *vif,
+ unsigned long offset, unsigned long size,
+ struct xenvif_count_slot_state *state)
+{
+ unsigned count = 0;
+
+ offset &= ~PAGE_MASK;
+
+ while (size > 0) {
+ unsigned long bytes;
+
+ bytes = PAGE_SIZE - offset;
+
+ if (bytes > size)
+ bytes = size;
+
+ if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
+ count++;
+ state->copy_off = 0;
+ }
+
+ if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
+ bytes = MAX_BUFFER_OFFSET - state->copy_off;
+
+ state->copy_off += bytes;
+
+ offset += bytes;
+ size -= bytes;
+
+ if (offset == PAGE_SIZE)
+ offset = 0;
+
+ state->head = false;
+ }
+
+ return count;
+}
+
/*
* Figure out how many ring slots we're going to need to send @skb to
* the guest. This function is essentially a dry run of
@@ -219,48 +262,39 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
*/
unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
+ struct xenvif_count_slot_state state;
unsigned int count;
- int i, copy_off;
+ unsigned char *data;
+ unsigned i;
- count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
+ state.head = true;
+ state.copy_off = 0;
- copy_off = skb_headlen(skb) % PAGE_SIZE;
+ /* Slot for the first (partial) page of data. */
+ count = 1;
+ /* Need a slot for the GSO prefix for GSO extra data? */
if (skb_shinfo(skb)->gso_size)
count++;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
- unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
- unsigned long bytes;
-
- offset &= ~PAGE_MASK;
-
- while (size > 0) {
- BUG_ON(offset >= PAGE_SIZE);
- BUG_ON(copy_off > MAX_BUFFER_OFFSET);
-
- bytes = PAGE_SIZE - offset;
-
- if (bytes > size)
- bytes = size;
+ data = skb->data;
+ while (data < skb_tail_pointer(skb)) {
+ unsigned long offset = offset_in_page(data);
+ unsigned long size = PAGE_SIZE - offset;
- if (start_new_rx_buffer(copy_off, bytes, 0)) {
- count++;
- copy_off = 0;
- }
+ if (data + size > skb_tail_pointer(skb))
+ size = skb_tail_pointer(skb) - data;
- if (copy_off + bytes > MAX_BUFFER_OFFSET)
- bytes = MAX_BUFFER_OFFSET - copy_off;
+ count += xenvif_count_frag_slots(vif, offset, size, &state);
- copy_off += bytes;
+ data += size;
+ }
- offset += bytes;
- size -= bytes;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
- if (offset == PAGE_SIZE)
- offset = 0;
- }
+ count += xenvif_count_frag_slots(vif, offset, size, &state);
}
return count;
}
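[Editorial note: the rewritten slot counting above walks the linear skb area and each fragment in PAGE_SIZE-sized pieces. The purely geometric part of that arithmetic -- how many page-sized chunks a byte range spans once its offset within the first page is known -- is easy to check in isolation. Below is a host-side sketch with hypothetical names and PAGE_SIZE fixed at 4096; the real driver can need fewer ring slots than this because it also coalesces data into partially filled buffers (see MAX_BUFFER_OFFSET and start_new_rx_buffer() above).]

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL

/* Number of DEMO_PAGE_SIZE chunks covered by a range that starts at
 * 'offset' bytes into its first page and is 'size' bytes long. */
static unsigned long pages_spanned(unsigned long offset, unsigned long size)
{
	offset &= DEMO_PAGE_SIZE - 1;		/* offset within the first page */
	if (!size)
		return 0;
	return (offset + size + DEMO_PAGE_SIZE - 1) / DEMO_PAGE_SIZE;
}

int main(void)
{
	printf("%lu\n", pages_spanned(0, 4096));	/* 1: exactly one page */
	printf("%lu\n", pages_spanned(100, 4096));	/* 2: crosses a page boundary */
	printf("%lu\n", pages_spanned(4000, 200));	/* 2 */
	return 0;
}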
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 1fe48fe364ed..a53782ef1540 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -42,7 +42,7 @@ static int netback_remove(struct xenbus_device *dev)
if (be->vif) {
kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
- xenvif_disconnect(be->vif);
+ xenvif_free(be->vif);
be->vif = NULL;
}
kfree(be);
@@ -213,9 +213,18 @@ static void disconnect_backend(struct xenbus_device *dev)
{
struct backend_info *be = dev_get_drvdata(&dev->dev);
+ if (be->vif)
+ xenvif_disconnect(be->vif);
+}
+
+static void destroy_backend(struct xenbus_device *dev)
+{
+ struct backend_info *be = dev_get_drvdata(&dev->dev);
+
if (be->vif) {
+ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
- xenvif_disconnect(be->vif);
+ xenvif_free(be->vif);
be->vif = NULL;
}
}
@@ -246,14 +255,11 @@ static void frontend_changed(struct xenbus_device *dev,
case XenbusStateConnected:
if (dev->state == XenbusStateConnected)
break;
- backend_create_xenvif(be);
if (be->vif)
connect(be);
break;
case XenbusStateClosing:
- if (be->vif)
- kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
disconnect_backend(dev);
xenbus_switch_state(dev, XenbusStateClosing);
break;
@@ -262,6 +268,7 @@ static void frontend_changed(struct xenbus_device *dev,
xenbus_switch_state(dev, XenbusStateClosed);
if (xenbus_dev_is_online(dev))
break;
+ destroy_backend(dev);
/* fall through if not online */
case XenbusStateUnknown:
device_unregister(&dev->dev);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 7c29ee4ed0ae..b0299e6d9a3f 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -47,6 +47,9 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev)
return;
+ if (pci_dev->pme_poll)
+ pci_dev->pme_poll = false;
+
if (pci_dev->current_state == PCI_D3cold) {
pci_wakeup_event(pci_dev);
pm_runtime_resume(&pci_dev->dev);
@@ -57,9 +60,6 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
if (pci_dev->pme_support)
pci_check_pme_status(pci_dev);
- if (pci_dev->pme_poll)
- pci_dev->pme_poll = false;
-
pci_wakeup_event(pci_dev);
pm_runtime_resume(&pci_dev->dev);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e8ccf6c0f08a..bdd64b1b4817 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1155,8 +1155,14 @@ static void pci_enable_bridge(struct pci_dev *dev)
pci_enable_bridge(dev->bus->self);
- if (pci_is_enabled(dev))
+ if (pci_is_enabled(dev)) {
+ if (!dev->is_busmaster) {
+ dev_warn(&dev->dev, "driver skip pci_set_master, fix it!\n");
+ pci_set_master(dev);
+ }
return;
+ }
+
retval = pci_enable_device(dev);
if (retval)
dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 08b22a901c25..d7ca9305ff45 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -105,7 +105,7 @@
#define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ)
#define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe))
#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe))
-#define BNX2FC_5771X_DB_PAGE_SIZE 128
+#define BNX2X_DB_SHIFT 3
#define BNX2FC_TASK_SIZE 128
#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index c0d035a8f8f9..46a37657307f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1421,8 +1421,7 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
reg_base = pci_resource_start(hba->pcidev,
BNX2X_DOORBELL_PCI_BAR);
- reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
- (context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
+ reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
if (!tgt->ctx_base)
return -ENOMEM;
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 6940f0930a84..c73bbcb63c02 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -64,7 +64,7 @@
#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8
#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4
-#define BNX2I_5771X_DBELL_PAGE_SIZE 128
+#define BNX2X_DB_SHIFT 3
/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */
#define MAX_BD_LENGTH 65535
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index af3e675d4d48..5be718c241c4 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2738,8 +2738,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
reg_base = pci_resource_start(ep->hba->pcidev,
BNX2X_DOORBELL_PCI_BAR);
- reg_off = BNX2I_5771X_DBELL_PAGE_SIZE * (cid_num & 0x1FFFF) +
- DPM_TRIGER_TYPE;
+ reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
goto arm_cq;
}
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index a84aab47a113..f73287eab373 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -96,6 +96,15 @@ config COMEDI_SKEL
To compile this driver as a module, choose M here: the module will be
called skel.
+config COMEDI_SSV_DNP
+ tristate "SSV Embedded Systems DIL/Net-PC support"
+ depends on X86_32 || COMPILE_TEST
+ ---help---
+ Enable support for SSV Embedded Systems DIL/Net-PC
+
+ To compile this driver as a module, choose M here: the module will be
+ called ssv_dnp.
+
endif # COMEDI_MISC_DRIVERS
menuconfig COMEDI_ISA_DRIVERS
@@ -386,6 +395,14 @@ config COMEDI_DMM32AT
To compile this driver as a module, choose M here: the module will be
called dmm32at.
+config COMEDI_UNIOXX5
+ tristate "Fastwel UNIOxx-5 analog and digital io board support"
+ ---help---
+ Enable support for Fastwel UNIOxx-5 (analog and digital i/o) boards
+
+ To compile this driver as a module, choose M here: the module will be
+ called unioxx5.
+
config COMEDI_FL512
tristate "FL512 ISA card support"
---help---
@@ -855,14 +872,6 @@ config COMEDI_DYNA_PCI10XX
To compile this driver as a module, choose M here: the module will be
called dyna_pci10xx.
-config COMEDI_UNIOXX5
- tristate "Fastwel UNIOxx-5 analog and digital io board support"
- ---help---
- Enable support for Fastwel UNIOxx-5 (analog and digital i/o) boards
-
- To compile this driver as a module, choose M here: the module will be
- called unioxx5.
-
config COMEDI_GSC_HPDI
tristate "General Standards PCI-HPDI32 / PMC-HPDI32 support"
select COMEDI_FC
@@ -1085,14 +1094,6 @@ config COMEDI_S626
To compile this driver as a module, choose M here: the module will be
called s626.
-config COMEDI_SSV_DNP
- tristate "SSV Embedded Systems DIL/Net-PC support"
- ---help---
- Enable support for SSV Embedded Systems DIL/Net-PC
-
- To compile this driver as a module, choose M here: the module will be
- called ssv_dnp.
-
config COMEDI_MITE
depends on HAS_DMA
tristate
diff --git a/drivers/staging/dgap/dgap_driver.c b/drivers/staging/dgap/dgap_driver.c
index 724a685753dd..40ef785a0428 100644
--- a/drivers/staging/dgap/dgap_driver.c
+++ b/drivers/staging/dgap/dgap_driver.c
@@ -474,7 +474,7 @@ static void dgap_cleanup_board(struct board_t *brd)
DGAP_LOCK(dgap_global_lock, flags);
brd->msgbuf = NULL;
- printk(brd->msgbuf_head);
+ printk("%s", brd->msgbuf_head);
kfree(brd->msgbuf_head);
brd->msgbuf_head = NULL;
DGAP_UNLOCK(dgap_global_lock, flags);
@@ -628,7 +628,7 @@ static int dgap_found_board(struct pci_dev *pdev, int id)
DPR_INIT(("dgap_scan(%d) - printing out the msgbuf\n", i));
DGAP_LOCK(dgap_global_lock, flags);
brd->msgbuf = NULL;
- printk(brd->msgbuf_head);
+ printk("%s", brd->msgbuf_head);
kfree(brd->msgbuf_head);
brd->msgbuf_head = NULL;
DGAP_UNLOCK(dgap_global_lock, flags);
@@ -955,25 +955,28 @@ static void dgap_mbuf(struct board_t *brd, const char *fmt, ...) {
char buf[1024];
int i;
unsigned long flags;
+ size_t length;
DGAP_LOCK(dgap_global_lock, flags);
/* Format buf using fmt and arguments contained in ap. */
va_start(ap, fmt);
- i = vsprintf(buf, fmt, ap);
+ i = vsnprintf(buf, sizeof(buf), fmt, ap);
va_end(ap);
DPR((buf));
if (!brd || !brd->msgbuf) {
- printk(buf);
+ printk("%s", buf);
DGAP_UNLOCK(dgap_global_lock, flags);
return;
}
- memcpy(brd->msgbuf, buf, strlen(buf));
- brd->msgbuf += strlen(buf);
- *brd->msgbuf = 0;
+ length = strlen(buf) + 1;
+ if (brd->msgbuf - brd->msgbuf_head < length)
+ length = brd->msgbuf - brd->msgbuf_head;
+ memcpy(brd->msgbuf, buf, length);
+ brd->msgbuf += length;
DGAP_UNLOCK(dgap_global_lock, flags);
}
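[Editorial note: the dgap hunks above (and the dgnc hunks just below) change printk(buf) to printk("%s", buf) and vsprintf() to vsnprintf(); the later kthread_run(..., "%s", name) changes in the lustre code address the same concern. The underlying problems -- treating data as a format string and formatting into a fixed buffer with no bound -- are generic C issues. A small userspace sketch of the safe pattern follows; log_msg() is a hypothetical helper, standard C only.]

#include <stdarg.h>
#include <stdio.h>

/* Format a message into a fixed buffer without overflowing it, then
 * print it.  Passing 'buf' itself as the format string would let any
 * '%' in the message be interpreted as a conversion specifier. */
static void log_msg(const char *fmt, ...)
{
	char buf[64];
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(buf, sizeof(buf), fmt, ap);	/* bounded, always NUL-terminated */
	va_end(ap);

	printf("%s\n", buf);	/* never printf(buf) */
}

int main(void)
{
	/* A message that happens to contain '%' is printed literally. */
	log_msg("progress: %d%% done", 42);
	return 0;
}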
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index f8c1e22585d6..71d2b83cc3a1 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -454,7 +454,7 @@ static void dgnc_cleanup_board(struct board_t *brd)
DGNC_LOCK(dgnc_global_lock, flags);
brd->msgbuf = NULL;
- printk(brd->msgbuf_head);
+ printk("%s", brd->msgbuf_head);
kfree(brd->msgbuf_head);
brd->msgbuf_head = NULL;
DGNC_UNLOCK(dgnc_global_lock, flags);
@@ -710,7 +710,7 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
DPR_INIT(("dgnc_scan(%d) - printing out the msgbuf\n", i));
DGNC_LOCK(dgnc_global_lock, flags);
brd->msgbuf = NULL;
- printk(brd->msgbuf_head);
+ printk("%s", brd->msgbuf_head);
kfree(brd->msgbuf_head);
brd->msgbuf_head = NULL;
DGNC_UNLOCK(dgnc_global_lock, flags);
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index db4d6dc03243..b36feb080cba 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -37,7 +37,7 @@ config IIO_SIMPLE_DUMMY_EVENTS
config IIO_SIMPLE_DUMMY_BUFFER
boolean "Buffered capture support"
- depends on IIO_KFIFO_BUF
+ select IIO_KFIFO_BUF
help
Add buffered data capture to the simple dummy driver.
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index 351936c3efd6..e4998e4d4434 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -563,6 +563,7 @@ static int isl29018_probe(struct i2c_client *client,
mutex_init(&chip->lock);
chip->lux_scale = 1;
+ chip->lux_uscale = 0;
chip->range = 1000;
chip->adc_bit = 16;
chip->suspended = false;
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index d2748c329eae..c3f3f539e787 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -229,7 +229,7 @@ static int hmc5843_read_measurement(struct iio_dev *indio_dev,
if (result < 0)
return -EINVAL;
- *val = result;
+ *val = sign_extend32(result, 15);
return IIO_VAL_INT;
}
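[Editorial note: the hmc5843 change above runs the raw 16-bit register value through sign_extend32(result, 15) so that negative field readings come out negative instead of as large positive numbers. The same conversion can be written in plain, fully defined C; a minimal userspace sketch:]

#include <stdint.h>
#include <stdio.h>

/* Interpret the low 16 bits of 'raw' as a two's-complement value,
 * i.e. what sign_extend32(raw, 15) produces in the kernel.  The XOR
 * trick avoids any implementation-defined narrowing conversion. */
static int32_t sign_extend16(uint32_t raw)
{
	uint32_t v = raw & 0xffffu;

	return (int32_t)(v ^ 0x8000u) - 0x8000;
}

int main(void)
{
	printf("%d\n", sign_extend16(0x0010));	/* 16 */
	printf("%d\n", sign_extend16(0xfff0));	/* -16 */
	return 0;
}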
diff --git a/drivers/staging/iio/meter/ade7854-spi.c b/drivers/staging/iio/meter/ade7854-spi.c
index a802cf2491d6..4c6d2041260b 100644
--- a/drivers/staging/iio/meter/ade7854-spi.c
+++ b/drivers/staging/iio/meter/ade7854-spi.c
@@ -299,7 +299,7 @@ static int ade7854_spi_probe(struct spi_device *spi)
if (ret)
iio_device_free(indio_dev);
- return 0;
+ return ret;
}
static int ade7854_spi_remove(struct spi_device *spi)
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 47c5888461ff..a2e52a0c53c9 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -41,7 +41,6 @@ struct imx_drm_device {
struct list_head encoder_list;
struct list_head connector_list;
struct mutex mutex;
- int references;
int pipes;
struct drm_fbdev_cma *fbhelper;
};
@@ -241,8 +240,6 @@ struct drm_device *imx_drm_device_get(void)
}
}
- imxdrm->references++;
-
return imxdrm->drm;
unwind_crtc:
@@ -280,8 +277,6 @@ void imx_drm_device_put(void)
list_for_each_entry(enc, &imxdrm->encoder_list, list)
module_put(enc->owner);
- imxdrm->references--;
-
mutex_unlock(&imxdrm->mutex);
}
EXPORT_SYMBOL_GPL(imx_drm_device_put);
@@ -485,7 +480,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
mutex_lock(&imxdrm->mutex);
- if (imxdrm->references) {
+ if (imxdrm->drm->open_count) {
ret = -EBUSY;
goto err_busy;
}
@@ -564,7 +559,7 @@ int imx_drm_add_encoder(struct drm_encoder *encoder,
mutex_lock(&imxdrm->mutex);
- if (imxdrm->references) {
+ if (imxdrm->drm->open_count) {
ret = -EBUSY;
goto err_busy;
}
@@ -709,7 +704,7 @@ int imx_drm_add_connector(struct drm_connector *connector,
mutex_lock(&imxdrm->mutex);
- if (imxdrm->references) {
+ if (imxdrm->drm->open_count) {
ret = -EBUSY;
goto err_busy;
}
diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
index 2f44d56700af..776d3632dc7d 100644
--- a/drivers/staging/line6/toneport.c
+++ b/drivers/staging/line6/toneport.c
@@ -244,13 +244,17 @@ static int snd_toneport_source_put(struct snd_kcontrol *kcontrol,
struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol);
struct usb_line6_toneport *toneport =
(struct usb_line6_toneport *)line6pcm->line6;
+ unsigned int source;
- if (ucontrol->value.enumerated.item[0] == toneport->source)
+ source = ucontrol->value.enumerated.item[0];
+ if (source >= ARRAY_SIZE(toneport_source_info))
+ return -EINVAL;
+ if (source == toneport->source)
return 0;
- toneport->source = ucontrol->value.enumerated.item[0];
+ toneport->source = source;
toneport_send_cmd(toneport->line6.usbdev,
- toneport_source_info[toneport->source].code, 0x0000);
+ toneport_source_info[source].code, 0x0000);
return 1;
}
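[Editorial note: the toneport control handler above now rejects an out-of-range item index coming from user space before using it to index toneport_source_info[]. The validate-then-use pattern in plain C looks like the sketch below; the table and names are hypothetical.]

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int source_code[] = { 0x0000, 0x0100, 0x0200 };

/* Return the code for a source index chosen by the user, or -1 if the
 * index is out of range.  Indexing first and checking later would read
 * past the end of the table. */
static int lookup_source(unsigned int idx)
{
	if (idx >= ARRAY_SIZE(source_code))
		return -1;
	return source_code[idx];
}

int main(void)
{
	printf("%d\n", lookup_source(1));	/* 256 (0x0100) */
	printf("%d\n", lookup_source(7));	/* -1: rejected */
	return 0;
}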
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 086ca3d7241b..26b49a24b3df 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1802,7 +1802,7 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
int
kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- struct task_struct *task = kthread_run(fn, arg, name);
+ struct task_struct *task = kthread_run(fn, arg, "%s", name);
if (IS_ERR(task))
return PTR_ERR(task);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 2c581b7fa8ad..68a4f52ec998 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -1005,7 +1005,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
int
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- struct task_struct *task = kthread_run(fn, arg, name);
+ struct task_struct *task = kthread_run(fn, arg, "%s", name);
if (IS_ERR(task))
return PTR_ERR(task);
diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig
index 4e898e491860..2156a44d0740 100644
--- a/drivers/staging/lustre/lustre/Kconfig
+++ b/drivers/staging/lustre/lustre/Kconfig
@@ -1,6 +1,6 @@
config LUSTRE_FS
tristate "Lustre file system client support"
- depends on INET && m
+ depends on INET && m && !MIPS && !XTENSA && !SUPERH
select LNET
select CRYPTO
select CRYPTO_CRC32
@@ -52,7 +52,7 @@ config LUSTRE_DEBUG_EXPENSIVE_CHECK
config LUSTRE_TRANSLATE_ERRNOS
bool
depends on LUSTRE_FS && !X86
- default true
+ default y
config LUSTRE_LLITE_LLOOP
bool "Lustre virtual block device"
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index 3916bda3004c..a100a0b96381 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -800,9 +800,9 @@ static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
init_completion(&bltd.bltd_comp);
bltd.bltd_num = atomic_read(&blp->blp_num_threads);
- snprintf(bltd.bltd_name, sizeof(bltd.bltd_name) - 1,
+ snprintf(bltd.bltd_name, sizeof(bltd.bltd_name),
"ldlm_bl_%02d", bltd.bltd_num);
- task = kthread_run(ldlm_bl_thread_main, &bltd, bltd.bltd_name);
+ task = kthread_run(ldlm_bl_thread_main, &bltd, "%s", bltd.bltd_name);
if (IS_ERR(task)) {
CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n",
atomic_read(&blp->blp_num_threads), PTR_ERR(task));
diff --git a/drivers/staging/lustre/lustre/libcfs/workitem.c b/drivers/staging/lustre/lustre/libcfs/workitem.c
index 462172d1a756..1a55c81892e0 100644
--- a/drivers/staging/lustre/lustre/libcfs/workitem.c
+++ b/drivers/staging/lustre/lustre/libcfs/workitem.c
@@ -397,7 +397,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
sched->ws_name, sched->ws_nthreads);
}
- task = kthread_run(cfs_wi_scheduler, sched, name);
+ task = kthread_run(cfs_wi_scheduler, sched, "%s", name);
if (!IS_ERR(task)) {
nthrs--;
continue;
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 2644edf438c1..c8b43442dc74 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -1387,7 +1387,7 @@ echo_copyout_lsm (struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
if (nob > ulsm_nob)
return (-EINVAL);
- if (copy_to_user (ulsm, lsm, sizeof(ulsm)))
+ if (copy_to_user (ulsm, lsm, sizeof(*ulsm)))
return (-EFAULT);
for (i = 0; i < lsm->lsm_stripe_count; i++) {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
index 227a0ae9593b..5dec771d70ee 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -383,8 +383,8 @@ int ptlrpc_start_pinger(void)
/* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
* just drop the VM and FILES in cfs_daemonize_ctxt() right away. */
- rc = PTR_ERR(kthread_run(ptlrpc_pinger_main,
- &pinger_thread, pinger_thread.t_name));
+ rc = PTR_ERR(kthread_run(ptlrpc_pinger_main, &pinger_thread,
+ "%s", pinger_thread.t_name));
if (IS_ERR_VALUE(rc)) {
CERROR("cannot start thread: %d\n", rc);
return rc;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index fbdeff65d059..89c9be96f454 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -615,7 +615,7 @@ int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
init_completion(&pc->pc_starting);
init_completion(&pc->pc_finishing);
spin_lock_init(&pc->pc_lock);
- strncpy(pc->pc_name, name, sizeof(pc->pc_name) - 1);
+ strlcpy(pc->pc_name, name, sizeof(pc->pc_name));
pc->pc_set = ptlrpc_prep_set();
if (pc->pc_set == NULL)
GOTO(out, rc = -ENOMEM);
@@ -638,7 +638,7 @@ int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
GOTO(out, rc);
}
- task = kthread_run(ptlrpcd, pc, pc->pc_name);
+ task = kthread_run(ptlrpcd, pc, "%s", pc->pc_name);
if (IS_ERR(task))
GOTO(out, rc = PTR_ERR(task));
@@ -745,7 +745,7 @@ static int ptlrpcd_init(void)
if (ptlrpcds == NULL)
GOTO(out, rc = -ENOMEM);
- snprintf(name, 15, "ptlrpcd_rcv");
+ snprintf(name, sizeof(name), "ptlrpcd_rcv");
set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv);
if (rc < 0)
@@ -764,7 +764,7 @@ static int ptlrpcd_init(void)
* unnecessary dependency. But how to distribute async RPCs load
* among all the ptlrpc daemons becomes another trouble. */
for (i = 0; i < nthreads; i++) {
- snprintf(name, 15, "ptlrpcd_%d", i);
+ snprintf(name, sizeof(name), "ptlrpcd_%d", i);
rc = ptlrpcd_start(i, nthreads, name, &ptlrpcds->pd_threads[i]);
if (rc < 0)
GOTO(out, rc);
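[Editorial note: the ptlrpcd changes above replace strncpy() with strlcpy() and hard-coded snprintf() lengths with sizeof(): strncpy() leaves the result unterminated when the source is at least as long as the count, and a literal length silently goes stale if the buffer size ever changes. A userspace sketch of the bounded, always-terminated copy; strlcpy() is not in glibc, so snprintf() stands in for it here.]

#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[8];
	char bad[8];

	/* snprintf() truncates and always NUL-terminates, and sizeof(name)
	 * tracks the buffer if its size ever changes. */
	snprintf(name, sizeof(name), "%s", "ptlrpcd_rcv");
	printf("'%s' (len %zu)\n", name, strlen(name));	/* 'ptlrpcd', len 7 */

	/* strncpy() with a source this long copies 8 bytes and no NUL;
	 * printing 'bad' directly would read past the end of the array. */
	strncpy(bad, "ptlrpcd_rcv", sizeof(bad));
	bad[sizeof(bad) - 1] = '\0';	/* the manual fix-up strlcpy() avoids */
	printf("'%s'\n", bad);
	return 0;
}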
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index e90c8fb7da6a..6547f46a7729 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -59,8 +59,8 @@
****************************************/
-#define PTRS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *))
-#define PAGES_PER_POOL (PTRS_PER_PAGE)
+#define POINTERS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *))
+#define PAGES_PER_POOL (POINTERS_PER_PAGE)
#define IDLE_IDX_MAX (100)
#define IDLE_IDX_WEIGHT (3)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index ac8b5fd2300b..acf75f3873d1 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -2718,15 +2718,15 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
spin_unlock(&svcpt->scp_lock);
if (svcpt->scp_cpt >= 0) {
- snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s%02d_%03d",
+ snprintf(thread->t_name, sizeof(thread->t_name), "%s%02d_%03d",
svc->srv_thread_name, svcpt->scp_cpt, thread->t_id);
} else {
- snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s_%04d",
+ snprintf(thread->t_name, sizeof(thread->t_name), "%s_%04d",
svc->srv_thread_name, thread->t_id);
}
CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name);
- rc = PTR_ERR(kthread_run(ptlrpc_main, thread, thread->t_name));
+ rc = PTR_ERR(kthread_run(ptlrpc_main, thread, "%s", thread->t_name));
if (IS_ERR_VALUE(rc)) {
CERROR("cannot start thread '%s': rc %d\n",
thread->t_name, rc);
diff --git a/drivers/staging/octeon-usb/cvmx-usb.c b/drivers/staging/octeon-usb/cvmx-usb.c
index d7b3c82b5ead..45dfe94199ae 100644
--- a/drivers/staging/octeon-usb/cvmx-usb.c
+++ b/drivers/staging/octeon-usb/cvmx-usb.c
@@ -604,7 +604,7 @@ int cvmx_usb_initialize(struct cvmx_usb_state *state, int usb_port_number,
}
}
- memset(usb, 0, sizeof(usb));
+ memset(usb, 0, sizeof(*usb));
usb->init_flags = flags;
/* Initialize the USB state structure */
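[Editorial note: the memset() fix above, the obdecho copy_to_user() hunk earlier, and a couple of rtl8188eu hunks below all hit the same pitfall: sizeof applied to a pointer yields the size of the pointer, not of the object it points to. A small userspace illustration, with a made-up structure:]

#include <stdio.h>
#include <string.h>

struct state {
	int flags;
	char name[32];
	double calib[8];
};

int main(void)
{
	struct state s = { .flags = -1 };
	struct state *p = &s;

	/* sizeof(p) is just the pointer size (typically 8 bytes), so a
	 * memset with it would clear only the first few bytes. */
	printf("sizeof(p)  = %zu\n", sizeof(p));
	/* sizeof(*p) is the whole structure. */
	printf("sizeof(*p) = %zu\n", sizeof(*p));

	memset(p, 0, sizeof(*p));	/* correct: clears the whole object */
	printf("flags after memset: %d\n", s.flags);	/* 0 */
	return 0;
}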
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
index 78b6cb743769..199059d64c9b 100644
--- a/drivers/staging/octeon/ethernet-mem.c
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -48,13 +48,8 @@ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
while (freed) {
struct sk_buff *skb = dev_alloc_skb(size + 256);
- if (unlikely(skb == NULL)) {
- pr_warning
- ("Failed to allocate skb for hardware pool %d\n",
- pool);
+ if (unlikely(skb == NULL))
break;
- }
-
skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f));
*(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128));
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index d8f5f694ec35..ea53af30dfa7 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -373,9 +373,7 @@ int cvm_oct_rgmii_init(struct net_device *dev)
* Enable interrupts on inband status changes
* for this port.
*/
- gmx_rx_int_en.u64 =
- cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
- (index, interface));
+ gmx_rx_int_en.u64 = 0;
gmx_rx_int_en.s.phy_dupx = 1;
gmx_rx_int_en.s.phy_link = 1;
gmx_rx_int_en.s.phy_spd = 1;
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 34afc16bc493..e14a1bb04361 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -303,6 +303,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
if (backlog > budget * cores_in_use && napi != NULL)
cvm_oct_enable_one_cpu();
}
+ rx_count++;
skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
if (likely(skb_in_hw)) {
@@ -336,9 +337,6 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
*/
skb = dev_alloc_skb(work->len);
if (!skb) {
- printk_ratelimited("Port %d failed to allocate "
- "skbuff, packet dropped\n",
- work->ipprt);
cvm_oct_free_work(work);
continue;
}
@@ -429,7 +427,6 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
#endif
}
netif_receive_skb(skb);
- rx_count++;
} else {
/* Drop any packet received for a device that isn't up */
/*
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index 3605c5da822d..6fc77428e83a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -157,8 +157,8 @@ _func_enter_;
*frlen = *frlen + (len + 2);
- return pbuf + len + 2;
_func_exit_;
+ return pbuf + len + 2;
}
inline u8 *rtw_set_ie_ch_switch (u8 *buf, u32 *buf_len, u8 ch_switch_mode,
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 8b2ba26ba38d..4b2eb8e9b562 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -1827,13 +1827,13 @@ unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_fra
#ifdef CONFIG_88EU_P2P
-static int get_reg_classes_full_count(struct p2p_channels channel_list)
+static int get_reg_classes_full_count(struct p2p_channels *channel_list)
{
int cnt = 0;
int i;
- for (i = 0; i < channel_list.reg_classes; i++) {
- cnt += channel_list.reg_class[i].channels;
+ for (i = 0; i < channel_list->reg_classes; i++) {
+ cnt += channel_list->reg_class[i].channels;
}
return cnt;
@@ -2065,7 +2065,7 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
/* + number of channels in all classes */
len_channellist_attr = 3
+ (1 + 1) * (u16)(pmlmeext->channel_list.reg_classes)
- + get_reg_classes_full_count(pmlmeext->channel_list);
+ + get_reg_classes_full_count(&pmlmeext->channel_list);
*(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
p2pielen += 2;
@@ -2437,7 +2437,7 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
/* + number of channels in all classes */
len_channellist_attr = 3
+ (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
- + get_reg_classes_full_count(pmlmeext->channel_list);
+ + get_reg_classes_full_count(&pmlmeext->channel_list);
*(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
@@ -2859,7 +2859,7 @@ void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
/* + number of channels in all classes */
len_channellist_attr = 3
+ (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
- + get_reg_classes_full_count(pmlmeext->channel_list);
+ + get_reg_classes_full_count(&pmlmeext->channel_list);
*(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
@@ -3120,7 +3120,7 @@ void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialo
/* + number of channels in all classes */
len_channellist_attr = 3
+ (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
- + get_reg_classes_full_count(pmlmeext->channel_list);
+ + get_reg_classes_full_count(&pmlmeext->channel_list);
*(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
p2pielen += 2;
diff --git a/drivers/staging/rtl8188eu/core/rtw_mp.c b/drivers/staging/rtl8188eu/core/rtw_mp.c
index c7ff2e4d1f23..9832dcbbd07f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mp.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mp.c
@@ -907,7 +907,7 @@ u32 mp_query_psd(struct adapter *pAdapter, u8 *data)
sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop);
}
- _rtw_memset(data, '\0', sizeof(data));
+ _rtw_memset(data, '\0', sizeof(*data));
i = psd_start;
while (i < psd_stop) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 013ea487e7ac..8018edd3d42e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -631,7 +631,7 @@ void WMMOnAssocRsp(struct adapter *padapter)
inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
if (pregpriv->wifi_spec == 1) {
- u32 j, tmp, change_inx;
+ u32 j, tmp, change_inx = false;
/* entry indx: 0->vo, 1->vi, 2->be, 3->bk. */
for (i = 0; i < 4; i++) {
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index 9c2e7a20c09e..ec0028d4e61a 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -57,7 +57,7 @@ static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
u8 cut_ver, fab_ver;
/* Init Value */
- _rtw_memset(dm_odm, 0, sizeof(dm_odm));
+ _rtw_memset(dm_odm, 0, sizeof(*dm_odm));
dm_odm->Adapter = Adapter;
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index 2bfe72841921..4787bacdcad8 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -1010,7 +1010,7 @@ enum dm_dig_op {
#define DM_false_ALARM_THRESH_LOW 400
#define DM_false_ALARM_THRESH_HIGH 1000
-#define DM_DIG_MAX_NIC 0x3e
+#define DM_DIG_MAX_NIC 0x4e
#define DM_DIG_MIN_NIC 0x1e /* 0x22/0x1c */
#define DM_DIG_MAX_AP 0x32
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index 52b280165a92..555c801d2ded 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -188,7 +188,7 @@ enum ChannelPlan {
struct txpowerinfo24g {
u8 IndexCCK_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
- u8 IndexBW40_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G-1];
+ u8 IndexBW40_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
/* If only one tx, only BW20 and OFDM are used. */
s8 CCK_Diff[MAX_RF_PATH][MAX_TX_COUNT];
s8 OFDM_Diff[MAX_RF_PATH][MAX_TX_COUNT];
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index a96b018e5e6a..853ab80a2b86 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -870,6 +870,7 @@ static struct fwevent wlanevents[] = {
{0, NULL},
{0, NULL},
{0, &rtw_cpwm_event_callback},
+ {0, NULL},
};
#endif/* _RTL_MLME_EXT_C_ */
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index cd4100fb3645..95953ebc0279 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -6973,7 +6973,7 @@ static int rtw_mp_ctx(struct net_device *dev,
stop = strncmp(extra, "stop", 4);
sscanf(extra, "count =%d, pkt", &count);
- _rtw_memset(extra, '\0', sizeof(extra));
+ _rtw_memset(extra, '\0', sizeof(*extra));
if (stop == 0) {
bStartTest = 0; /* To set Stop */
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index d3078d200e50..9ca3180ebaa0 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -54,6 +54,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
/*=== Customer ID ===*/
/****** 8188EUS ********/
{USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */
+ {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
{} /* Terminating entry */
};
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index 5bc361b16d4c..56144014b7c9 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -37,6 +37,8 @@ rt_status SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen)
/* Get TCB and local buffer from common pool.
(It is shared by CmdQ, MgntQ, and USB coalesce DataQ) */
skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4);
+ if (!skb)
+ return RT_STATUS_FAILURE;
memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->queue_index = TXCMD_QUEUE;
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index dbf11ecb794e..19d3cf451b88 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -172,8 +172,8 @@ static u16 swGetOFDMControlRate(struct vnt_private *pDevice, u16 wRateIdx)
if (!CARDbIsOFDMinBasicRate(pDevice)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
"swGetOFDMControlRate:(NO OFDM) %d\n", wRateIdx);
- if (wRateIdx > RATE_24M)
- wRateIdx = RATE_24M;
+ if (wRateIdx > RATE_24M)
+ wRateIdx = RATE_24M;
return wRateIdx;
}
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index d0cf7d8a20e5..8872e0f84f40 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -1634,6 +1634,9 @@ int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info,
if (pMgmt == NULL)
return -EFAULT;
+ if (!(pDevice->flags & DEVICE_FLAGS_OPENED))
+ return -ENODEV;
+
buf = kzalloc(sizeof(struct viawget_wpa_param), GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 536971786ae8..6f9d28182445 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1098,6 +1098,8 @@ static int device_close(struct net_device *dev)
memset(pMgmt->abyCurrBSSID, 0, 6);
pMgmt->eCurrState = WMAC_STATE_IDLE;
+ pDevice->flags &= ~DEVICE_FLAGS_OPENED;
+
device_free_tx_bufs(pDevice);
device_free_rx_bufs(pDevice);
device_free_int_bufs(pDevice);
@@ -1109,7 +1111,6 @@ static int device_close(struct net_device *dev)
usb_free_urb(pDevice->pInterruptURB);
BSSvClearNodeDBTable(pDevice, 0);
- pDevice->flags &=(~DEVICE_FLAGS_OPENED);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n");
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index fb743a8811bb..14f3e852215d 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -148,6 +148,8 @@ static void *s_vGetFreeContext(struct vnt_private *pDevice)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n");
for (ii = 0; ii < pDevice->cbTD; ii++) {
+ if (!pDevice->apTD[ii])
+ return NULL;
pContext = pDevice->apTD[ii];
if (pContext->bBoolInUse == false) {
pContext->bBoolInUse = true;
diff --git a/drivers/staging/xillybus/xillybus_core.c b/drivers/staging/xillybus/xillybus_core.c
index efc56987a60b..7db6f03a0054 100644
--- a/drivers/staging/xillybus/xillybus_core.c
+++ b/drivers/staging/xillybus/xillybus_core.c
@@ -2054,7 +2054,7 @@ static int xillybus_init_chrdev(struct xilly_endpoint *endpoint,
NULL,
MKDEV(major, i),
NULL,
- devname);
+ "%s", devname);
if (IS_ERR(device)) {
pr_warn("xillybus: Failed to create %s "
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 91d94b564433..2c4ed52ca849 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -981,4 +981,3 @@ MODULE_PARM_DESC(num_devices, "Number of zram devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");
-MODULE_ALIAS("devname:zram");
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index c9a9ddd1d0bc..01bf5eb4f238 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1758,8 +1758,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
canon_change = (old->c_lflag ^ tty->termios.c_lflag) & ICANON;
if (canon_change) {
bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
- ldata->line_start = 0;
- ldata->canon_head = ldata->read_tail;
+ ldata->line_start = ldata->canon_head = ldata->read_tail;
ldata->erasing = 0;
ldata->lnext = 0;
}
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 52379e56a31e..44077c0b7670 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -667,30 +667,21 @@ static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf)
static int dma_push_rx(struct eg20t_port *priv, int size)
{
- struct tty_struct *tty;
int room;
struct uart_port *port = &priv->port;
struct tty_port *tport = &port->state->port;
- port = &priv->port;
- tty = tty_port_tty_get(tport);
- if (!tty) {
- dev_dbg(priv->port.dev, "%s:tty is busy now", __func__);
- return 0;
- }
-
room = tty_buffer_request_room(tport, size);
if (room < size)
dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
size - room);
if (!room)
- return room;
+ return 0;
tty_insert_flip_string(tport, sg_virt(&priv->sg_rx), size);
port->icount.rx += room;
- tty_kref_put(tty);
return room;
}
@@ -1098,6 +1089,8 @@ static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr)
if (tty == NULL) {
for (i = 0; error_msg[i] != NULL; i++)
dev_err(&priv->pdev->dev, error_msg[i]);
+ } else {
+ tty_kref_put(tty);
}
}
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index d0d972f7e43e..0489a2bdcdf9 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -732,7 +732,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
static void tegra_uart_stop_rx(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
- struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
+ struct tty_struct *tty;
struct tty_port *port = &u->state->port;
struct dma_tx_state state;
unsigned long ier;
@@ -744,6 +744,8 @@ static void tegra_uart_stop_rx(struct uart_port *u)
if (!tup->rx_in_progress)
return;
+ tty = tty_port_tty_get(&tup->uport.state->port);
+
tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */
ier = tup->ier_shadow;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index a9355ce1c6d5..3a1a01af9a80 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -854,7 +854,8 @@ void disassociate_ctty(int on_exit)
struct pid *tty_pgrp = tty_get_pgrp(tty);
if (tty_pgrp) {
kill_pgrp(tty_pgrp, SIGHUP, on_exit);
- kill_pgrp(tty_pgrp, SIGCONT, on_exit);
+ if (!on_exit)
+ kill_pgrp(tty_pgrp, SIGCONT, on_exit);
put_pid(tty_pgrp);
}
}
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 03ba081c5772..6fd60fece6b4 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -1201,6 +1201,9 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
}
return 0;
case TCFLSH:
+ retval = tty_check_change(tty);
+ if (retval)
+ return retval;
return __tty_perform_flush(tty, arg);
default:
/* Try the mode commands */
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 4a851e15e58c..77b47d82c9a6 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -1,6 +1,6 @@
config USB_CHIPIDEA
tristate "ChipIdea Highspeed Dual Role Controller"
- depends on (USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)
+ depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
help
Say Y here if your system has a dual role high speed USB
controller based on ChipIdea silicon IP. Currently, only the
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 74d998d9b45b..be822a2c1776 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -131,7 +131,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n",
ret);
- goto err_clk;
+ goto err_phy;
}
}
@@ -143,7 +143,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Can't register ci_hdrc platform device, err=%d\n",
ret);
- goto err_clk;
+ goto err_phy;
}
if (data->usbmisc_data) {
@@ -164,6 +164,9 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
disable_device:
ci_hdrc_remove_device(data->ci_pdev);
+err_phy:
+ if (data->phy)
+ usb_phy_shutdown(data->phy);
err_clk:
clk_disable_unprepare(data->clk);
return ret;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 94626409559a..23763dcec069 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -605,6 +605,7 @@ static int ci_hdrc_remove(struct platform_device *pdev)
dbg_remove_files(ci);
free_irq(ci->irq, ci);
ci_role_destroy(ci);
+ kfree(ci->hw_bank.regmap);
return 0;
}
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 6b4c2f2eb946..9333083dd111 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1600,6 +1600,8 @@ static void destroy_eps(struct ci_hdrc *ci)
for (i = 0; i < ci->hw_ep_max; i++) {
struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
+ if (hwep->pending_td)
+ free_pending_td(hwep);
dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
}
}
@@ -1667,13 +1669,13 @@ static int ci_udc_stop(struct usb_gadget *gadget,
if (ci->platdata->notify_event)
ci->platdata->notify_event(ci,
CI_HDRC_CONTROLLER_STOPPED_EVENT);
- ci->driver = NULL;
spin_unlock_irqrestore(&ci->lock, flags);
_gadget_stop_activity(&ci->gadget);
spin_lock_irqsave(&ci->lock, flags);
pm_runtime_put(&ci->gadget.dev);
}
+ ci->driver = NULL;
spin_unlock_irqrestore(&ci->lock, flags);
return 0;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 737e3c19967b..71dc5d768fa5 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -742,6 +742,22 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
if ((index & ~USB_DIR_IN) == 0)
return 0;
ret = findintfep(ps->dev, index);
+ if (ret < 0) {
+ /*
+ * Some not fully compliant Win apps seem to get
+ * index wrong and have the endpoint number here
+ * rather than the endpoint address (with the
+ * correct direction). Win does let this through,
+ * so we'll not reject it here but leave it to
+ * the device to not break KVM. But we warn.
+ */
+ ret = findintfep(ps->dev, index ^ 0x80);
+ if (ret >= 0)
+ dev_info(&ps->dev->dev,
+ "%s: process %i (%s) requesting ep %02x but needs %02x\n",
+ __func__, task_pid_nr(current),
+ current->comm, index, index ^ 0x80);
+ }
if (ret >= 0)
ret = checkintf(ps, ret);
break;
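[Editorial note: the devio.c change above retries the lookup with index ^ 0x80 because a USB endpoint address is the endpoint number in the low bits plus a direction flag in bit 7 (0x80 means IN), and some applications pass only the number. The relationship is plain bit arithmetic; the defines below stand in for the kernel's USB_DIR_IN and USB_ENDPOINT_NUMBER_MASK.]

#include <stdio.h>

#define DIR_IN		0x80	/* bit 7: device-to-host */
#define EP_NUM_MASK	0x0f	/* low bits: endpoint number */

int main(void)
{
	unsigned int ep = 0x81;				/* IN endpoint 1 */

	printf("number  = %u\n", ep & EP_NUM_MASK);	/* 1 */
	printf("is IN   = %u\n", !!(ep & DIR_IN));	/* 1 */
	printf("flipped = 0x%02x\n", ep ^ DIR_IN);	/* 0x01: same number, OUT */
	return 0;
}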
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index dde4c83516a1..e6b682c6c236 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3426,6 +3426,9 @@ static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state)
unsigned long long u2_pel;
int ret;
+ if (udev->state != USB_STATE_CONFIGURED)
+ return 0;
+
/* Convert SEL and PEL stored in ns to us */
u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index b870872e020f..70fc43027a5c 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -1,7 +1,6 @@
config USB_DWC3
tristate "DesignWare USB3 DRD Core Support"
depends on (USB || USB_GADGET) && HAS_DMA
- depends on EXTCON
select USB_XHCI_PLATFORM if USB_SUPPORT && USB_XHCI_HCD
help
Say Y or M here if your system has a Dual Role SuperSpeed
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 9b138129e856..2e252aae51ca 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -28,6 +28,8 @@
/* FIXME define these in <linux/pci_ids.h> */
#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
+#define PCI_DEVICE_ID_INTEL_BYT 0x0f37
+#define PCI_DEVICE_ID_INTEL_MRFLD 0x119e
struct dwc3_pci {
struct device *dev;
@@ -187,6 +189,8 @@ static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = {
PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index f168eaebdef8..5452c0fce360 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2611,15 +2611,13 @@ int dwc3_gadget_init(struct dwc3 *dwc)
ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
if (ret) {
dev_err(dwc->dev, "failed to register udc\n");
- goto err5;
+ goto err4;
}
return 0;
-err5:
- dwc3_gadget_free_endpoints(dwc);
-
err4:
+ dwc3_gadget_free_endpoints(dwc);
dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
dwc->ep0_bounce, dwc->ep0_bounce_addr);
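[Editorial note: the dwc3 gadget hunk above reshuffles the error labels so that the endpoint cleanup sits on the common unwind path. The general idiom is goto-based unwinding in reverse order of acquisition, each label releasing exactly what was obtained before the jump. A userspace sketch with hypothetical resources; the path and names are invented for illustration.]

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *bufa, *bufb;
	FILE *log;

	bufa = malloc(128);
	if (!bufa)
		return -1;

	bufb = malloc(256);
	if (!bufb)
		goto err_free_a;	/* only bufa was acquired */

	log = fopen("/tmp/example.log", "w");
	if (!log)
		goto err_free_b;	/* bufa and bufb were acquired */

	fputs("ready\n", log);
	fclose(log);
	free(bufb);
	free(bufa);
	return 0;

err_free_b:	/* unwind in reverse order of acquisition */
	free(bufb);
err_free_a:
	free(bufa);
	return -1;
}

int main(void)
{
	return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}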
diff --git a/drivers/usb/gadget/cdc2.c b/drivers/usb/gadget/cdc2.c
index 5a5acf22c694..e126b6b248e6 100644
--- a/drivers/usb/gadget/cdc2.c
+++ b/drivers/usb/gadget/cdc2.c
@@ -113,12 +113,6 @@ static int __init cdc_do_config(struct usb_configuration *c)
c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
- fi_ecm = usb_get_function_instance("ecm");
- if (IS_ERR(fi_ecm)) {
- status = PTR_ERR(fi_ecm);
- goto err_func_ecm;
- }
-
f_ecm = usb_get_function(fi_ecm);
if (IS_ERR(f_ecm)) {
status = PTR_ERR(f_ecm);
@@ -129,35 +123,24 @@ static int __init cdc_do_config(struct usb_configuration *c)
if (status)
goto err_add_ecm;
- fi_serial = usb_get_function_instance("acm");
- if (IS_ERR(fi_serial)) {
- status = PTR_ERR(fi_serial);
- goto err_get_acm;
- }
-
f_acm = usb_get_function(fi_serial);
if (IS_ERR(f_acm)) {
status = PTR_ERR(f_acm);
- goto err_func_acm;
+ goto err_get_acm;
}
status = usb_add_function(c, f_acm);
if (status)
goto err_add_acm;
-
return 0;
err_add_acm:
usb_put_function(f_acm);
-err_func_acm:
- usb_put_function_instance(fi_serial);
err_get_acm:
usb_remove_function(c, f_ecm);
err_add_ecm:
usb_put_function(f_ecm);
err_get_ecm:
- usb_put_function_instance(fi_ecm);
-err_func_ecm:
return status;
}
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 06ecd08fd57a..b8a2376971a4 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -923,8 +923,9 @@ static int dummy_udc_stop(struct usb_gadget *g,
struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
struct dummy *dum = dum_hcd->dum;
- dev_dbg(udc_dev(dum), "unregister gadget driver '%s'\n",
- driver->driver.name);
+ if (driver)
+ dev_dbg(udc_dev(dum), "unregister gadget driver '%s'\n",
+ driver->driver.name);
dum->driver = NULL;
@@ -1000,8 +1001,8 @@ static int dummy_udc_remove(struct platform_device *pdev)
{
struct dummy *dum = platform_get_drvdata(pdev);
- usb_del_gadget_udc(&dum->gadget);
device_remove_file(&dum->gadget.dev, &dev_attr_function);
+ usb_del_gadget_udc(&dum->gadget);
return 0;
}
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index edab45da3741..8d9e6f7e8f1a 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -995,7 +995,7 @@ static void ecm_unbind(struct usb_configuration *c, struct usb_function *f)
usb_ep_free_request(ecm->notify, ecm->notify_req);
}
-struct usb_function *ecm_alloc(struct usb_function_instance *fi)
+static struct usb_function *ecm_alloc(struct usb_function_instance *fi)
{
struct f_ecm *ecm;
struct f_ecm_opts *opts;
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
index d00392d879db..d61c11d765d0 100644
--- a/drivers/usb/gadget/f_eem.c
+++ b/drivers/usb/gadget/f_eem.c
@@ -624,7 +624,7 @@ static void eem_unbind(struct usb_configuration *c, struct usb_function *f)
usb_free_all_descriptors(f);
}
-struct usb_function *eem_alloc(struct usb_function_instance *fi)
+static struct usb_function *eem_alloc(struct usb_function_instance *fi)
{
struct f_eem *eem;
struct f_eem_opts *opts;
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 1a66c5baa0d1..0658908d8968 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1034,37 +1034,19 @@ struct ffs_sb_fill_data {
struct ffs_file_perms perms;
umode_t root_mode;
const char *dev_name;
- union {
- /* set by ffs_fs_mount(), read by ffs_sb_fill() */
- void *private_data;
- /* set by ffs_sb_fill(), read by ffs_fs_mount */
- struct ffs_data *ffs_data;
- };
+ struct ffs_data *ffs_data;
};
static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
{
struct ffs_sb_fill_data *data = _data;
struct inode *inode;
- struct ffs_data *ffs;
+ struct ffs_data *ffs = data->ffs_data;
ENTER();
- /* Initialise data */
- ffs = ffs_data_new();
- if (unlikely(!ffs))
- goto Enomem;
-
ffs->sb = sb;
- ffs->dev_name = kstrdup(data->dev_name, GFP_KERNEL);
- if (unlikely(!ffs->dev_name))
- goto Enomem;
- ffs->file_perms = data->perms;
- ffs->private_data = data->private_data;
-
- /* used by the caller of this function */
- data->ffs_data = ffs;
-
+ data->ffs_data = NULL;
sb->s_fs_info = ffs;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -1080,17 +1062,14 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
&data->perms);
sb->s_root = d_make_root(inode);
if (unlikely(!sb->s_root))
- goto Enomem;
+ return -ENOMEM;
/* EP0 file */
if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
&ffs_ep0_operations, NULL)))
- goto Enomem;
+ return -ENOMEM;
return 0;
-
-Enomem:
- return -ENOMEM;
}
static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
@@ -1193,6 +1172,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
struct dentry *rv;
int ret;
void *ffs_dev;
+ struct ffs_data *ffs;
ENTER();
@@ -1200,18 +1180,30 @@ ffs_fs_mount(struct file_system_type *t, int flags,
if (unlikely(ret < 0))
return ERR_PTR(ret);
+ ffs = ffs_data_new();
+ if (unlikely(!ffs))
+ return ERR_PTR(-ENOMEM);
+ ffs->file_perms = data.perms;
+
+ ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
+ if (unlikely(!ffs->dev_name)) {
+ ffs_data_put(ffs);
+ return ERR_PTR(-ENOMEM);
+ }
+
ffs_dev = functionfs_acquire_dev_callback(dev_name);
- if (IS_ERR(ffs_dev))
- return ffs_dev;
+ if (IS_ERR(ffs_dev)) {
+ ffs_data_put(ffs);
+ return ERR_CAST(ffs_dev);
+ }
+ ffs->private_data = ffs_dev;
+ data.ffs_data = ffs;
- data.dev_name = dev_name;
- data.private_data = ffs_dev;
rv = mount_nodev(t, flags, &data, ffs_sb_fill);
-
- /* data.ffs_data is set by ffs_sb_fill */
- if (IS_ERR(rv))
+ if (IS_ERR(rv) && data.ffs_data) {
functionfs_release_dev_callback(data.ffs_data);
-
+ ffs_data_put(data.ffs_data);
+ }
return rv;
}
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 313b835eedfd..a01d7d38c016 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -2260,10 +2260,12 @@ reset:
/* Disable the endpoints */
if (fsg->bulk_in_enabled) {
usb_ep_disable(fsg->bulk_in);
+ fsg->bulk_in->driver_data = NULL;
fsg->bulk_in_enabled = 0;
}
if (fsg->bulk_out_enabled) {
usb_ep_disable(fsg->bulk_out);
+ fsg->bulk_out->driver_data = NULL;
fsg->bulk_out_enabled = 0;
}
diff --git a/drivers/usb/gadget/fotg210-udc.c b/drivers/usb/gadget/fotg210-udc.c
index 32db2eee2d87..bbbfd1948778 100644
--- a/drivers/usb/gadget/fotg210-udc.c
+++ b/drivers/usb/gadget/fotg210-udc.c
@@ -1214,6 +1214,6 @@ static struct platform_driver fotg210_driver = {
module_platform_driver(fotg210_driver);
-MODULE_AUTHOR("Yuan-Hsin Chen <yhchen@faraday-tech.com>");
+MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang <john453@faraday-tech.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c
index f1dd6daabe21..b278abe52453 100644
--- a/drivers/usb/gadget/fusb300_udc.c
+++ b/drivers/usb/gadget/fusb300_udc.c
@@ -22,7 +22,7 @@
MODULE_DESCRIPTION("FUSB300 USB gadget driver");
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Yuan Hsin Chen <yhchen@faraday-tech.com>");
+MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang <john453@faraday-tech.com>");
MODULE_ALIAS("platform:fusb300_udc");
#define DRIVER_VERSION "20 October 2010"
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index 2a1ebefd8f9e..23393254a8a3 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -179,7 +179,7 @@ err_conf:
return ret;
}
-static int rndis_config_register(struct usb_composite_dev *cdev)
+static __ref int rndis_config_register(struct usb_composite_dev *cdev)
{
static struct usb_configuration config = {
.bConfigurationValue = MULTI_RNDIS_CONFIG_NUM,
@@ -194,7 +194,7 @@ static int rndis_config_register(struct usb_composite_dev *cdev)
#else
-static int rndis_config_register(struct usb_composite_dev *cdev)
+static __ref int rndis_config_register(struct usb_composite_dev *cdev)
{
return 0;
}
@@ -241,7 +241,7 @@ err_conf:
return ret;
}
-static int cdc_config_register(struct usb_composite_dev *cdev)
+static __ref int cdc_config_register(struct usb_composite_dev *cdev)
{
static struct usb_configuration config = {
.bConfigurationValue = MULTI_CDC_CONFIG_NUM,
@@ -256,7 +256,7 @@ static int cdc_config_register(struct usb_composite_dev *cdev)
#else
-static int cdc_config_register(struct usb_composite_dev *cdev)
+static __ref int cdc_config_register(struct usb_composite_dev *cdev)
{
return 0;
}
diff --git a/drivers/usb/gadget/mv_u3d_core.c b/drivers/usb/gadget/mv_u3d_core.c
index bbb6e98c4384..561b30efb8ee 100644
--- a/drivers/usb/gadget/mv_u3d_core.c
+++ b/drivers/usb/gadget/mv_u3d_core.c
@@ -645,6 +645,7 @@ static int mv_u3d_ep_disable(struct usb_ep *_ep)
struct mv_u3d_ep *ep;
struct mv_u3d_ep_context *ep_context;
u32 epxcr, direction;
+ unsigned long flags;
if (!_ep)
return -EINVAL;
@@ -661,7 +662,9 @@ static int mv_u3d_ep_disable(struct usb_ep *_ep)
direction = mv_u3d_ep_dir(ep);
/* nuke all pending requests (does flush) */
+ spin_lock_irqsave(&u3d->lock, flags);
mv_u3d_nuke(ep, -ESHUTDOWN);
+ spin_unlock_irqrestore(&u3d->lock, flags);
/* Disable the endpoint for Rx or Tx and reset the endpoint type */
if (direction == MV_U3D_EP_DIR_OUT) {
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index d69b36a99dbc..6bddf1aa2347 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2475,8 +2475,6 @@ irq_retry:
if (gintsts & GINTSTS_ErlySusp) {
dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
writel(GINTSTS_ErlySusp, hsotg->regs + GINTSTS);
-
- s3c_hsotg_disconnect(hsotg);
}
/*
@@ -2962,9 +2960,6 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
if (!hsotg)
return -ENODEV;
- if (!driver || driver != hsotg->driver || !driver->unbind)
- return -EINVAL;
-
/* all endpoints should be shutdown */
for (ep = 0; ep < hsotg->num_of_eps; ep++)
s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
@@ -2972,15 +2967,15 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
spin_lock_irqsave(&hsotg->lock, flags);
s3c_hsotg_phy_disable(hsotg);
- regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
- hsotg->driver = NULL;
+ if (!driver)
+ hsotg->driver = NULL;
+
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
spin_unlock_irqrestore(&hsotg->lock, flags);
- dev_info(hsotg->dev, "unregistered gadget driver '%s'\n",
- driver->driver.name);
+ regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
return 0;
}
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 947b009009f1..f2407b2e8a99 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -130,7 +130,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
}
/* Enable USB controller, 83xx or 8536 */
- if (pdata->have_sysif_regs)
+ if (pdata->have_sysif_regs && pdata->controller_ver < FSL_USB_VER_1_6)
setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);
/* Don't need to set host mode here. It will be done by tdi_reset() */
@@ -232,15 +232,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
case FSL_USB2_PHY_ULPI:
if (pdata->have_sysif_regs && pdata->controller_ver) {
/* controller version 1.6 or above */
+ clrbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
setbits32(non_ehci + FSL_SOC_USB_CTRL,
- ULPI_PHY_CLK_SEL);
- /*
- * Due to controller issue of PHY_CLK_VALID in ULPI
- * mode, we set USB_CTRL_USB_EN before checking
- * PHY_CLK_VALID, otherwise PHY_CLK_VALID doesn't work.
- */
- clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
- UTMI_PHY_EN, USB_CTRL_USB_EN);
+ ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN);
}
portsc |= PORT_PTS_ULPI;
break;
@@ -270,8 +264,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
if (pdata->have_sysif_regs && pdata->controller_ver &&
(phy_mode == FSL_USB2_PHY_ULPI)) {
/* check PHY_CLK_VALID to get phy clk valid */
- if (!spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
- PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0)) {
+ if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
+ PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0) ||
+ in_be32(non_ehci + FSL_SOC_USB_PRICTRL))) {
printk(KERN_WARNING "fsl-ehci: USB PHY clock invalid\n");
return -EINVAL;
}
@@ -669,7 +664,7 @@ static const struct hc_driver ehci_fsl_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_USB2 | HCD_MEMORY | HCD_BH,
+ .flags = HCD_USB2 | HCD_MEMORY,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c
index b52a66ce92e8..83ab51af250f 100644
--- a/drivers/usb/host/ehci-grlib.c
+++ b/drivers/usb/host/ehci-grlib.c
@@ -43,7 +43,7 @@ static const struct hc_driver ehci_grlib_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+ .flags = HCD_MEMORY | HCD_USB2,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 5d6022f30ebe..86ab9fd9fe9e 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1158,7 +1158,7 @@ static const struct hc_driver ehci_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+ .flags = HCD_MEMORY | HCD_USB2,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index 417c10da9450..35cdbd88bbbe 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -96,7 +96,7 @@ static const struct hc_driver mv_ehci_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+ .flags = HCD_MEMORY | HCD_USB2,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c
index ab0397e4d8f3..45cc00158412 100644
--- a/drivers/usb/host/ehci-octeon.c
+++ b/drivers/usb/host/ehci-octeon.c
@@ -51,7 +51,7 @@ static const struct hc_driver ehci_octeon_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+ .flags = HCD_MEMORY | HCD_USB2,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 6bd299e61f58..854c2ec7b699 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -361,7 +361,7 @@ static struct pci_driver ehci_pci_driver = {
.remove = usb_hcd_pci_remove,
.shutdown = usb_hcd_pci_shutdown,
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
.driver = {
.pm = &usb_hcd_pci_pm_ops
},
diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c
index 893b707f0000..601e208bd782 100644
--- a/drivers/usb/host/ehci-pmcmsp.c
+++ b/drivers/usb/host/ehci-pmcmsp.c
@@ -286,7 +286,7 @@ static const struct hc_driver ehci_msp_hc_driver = {
#else
.irq = ehci_irq,
#endif
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+ .flags = HCD_MEMORY | HCD_USB2,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 6cc5567bf9c8..932293fa32de 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -28,7 +28,7 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+ .flags = HCD_MEMORY | HCD_USB2,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 8188542ba17e..fd983771b025 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -71,7 +71,7 @@ static const struct hc_driver ps3_ehci_hc_driver = {
.product_desc = "PS3 EHCI Host Controller",
.hcd_priv_size = sizeof(struct ehci_hcd),
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+ .flags = HCD_MEMORY | HCD_USB2,
.reset = ps3_ehci_hc_reset,
.start = ehci_run,
.stop = ehci_stop,
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index e321804c3475..a7f776a13eb1 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -247,6 +247,8 @@ static int qtd_copy_status (
static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
+__releases(ehci->lock)
+__acquires(ehci->lock)
{
if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
/* ... update hc-wide periodic stats */
@@ -272,8 +274,11 @@ ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
urb->actual_length, urb->transfer_buffer_length);
#endif
+ /* complete() can reenter this HCD */
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
+ spin_unlock (&ehci->lock);
usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
+ spin_lock (&ehci->lock);
}
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
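The ehci-q.c hunk above annotates ehci_urb_done() with __releases(ehci->lock)/__acquires(ehci->lock) and drops the spinlock across usb_hcd_giveback_urb(), because the URB completion handler may re-enter the HCD and try to take the same lock. The same unlock-around-callback shape, reduced to a minimal stand-alone sketch (work_lock, give_back and print_done are invented names, not part of the patch):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller holds work_lock; complete() may call back into code that also
 * takes work_lock, so release it around the callback and retake it after. */
static void give_back(int item, void (*complete)(int))
{
	pthread_mutex_unlock(&work_lock);
	complete(item);
	pthread_mutex_lock(&work_lock);
}

static void print_done(int item)
{
	printf("done: %d\n", item);
}

int main(void)
{
	pthread_mutex_lock(&work_lock);
	give_back(42, print_done);
	pthread_mutex_unlock(&work_lock);
	return 0;
}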
diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c
index 8a734498079b..b2de52d39614 100644
--- a/drivers/usb/host/ehci-sead3.c
+++ b/drivers/usb/host/ehci-sead3.c
@@ -55,7 +55,7 @@ const struct hc_driver ehci_sead3_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+ .flags = HCD_MEMORY | HCD_USB2,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index dc899eb2b861..93e59a13bc1f 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -36,7 +36,7 @@ static const struct hc_driver ehci_sh_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_USB2 | HCD_MEMORY | HCD_BH,
+ .flags = HCD_USB2 | HCD_MEMORY,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-tilegx.c b/drivers/usb/host/ehci-tilegx.c
index 67026ffbf9a8..cca4be90a864 100644
--- a/drivers/usb/host/ehci-tilegx.c
+++ b/drivers/usb/host/ehci-tilegx.c
@@ -61,7 +61,7 @@ static const struct hc_driver ehci_tilegx_hc_driver = {
* Generic hardware linkage.
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+ .flags = HCD_MEMORY | HCD_USB2,
/*
* Basic lifecycle operations.
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
index 1c370dfbee0d..59e0e24c753f 100644
--- a/drivers/usb/host/ehci-w90x900.c
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -108,7 +108,7 @@ static const struct hc_driver ehci_w90x900_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_USB2|HCD_MEMORY|HCD_BH,
+ .flags = HCD_USB2|HCD_MEMORY,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 95979f9f4381..eba962e6ebfb 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -79,7 +79,7 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+ .flags = HCD_MEMORY | HCD_USB2,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 9e0020d9e4c8..abd5050a4899 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -24,7 +24,7 @@ struct fsl_usb2_dev_data {
enum fsl_usb2_operating_modes op_mode; /* operating mode */
};
-struct fsl_usb2_dev_data dr_mode_data[] = {
+static struct fsl_usb2_dev_data dr_mode_data[] = {
{
.dr_mode = "host",
.drivers = { "fsl-ehci", NULL, NULL, },
@@ -42,7 +42,7 @@ struct fsl_usb2_dev_data dr_mode_data[] = {
},
};
-struct fsl_usb2_dev_data *get_dr_mode_data(struct device_node *np)
+static struct fsl_usb2_dev_data *get_dr_mode_data(struct device_node *np)
{
const unsigned char *prop;
int i;
@@ -75,7 +75,7 @@ static enum fsl_usb2_phy_modes determine_usb_phy(const char *phy_type)
return FSL_USB2_PHY_NONE;
}
-struct platform_device *fsl_usb2_device_register(
+static struct platform_device *fsl_usb2_device_register(
struct platform_device *ofdev,
struct fsl_usb2_platform_data *pdata,
const char *name, int id)
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 60a5de505ca1..adb01d950a16 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -824,13 +824,13 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
i = DIV_ROUND_UP(wrap_frame(
cur_frame - urb->start_frame),
urb->interval);
- if (urb->transfer_flags & URB_ISO_ASAP) {
+
+ /* Treat underruns as if URB_ISO_ASAP was set */
+ if ((urb->transfer_flags & URB_ISO_ASAP) ||
+ i >= urb->number_of_packets) {
urb->start_frame = wrap_frame(urb->start_frame
+ i * urb->interval);
i = 0;
- } else if (i >= urb->number_of_packets) {
- ret = -EXDEV;
- goto alloc_dmem_failed;
}
}
}
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 8f6b695af6a4..604cad1bcf9c 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -216,31 +216,26 @@ static int ohci_urb_enqueue (
frame &= ~(ed->interval - 1);
frame |= ed->branch;
urb->start_frame = frame;
+ ed->last_iso = frame + ed->interval * (size - 1);
}
} else if (ed->type == PIPE_ISOCHRONOUS) {
u16 next = ohci_frame_no(ohci) + 1;
u16 frame = ed->last_iso + ed->interval;
+ u16 length = ed->interval * (size - 1);
/* Behind the scheduling threshold? */
if (unlikely(tick_before(frame, next))) {
- /* USB_ISO_ASAP: Round up to the first available slot */
+ /* URB_ISO_ASAP: Round up to the first available slot */
if (urb->transfer_flags & URB_ISO_ASAP) {
frame += (next - frame + ed->interval - 1) &
-ed->interval;
/*
- * Not ASAP: Use the next slot in the stream. If
- * the entire URB falls before the threshold, fail.
+ * Not ASAP: Use the next slot in the stream,
+ * no matter what.
*/
} else {
- if (tick_before(frame + ed->interval *
- (urb->number_of_packets - 1), next)) {
- retval = -EXDEV;
- usb_hcd_unlink_urb_from_ep(hcd, urb);
- goto fail;
- }
-
/*
* Some OHCI hardware doesn't handle late TDs
* correctly. After retiring them it proceeds
@@ -251,9 +246,16 @@ static int ohci_urb_enqueue (
urb_priv->td_cnt = DIV_ROUND_UP(
(u16) (next - frame),
ed->interval);
+ if (urb_priv->td_cnt >= urb_priv->length) {
+ ++urb_priv->td_cnt; /* Mark it */
+ ohci_dbg(ohci, "iso underrun %p (%u+%u < %u)\n",
+ urb, frame, length,
+ next);
+ }
}
}
urb->start_frame = frame;
+ ed->last_iso = frame + length;
}
/* fill the TDs and link them to the ed; and
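In the ohci-hcd.c hunk above, the URB_ISO_ASAP path advances the start frame with frame += (next - frame + ed->interval - 1) & -ed->interval, i.e. it rounds the distance to the scheduling threshold up to a multiple of the power-of-two endpoint interval, in 16-bit frame-number arithmetic. A stand-alone sketch of just that arithmetic (round_up_frame and the example values are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Move "frame" forward to the first slot at or beyond "next" that stays
 * on the same interval boundary; interval must be a power of two. */
static uint16_t round_up_frame(uint16_t frame, uint16_t next, uint16_t interval)
{
	frame += (uint16_t)(next - frame + interval - 1) & (uint16_t)-interval;
	return frame;
}

int main(void)
{
	/* Fell behind: frame 100, threshold 107, interval 4 -> resume at 108. */
	printf("%u\n", (unsigned)round_up_frame(100, 107, 4));	/* prints 108 */
	return 0;
}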
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index df4a6707322d..e7f577e63624 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -41,9 +41,13 @@ finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
__releases(ohci->lock)
__acquires(ohci->lock)
{
- struct device *dev = ohci_to_hcd(ohci)->self.controller;
+ struct device *dev = ohci_to_hcd(ohci)->self.controller;
+ struct usb_host_endpoint *ep = urb->ep;
+ struct urb_priv *urb_priv;
+
// ASSERT (urb->hcpriv != 0);
+ restart:
urb_free_priv (ohci, urb->hcpriv);
urb->hcpriv = NULL;
if (likely(status == -EINPROGRESS))
@@ -80,6 +84,21 @@ __acquires(ohci->lock)
ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
}
+
+ /*
+ * An isochronous URB that is submitted too late won't have any TDs
+ * (marked by the fact that the td_cnt value is larger than the
+ * actual number of TDs). If the next URB on this endpoint is like
+ * that, give it back now.
+ */
+ if (!list_empty(&ep->urb_list)) {
+ urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
+ urb_priv = urb->hcpriv;
+ if (urb_priv->td_cnt > urb_priv->length) {
+ status = 0;
+ goto restart;
+ }
+ }
}
@@ -546,7 +565,6 @@ td_fill (struct ohci_hcd *ohci, u32 info,
td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
*ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
(data & 0x0FFF) | 0xE000);
- td->ed->last_iso = info & 0xffff;
} else {
td->hwCBP = cpu_to_hc32 (ohci, data);
}
@@ -996,7 +1014,7 @@ rescan_this:
urb_priv->td_cnt++;
/* if URB is done, clean up */
- if (urb_priv->td_cnt == urb_priv->length) {
+ if (urb_priv->td_cnt >= urb_priv->length) {
modified = completed = 1;
finish_urb(ohci, urb, 0);
}
@@ -1086,7 +1104,7 @@ static void takeback_td(struct ohci_hcd *ohci, struct td *td)
urb_priv->td_cnt++;
/* If all this urb's TDs are done, call complete() */
- if (urb_priv->td_cnt == urb_priv->length)
+ if (urb_priv->td_cnt >= urb_priv->length)
finish_urb(ohci, urb, status);
/* clean schedule: unlink EDs that are no longer busy */
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index c300bd2f7d1c..0f228c46eeda 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -293,7 +293,7 @@ static struct pci_driver uhci_pci_driver = {
.remove = usb_hcd_pci_remove,
.shutdown = uhci_shutdown,
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
.driver = {
.pm = &usb_hcd_pci_pm_ops
},
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 041c6ddb695c..da6f56d996ce 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -1303,7 +1303,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
}
/* Fell behind? */
- if (uhci_frame_before_eq(frame, next)) {
+ if (!uhci_frame_before_eq(next, frame)) {
/* USB_ISO_ASAP: Round up to the first available slot */
if (urb->transfer_flags & URB_ISO_ASAP)
@@ -1311,13 +1311,17 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
-qh->period;
/*
- * Not ASAP: Use the next slot in the stream. If
- * the entire URB falls before the threshold, fail.
+ * Not ASAP: Use the next slot in the stream,
+ * no matter what.
*/
else if (!uhci_frame_before_eq(next,
frame + (urb->number_of_packets - 1) *
qh->period))
- return -EXDEV;
+ dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n",
+ urb, frame,
+ (urb->number_of_packets - 1) *
+ qh->period,
+ next);
}
}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index fae697ed0b70..773a6b28c4f1 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -287,7 +287,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue)
xhci_queue_stop_endpoint(xhci, slot_id, i, suspend);
}
- cmd->command_trb = xhci->cmd_ring->enqueue;
+ cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list);
xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend);
xhci_ring_cmd_db(xhci);
@@ -552,11 +552,15 @@ void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex)
* - Mark a port as being done with device resume,
* and ring the endpoint doorbells.
* - Stop the Synopsys redriver Compliance Mode polling.
+ * - Drop and reacquire the xHCI lock, in order to wait for port resume.
*/
static u32 xhci_get_port_status(struct usb_hcd *hcd,
struct xhci_bus_state *bus_state,
__le32 __iomem **port_array,
- u16 wIndex, u32 raw_port_status)
+ u16 wIndex, u32 raw_port_status,
+ unsigned long flags)
+ __releases(&xhci->lock)
+ __acquires(&xhci->lock)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
u32 status = 0;
@@ -591,21 +595,42 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
return 0xffffffff;
if (time_after_eq(jiffies,
bus_state->resume_done[wIndex])) {
+ int time_left;
+
xhci_dbg(xhci, "Resume USB2 port %d\n",
wIndex + 1);
bus_state->resume_done[wIndex] = 0;
clear_bit(wIndex, &bus_state->resuming_ports);
+
+ set_bit(wIndex, &bus_state->rexit_ports);
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_U0);
- xhci_dbg(xhci, "set port %d resume\n",
- wIndex + 1);
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- wIndex + 1);
- if (!slot_id) {
- xhci_dbg(xhci, "slot_id is zero\n");
- return 0xffffffff;
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ time_left = wait_for_completion_timeout(
+ &bus_state->rexit_done[wIndex],
+ msecs_to_jiffies(
+ XHCI_MAX_REXIT_TIMEOUT));
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ if (time_left) {
+ slot_id = xhci_find_slot_id_by_port(hcd,
+ xhci, wIndex + 1);
+ if (!slot_id) {
+ xhci_dbg(xhci, "slot_id is zero\n");
+ return 0xffffffff;
+ }
+ xhci_ring_device(xhci, slot_id);
+ } else {
+ int port_status = xhci_readl(xhci,
+ port_array[wIndex]);
+ xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
+ XHCI_MAX_REXIT_TIMEOUT,
+ port_status);
+ status |= USB_PORT_STAT_SUSPEND;
+ clear_bit(wIndex, &bus_state->rexit_ports);
}
- xhci_ring_device(xhci, slot_id);
+
bus_state->port_c_suspend |= 1 << wIndex;
bus_state->suspended_ports &= ~(1 << wIndex);
} else {
@@ -728,7 +753,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
}
status = xhci_get_port_status(hcd, bus_state, port_array,
- wIndex, temp);
+ wIndex, temp, flags);
if (status == 0xffffffff)
goto error;
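The xhci-hub.c change above turns port-resume handling into a small handshake: the hub code marks the port in rexit_ports, drops xhci->lock, waits up to XHCI_MAX_REXIT_TIMEOUT for the port-event handler to signal rexit_done, then retakes the lock. A condensed, kernel-only sketch of that handshake (the demo_* names are invented; init_completion(), wait_for_completion_timeout(), complete() and the spinlock calls are the real APIs involved):

#include <linux/types.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

struct demo_port {
	spinlock_t lock;
	struct completion rexit_done;
};

/* Waiter side, entered with port->lock held and irqs saved in *flags. */
static bool demo_wait_rexit(struct demo_port *port, unsigned long *flags,
			    unsigned int timeout_ms)
__releases(&port->lock) __acquires(&port->lock)
{
	unsigned long left;

	spin_unlock_irqrestore(&port->lock, *flags);
	left = wait_for_completion_timeout(&port->rexit_done,
					   msecs_to_jiffies(timeout_ms));
	spin_lock_irqsave(&port->lock, *flags);
	return left != 0;		/* false: the timeout expired */
}

/* Event side, called when the port reports it has reached U0. */
static void demo_port_reached_u0(struct demo_port *port)
{
	complete(&port->rexit_done);
}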
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 53b972c2a09f..83bcd13622c3 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2428,6 +2428,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
for (i = 0; i < USB_MAXCHILDREN; ++i) {
xhci->bus_state[0].resume_done[i] = 0;
xhci->bus_state[1].resume_done[i] = 0;
+ /* Only the USB 2.0 completions will ever be used. */
+ init_completion(&xhci->bus_state[1].rexit_done[i]);
}
if (scratchpad_alloc(xhci, flags))
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c2d495057eb5..236c3aabe940 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -351,7 +351,7 @@ static struct pci_driver xhci_pci_driver = {
/* suspend and resume implemented later */
.shutdown = usb_hcd_pci_shutdown,
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
.driver = {
.pm = &usb_hcd_pci_pm_ops
},
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 411da1fc7ae8..6bfbd80ec2b9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -123,6 +123,16 @@ static int enqueue_is_link_trb(struct xhci_ring *ring)
return TRB_TYPE_LINK_LE32(link->control);
}
+union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
+{
+ /* Enqueue pointer can be left pointing to the link TRB,
+ * we must handle that
+ */
+ if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
+ return ring->enq_seg->next->trbs;
+ return ring->enqueue;
+}
+
/* Updates trb to point to the next TRB in the ring, and updates seg if the next
* TRB is in a new segment. This does not skip over link TRBs, and it does not
* affect the ring dequeue or enqueue pointers.
@@ -859,8 +869,12 @@ remove_finished_td:
/* Otherwise ring the doorbell(s) to restart queued transfers */
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
- ep->stopped_td = NULL;
- ep->stopped_trb = NULL;
+
+ /* Clear stopped_td and stopped_trb if endpoint is not halted */
+ if (!(ep->ep_state & EP_HALTED)) {
+ ep->stopped_td = NULL;
+ ep->stopped_trb = NULL;
+ }
/*
* Drop the lock and complete the URBs in the cancelled TD list.
@@ -1414,6 +1428,12 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
inc_deq(xhci, xhci->cmd_ring);
return;
}
+ /* There is no command to handle if we get a stop event when the
+ * command ring is empty; event->cmd_trb points to the next
+ * unset command
+ */
+ if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
+ return;
}
switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
@@ -1743,6 +1763,19 @@ static void handle_port_status(struct xhci_hcd *xhci,
}
}
+ /*
+ * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
+ * RExit to a disconnect state). If so, let the driver know it's
+ * out of the RExit state.
+ */
+ if (!DEV_SUPERSPEED(temp) &&
+ test_and_clear_bit(faked_port_index,
+ &bus_state->rexit_ports)) {
+ complete(&bus_state->rexit_done[faked_port_index]);
+ bogus_port_status = true;
+ goto cleanup;
+ }
+
if (hcd->speed != HCD_USB3)
xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
PORT_PLC);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 49b6edb84a79..1e36dbb48366 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -2598,15 +2598,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
if (command) {
cmd_completion = command->completion;
cmd_status = &command->status;
- command->command_trb = xhci->cmd_ring->enqueue;
-
- /* Enqueue pointer can be left pointing to the link TRB,
- * we must handle that
- */
- if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
- command->command_trb =
- xhci->cmd_ring->enq_seg->next->trbs;
-
+ command->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
} else {
cmd_completion = &virt_dev->cmd_completion;
@@ -2614,7 +2606,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
}
init_completion(cmd_completion);
- cmd_trb = xhci->cmd_ring->dequeue;
+ cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
if (!ctx_change)
ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
udev->slot_id, must_succeed);
@@ -3439,14 +3431,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
/* Attempt to submit the Reset Device command to the command ring */
spin_lock_irqsave(&xhci->lock, flags);
- reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
-
- /* Enqueue pointer can be left pointing to the link TRB,
- * we must handle that
- */
- if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
- reset_device_cmd->command_trb =
- xhci->cmd_ring->enq_seg->next->trbs;
+ reset_device_cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
ret = xhci_queue_reset_device(xhci, slot_id);
@@ -3650,7 +3635,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
union xhci_trb *cmd_trb;
spin_lock_irqsave(&xhci->lock, flags);
- cmd_trb = xhci->cmd_ring->dequeue;
+ cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3785,7 +3770,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
slot_ctx->dev_info >> 27);
spin_lock_irqsave(&xhci->lock, flags);
- cmd_trb = xhci->cmd_ring->dequeue;
+ cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
udev->slot_id);
if (ret) {
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 46aa14894148..289fbfbae746 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1412,8 +1412,18 @@ struct xhci_bus_state {
unsigned long resume_done[USB_MAXCHILDREN];
/* which ports have started to resume */
unsigned long resuming_ports;
+ /* Which ports are waiting on RExit to U0 transition. */
+ unsigned long rexit_ports;
+ struct completion rexit_done[USB_MAXCHILDREN];
};
+
+/*
+ * It can take up to 20 ms to transition from RExit to U0 on the
+ * Intel Lynx Point LP xHCI host.
+ */
+#define XHCI_MAX_REXIT_TIMEOUT (20 * 1000)
+
static inline unsigned int hcd_index(struct usb_hcd *hcd)
{
if (hcd->speed == HCD_USB3)
@@ -1840,6 +1850,7 @@ int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
union xhci_trb *cmd_trb);
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, unsigned int stream_id);
+union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring);
/* xHCI roothub code */
void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
diff --git a/drivers/usb/phy/phy-omap-usb3.c b/drivers/usb/phy/phy-omap-usb3.c
index fc15694d3031..4e8a0405f956 100644
--- a/drivers/usb/phy/phy-omap-usb3.c
+++ b/drivers/usb/phy/phy-omap-usb3.c
@@ -79,7 +79,7 @@ static struct usb_dpll_params *omap_usb3_get_dpll_params(unsigned long rate)
return &dpll_map[i].params;
}
- return 0;
+ return NULL;
}
static int omap_usb3_suspend(struct usb_phy *x, int suspend)
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index c454bfa22a10..ddb9c51f2c99 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -60,7 +60,7 @@ config USB_SERIAL_SIMPLE
- Suunto ANT+ USB device.
- Fundamental Software dongle.
- HP4x calculators
- - a number of Motoroloa phones
+ - a number of Motorola phones
- Siemens USB/MPI adapter.
- ViVOtech ViVOpay USB device.
- Infineon Modem Flashloader USB interface
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index e7a84f0f5179..bedf8e47713b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -139,6 +139,7 @@ enum pl2303_type {
HX_TA, /* HX(A) / X(A) / TA version */ /* TODO: improve */
HXD_EA_RA_SA, /* HXD / EA / RA / SA version */ /* TODO: improve */
TB, /* TB version */
+ HX_CLONE, /* Cheap and less functional clone of the HX chip */
};
/*
* NOTE: don't know the difference between type 0 and type 1,
@@ -206,8 +207,23 @@ static int pl2303_startup(struct usb_serial *serial)
* the device descriptors of the X/HX, HXD, EA, RA, SA, TA, TB
*/
if (le16_to_cpu(serial->dev->descriptor.bcdDevice) == 0x300) {
- type = HX_TA;
- type_str = "X/HX/TA";
+ /* Check if the device is a clone */
+ pl2303_vendor_read(0x9494, 0, serial, buf);
+ /*
+ * NOTE: Not sure if this read is really needed.
+ * The HX returns 0x00, the clone 0x02, but the Windows
+ * driver seems to ignore the value and continues.
+ */
+ pl2303_vendor_write(0x0606, 0xaa, serial);
+ pl2303_vendor_read(0x8686, 0, serial, buf);
+ if (buf[0] != 0xaa) {
+ type = HX_CLONE;
+ type_str = "X/HX clone (limited functionality)";
+ } else {
+ type = HX_TA;
+ type_str = "X/HX/TA";
+ }
+ pl2303_vendor_write(0x0606, 0x00, serial);
} else if (le16_to_cpu(serial->dev->descriptor.bcdDevice)
== 0x400) {
type = HXD_EA_RA_SA;
@@ -305,8 +321,9 @@ static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type,
{
/*
* NOTE: Only the values defined in baud_sup are supported !
- * => if unsupported values are set, the PL2303 seems to
- * use 9600 baud (at least my PL2303X always does)
+ * => if unsupported values are set, the PL2303 uses 9600 baud instead
+ * => HX clones just don't work at unsupported baud rates < 115200 baud;
+ * for baud rates > 115200 they run at 115200 baud
*/
const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600,
4800, 7200, 9600, 14400, 19200, 28800, 38400,
@@ -316,14 +333,14 @@ static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type,
* NOTE: With the exception of type_0/1 devices, the following
* additional baud rates are supported (tested with HX rev. 3A only):
* 110*, 56000*, 128000, 134400, 161280, 201600, 256000*, 268800,
- * 403200, 806400. (*: not HX)
+ * 403200, 806400. (*: not HX and HX clones)
*
* Maximum values: HXD, TB: 12000000; HX, TA: 6000000;
- * type_0+1: 1228800; RA: 921600; SA: 115200
+ * type_0+1: 1228800; RA: 921600; HX clones, SA: 115200
*
* As long as we are not using this encoding method for anything else
- * than the type_0+1 and HX chips, there is no point in complicating
- * the code to support them.
+ * than the type_0+1, HX and HX clone chips, there is no point in
+ * complicating the code to support them.
*/
int i;
@@ -347,6 +364,8 @@ static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type,
baud = min_t(int, baud, 6000000);
else if (type == type_0 || type == type_1)
baud = min_t(int, baud, 1228800);
+ else if (type == HX_CLONE)
+ baud = min_t(int, baud, 115200);
/* Direct (standard) baud rate encoding method */
put_unaligned_le32(baud, buf);
@@ -359,7 +378,8 @@ static int pl2303_baudrate_encode_divisor(int baud, enum pl2303_type type,
/*
* Divisor based baud rate encoding method
*
- * NOTE: it's not clear if the type_0/1 chips support this method
+ * NOTE: HX clones do NOT support this method.
+ * It's not clear if the type_0/1 chips support it.
*
* divisor = 12MHz * 32 / baudrate = 2^A * B
*
@@ -452,7 +472,7 @@ static void pl2303_encode_baudrate(struct tty_struct *tty,
* 1) Direct method: encodes the baud rate value directly
* => supported by all chip types
* 2) Divisor based method: encodes a divisor to a base value (12MHz*32)
- * => supported by HX chips (and likely not by type_0/1 chips)
+ * => not supported by HX clones (and likely type_0/1 chips)
*
* NOTE: Although the divisor based baud rate encoding method is much
* more flexible, some of the standard baud rate values can not be
@@ -460,7 +480,7 @@ static void pl2303_encode_baudrate(struct tty_struct *tty,
* the device likely uses the same baud rate generator for both methods
* so that there is likely no difference.
*/
- if (type == type_0 || type == type_1)
+ if (type == type_0 || type == type_1 || type == HX_CLONE)
baud = pl2303_baudrate_encode_direct(baud, type, buf);
else
baud = pl2303_baudrate_encode_divisor(baud, type, buf);
@@ -813,6 +833,7 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
BREAK_REQUEST, BREAK_REQUEST_TYPE, state,
0, NULL, 0, 100);
+ /* NOTE: HX clones don't support sending breaks; -EPIPE is returned */
if (result)
dev_err(&port->dev, "error sending break = %d\n", result);
}
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 4b79a1f2f901..592b31698fc8 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -461,7 +461,7 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
u32 i;
for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
put_page(sg_page(&tv_cmd->tvc_sgl[i]));
- }
+ }
tcm_vhost_put_inflight(tv_cmd->inflight);
percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
@@ -1373,21 +1373,30 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
return 0;
}
+static void vhost_scsi_free(struct vhost_scsi *vs)
+{
+ if (is_vmalloc_addr(vs))
+ vfree(vs);
+ else
+ kfree(vs);
+}
+
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
struct vhost_scsi *vs;
struct vhost_virtqueue **vqs;
- int r, i;
+ int r = -ENOMEM, i;
- vs = kzalloc(sizeof(*vs), GFP_KERNEL);
- if (!vs)
- return -ENOMEM;
+ vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ if (!vs) {
+ vs = vzalloc(sizeof(*vs));
+ if (!vs)
+ goto err_vs;
+ }
vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
- if (!vqs) {
- kfree(vs);
- return -ENOMEM;
- }
+ if (!vqs)
+ goto err_vqs;
vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
@@ -1407,14 +1416,18 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
tcm_vhost_init_inflight(vs, NULL);
- if (r < 0) {
- kfree(vqs);
- kfree(vs);
- return r;
- }
+ if (r < 0)
+ goto err_init;
f->private_data = vs;
return 0;
+
+err_init:
+ kfree(vqs);
+err_vqs:
+ vhost_scsi_free(vs);
+err_vs:
+ return r;
}
static int vhost_scsi_release(struct inode *inode, struct file *f)
@@ -1431,7 +1444,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
vhost_scsi_flush(vs);
kfree(vs->dev.vqs);
- kfree(vs);
+ vhost_scsi_free(vs);
return 0;
}
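The vhost_scsi_open()/vhost_scsi_free() change above switches the large struct vhost_scsi to a kzalloc() attempt (quieted with __GFP_NOWARN | __GFP_REPEAT) with a vzalloc() fallback, and frees it with whichever of kfree()/vfree() matches the address. A condensed sketch of that pattern (big_alloc/big_free are invented names, and the snippet only builds inside the kernel):

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

/* Try the slab allocator first; fall back to vmalloc for large objects. */
static void *big_alloc(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);

	if (!p)
		p = vzalloc(size);	/* zeroed, like kzalloc() */
	return p;
}

/* Free with the routine that matches how the memory was obtained. */
static void big_free(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}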
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9a9502a4aa50..69068e0d8f31 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -161,9 +161,11 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
if (list_empty(&work->node)) {
list_add_tail(&work->node, &dev->work_list);
work->queue_seq++;
+ spin_unlock_irqrestore(&dev->work_lock, flags);
wake_up_process(dev->worker);
+ } else {
+ spin_unlock_irqrestore(&dev->work_lock, flags);
}
- spin_unlock_irqrestore(&dev->work_lock, flags);
}
EXPORT_SYMBOL_GPL(vhost_work_queue);
diff --git a/drivers/video/mmp/hw/mmp_ctrl.c b/drivers/video/mmp/hw/mmp_ctrl.c
index 75dca19bf214..6ac755270ab4 100644
--- a/drivers/video/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/mmp/hw/mmp_ctrl.c
@@ -514,7 +514,7 @@ static int mmphw_probe(struct platform_device *pdev)
if (IS_ERR(ctrl->clk)) {
dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name);
ret = -ENOENT;
- goto failed_get_clk;
+ goto failed;
}
clk_prepare_enable(ctrl->clk);
@@ -551,21 +551,8 @@ failed_path_init:
path_deinit(path_plat);
}
- if (ctrl->clk) {
- devm_clk_put(ctrl->dev, ctrl->clk);
- clk_disable_unprepare(ctrl->clk);
- }
-failed_get_clk:
- devm_free_irq(ctrl->dev, ctrl->irq, ctrl);
+ clk_disable_unprepare(ctrl->clk);
failed:
- if (ctrl) {
- if (ctrl->reg_base)
- devm_iounmap(ctrl->dev, ctrl->reg_base);
- devm_release_mem_region(ctrl->dev, res->start,
- resource_size(res));
- devm_kfree(ctrl->dev, ctrl);
- }
-
dev_err(&pdev->dev, "device init failed\n");
return ret;
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index d250ed0f806d..27197a8048c0 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -620,6 +620,7 @@ static int mxsfb_restore_mode(struct mxsfb_info *host)
break;
case 3:
bits_per_pixel = 32;
+ break;
case 1:
default:
return -EINVAL;
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index 7ef079c146e7..c172a5281f9e 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -2075,6 +2075,7 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (!fb_find_mode(&info->var, info, mode_option, NULL, 0,
info->monspecs.modedb, 16)) {
printk(KERN_ERR "neofb: Unable to find usable video mode.\n");
+ err = -EINVAL;
goto err_map_video;
}
@@ -2097,7 +2098,8 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
info->fix.smem_len >> 10, info->var.xres,
info->var.yres, h_sync / 1000, h_sync % 1000, v_sync);
- if (fb_alloc_cmap(&info->cmap, 256, 0) < 0)
+ err = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (err < 0)
goto err_map_video;
err = register_framebuffer(info);
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index 171821ddd78d..ba5b40f581f6 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -120,7 +120,7 @@ int of_get_display_timing(struct device_node *np, const char *name,
return -EINVAL;
}
- timing_np = of_find_node_by_name(np, name);
+ timing_np = of_get_child_by_name(np, name);
if (!timing_np) {
pr_err("%s: could not find node '%s'\n",
of_node_full_name(np), name);
@@ -143,11 +143,11 @@ struct display_timings *of_get_display_timings(struct device_node *np)
struct display_timings *disp;
if (!np) {
- pr_err("%s: no devicenode given\n", of_node_full_name(np));
+ pr_err("%s: no device node given\n", of_node_full_name(np));
return NULL;
}
- timings_np = of_find_node_by_name(np, "display-timings");
+ timings_np = of_get_child_by_name(np, "display-timings");
if (!timings_np) {
pr_err("%s: could not find display-timings node\n",
of_node_full_name(np));
diff --git a/drivers/video/omap2/displays-new/Kconfig b/drivers/video/omap2/displays-new/Kconfig
index 6c90885b0940..10b25e7cd878 100644
--- a/drivers/video/omap2/displays-new/Kconfig
+++ b/drivers/video/omap2/displays-new/Kconfig
@@ -35,6 +35,7 @@ config DISPLAY_PANEL_DPI
config DISPLAY_PANEL_DSI_CM
tristate "Generic DSI Command Mode Panel"
+ depends on BACKLIGHT_CLASS_DEVICE
help
Driver for generic DSI command mode panels.
diff --git a/drivers/video/omap2/displays-new/connector-analog-tv.c b/drivers/video/omap2/displays-new/connector-analog-tv.c
index 1b60698f141e..ccd9073f706f 100644
--- a/drivers/video/omap2/displays-new/connector-analog-tv.c
+++ b/drivers/video/omap2/displays-new/connector-analog-tv.c
@@ -191,7 +191,7 @@ static int tvc_probe_pdata(struct platform_device *pdev)
in = omap_dss_find_output(pdata->source);
if (in == NULL) {
dev_err(&pdev->dev, "Failed to find video source\n");
- return -ENODEV;
+ return -EPROBE_DEFER;
}
ddata->in = in;
diff --git a/drivers/video/omap2/displays-new/connector-dvi.c b/drivers/video/omap2/displays-new/connector-dvi.c
index bc5f8ceda371..63d88ee6dfe4 100644
--- a/drivers/video/omap2/displays-new/connector-dvi.c
+++ b/drivers/video/omap2/displays-new/connector-dvi.c
@@ -263,7 +263,7 @@ static int dvic_probe_pdata(struct platform_device *pdev)
in = omap_dss_find_output(pdata->source);
if (in == NULL) {
dev_err(&pdev->dev, "Failed to find video source\n");
- return -ENODEV;
+ return -EPROBE_DEFER;
}
ddata->in = in;
diff --git a/drivers/video/omap2/displays-new/connector-hdmi.c b/drivers/video/omap2/displays-new/connector-hdmi.c
index c5826716d6ab..9abe2c039ae9 100644
--- a/drivers/video/omap2/displays-new/connector-hdmi.c
+++ b/drivers/video/omap2/displays-new/connector-hdmi.c
@@ -290,7 +290,7 @@ static int hdmic_probe_pdata(struct platform_device *pdev)
in = omap_dss_find_output(pdata->source);
if (in == NULL) {
dev_err(&pdev->dev, "Failed to find video source\n");
- return -ENODEV;
+ return -EPROBE_DEFER;
}
ddata->in = in;
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 02a7340111df..477975009eee 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -3691,6 +3691,7 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
+ pm_runtime_irq_safe(&pdev->dev);
r = dispc_runtime_get();
if (r)
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 47ca86c5c6c0..d838ba829459 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -1336,14 +1336,7 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
(info->var.bits_per_pixel * info->var.xres_virtual);
if (info->var.yres_virtual < info->var.yres) {
dev_err(info->device, "virtual vertical size smaller than real\n");
- goto err_find_mode;
- }
-
- /* maximize virtual vertical size for fast scrolling */
- info->var.yres_virtual = info->fix.smem_len * 8 /
- (info->var.bits_per_pixel * info->var.xres_virtual);
- if (info->var.yres_virtual < info->var.yres) {
- dev_err(info->device, "virtual vertical size smaller than real\n");
+ rc = -EINVAL;
goto err_find_mode;
}
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index a50c6e3a7cc4..b232908a6192 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -398,8 +398,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
if (nr_pages > ARRAY_SIZE(frame_list))
nr_pages = ARRAY_SIZE(frame_list);
- scratch_page = get_balloon_scratch_page();
-
for (i = 0; i < nr_pages; i++) {
page = alloc_page(gfp);
if (page == NULL) {
@@ -413,6 +411,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
scrub_page(page);
+ /*
+ * Ballooned out frames are effectively replaced with
+ * a scratch frame. Ensure direct mappings and the
+ * p2m are consistent.
+ */
+ scratch_page = get_balloon_scratch_page();
#ifdef CONFIG_XEN_HAVE_PVMMU
if (xen_pv_domain() && !PageHighMem(page)) {
ret = HYPERVISOR_update_va_mapping(
@@ -422,24 +426,19 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
BUG_ON(ret);
}
#endif
- }
-
- /* Ensure that ballooned highmem pages don't have kmaps. */
- kmap_flush_unused();
- flush_tlb_all();
-
- /* No more mappings: invalidate P2M and add to balloon. */
- for (i = 0; i < nr_pages; i++) {
- pfn = mfn_to_pfn(frame_list[i]);
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
unsigned long p;
p = page_to_pfn(scratch_page);
__set_phys_to_machine(pfn, pfn_to_mfn(p));
}
+ put_balloon_scratch_page();
+
balloon_append(pfn_to_page(pfn));
}
- put_balloon_scratch_page();
+ /* Ensure that ballooned highmem pages don't have kmaps. */
+ kmap_flush_unused();
+ flush_tlb_all();
set_xen_guest_handle(reservation.extent_start, frame_list);
reservation.nr_extents = nr_pages;
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 58e6cbce4156..08f2e1e9a7e6 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -603,10 +603,11 @@ static int v9fs_cache_register(void)
if (ret < 0)
return ret;
#ifdef CONFIG_9P_FSCACHE
- return fscache_register_netfs(&v9fs_cache_netfs);
-#else
- return ret;
+ ret = fscache_register_netfs(&v9fs_cache_netfs);
+ if (ret < 0)
+ v9fs_destroy_inode_cache();
#endif
+ return ret;
}
static void v9fs_cache_unregister(void)
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 53687bbf2296..a7c481402c46 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -267,14 +267,8 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
}
/* Only creates */
- if (!(flags & O_CREAT))
+ if (!(flags & O_CREAT) || dentry->d_inode)
return finish_no_open(file, res);
- else if (dentry->d_inode) {
- if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
- return -EEXIST;
- else
- return finish_no_open(file, res);
- }
v9ses = v9fs_inode2v9ses(dir);
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 3db70dae40d3..689e40d983ad 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -109,13 +109,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
pkt.hdr.proto_version = sbi->version;
pkt.hdr.type = type;
- mutex_lock(&sbi->wq_mutex);
- /* Check if we have become catatonic */
- if (sbi->catatonic) {
- mutex_unlock(&sbi->wq_mutex);
- return;
- }
switch (type) {
/* Kernel protocol v4 missing and expire packets */
case autofs_ptype_missing:
@@ -427,7 +421,6 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
wq->tgid = current->tgid;
wq->status = -EINTR; /* Status return if interrupted */
wq->wait_ctr = 2;
- mutex_unlock(&sbi->wq_mutex);
if (sbi->version < 5) {
if (notify == NFY_MOUNT)
@@ -449,15 +442,15 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
(unsigned long) wq->wait_queue_token, wq->name.len,
wq->name.name, notify);
- /* autofs4_notify_daemon() may block */
+ /* autofs4_notify_daemon() may block; it will unlock ->wq_mutex */
autofs4_notify_daemon(sbi, wq, type);
} else {
wq->wait_ctr++;
- mutex_unlock(&sbi->wq_mutex);
- kfree(qstr.name);
DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d",
(unsigned long) wq->wait_queue_token, wq->name.len,
wq->name.name, notify);
+ mutex_unlock(&sbi->wq_mutex);
+ kfree(qstr.name);
}
/*
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 60250847929f..fc60b31453ee 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -735,7 +735,7 @@ void bioset_integrity_free(struct bio_set *bs)
mempool_destroy(bs->bio_integrity_pool);
if (bs->bvec_integrity_pool)
- mempool_destroy(bs->bio_integrity_pool);
+ mempool_destroy(bs->bvec_integrity_pool);
}
EXPORT_SYMBOL(bioset_integrity_free);
diff --git a/fs/bio.c b/fs/bio.c
index b3b20ed9510e..ea5035da4d9a 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -917,8 +917,8 @@ void bio_copy_data(struct bio *dst, struct bio *src)
src_p = kmap_atomic(src_bv->bv_page);
dst_p = kmap_atomic(dst_bv->bv_page);
- memcpy(dst_p + dst_bv->bv_offset,
- src_p + src_bv->bv_offset,
+ memcpy(dst_p + dst_offset,
+ src_p + src_offset,
bytes);
kunmap_atomic(dst_p);
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index d0ae226926ee..71f074e1870b 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -213,7 +213,10 @@ static inline bool btrfs_is_free_space_inode(struct inode *inode)
static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
{
if (BTRFS_I(inode)->logged_trans == generation &&
- BTRFS_I(inode)->last_sub_trans <= BTRFS_I(inode)->last_log_commit)
+ BTRFS_I(inode)->last_sub_trans <=
+ BTRFS_I(inode)->last_log_commit &&
+ BTRFS_I(inode)->last_sub_trans <=
+ BTRFS_I(inode)->root->last_log_commit)
return 1;
return 0;
}
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 64346721173f..61b5bcd57b7e 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1005,8 +1005,11 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
return ret;
}
- if (root->ref_cows)
- btrfs_reloc_cow_block(trans, root, buf, cow);
+ if (root->ref_cows) {
+ ret = btrfs_reloc_cow_block(trans, root, buf, cow);
+ if (ret)
+ return ret;
+ }
if (buf == root->node) {
WARN_ON(parent && parent != buf);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 3c1da6f98a4d..0506f40ede83 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1118,15 +1118,6 @@ struct btrfs_space_info {
*/
struct percpu_counter total_bytes_pinned;
- /*
- * we bump reservation progress every time we decrement
- * bytes_reserved. This way people waiting for reservations
- * know something good has happened and they can check
- * for progress. The number here isn't to be trusted, it
- * just shows reclaim activity
- */
- unsigned long reservation_progress;
-
unsigned int full:1; /* indicates that we cannot allocate any more
chunks for this space */
unsigned int chunk_alloc:1; /* set if we are allocating a chunk */
@@ -3135,7 +3126,7 @@ static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root,
unsigned num_items)
{
return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
- 3 * num_items;
+ 2 * num_items;
}
/*
@@ -3939,9 +3930,9 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_recover_relocation(struct btrfs_root *root);
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len);
-void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct extent_buffer *buf,
- struct extent_buffer *cow);
+int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct extent_buffer *buf,
+ struct extent_buffer *cow);
void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
struct btrfs_pending_snapshot *pending,
u64 *bytes_to_reserve);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index a64435359385..70681686e8dc 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -400,7 +400,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
btrfs_dev_replace_unlock(dev_replace);
- btrfs_wait_all_ordered_extents(root->fs_info, 0);
+ btrfs_wait_all_ordered_extents(root->fs_info);
/* force writing the updated state information to disk */
trans = btrfs_start_transaction(root, 0);
@@ -475,7 +475,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
return ret;
}
- btrfs_wait_all_ordered_extents(root->fs_info, 0);
+ btrfs_wait_all_ordered_extents(root->fs_info);
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 4cbb00af92ff..4ae17ed13b32 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -157,6 +157,7 @@ static struct btrfs_lockdep_keyset {
{ .id = BTRFS_TREE_LOG_OBJECTID, .name_stem = "log" },
{ .id = BTRFS_TREE_RELOC_OBJECTID, .name_stem = "treloc" },
{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" },
+ { .id = BTRFS_UUID_TREE_OBJECTID, .name_stem = "uuid" },
{ .id = 0, .name_stem = "tree" },
};
@@ -3415,6 +3416,7 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors)
if (total_errors > max_errors) {
printk(KERN_ERR "btrfs: %d errors while writing supers\n",
total_errors);
+ mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
/* FUA is masked off if unsupported and can't be the reason */
btrfs_error(root->fs_info, -EIO,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index cfb3cf711b34..d58bef130a41 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3925,7 +3925,6 @@ static int can_overcommit(struct btrfs_root *root,
u64 space_size;
u64 avail;
u64 used;
- u64 to_add;
used = space_info->bytes_used + space_info->bytes_reserved +
space_info->bytes_pinned + space_info->bytes_readonly;
@@ -3959,25 +3958,17 @@ static int can_overcommit(struct btrfs_root *root,
BTRFS_BLOCK_GROUP_RAID10))
avail >>= 1;
- to_add = space_info->total_bytes;
-
/*
* If we aren't flushing all things, let us overcommit up to
* 1/2th of the space. If we can flush, don't let us overcommit
* too much, let it overcommit up to 1/8 of the space.
*/
if (flush == BTRFS_RESERVE_FLUSH_ALL)
- to_add >>= 3;
+ avail >>= 3;
else
- to_add >>= 1;
-
- /*
- * Limit the overcommit to the amount of free space we could possibly
- * allocate for chunks.
- */
- to_add = min(avail, to_add);
+ avail >>= 1;
- if (used + bytes < space_info->total_bytes + to_add)
+ if (used + bytes < space_info->total_bytes + avail)
return 1;
return 0;
}
@@ -4000,7 +3991,7 @@ static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
*/
btrfs_start_all_delalloc_inodes(root->fs_info, 0);
if (!current->journal_info)
- btrfs_wait_all_ordered_extents(root->fs_info, 0);
+ btrfs_wait_all_ordered_extents(root->fs_info);
}
}
@@ -4030,7 +4021,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
if (delalloc_bytes == 0) {
if (trans)
return;
- btrfs_wait_all_ordered_extents(root->fs_info, 0);
+ btrfs_wait_all_ordered_extents(root->fs_info);
return;
}
@@ -4058,7 +4049,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
loops++;
if (wait_ordered && !trans) {
- btrfs_wait_all_ordered_extents(root->fs_info, 0);
+ btrfs_wait_all_ordered_extents(root->fs_info);
} else {
time_left = schedule_timeout_killable(1);
if (time_left)
@@ -4465,7 +4456,6 @@ static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
space_info->bytes_may_use -= num_bytes;
trace_btrfs_space_reservation(fs_info, "space_info",
space_info->flags, num_bytes, 0);
- space_info->reservation_progress++;
spin_unlock(&space_info->lock);
}
}
@@ -4666,7 +4656,6 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
sinfo->bytes_may_use -= num_bytes;
trace_btrfs_space_reservation(fs_info, "space_info",
sinfo->flags, num_bytes, 0);
- sinfo->reservation_progress++;
block_rsv->reserved = block_rsv->size;
block_rsv->full = 1;
}
@@ -5446,7 +5435,6 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
space_info->bytes_readonly += num_bytes;
cache->reserved -= num_bytes;
space_info->bytes_reserved -= num_bytes;
- space_info->reservation_progress++;
}
spin_unlock(&cache->lock);
spin_unlock(&space_info->lock);
@@ -6117,10 +6105,13 @@ enum btrfs_loop_type {
/*
* walks the btree of allocated extents and find a hole of a given size.
* The key ins is changed to record the hole:
- * ins->objectid == block start
+ * ins->objectid == start position
* ins->flags = BTRFS_EXTENT_ITEM_KEY
- * ins->offset == number of blocks
+ * ins->offset == the size of the hole.
* Any available blocks before search_start are skipped.
+ *
+ * If there is no suitable free space, we will record the size of the
+ * largest free space extent we found.
*/
static noinline int find_free_extent(struct btrfs_root *orig_root,
u64 num_bytes, u64 empty_size,
@@ -6133,6 +6124,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
struct btrfs_block_group_cache *block_group = NULL;
struct btrfs_block_group_cache *used_block_group;
u64 search_start = 0;
+ u64 max_extent_size = 0;
int empty_cluster = 2 * 1024 * 1024;
struct btrfs_space_info *space_info;
int loop = 0;
@@ -6292,7 +6284,10 @@ have_block_group:
btrfs_get_block_group(used_block_group);
offset = btrfs_alloc_from_cluster(used_block_group,
- last_ptr, num_bytes, used_block_group->key.objectid);
+ last_ptr,
+ num_bytes,
+ used_block_group->key.objectid,
+ &max_extent_size);
if (offset) {
/* we have a block, we're done */
spin_unlock(&last_ptr->refill_lock);
@@ -6355,8 +6350,10 @@ refill_cluster:
* cluster
*/
offset = btrfs_alloc_from_cluster(block_group,
- last_ptr, num_bytes,
- search_start);
+ last_ptr,
+ num_bytes,
+ search_start,
+ &max_extent_size);
if (offset) {
/* we found one, proceed */
spin_unlock(&last_ptr->refill_lock);
@@ -6391,13 +6388,18 @@ unclustered_alloc:
if (cached &&
block_group->free_space_ctl->free_space <
num_bytes + empty_cluster + empty_size) {
+ if (block_group->free_space_ctl->free_space >
+ max_extent_size)
+ max_extent_size =
+ block_group->free_space_ctl->free_space;
spin_unlock(&block_group->free_space_ctl->tree_lock);
goto loop;
}
spin_unlock(&block_group->free_space_ctl->tree_lock);
offset = btrfs_find_space_for_alloc(block_group, search_start,
- num_bytes, empty_size);
+ num_bytes, empty_size,
+ &max_extent_size);
/*
* If we didn't find a chunk, and we haven't failed on this
* block group before, and this block group is in the middle of
@@ -6515,7 +6517,8 @@ loop:
ret = 0;
}
out:
-
+ if (ret == -ENOSPC)
+ ins->offset = max_extent_size;
return ret;
}
@@ -6573,8 +6576,8 @@ again:
flags);
if (ret == -ENOSPC) {
- if (!final_tried) {
- num_bytes = num_bytes >> 1;
+ if (!final_tried && ins->offset) {
+ num_bytes = min(num_bytes >> 1, ins->offset);
num_bytes = round_down(num_bytes, root->sectorsize);
num_bytes = max(num_bytes, min_alloc_size);
if (num_bytes == min_alloc_size)
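The -ENOSPC path above now returns the largest free extent it saw in ins->offset, and the retry in btrfs_reserve_extent uses that as an upper bound for its next, smaller request. A minimal userspace sketch of that shrink-and-retry step (constants and helper names are illustrative, not the btrfs API):

#include <stdint.h>
#include <stdio.h>

#define SECTORSIZE     4096ULL
#define MIN_ALLOC_SIZE (256ULL * 1024)

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }
static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t round_down_u64(uint64_t x, uint64_t align) { return x - (x % align); }

/* Mirror of the retry step: halve the request, but never ask for more
 * than the largest free extent the allocator reported (the hint). */
static uint64_t next_request(uint64_t num_bytes, uint64_t hint)
{
	num_bytes = min_u64(num_bytes >> 1, hint);
	num_bytes = round_down_u64(num_bytes, SECTORSIZE);
	return max_u64(num_bytes, MIN_ALLOC_SIZE);
}

int main(void)
{
	uint64_t req = 8ULL * 1024 * 1024;  /* first attempt: 8 MiB */
	uint64_t hint = 900 * 1024;         /* allocator says: largest hole ~900 KiB */

	/* Simulate a couple of -ENOSPC retries. */
	for (int i = 0; i < 3; i++) {
		req = next_request(req, hint);
		printf("retry %d: request %llu bytes\n", i, (unsigned long long)req);
	}
	return 0;
}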
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 09582b81640c..c09a40db53db 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1481,10 +1481,12 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
*end = state->end;
cur_start = state->end + 1;
node = rb_next(node);
- if (!node)
- break;
total_bytes += state->end - state->start + 1;
- if (total_bytes >= max_bytes)
+ if (total_bytes >= max_bytes) {
+ *end = *start + max_bytes - 1;
+ break;
+ }
+ if (!node)
break;
}
out:
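The reordering above counts the last extent state before checking for the end of the tree and clamps the reported range to max_bytes. A small standalone sketch of the clamping idea (toy range structs, not the extent_io state machine):

#include <stdint.h>
#include <stdio.h>

/* Toy version of the clamping added above: accumulate contiguous ranges,
 * but cap the reported end so the range never exceeds max_bytes. */
struct range { uint64_t start, end; };

static uint64_t find_range(const struct range *r, int n, uint64_t max_bytes,
			   uint64_t *start, uint64_t *end)
{
	uint64_t total = 0;

	*start = r[0].start;
	for (int i = 0; i < n; i++) {
		*end = r[i].end;
		total += r[i].end - r[i].start + 1;
		if (total >= max_bytes) {
			*end = *start + max_bytes - 1;   /* the new clamp */
			break;
		}
	}
	return total;
}

int main(void)
{
	struct range r[] = { { 0, 4095 }, { 4096, 65535 } };
	uint64_t s, e;

	find_range(r, 2, 8192, &s, &e);
	printf("delalloc-style range: %llu-%llu\n",
	       (unsigned long long)s, (unsigned long long)e);
	return 0;
}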
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index bc5072b2db53..72da4df53c9a 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1859,8 +1859,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
ret = btrfs_log_dentry_safe(trans, root, dentry);
if (ret < 0) {
- mutex_unlock(&inode->i_mutex);
- goto out;
+ /* Fallthrough and commit/free transaction. */
+ ret = 1;
}
/* we've logged all the items and now have a consistent
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 3f0ddfce96e6..b4f9904c4c6b 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1431,13 +1431,19 @@ static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
ctl->free_space += bytes;
}
+/*
+ * If we cannot find a suitable extent, we will use bytes to record
+ * the size of the max extent.
+ */
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *bitmap_info, u64 *offset,
u64 *bytes)
{
unsigned long found_bits = 0;
+ unsigned long max_bits = 0;
unsigned long bits, i;
unsigned long next_zero;
+ unsigned long extent_bits;
i = offset_to_bit(bitmap_info->offset, ctl->unit,
max_t(u64, *offset, bitmap_info->offset));
@@ -1446,9 +1452,12 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
next_zero = find_next_zero_bit(bitmap_info->bitmap,
BITS_PER_BITMAP, i);
- if ((next_zero - i) >= bits) {
- found_bits = next_zero - i;
+ extent_bits = next_zero - i;
+ if (extent_bits >= bits) {
+ found_bits = extent_bits;
break;
+ } else if (extent_bits > max_bits) {
+ max_bits = extent_bits;
}
i = next_zero;
}
@@ -1459,38 +1468,41 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
return 0;
}
+ *bytes = (u64)(max_bits) * ctl->unit;
return -1;
}
+/* Cache the size of the max extent in bytes */
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
- unsigned long align)
+ unsigned long align, u64 *max_extent_size)
{
struct btrfs_free_space *entry;
struct rb_node *node;
- u64 ctl_off;
u64 tmp;
u64 align_off;
int ret;
if (!ctl->free_space_offset.rb_node)
- return NULL;
+ goto out;
entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
if (!entry)
- return NULL;
+ goto out;
for (node = &entry->offset_index; node; node = rb_next(node)) {
entry = rb_entry(node, struct btrfs_free_space, offset_index);
- if (entry->bytes < *bytes)
+ if (entry->bytes < *bytes) {
+ if (entry->bytes > *max_extent_size)
+ *max_extent_size = entry->bytes;
continue;
+ }
/* make sure the space returned is big enough
* to match our requested alignment
*/
if (*bytes >= align) {
- ctl_off = entry->offset - ctl->start;
- tmp = ctl_off + align - 1;;
+ tmp = entry->offset - ctl->start + align - 1;
do_div(tmp, align);
tmp = tmp * align + ctl->start;
align_off = tmp - entry->offset;
@@ -1499,14 +1511,22 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
tmp = entry->offset;
}
- if (entry->bytes < *bytes + align_off)
+ if (entry->bytes < *bytes + align_off) {
+ if (entry->bytes > *max_extent_size)
+ *max_extent_size = entry->bytes;
continue;
+ }
if (entry->bitmap) {
- ret = search_bitmap(ctl, entry, &tmp, bytes);
+ u64 size = *bytes;
+
+ ret = search_bitmap(ctl, entry, &tmp, &size);
if (!ret) {
*offset = tmp;
+ *bytes = size;
return entry;
+ } else if (size > *max_extent_size) {
+ *max_extent_size = size;
}
continue;
}
@@ -1515,7 +1535,7 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
*bytes = entry->bytes - align_off;
return entry;
}
-
+out:
return NULL;
}
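When search_bitmap() and find_free_space() fail above, they now report the largest run they saw so the caller can size its next request. A self-contained sketch of scanning a bitmap for a run of set bits while recording the longest run (simplified semantics; the kernel operates on free-space bitmaps in cluster units):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Look for a run of at least 'want' consecutive set bits.  On success
 * return true and the run start; on failure return false and store the
 * longest run seen in *max_run (the max_extent_size idea). */
static bool find_run(const uint8_t *bitmap, unsigned nbits, unsigned want,
		     unsigned *start, unsigned *max_run)
{
	unsigned run = 0;

	*max_run = 0;
	for (unsigned i = 0; i < nbits; i++) {
		bool set = bitmap[i / 8] & (1u << (i % 8));

		if (set) {
			if (run++ == 0)
				*start = i;
			if (run >= want)
				return true;
		} else {
			if (run > *max_run)
				*max_run = run;
			run = 0;
		}
	}
	if (run > *max_run)
		*max_run = run;
	return false;
}

int main(void)
{
	uint8_t bitmap[] = { 0x0f, 0xf0 };   /* bits 0-3 and 12-15 set */
	unsigned start = 0, max_run;

	if (!find_run(bitmap, 16, 6, &start, &max_run))
		printf("no run of 6 bits; longest free run is %u bits\n", max_run);
	return 0;
}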
@@ -2116,7 +2136,8 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
}
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
- u64 offset, u64 bytes, u64 empty_size)
+ u64 offset, u64 bytes, u64 empty_size,
+ u64 *max_extent_size)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *entry = NULL;
@@ -2127,7 +2148,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
spin_lock(&ctl->tree_lock);
entry = find_free_space(ctl, &offset, &bytes_search,
- block_group->full_stripe_len);
+ block_group->full_stripe_len, max_extent_size);
if (!entry)
goto out;
@@ -2137,7 +2158,6 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
if (!entry->bytes)
free_bitmap(ctl, entry);
} else {
-
unlink_free_space(ctl, entry);
align_gap_len = offset - entry->offset;
align_gap = entry->offset;
@@ -2151,7 +2171,6 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
else
link_free_space(ctl, entry);
}
-
out:
spin_unlock(&ctl->tree_lock);
@@ -2206,7 +2225,8 @@ int btrfs_return_cluster_to_free_space(
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster,
struct btrfs_free_space *entry,
- u64 bytes, u64 min_start)
+ u64 bytes, u64 min_start,
+ u64 *max_extent_size)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
int err;
@@ -2218,8 +2238,11 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
search_bytes = bytes;
err = search_bitmap(ctl, entry, &search_start, &search_bytes);
- if (err)
+ if (err) {
+ if (search_bytes > *max_extent_size)
+ *max_extent_size = search_bytes;
return 0;
+ }
ret = search_start;
__bitmap_clear_bits(ctl, entry, ret, bytes);
@@ -2234,7 +2257,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
*/
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster, u64 bytes,
- u64 min_start)
+ u64 min_start, u64 *max_extent_size)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *entry = NULL;
@@ -2254,6 +2277,9 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
entry = rb_entry(node, struct btrfs_free_space, offset_index);
while(1) {
+ if (entry->bytes < bytes && entry->bytes > *max_extent_size)
+ *max_extent_size = entry->bytes;
+
if (entry->bytes < bytes ||
(!entry->bitmap && entry->offset < min_start)) {
node = rb_next(&entry->offset_index);
@@ -2267,7 +2293,8 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
if (entry->bitmap) {
ret = btrfs_alloc_from_bitmap(block_group,
cluster, entry, bytes,
- cluster->window_start);
+ cluster->window_start,
+ max_extent_size);
if (ret == 0) {
node = rb_next(&entry->offset_index);
if (!node)
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index c74904167476..e737f92cf6d0 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -94,7 +94,8 @@ void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl);
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
*block_group);
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
- u64 offset, u64 bytes, u64 empty_size);
+ u64 offset, u64 bytes, u64 empty_size,
+ u64 *max_extent_size);
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
u64 bytes);
@@ -105,7 +106,7 @@ int btrfs_find_space_cluster(struct btrfs_root *root,
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster);
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster, u64 bytes,
- u64 min_start);
+ u64 min_start, u64 *max_extent_size);
int btrfs_return_cluster_to_free_space(
struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index f338c5672d58..22ebc13b6c99 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4688,11 +4688,11 @@ static void inode_tree_add(struct inode *inode)
struct btrfs_inode *entry;
struct rb_node **p;
struct rb_node *parent;
+ struct rb_node *new = &BTRFS_I(inode)->rb_node;
u64 ino = btrfs_ino(inode);
if (inode_unhashed(inode))
return;
-again:
parent = NULL;
spin_lock(&root->inode_lock);
p = &root->inode_tree.rb_node;
@@ -4707,14 +4707,14 @@ again:
else {
WARN_ON(!(entry->vfs_inode.i_state &
(I_WILL_FREE | I_FREEING)));
- rb_erase(parent, &root->inode_tree);
+ rb_replace_node(parent, new, &root->inode_tree);
RB_CLEAR_NODE(parent);
spin_unlock(&root->inode_lock);
- goto again;
+ return;
}
}
- rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
- rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
+ rb_link_node(new, parent, p);
+ rb_insert_color(new, &root->inode_tree);
spin_unlock(&root->inode_lock);
}
@@ -8216,6 +8216,10 @@ static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
if (unlikely(!work)) {
+ if (delay_iput)
+ btrfs_add_delayed_iput(inode);
+ else
+ iput(inode);
ret = -ENOMEM;
goto out;
}
@@ -8613,11 +8617,13 @@ static const struct inode_operations btrfs_dir_inode_operations = {
.removexattr = btrfs_removexattr,
.permission = btrfs_permission,
.get_acl = btrfs_get_acl,
+ .update_time = btrfs_update_time,
};
static const struct inode_operations btrfs_dir_ro_inode_operations = {
.lookup = btrfs_lookup,
.permission = btrfs_permission,
.get_acl = btrfs_get_acl,
+ .update_time = btrfs_update_time,
};
static const struct file_operations btrfs_dir_file_operations = {
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 1a5b9462dd9a..9d46f60cb943 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -574,7 +574,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
if (ret)
return ret;
- btrfs_wait_ordered_extents(root, 0);
+ btrfs_wait_ordered_extents(root);
pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
if (!pending_snapshot)
@@ -2696,9 +2696,9 @@ out_unlock:
static long btrfs_ioctl_file_extent_same(struct file *file,
void __user *argp)
{
- struct btrfs_ioctl_same_args *args = argp;
- struct btrfs_ioctl_same_args same;
- struct btrfs_ioctl_same_extent_info info;
+ struct btrfs_ioctl_same_args tmp;
+ struct btrfs_ioctl_same_args *same;
+ struct btrfs_ioctl_same_extent_info *info;
struct inode *src = file->f_dentry->d_inode;
struct file *dst_file = NULL;
struct inode *dst;
@@ -2706,6 +2706,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
u64 len;
int i;
int ret;
+ unsigned long size;
u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
bool is_admin = capable(CAP_SYS_ADMIN);
@@ -2716,15 +2717,30 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
if (ret)
return ret;
- if (copy_from_user(&same,
+ if (copy_from_user(&tmp,
(struct btrfs_ioctl_same_args __user *)argp,
- sizeof(same))) {
+ sizeof(tmp))) {
ret = -EFAULT;
goto out;
}
- off = same.logical_offset;
- len = same.length;
+ size = sizeof(tmp) +
+ tmp.dest_count * sizeof(struct btrfs_ioctl_same_extent_info);
+
+ same = kmalloc(size, GFP_NOFS);
+ if (!same) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (copy_from_user(same,
+ (struct btrfs_ioctl_same_args __user *)argp, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ off = same->logical_offset;
+ len = same->length;
/*
* Limit the total length we will dedupe for each operation.
@@ -2752,27 +2768,28 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
if (!S_ISREG(src->i_mode))
goto out;
- ret = 0;
- for (i = 0; i < same.dest_count; i++) {
- if (copy_from_user(&info, &args->info[i], sizeof(info))) {
- ret = -EFAULT;
- goto out;
- }
+ /* pre-format output fields to sane values */
+ for (i = 0; i < same->dest_count; i++) {
+ same->info[i].bytes_deduped = 0ULL;
+ same->info[i].status = 0;
+ }
- info.bytes_deduped = 0;
+ ret = 0;
+ for (i = 0; i < same->dest_count; i++) {
+ info = &same->info[i];
- dst_file = fget(info.fd);
+ dst_file = fget(info->fd);
if (!dst_file) {
- info.status = -EBADF;
+ info->status = -EBADF;
goto next;
}
if (!(is_admin || (dst_file->f_mode & FMODE_WRITE))) {
- info.status = -EINVAL;
+ info->status = -EINVAL;
goto next;
}
- info.status = -EXDEV;
+ info->status = -EXDEV;
if (file->f_path.mnt != dst_file->f_path.mnt)
goto next;
@@ -2781,32 +2798,29 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
goto next;
if (S_ISDIR(dst->i_mode)) {
- info.status = -EISDIR;
+ info->status = -EISDIR;
goto next;
}
if (!S_ISREG(dst->i_mode)) {
- info.status = -EACCES;
+ info->status = -EACCES;
goto next;
}
- info.status = btrfs_extent_same(src, off, len, dst,
- info.logical_offset);
- if (info.status == 0)
- info.bytes_deduped += len;
+ info->status = btrfs_extent_same(src, off, len, dst,
+ info->logical_offset);
+ if (info->status == 0)
+ info->bytes_deduped += len;
next:
if (dst_file)
fput(dst_file);
-
- if (__put_user_unaligned(info.status, &args->info[i].status) ||
- __put_user_unaligned(info.bytes_deduped,
- &args->info[i].bytes_deduped)) {
- ret = -EFAULT;
- goto out;
- }
}
+ ret = copy_to_user(argp, same, size);
+ if (ret)
+ ret = -EFAULT;
+
out:
mnt_drop_write_file(file);
return ret;
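The rewritten BTRFS_IOC_FILE_EXTENT_SAME handler above copies the fixed header first to learn dest_count, then copies the whole variable-length argument block in one go and writes the per-destination results back the same way. A userspace sketch of that two-stage copy for a struct with a flexible array member (memcpy stands in for copy_from_user; the field layout is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct info { int64_t fd; uint64_t logical_offset; uint64_t bytes_deduped; int32_t status; };
struct args { uint64_t logical_offset; uint64_t length; uint16_t dest_count; struct info info[]; };

/* 'userbuf' stands in for the __user pointer handed to the ioctl. */
static struct args *copy_args(const void *userbuf)
{
	struct args tmp;
	struct args *same;
	size_t size;

	memcpy(&tmp, userbuf, sizeof(tmp));          /* stage 1: fixed header */
	/* A real implementation must bound dest_count before this multiply. */
	size = sizeof(tmp) + tmp.dest_count * sizeof(struct info);
	same = malloc(size);
	if (!same)
		return NULL;
	memcpy(same, userbuf, size);                 /* stage 2: header + array */
	return same;
}

int main(void)
{
	size_t usize = sizeof(struct args) + 2 * sizeof(struct info);
	struct args *user = calloc(1, usize);
	struct args *same;

	if (!user)
		return 1;
	user->length = 4096;
	user->dest_count = 2;
	user->info[1].fd = 7;

	same = copy_args(user);
	if (same) {
		printf("copied %u destinations, info[1].fd = %lld\n",
		       (unsigned)same->dest_count, (long long)same->info[1].fd);
		free(same);
	}
	free(user);
	return 0;
}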
@@ -3310,7 +3324,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
}
if (!objectid)
- objectid = root->root_key.objectid;
+ objectid = BTRFS_FS_TREE_OBJECTID;
location.objectid = objectid;
location.type = BTRFS_ROOT_ITEM_KEY;
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 966b413a33b8..c702cb62f78a 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -563,11 +563,10 @@ static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
* wait for all the ordered extents in a root. This is done when balancing
* space between drives.
*/
-void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
+void btrfs_wait_ordered_extents(struct btrfs_root *root)
{
struct list_head splice, works;
struct btrfs_ordered_extent *ordered, *next;
- struct inode *inode;
INIT_LIST_HEAD(&splice);
INIT_LIST_HEAD(&works);
@@ -580,15 +579,6 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
root_extent_list);
list_move_tail(&ordered->root_extent_list,
&root->ordered_extents);
- /*
- * the inode may be getting freed (in sys_unlink path).
- */
- inode = igrab(ordered->inode);
- if (!inode) {
- cond_resched_lock(&root->ordered_extent_lock);
- continue;
- }
-
atomic_inc(&ordered->refs);
spin_unlock(&root->ordered_extent_lock);
@@ -605,21 +595,13 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
list_for_each_entry_safe(ordered, next, &works, work_list) {
list_del_init(&ordered->work_list);
wait_for_completion(&ordered->completion);
-
- inode = ordered->inode;
btrfs_put_ordered_extent(ordered);
- if (delay_iput)
- btrfs_add_delayed_iput(inode);
- else
- iput(inode);
-
cond_resched();
}
mutex_unlock(&root->fs_info->ordered_operations_mutex);
}
-void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info,
- int delay_iput)
+void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *root;
struct list_head splice;
@@ -637,7 +619,7 @@ void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info,
&fs_info->ordered_roots);
spin_unlock(&fs_info->ordered_root_lock);
- btrfs_wait_ordered_extents(root, delay_iput);
+ btrfs_wait_ordered_extents(root);
btrfs_put_fs_root(root);
spin_lock(&fs_info->ordered_root_lock);
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index d9a5aa097b4f..0c0b35612d7a 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -195,9 +195,8 @@ int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode);
-void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput);
-void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info,
- int delay_iput);
+void btrfs_wait_ordered_extents(struct btrfs_root *root);
+void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info);
void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode);
void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid);
void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index aacc2121e87c..a5a26320503f 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1548,7 +1548,7 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
btrfs_file_extent_other_encoding(leaf, fi));
if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
- ret = 1;
+ ret = -EINVAL;
goto out;
}
@@ -1579,7 +1579,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
u64 end;
u32 nritems;
u32 i;
- int ret;
+ int ret = 0;
int first = 1;
int dirty = 0;
@@ -1642,11 +1642,13 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
ret = get_new_location(rc->data_inode, &new_bytenr,
bytenr, num_bytes);
- if (ret > 0) {
- WARN_ON(1);
- continue;
+ if (ret) {
+ /*
+ * Don't have to abort since we've not changed anything
+ * in the file extent yet.
+ */
+ break;
}
- BUG_ON(ret < 0);
btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
dirty = 1;
@@ -1656,18 +1658,24 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
num_bytes, parent,
btrfs_header_owner(leaf),
key.objectid, key.offset, 1);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ break;
+ }
ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
parent, btrfs_header_owner(leaf),
key.objectid, key.offset, 1);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ break;
+ }
}
if (dirty)
btrfs_mark_buffer_dirty(leaf);
if (inode)
btrfs_add_delayed_iput(inode);
- return 0;
+ return ret;
}
static noinline_for_stack
@@ -4238,7 +4246,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
err = ret;
goto out;
}
- btrfs_wait_all_ordered_extents(fs_info, 0);
+ btrfs_wait_all_ordered_extents(fs_info);
while (1) {
mutex_lock(&fs_info->cleaner_mutex);
@@ -4499,19 +4507,19 @@ out:
return ret;
}
-void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct extent_buffer *buf,
- struct extent_buffer *cow)
+int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct extent_buffer *buf,
+ struct extent_buffer *cow)
{
struct reloc_control *rc;
struct backref_node *node;
int first_cow = 0;
int level;
- int ret;
+ int ret = 0;
rc = root->fs_info->reloc_ctl;
if (!rc)
- return;
+ return 0;
BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
@@ -4547,10 +4555,9 @@ void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
rc->nodes_relocated += buf->len;
}
- if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS) {
+ if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
ret = replace_file_extents(trans, rc, root, cow);
- BUG_ON(ret);
- }
+ return ret;
}
/*
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 0afcd452fcb3..a18e0e23f6a6 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -158,12 +158,20 @@ struct scrub_fixup_nodatasum {
int mirror_num;
};
+struct scrub_nocow_inode {
+ u64 inum;
+ u64 offset;
+ u64 root;
+ struct list_head list;
+};
+
struct scrub_copy_nocow_ctx {
struct scrub_ctx *sctx;
u64 logical;
u64 len;
int mirror_num;
u64 physical_for_dev_replace;
+ struct list_head inodes;
struct btrfs_work work;
};
@@ -245,7 +253,7 @@ static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
- void *ctx);
+ struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
@@ -3126,12 +3134,30 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
nocow_ctx->mirror_num = mirror_num;
nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
nocow_ctx->work.func = copy_nocow_pages_worker;
+ INIT_LIST_HEAD(&nocow_ctx->inodes);
btrfs_queue_worker(&fs_info->scrub_nocow_workers,
&nocow_ctx->work);
return 0;
}
+static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
+{
+ struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
+ struct scrub_nocow_inode *nocow_inode;
+
+ nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
+ if (!nocow_inode)
+ return -ENOMEM;
+ nocow_inode->inum = inum;
+ nocow_inode->offset = offset;
+ nocow_inode->root = root;
+ list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
+ return 0;
+}
+
+#define COPY_COMPLETE 1
+
static void copy_nocow_pages_worker(struct btrfs_work *work)
{
struct scrub_copy_nocow_ctx *nocow_ctx =
@@ -3167,8 +3193,7 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
}
ret = iterate_inodes_from_logical(logical, fs_info, path,
- copy_nocow_pages_for_inode,
- nocow_ctx);
+ record_inode_for_nocow, nocow_ctx);
if (ret != 0 && ret != -ENOENT) {
pr_warn("iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d\n",
logical, physical_for_dev_replace, len, mirror_num,
@@ -3177,7 +3202,33 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
goto out;
}
+ btrfs_end_transaction(trans, root);
+ trans = NULL;
+ while (!list_empty(&nocow_ctx->inodes)) {
+ struct scrub_nocow_inode *entry;
+ entry = list_first_entry(&nocow_ctx->inodes,
+ struct scrub_nocow_inode,
+ list);
+ list_del_init(&entry->list);
+ ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
+ entry->root, nocow_ctx);
+ kfree(entry);
+ if (ret == COPY_COMPLETE) {
+ ret = 0;
+ break;
+ } else if (ret) {
+ break;
+ }
+ }
out:
+ while (!list_empty(&nocow_ctx->inodes)) {
+ struct scrub_nocow_inode *entry;
+ entry = list_first_entry(&nocow_ctx->inodes,
+ struct scrub_nocow_inode,
+ list);
+ list_del_init(&entry->list);
+ kfree(entry);
+ }
if (trans && !IS_ERR(trans))
btrfs_end_transaction(trans, root);
if (not_written)
@@ -3190,20 +3241,25 @@ out:
scrub_pending_trans_workers_dec(sctx);
}
-static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
+static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
+ struct scrub_copy_nocow_ctx *nocow_ctx)
{
- struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
struct btrfs_key key;
struct inode *inode;
struct page *page;
struct btrfs_root *local_root;
+ struct btrfs_ordered_extent *ordered;
+ struct extent_map *em;
+ struct extent_state *cached_state = NULL;
+ struct extent_io_tree *io_tree;
u64 physical_for_dev_replace;
- u64 len;
+ u64 len = nocow_ctx->len;
+ u64 lockstart = offset, lockend = offset + len - 1;
unsigned long index;
int srcu_index;
- int ret;
- int err;
+ int ret = 0;
+ int err = 0;
key.objectid = root;
key.type = BTRFS_ROOT_ITEM_KEY;
@@ -3229,9 +3285,33 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
mutex_lock(&inode->i_mutex);
inode_dio_wait(inode);
- ret = 0;
physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
- len = nocow_ctx->len;
+ io_tree = &BTRFS_I(inode)->io_tree;
+
+ lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
+ ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
+ if (ordered) {
+ btrfs_put_ordered_extent(ordered);
+ goto out_unlock;
+ }
+
+ em = btrfs_get_extent(inode, NULL, 0, lockstart, len, 0);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto out_unlock;
+ }
+
+ /*
+ * This extent does not actually cover the logical extent anymore,
+ * move on to the next inode.
+ */
+ if (em->block_start > nocow_ctx->logical ||
+ em->block_start + em->block_len < nocow_ctx->logical + len) {
+ free_extent_map(em);
+ goto out_unlock;
+ }
+ free_extent_map(em);
+
while (len >= PAGE_CACHE_SIZE) {
index = offset >> PAGE_CACHE_SHIFT;
again:
@@ -3247,10 +3327,9 @@ again:
goto next_page;
} else {
ClearPageError(page);
- err = extent_read_full_page(&BTRFS_I(inode)->
- io_tree,
- page, btrfs_get_extent,
- nocow_ctx->mirror_num);
+ err = extent_read_full_page_nolock(io_tree, page,
+ btrfs_get_extent,
+ nocow_ctx->mirror_num);
if (err) {
ret = err;
goto next_page;
@@ -3264,6 +3343,7 @@ again:
* page in the page cache.
*/
if (page->mapping != inode->i_mapping) {
+ unlock_page(page);
page_cache_release(page);
goto again;
}
@@ -3287,6 +3367,10 @@ next_page:
physical_for_dev_replace += PAGE_CACHE_SIZE;
len -= PAGE_CACHE_SIZE;
}
+ ret = COPY_COMPLETE;
+out_unlock:
+ unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
+ GFP_NOFS);
out:
mutex_unlock(&inode->i_mutex);
iput(inode);
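copy_nocow_pages_worker() above now only records inode references during the backref walk and does the page copying after the transaction has been ended. A userspace sketch of that defer-to-a-list pattern, including the cleanup of entries that were never processed (list layout and return codes are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct entry { unsigned long inum; struct entry *next; };
struct ctx { struct entry *head, **tail; };

/* Callback: just record the inode number; do no real work here. */
static int record(struct ctx *c, unsigned long inum)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return -1;               /* -ENOMEM in the kernel */
	e->inum = inum;
	e->next = NULL;
	*c->tail = e;
	c->tail = &e->next;
	return 0;
}

static int process(unsigned long inum)
{
	printf("copying pages for inode %lu\n", inum);
	return inum == 42 ? 1 : 0;       /* pretend 1 means "copy complete" */
}

int main(void)
{
	struct ctx c = { .head = NULL, .tail = &c.head };

	for (unsigned long i = 40; i < 45; i++)
		record(&c, i);

	/* ...end the transaction here, then do the heavy lifting... */
	while (c.head) {
		struct entry *e = c.head;
		int ret = process(e->inum);

		c.head = e->next;
		free(e);
		if (ret)
			break;           /* stop early; leftovers freed below */
	}
	/* cleanup path: free anything we did not get to */
	while (c.head) {
		struct entry *e = c.head;

		c.head = e->next;
		free(e);
	}
	return 0;
}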
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 3aab10ce63e8..e913328d0f2a 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -921,7 +921,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
return 0;
}
- btrfs_wait_all_ordered_extents(fs_info, 1);
+ btrfs_wait_all_ordered_extents(fs_info);
trans = btrfs_attach_transaction_barrier(root);
if (IS_ERR(trans)) {
@@ -1340,6 +1340,12 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
if (ret)
goto restore;
} else {
+ if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
+ btrfs_err(fs_info,
+ "Remounting read-write after error is not allowed\n");
+ ret = -EINVAL;
+ goto restore;
+ }
if (fs_info->fs_devices->rw_devices == 0) {
ret = -EACCES;
goto restore;
@@ -1377,6 +1383,16 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
pr_warn("btrfs: failed to resume dev_replace\n");
goto restore;
}
+
+ if (!fs_info->uuid_root) {
+ pr_info("btrfs: creating UUID tree\n");
+ ret = btrfs_create_uuid_tree(fs_info);
+ if (ret) {
+ pr_warn("btrfs: failed to create the uuid tree"
+ "%d\n", ret);
+ goto restore;
+ }
+ }
sb->s_flags &= ~MS_RDONLY;
}
out:
@@ -1762,6 +1778,9 @@ static void btrfs_print_info(void)
#ifdef CONFIG_BTRFS_DEBUG
", debug=on"
#endif
+#ifdef CONFIG_BTRFS_ASSERT
+ ", assert=on"
+#endif
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
", integrity-checker=on"
#endif
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index cac4a3f76323..e7a95356df83 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1603,7 +1603,7 @@ static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
{
if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
- btrfs_wait_all_ordered_extents(fs_info, 1);
+ btrfs_wait_all_ordered_extents(fs_info);
}
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 0d9613c3f5e5..79f057c0619a 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -93,7 +93,8 @@
*/
#define LOG_WALK_PIN_ONLY 0
#define LOG_WALK_REPLAY_INODES 1
-#define LOG_WALK_REPLAY_ALL 2
+#define LOG_WALK_REPLAY_DIR_INDEX 2
+#define LOG_WALK_REPLAY_ALL 3
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode,
@@ -393,6 +394,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
if (inode_item) {
struct btrfs_inode_item *item;
u64 nbytes;
+ u32 mode;
item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_item);
@@ -400,9 +402,19 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
item = btrfs_item_ptr(eb, slot,
struct btrfs_inode_item);
btrfs_set_inode_nbytes(eb, item, nbytes);
+
+ /*
+ * If this is a directory we need to reset the i_size to
+ * 0 so that we can set it up properly when replaying
+ * the rest of the items in this log.
+ */
+ mode = btrfs_inode_mode(eb, item);
+ if (S_ISDIR(mode))
+ btrfs_set_inode_size(eb, item, 0);
}
} else if (inode_item) {
struct btrfs_inode_item *item;
+ u32 mode;
/*
* New inode, set nbytes to 0 so that the nbytes comes out
@@ -410,6 +422,15 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
*/
item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
btrfs_set_inode_nbytes(eb, item, 0);
+
+ /*
+ * If this is a directory we need to reset the i_size to 0 so
+ * that we can set it up properly when replaying the rest of
+ * the items in this log.
+ */
+ mode = btrfs_inode_mode(eb, item);
+ if (S_ISDIR(mode))
+ btrfs_set_inode_size(eb, item, 0);
}
insert:
btrfs_release_path(path);
@@ -1496,6 +1517,7 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans,
iput(inode);
return -EIO;
}
+
ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);
/* FIXME, put inode into FIXUP list */
@@ -1534,6 +1556,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
u8 log_type;
int exists;
int ret = 0;
+ bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
dir = read_one_inode(root, key->objectid);
if (!dir)
@@ -1604,6 +1627,10 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
goto insert;
out:
btrfs_release_path(path);
+ if (!ret && update_size) {
+ btrfs_i_size_write(dir, dir->i_size + name_len * 2);
+ ret = btrfs_update_inode(trans, root, dir);
+ }
kfree(name);
iput(dir);
return ret;
@@ -1614,6 +1641,7 @@ insert:
name, name_len, log_type, &log_key);
if (ret && ret != -ENOENT)
goto out;
+ update_size = false;
ret = 0;
goto out;
}
@@ -2027,6 +2055,15 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
if (ret)
break;
}
+
+ if (key.type == BTRFS_DIR_INDEX_KEY &&
+ wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
+ ret = replay_one_dir_item(wc->trans, root, path,
+ eb, i, &key);
+ if (ret)
+ break;
+ }
+
if (wc->stage < LOG_WALK_REPLAY_ALL)
continue;
@@ -2048,8 +2085,7 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
eb, i, &key);
if (ret)
break;
- } else if (key.type == BTRFS_DIR_ITEM_KEY ||
- key.type == BTRFS_DIR_INDEX_KEY) {
+ } else if (key.type == BTRFS_DIR_ITEM_KEY) {
ret = replay_one_dir_item(wc->trans, root, path,
eb, i, &key);
if (ret)
@@ -3805,6 +3841,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
int ret = 0;
struct btrfs_root *root;
struct dentry *old_parent = NULL;
+ struct inode *orig_inode = inode;
/*
* for regular files, if its inode is already on disk, we don't
@@ -3824,7 +3861,14 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
}
while (1) {
- BTRFS_I(inode)->logged_trans = trans->transid;
+ /*
+ * If we are logging a directory then we start with our inode,
+ * not our parent's inode, so we need to skip setting the
+ * logged_trans so that further down in the log code we don't
+ * think this inode has already been logged.
+ */
+ if (inode != orig_inode)
+ BTRFS_I(inode)->logged_trans = trans->transid;
smp_mb();
if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
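The extra LOG_WALK_REPLAY_DIR_INDEX pass above appears intended to let directory i_size be rebuilt from DIR_INDEX items in their own pass before the remaining items are replayed. A toy multi-pass walk showing the staging idea (item types and stage names are illustrative, not the tree-log item layout):

#include <stdio.h>

enum { PIN_ONLY, REPLAY_INODES, REPLAY_DIR_INDEX, REPLAY_ALL, NR_STAGES };
enum { KEY_INODE, KEY_DIR_INDEX, KEY_DIR_ITEM, KEY_OTHER };

/* Each stage only acts on the item types it owns; later stages see the
 * state the earlier stages have already rebuilt. */
static void replay(const int *keys, int n)
{
	for (int stage = REPLAY_INODES; stage < NR_STAGES; stage++) {
		for (int i = 0; i < n; i++) {
			if (stage == REPLAY_INODES && keys[i] == KEY_INODE)
				printf("stage %d: inode item %d\n", stage, i);
			if (stage == REPLAY_DIR_INDEX && keys[i] == KEY_DIR_INDEX)
				printf("stage %d: dir index %d\n", stage, i);
			if (stage == REPLAY_ALL &&
			    (keys[i] == KEY_DIR_ITEM || keys[i] == KEY_OTHER))
				printf("stage %d: remaining item %d\n", stage, i);
		}
	}
}

int main(void)
{
	int keys[] = { KEY_INODE, KEY_DIR_ITEM, KEY_DIR_INDEX, KEY_OTHER };

	replay(keys, 4);
	return 0;
}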
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0052ca8264d9..a10645830223 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -796,7 +796,8 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
fs_devices->rotating = 1;
fs_devices->open_devices++;
- if (device->writeable && !device->is_tgtdev_for_dev_replace) {
+ if (device->writeable &&
+ device->devid != BTRFS_DEV_REPLACE_DEVID) {
fs_devices->rw_devices++;
list_add(&device->dev_alloc_list,
&fs_devices->alloc_list);
@@ -911,9 +912,9 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
if (disk_super->label[0]) {
if (disk_super->label[BTRFS_LABEL_SIZE - 1])
disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
- printk(KERN_INFO "device label %s ", disk_super->label);
+ printk(KERN_INFO "btrfs: device label %s ", disk_super->label);
} else {
- printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
+ printk(KERN_INFO "btrfs: device fsid %pU ", disk_super->fsid);
}
printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 25badd1aec5c..f4a08d7fa2f7 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -56,7 +56,7 @@ void __cachefiles_printk_object(struct cachefiles_object *object,
object->fscache.cookie->parent,
object->fscache.cookie->netfs_data,
object->fscache.cookie->flags);
- if (keybuf)
+ if (keybuf && cookie->def)
keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
CACHEFILES_KEYBUF_SIZE);
else
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
index 34c88b83e39f..12b0eef84183 100644
--- a/fs/cachefiles/xattr.c
+++ b/fs/cachefiles/xattr.c
@@ -162,8 +162,9 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object,
int cachefiles_check_auxdata(struct cachefiles_object *object)
{
struct cachefiles_xattr *auxbuf;
+ enum fscache_checkaux validity;
struct dentry *dentry = object->dentry;
- unsigned int dlen;
+ ssize_t xlen;
int ret;
ASSERT(dentry);
@@ -174,22 +175,22 @@ int cachefiles_check_auxdata(struct cachefiles_object *object)
if (!auxbuf)
return -ENOMEM;
- auxbuf->len = vfs_getxattr(dentry, cachefiles_xattr_cache,
- &auxbuf->type, 512 + 1);
- if (auxbuf->len < 1)
- return -ESTALE;
-
- if (auxbuf->type != object->fscache.cookie->def->type)
- return -ESTALE;
+ xlen = vfs_getxattr(dentry, cachefiles_xattr_cache,
+ &auxbuf->type, 512 + 1);
+ ret = -ESTALE;
+ if (xlen < 1 ||
+ auxbuf->type != object->fscache.cookie->def->type)
+ goto error;
- dlen = auxbuf->len - 1;
- ret = fscache_check_aux(&object->fscache, &auxbuf->data, dlen);
+ xlen--;
+ validity = fscache_check_aux(&object->fscache, &auxbuf->data, xlen);
+ if (validity != FSCACHE_CHECKAUX_OKAY)
+ goto error;
+ ret = 0;
+error:
kfree(auxbuf);
- if (ret != FSCACHE_CHECKAUX_OKAY)
- return -ESTALE;
-
- return 0;
+ return ret;
}
/*
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index d3e2eaa503a6..5384c2a640ca 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -500,6 +500,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
if (server->ops->close)
server->ops->close(xid, tcon, &fid);
cifs_del_pending_open(&open);
+ fput(file);
rc = -ENOMEM;
}
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 318e8433527c..b2a86e324aac 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -586,7 +586,8 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
fscache_operation_init(op, NULL, NULL);
op->flags = FSCACHE_OP_MYTHREAD |
- (1 << FSCACHE_OP_WAITING);
+ (1 << FSCACHE_OP_WAITING) |
+ (1 << FSCACHE_OP_UNUSE_COOKIE);
spin_lock(&cookie->lock);
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 64915eeae5a7..ced3257f06e8 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -694,8 +694,10 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
mark_inode_dirty(inode);
d_instantiate(dentry, inode);
- if (file)
+ if (file) {
+ *opened |= FILE_CREATED;
error = finish_open(file, dentry, gfs2_open_common, opened);
+ }
gfs2_glock_dq_uninit(ghs);
gfs2_glock_dq_uninit(ghs + 1);
return error;
diff --git a/fs/namei.c b/fs/namei.c
index 0dc4cbf21f37..645268f23eb6 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2656,6 +2656,7 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
int acc_mode;
int create_error = 0;
struct dentry *const DENTRY_NOT_SET = (void *) -1UL;
+ bool excl;
BUG_ON(dentry->d_inode);
@@ -2669,10 +2670,9 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
if ((open_flag & O_CREAT) && !IS_POSIXACL(dir))
mode &= ~current_umask();
- if ((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT)) {
+ excl = (open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT);
+ if (excl)
open_flag &= ~O_TRUNC;
- *opened |= FILE_CREATED;
- }
/*
* Checking write permission is tricky, because we don't know if we are
@@ -2725,12 +2725,6 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
goto out;
}
- acc_mode = op->acc_mode;
- if (*opened & FILE_CREATED) {
- fsnotify_create(dir, dentry);
- acc_mode = MAY_OPEN;
- }
-
if (error) { /* returned 1, that is */
if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
error = -EIO;
@@ -2740,9 +2734,19 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
dput(dentry);
dentry = file->f_path.dentry;
}
- if (create_error && dentry->d_inode == NULL) {
- error = create_error;
- goto out;
+ if (*opened & FILE_CREATED)
+ fsnotify_create(dir, dentry);
+ if (!dentry->d_inode) {
+ WARN_ON(*opened & FILE_CREATED);
+ if (create_error) {
+ error = create_error;
+ goto out;
+ }
+ } else {
+ if (excl && !(*opened & FILE_CREATED)) {
+ error = -EEXIST;
+ goto out;
+ }
}
goto looked_up;
}
@@ -2751,6 +2755,12 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
* We didn't have the inode before the open, so check open permission
* here.
*/
+ acc_mode = op->acc_mode;
+ if (*opened & FILE_CREATED) {
+ WARN_ON(!(open_flag & O_CREAT));
+ fsnotify_create(dir, dentry);
+ acc_mode = MAY_OPEN;
+ }
error = may_open(&file->f_path, acc_mode, open_flag);
if (error)
fput(file);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index de434f309af0..854a8f05a610 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1392,6 +1392,9 @@ static int nfs_finish_open(struct nfs_open_context *ctx,
{
int err;
+ if ((open_flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
+ *opened |= FILE_CREATED;
+
err = finish_open(file, dentry, do_open, opened);
if (err)
goto out;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 121da2dc3be8..d4e81e4a9b04 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1924,7 +1924,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
{
int tmp, hangup_needed = 0;
struct ocfs2_super *osb = NULL;
- char nodestr[8];
+ char nodestr[12];
trace_ocfs2_dismount_volume(sb);
diff --git a/fs/open.c b/fs/open.c
index 2a731b0d08bc..d420331ca32a 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -744,14 +744,24 @@ cleanup_file:
/**
* finish_open - finish opening a file
- * @od: opaque open data
+ * @file: file pointer
* @dentry: pointer to dentry
* @open: open callback
+ * @opened: state of open
*
* This can be used to finish opening a file passed to i_op->atomic_open().
*
* If the open callback is set to NULL, then the standard f_op->open()
* filesystem callback is substituted.
+ *
+ * NB: the dentry reference is _not_ consumed. If, for example, the dentry is
+ * the return value of d_splice_alias(), then the caller needs to perform dput()
+ * on it after finish_open().
+ *
+ * On successful return @file is a fully instantiated open file. After this, if
+ * an error occurs in ->atomic_open(), it needs to clean up with fput().
+ *
+ * Returns zero on success or -errno if the open failed.
*/
int finish_open(struct file *file, struct dentry *dentry,
int (*open)(struct inode *, struct file *),
@@ -772,11 +782,16 @@ EXPORT_SYMBOL(finish_open);
/**
* finish_no_open - finish ->atomic_open() without opening the file
*
- * @od: opaque open data
+ * @file: file pointer
* @dentry: dentry or NULL (as returned from ->lookup())
*
* This can be used to set the result of a successful lookup in ->atomic_open().
- * The filesystem's atomic_open() method shall return NULL after calling this.
+ *
+ * NB: unlike finish_open() this function does consume the dentry reference and
+ * the caller need not dput() it.
+ *
+ * Returns "1" which must be the return value of ->atomic_open() after having
+ * called this function.
*/
int finish_no_open(struct file *file, struct dentry *dentry)
{
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 4ffb7ab5e397..b8e93a40a5d3 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -168,7 +168,7 @@ static int pstore_decompress(void *in, void *out, size_t inlen, size_t outlen)
int err, ret;
ret = -EIO;
- err = zlib_inflateInit(&stream);
+ err = zlib_inflateInit2(&stream, WINDOW_BITS);
if (err != Z_OK)
goto error;
@@ -195,8 +195,29 @@ error:
static void allocate_buf_for_compression(void)
{
size_t size;
+ size_t cmpr;
+
+ switch (psinfo->bufsize) {
+ /* buffer range for efivars */
+ case 1000 ... 2000:
+ cmpr = 56;
+ break;
+ case 2001 ... 3000:
+ cmpr = 54;
+ break;
+ case 3001 ... 3999:
+ cmpr = 52;
+ break;
+ /* buffer range for nvram, erst */
+ case 4000 ... 10000:
+ cmpr = 45;
+ break;
+ default:
+ cmpr = 60;
+ break;
+ }
- big_oops_buf_sz = (psinfo->bufsize * 100) / 45;
+ big_oops_buf_sz = (psinfo->bufsize * 100) / cmpr;
big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
if (big_oops_buf) {
size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
@@ -295,10 +316,6 @@ static void pstore_dump(struct kmsg_dumper *dumper,
compressed = true;
total_len = zipped_len;
} else {
- pr_err("pstore: compression failed for Part %d"
- " returned %d\n", part, zipped_len);
- pr_err("pstore: Capture uncompressed"
- " oops/panic report of Part %d\n", part);
compressed = false;
total_len = copy_kmsg_to_buffer(hsize, len);
}
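Instead of assuming a fixed 45% compression ratio, the pstore change above picks an expected ratio from the backend's record size and sizes the staging buffer accordingly. A standalone sketch of that calculation (an if/else chain in place of the kernel's case-range syntax; the ratios are the tuning values from the patch):

#include <stdio.h>
#include <stddef.h>

/* Pick an expected compression ratio (in percent) from the backend's
 * record size, then size the staging buffer so data that compresses at
 * that ratio still fits in one record.  The ranges mirror the ones used
 * above; they are tuning values, not hard guarantees. */
static size_t staging_size(size_t bufsize)
{
	size_t cmpr;

	if (bufsize >= 1000 && bufsize <= 2000)
		cmpr = 56;          /* efivars-sized records */
	else if (bufsize >= 2001 && bufsize <= 3000)
		cmpr = 54;
	else if (bufsize >= 3001 && bufsize <= 3999)
		cmpr = 52;
	else if (bufsize >= 4000 && bufsize <= 10000)
		cmpr = 45;          /* nvram/erst-sized records */
	else
		cmpr = 60;

	return bufsize * 100 / cmpr;
}

int main(void)
{
	size_t sizes[] = { 1024, 4096, 16384 };

	for (unsigned i = 0; i < 3; i++)
		printf("record %zu -> staging buffer %zu\n",
		       sizes[i], staging_size(sizes[i]));
	return 0;
}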
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 73feacc49b2e..fd777032c2ba 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1163,21 +1163,6 @@ static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
return NULL;
}
-static int newer_jl_done(struct reiserfs_journal_cnode *cn)
-{
- struct super_block *sb = cn->sb;
- b_blocknr_t blocknr = cn->blocknr;
-
- cn = cn->hprev;
- while (cn) {
- if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist &&
- atomic_read(&cn->jlist->j_commit_left) != 0)
- return 0;
- cn = cn->hprev;
- }
- return 1;
-}
-
static void remove_journal_hash(struct super_block *,
struct reiserfs_journal_cnode **,
struct reiserfs_journal_list *, unsigned long,
@@ -1353,7 +1338,6 @@ static int flush_journal_list(struct super_block *s,
reiserfs_warning(s, "clm-2048", "called with wcount %d",
atomic_read(&journal->j_wcount));
}
- BUG_ON(jl->j_trans_id == 0);
/* if flushall == 0, the lock is already held */
if (flushall) {
@@ -1593,31 +1577,6 @@ static int flush_journal_list(struct super_block *s,
return err;
}
-static int test_transaction(struct super_block *s,
- struct reiserfs_journal_list *jl)
-{
- struct reiserfs_journal_cnode *cn;
-
- if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0)
- return 1;
-
- cn = jl->j_realblock;
- while (cn) {
- /* if the blocknr == 0, this has been cleared from the hash,
- ** skip it
- */
- if (cn->blocknr == 0) {
- goto next;
- }
- if (cn->bh && !newer_jl_done(cn))
- return 0;
- next:
- cn = cn->next;
- cond_resched();
- }
- return 0;
-}
-
static int write_one_transaction(struct super_block *s,
struct reiserfs_journal_list *jl,
struct buffer_chunk *chunk)
@@ -1805,6 +1764,8 @@ static int flush_used_journal_lists(struct super_block *s,
break;
tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
}
+ get_journal_list(jl);
+ get_journal_list(flush_jl);
/* try to find a group of blocks we can flush across all the
** transactions, but only bother if we've actually spanned
** across multiple lists
@@ -1813,6 +1774,8 @@ static int flush_used_journal_lists(struct super_block *s,
ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
}
flush_journal_list(s, flush_jl, 1);
+ put_journal_list(s, flush_jl);
+ put_journal_list(s, jl);
return 0;
}
@@ -3868,27 +3831,6 @@ int reiserfs_prepare_for_journal(struct super_block *sb,
return 1;
}
-static void flush_old_journal_lists(struct super_block *s)
-{
- struct reiserfs_journal *journal = SB_JOURNAL(s);
- struct reiserfs_journal_list *jl;
- struct list_head *entry;
- time_t now = get_seconds();
-
- while (!list_empty(&journal->j_journal_list)) {
- entry = journal->j_journal_list.next;
- jl = JOURNAL_LIST_ENTRY(entry);
- /* this check should always be run, to send old lists to disk */
- if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4)) &&
- atomic_read(&jl->j_commit_left) == 0 &&
- test_transaction(s, jl)) {
- flush_used_journal_lists(s, jl);
- } else {
- break;
- }
- }
-}
-
/*
** long and ugly. If flush, will not return until all commit
** blocks and all real buffers in the trans are on disk.
@@ -4232,7 +4174,6 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
}
}
}
- flush_old_journal_lists(sb);
journal->j_current_jl->j_list_bitmap =
get_list_bitmap(sb, journal->j_current_jl);
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 7e5aae4bf46f..6eaf5edf1ea1 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -30,18 +30,17 @@ void udf_free_inode(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
struct udf_sb_info *sbi = UDF_SB(sb);
+ struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
- mutex_lock(&sbi->s_alloc_mutex);
- if (sbi->s_lvid_bh) {
- struct logicalVolIntegrityDescImpUse *lvidiu =
- udf_sb_lvidiu(sbi);
+ if (lvidiu) {
+ mutex_lock(&sbi->s_alloc_mutex);
if (S_ISDIR(inode->i_mode))
le32_add_cpu(&lvidiu->numDirs, -1);
else
le32_add_cpu(&lvidiu->numFiles, -1);
udf_updated_lvid(sb);
+ mutex_unlock(&sbi->s_alloc_mutex);
}
- mutex_unlock(&sbi->s_alloc_mutex);
udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1);
}
@@ -55,6 +54,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
uint32_t start = UDF_I(dir)->i_location.logicalBlockNum;
struct udf_inode_info *iinfo;
struct udf_inode_info *dinfo = UDF_I(dir);
+ struct logicalVolIntegrityDescImpUse *lvidiu;
inode = new_inode(sb);
@@ -92,12 +92,10 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
return NULL;
}
- if (sbi->s_lvid_bh) {
- struct logicalVolIntegrityDescImpUse *lvidiu;
-
+ lvidiu = udf_sb_lvidiu(sb);
+ if (lvidiu) {
iinfo->i_unique = lvid_get_unique_id(sb);
mutex_lock(&sbi->s_alloc_mutex);
- lvidiu = udf_sb_lvidiu(sbi);
if (S_ISDIR(mode))
le32_add_cpu(&lvidiu->numDirs, 1);
else
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 839a2bad7f45..91219385691d 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -94,13 +94,25 @@ static unsigned int udf_count_free(struct super_block *);
static int udf_statfs(struct dentry *, struct kstatfs *);
static int udf_show_options(struct seq_file *, struct dentry *);
-struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi)
+struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
{
- struct logicalVolIntegrityDesc *lvid =
- (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
- __u32 number_of_partitions = le32_to_cpu(lvid->numOfPartitions);
- __u32 offset = number_of_partitions * 2 *
- sizeof(uint32_t)/sizeof(uint8_t);
+ struct logicalVolIntegrityDesc *lvid;
+ unsigned int partnum;
+ unsigned int offset;
+
+ if (!UDF_SB(sb)->s_lvid_bh)
+ return NULL;
+ lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
+ partnum = le32_to_cpu(lvid->numOfPartitions);
+ if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) -
+ offsetof(struct logicalVolIntegrityDesc, impUse)) /
+ (2 * sizeof(uint32_t)) < partnum) {
+ udf_err(sb, "Logical volume integrity descriptor corrupted "
+ "(numOfPartitions = %u)!\n", partnum);
+ return NULL;
+ }
+ /* The offset is to skip freeSpaceTable and sizeTable arrays */
+ offset = partnum * 2 * sizeof(uint32_t);
return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
}
@@ -629,9 +641,10 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
struct udf_options uopt;
struct udf_sb_info *sbi = UDF_SB(sb);
int error = 0;
+ struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
- if (sbi->s_lvid_bh) {
- int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
+ if (lvidiu) {
+ int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev);
if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
return -EACCES;
}
@@ -1905,11 +1918,12 @@ static void udf_open_lvid(struct super_block *sb)
if (!bh)
return;
-
- mutex_lock(&sbi->s_alloc_mutex);
lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
- lvidiu = udf_sb_lvidiu(sbi);
+ lvidiu = udf_sb_lvidiu(sb);
+ if (!lvidiu)
+ return;
+ mutex_lock(&sbi->s_alloc_mutex);
lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
udf_time_to_disk_stamp(&lvid->recordingDateAndTime,
@@ -1937,10 +1951,12 @@ static void udf_close_lvid(struct super_block *sb)
if (!bh)
return;
+ lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
+ lvidiu = udf_sb_lvidiu(sb);
+ if (!lvidiu)
+ return;
mutex_lock(&sbi->s_alloc_mutex);
- lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
- lvidiu = udf_sb_lvidiu(sbi);
lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME);
@@ -2093,15 +2109,19 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
if (sbi->s_lvid_bh) {
struct logicalVolIntegrityDescImpUse *lvidiu =
- udf_sb_lvidiu(sbi);
- uint16_t minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
- uint16_t minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
- /* uint16_t maxUDFWriteRev =
- le16_to_cpu(lvidiu->maxUDFWriteRev); */
+ udf_sb_lvidiu(sb);
+ uint16_t minUDFReadRev;
+ uint16_t minUDFWriteRev;
+ if (!lvidiu) {
+ ret = -EINVAL;
+ goto error_out;
+ }
+ minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
+ minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
if (minUDFReadRev > UDF_MAX_READ_VERSION) {
udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
- le16_to_cpu(lvidiu->minUDFReadRev),
+ minUDFReadRev,
UDF_MAX_READ_VERSION);
ret = -EINVAL;
goto error_out;
@@ -2265,11 +2285,7 @@ static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
struct logicalVolIntegrityDescImpUse *lvidiu;
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
- if (sbi->s_lvid_bh != NULL)
- lvidiu = udf_sb_lvidiu(sbi);
- else
- lvidiu = NULL;
-
+ lvidiu = udf_sb_lvidiu(sb);
buf->f_type = UDF_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
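The reworked udf_sb_lvidiu() above computes the impUse offset from numOfPartitions and refuses values that would run past the descriptor's block. The arithmetic can be checked in isolation; the structure sizes below are assumed placeholders, not the exact UDF on-disk layout:

#include <stdio.h>
#include <stdint.h>

#define LVID_IMPUSE_OFFSET   80u    /* offsetof(logicalVolIntegrityDesc, impUse), assumed */
#define LVIDIU_SIZE          46u    /* sizeof(logicalVolIntegrityDescImpUse), assumed */

/* Return the byte offset of the implementation-use area inside impUse[],
 * or -1 if numOfPartitions cannot fit in one block of 'blocksize' bytes. */
static long lvidiu_offset(unsigned blocksize, unsigned partnum)
{
	unsigned max_parts = (blocksize - LVIDIU_SIZE - LVID_IMPUSE_OFFSET) /
			     (2 * sizeof(uint32_t));

	if (partnum > max_parts)
		return -1;                       /* corrupted descriptor */
	/* skip the freeSpaceTable[] and sizeTable[] arrays */
	return (long)(partnum * 2 * sizeof(uint32_t));
}

int main(void)
{
	printf("1 partition -> offset %ld\n", lvidiu_offset(2048, 1));
	printf("bogus count -> offset %ld\n", lvidiu_offset(2048, 100000));
	return 0;
}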
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index ed401e94aa8c..1f32c7bd9f57 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -162,7 +162,7 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
return sb->s_fs_info;
}
-struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi);
+struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb);
int udf_compute_nr_groups(struct super_block *sb, u32 partition);
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 88c5ea75ebf6..f1d85cfc0a54 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -628,6 +628,7 @@ xfs_buf_item_unlock(
else if (aborted) {
ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
if (lip->li_flags & XFS_LI_IN_AIL) {
+ spin_lock(&lip->li_ailp->xa_lock);
xfs_trans_ail_delete(lip->li_ailp, lip,
SHUTDOWN_LOG_IO_ERROR);
}
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 069537c845e5..20bf8e8002d6 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -1224,6 +1224,7 @@ xfs_da3_node_toosmall(
/* start with smaller blk num */
forward = nodehdr.forw < nodehdr.back;
for (i = 0; i < 2; forward = !forward, i++) {
+ struct xfs_da3_icnode_hdr thdr;
if (forward)
blkno = nodehdr.forw;
else
@@ -1236,10 +1237,10 @@ xfs_da3_node_toosmall(
return(error);
node = bp->b_addr;
- xfs_da3_node_hdr_from_disk(&nodehdr, node);
+ xfs_da3_node_hdr_from_disk(&thdr, node);
xfs_trans_brelse(state->args->trans, bp);
- if (count - nodehdr.count >= 0)
+ if (count - thdr.count >= 0)
break; /* fits with at least 25% to spare */
}
if (i >= 2) {
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index 1edb5cc3e5f4..18272c766a50 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -515,7 +515,7 @@ typedef struct xfs_swapext
/* XFS_IOC_GETBIOSIZE ---- deprecated 47 */
#define XFS_IOC_GETBMAPX _IOWR('X', 56, struct getbmap)
#define XFS_IOC_ZERO_RANGE _IOW ('X', 57, struct xfs_flock64)
-#define XFS_IOC_FREE_EOFBLOCKS _IOR ('X', 58, struct xfs_eofblocks)
+#define XFS_IOC_FREE_EOFBLOCKS _IOR ('X', 58, struct xfs_fs_eofblocks)
/*
* ioctl commands that replace IRIX syssgi()'s
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 193206ba4358..474807a401c8 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -119,11 +119,6 @@ xfs_inode_free(
ip->i_itemp = NULL;
}
- /* asserts to verify all state is correct here */
- ASSERT(atomic_read(&ip->i_pincount) == 0);
- ASSERT(!spin_is_locked(&ip->i_flags_lock));
- ASSERT(!xfs_isiflocked(ip));
-
/*
* Because we use RCU freeing we need to ensure the inode always
* appears to be reclaimed with an invalid inode number when in the
@@ -135,6 +130,10 @@ xfs_inode_free(
ip->i_ino = 0;
spin_unlock(&ip->i_flags_lock);
+ /* asserts to verify all state is correct here */
+ ASSERT(atomic_read(&ip->i_pincount) == 0);
+ ASSERT(!xfs_isiflocked(ip));
+
call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index dabda9521b4b..cc179878fe41 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1970,6 +1970,13 @@ xlog_recover_do_inode_buffer(
* magic number. If we don't recognise the magic number in the buffer, then
* return a LSN of -1 so that the caller knows it was an unrecognised block and
* so can recover the buffer.
+ *
+ * Note: we cannot rely solely on magic number matches to determine that the
+ * buffer has a valid LSN - we also need to verify that it belongs to this
+ * filesystem, so we need to extract the object's LSN and compare it to that
+ * which we read from the superblock. If the UUIDs don't match, then we've got a
+ * stale metadata block from an old filesystem instance that we need to recover
+ * over the top of.
*/
static xfs_lsn_t
xlog_recover_get_buf_lsn(
@@ -1980,6 +1987,8 @@ xlog_recover_get_buf_lsn(
__uint16_t magic16;
__uint16_t magicda;
void *blk = bp->b_addr;
+ uuid_t *uuid;
+ xfs_lsn_t lsn = -1;
/* v4 filesystems always recover immediately */
if (!xfs_sb_version_hascrc(&mp->m_sb))
@@ -1992,43 +2001,79 @@ xlog_recover_get_buf_lsn(
case XFS_ABTB_MAGIC:
case XFS_ABTC_MAGIC:
case XFS_IBT_CRC_MAGIC:
- case XFS_IBT_MAGIC:
- return be64_to_cpu(
- ((struct xfs_btree_block *)blk)->bb_u.s.bb_lsn);
+ case XFS_IBT_MAGIC: {
+ struct xfs_btree_block *btb = blk;
+
+ lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
+ uuid = &btb->bb_u.s.bb_uuid;
+ break;
+ }
case XFS_BMAP_CRC_MAGIC:
- case XFS_BMAP_MAGIC:
- return be64_to_cpu(
- ((struct xfs_btree_block *)blk)->bb_u.l.bb_lsn);
+ case XFS_BMAP_MAGIC: {
+ struct xfs_btree_block *btb = blk;
+
+ lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
+ uuid = &btb->bb_u.l.bb_uuid;
+ break;
+ }
case XFS_AGF_MAGIC:
- return be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
+ lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
+ uuid = &((struct xfs_agf *)blk)->agf_uuid;
+ break;
case XFS_AGFL_MAGIC:
- return be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
+ lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
+ uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
+ break;
case XFS_AGI_MAGIC:
- return be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
+ lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
+ uuid = &((struct xfs_agi *)blk)->agi_uuid;
+ break;
case XFS_SYMLINK_MAGIC:
- return be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
+ lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
+ uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
+ break;
case XFS_DIR3_BLOCK_MAGIC:
case XFS_DIR3_DATA_MAGIC:
case XFS_DIR3_FREE_MAGIC:
- return be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
+ lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
+ uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
+ break;
case XFS_ATTR3_RMT_MAGIC:
- return be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
+ lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
+ uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
+ break;
case XFS_SB_MAGIC:
- return be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
+ lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
+ uuid = &((struct xfs_dsb *)blk)->sb_uuid;
+ break;
default:
break;
}
+ if (lsn != (xfs_lsn_t)-1) {
+ if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
+ goto recover_immediately;
+ return lsn;
+ }
+
magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
switch (magicda) {
case XFS_DIR3_LEAF1_MAGIC:
case XFS_DIR3_LEAFN_MAGIC:
case XFS_DA3_NODE_MAGIC:
- return be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
+ lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
+ uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
+ break;
default:
break;
}
+ if (lsn != (xfs_lsn_t)-1) {
+ if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
+ goto recover_immediately;
+ return lsn;
+ }
+
/*
* We do individual object checks on dquot and inode buffers as they
* have their own individual LSN records. Also, we could have a stale
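The hunk above reduces every recognised magic number to the same two facts: the object's LSN and the UUID it was written with. A compact, user-space sketch of that decision rule, using hypothetical stand-in types rather than the real XFS structures:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    typedef struct { uint8_t b[16]; } uuid_t;      /* stand-in for the kernel uuid_t */
    #define LSN_RECOVER_NOW ((int64_t)-1)           /* "recover immediately" sentinel */

    /* Return the block's LSN, or LSN_RECOVER_NOW when the block is either
     * unrecognised or carries a UUID from a different filesystem instance. */
    static int64_t buf_lsn_or_recover(bool magic_known, int64_t blk_lsn,
                                      const uuid_t *blk_uuid, const uuid_t *sb_uuid)
    {
        if (!magic_known)
            return LSN_RECOVER_NOW;
        if (memcmp(blk_uuid, sb_uuid, sizeof(*sb_uuid)) != 0)
            return LSN_RECOVER_NOW;   /* stale block from an old mkfs: recover over it */
        return blk_lsn;
    }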
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 290734191f72..b46fb45f2cca 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1322,10 +1322,9 @@ extern int drm_newctx(struct drm_device *dev, void *data,
extern int drm_rmctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern void drm_legacy_ctxbitmap_init(struct drm_device *dev);
-extern void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
-extern void drm_legacy_ctxbitmap_release(struct drm_device *dev,
- struct drm_file *file_priv);
+extern int drm_ctxbitmap_init(struct drm_device *dev);
+extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
+extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
extern int drm_setsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index fd54a14a7c2a..3d79e513c0b3 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -12,11 +12,14 @@
{0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2fdb4a451b49..0e6f765aa1f5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -862,6 +862,17 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
return blk_queue_get_max_sectors(q, rq->cmd_flags);
}
+static inline unsigned int blk_rq_count_bios(struct request *rq)
+{
+ unsigned int nr_bios = 0;
+ struct bio *bio;
+
+ __rq_for_each_bio(bio, rq)
+ nr_bios++;
+
+ return nr_bios;
+}
+
/*
* Request issue related functions.
*/
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index ce6df39f60ff..8f47625a0661 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -335,6 +335,8 @@ extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req);
extern void ceph_osdc_sync(struct ceph_osd_client *osdc);
+extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc);
+
extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
struct ceph_vino vino,
struct ceph_file_layout *layout,
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 653073de09e3..ed419c62dde1 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -406,13 +406,14 @@ int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);
union map_info *dm_get_rq_mapinfo(struct request *rq);
+struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
+
/*
* Geometry functions.
*/
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
-
/*-----------------------------------------------------------------
* Functions for manipulating device-mapper tables.
*---------------------------------------------------------------*/
diff --git a/include/linux/hid.h b/include/linux/hid.h
index ee1ffc5e19c9..31b9d299ef6c 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -756,6 +756,10 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
struct hid_device *hid_allocate_device(void);
struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
+struct hid_report *hid_validate_values(struct hid_device *hid,
+ unsigned int type, unsigned int id,
+ unsigned int field_index,
+ unsigned int report_counts);
int hid_open_report(struct hid_device *device);
int hid_check_keys_pressed(struct hid_device *hid);
int hid_connect(struct hid_device *hid, unsigned int connect_mask);
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index a3b8b2e2d244..d98503bde7e9 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -30,10 +30,13 @@
/*
* Framework version for util services.
*/
+#define UTIL_FW_MINOR 0
+
+#define UTIL_WS2K8_FW_MAJOR 1
+#define UTIL_WS2K8_FW_VERSION (UTIL_WS2K8_FW_MAJOR << 16 | UTIL_FW_MINOR)
#define UTIL_FW_MAJOR 3
-#define UTIL_FW_MINOR 0
-#define UTIL_FW_MAJOR_MINOR (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
+#define UTIL_FW_VERSION (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
/*
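The version macros above pack a 16-bit major number into the upper half-word and the minor into the lower half-word. A small stand-alone check of that packing, reusing the definitions from the hunk (the assertions are only illustrative):

    #include <assert.h>
    #include <stdio.h>

    #define UTIL_FW_MINOR          0
    #define UTIL_WS2K8_FW_MAJOR    1
    #define UTIL_WS2K8_FW_VERSION  (UTIL_WS2K8_FW_MAJOR << 16 | UTIL_FW_MINOR)
    #define UTIL_FW_MAJOR          3
    #define UTIL_FW_VERSION        (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)

    int main(void)
    {
        assert(UTIL_WS2K8_FW_VERSION == 0x00010000);
        assert(UTIL_FW_VERSION == 0x00030000);
        printf("ws2k8=%#x current=%#x\n", UTIL_WS2K8_FW_VERSION, UTIL_FW_VERSION);
        return 0;
    }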
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ca645a01d37a..0fbbc7aa02cb 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -533,6 +533,7 @@ int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
+unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 60e95872da29..ecc82b37c4cc 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -53,23 +53,6 @@ struct mem_cgroup_reclaim_cookie {
unsigned int generation;
};
-enum mem_cgroup_filter_t {
- VISIT, /* visit current node */
- SKIP, /* skip the current node and continue traversal */
- SKIP_TREE, /* skip the whole subtree and continue traversal */
-};
-
-/*
- * mem_cgroup_filter_t predicate might instruct mem_cgroup_iter_cond how to
- * iterate through the hierarchy tree. Each tree element is checked by the
- * predicate before it is returned by the iterator. If a filter returns
- * SKIP or SKIP_TREE then the iterator code continues traversal (with the
- * next node down the hierarchy or the next node that doesn't belong under the
- * memcg's subtree).
- */
-typedef enum mem_cgroup_filter_t
-(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root);
-
#ifdef CONFIG_MEMCG
/*
* All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -137,18 +120,9 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
struct page *oldpage, struct page *newpage, bool migration_ok);
-struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
- struct mem_cgroup *prev,
- struct mem_cgroup_reclaim_cookie *reclaim,
- mem_cgroup_iter_filter cond);
-
-static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
- struct mem_cgroup *prev,
- struct mem_cgroup_reclaim_cookie *reclaim)
-{
- return mem_cgroup_iter_cond(root, prev, reclaim, NULL);
-}
-
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
+ struct mem_cgroup *,
+ struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
/*
@@ -260,9 +234,9 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
mem_cgroup_update_page_stat(page, idx, -1);
}
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
- struct mem_cgroup *root);
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned);
void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
@@ -376,15 +350,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
struct page *oldpage, struct page *newpage, bool migration_ok)
{
}
-static inline struct mem_cgroup *
-mem_cgroup_iter_cond(struct mem_cgroup *root,
- struct mem_cgroup *prev,
- struct mem_cgroup_reclaim_cookie *reclaim,
- mem_cgroup_iter_filter cond)
-{
- /* first call must return non-NULL, second return NULL */
- return (struct mem_cgroup *)(unsigned long)!prev;
-}
static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
@@ -471,11 +436,11 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
}
static inline
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
- struct mem_cgroup *root)
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned)
{
- return VISIT;
+ return 0;
}
static inline void mem_cgroup_split_huge_fixup(struct page *head)
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index ccd4260834c5..bab49da8a0f0 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -15,8 +15,8 @@
#include <linux/spinlock_types.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
-
#include <linux/atomic.h>
+#include <asm/processor.h>
/*
* Simple, straightforward mutexes with strict semantics:
@@ -175,8 +175,8 @@ extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
-#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
-#define arch_mutex_cpu_relax() cpu_relax()
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
#endif
#endif
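With the change above, an architecture overrides the spin-wait hint simply by defining arch_mutex_cpu_relax before this header is parsed (typically in <asm/processor.h>), instead of selecting a Kconfig symbol. A minimal stand-alone sketch of that override-with-fallback idiom; cpu_relax() here is only a stand-in for the real per-arch hint:

    #include <stdio.h>

    static void cpu_relax(void) { /* stand-in: a real arch emits a pause/yield hint */ }

    /* A hypothetical arch header would have defined this already;
     * otherwise the generic fallback from the hunk applies. */
    #ifndef arch_mutex_cpu_relax
    # define arch_mutex_cpu_relax() cpu_relax()
    #endif

    int main(void)
    {
        arch_mutex_cpu_relax();   /* expands to cpu_relax() unless overridden */
        puts("generic fallback used");
        return 0;
    }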
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 041b42a305f6..3de49aca4519 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -950,14 +950,14 @@ struct netdev_phys_port_id {
* multiple net devices on single physical port.
*
* void (*ndo_add_vxlan_port)(struct net_device *dev,
- * sa_family_t sa_family, __u16 port);
+ * sa_family_t sa_family, __be16 port);
* Called by vxlan to notify a driver about the UDP port and socket
* address family that vxlan is listening to. It is called only when
* a new port starts listening. The operation is protected by the
* vxlan_net->sock_lock.
*
* void (*ndo_del_vxlan_port)(struct net_device *dev,
- * sa_family_t sa_family, __u16 port);
+ * sa_family_t sa_family, __be16 port);
* Called by vxlan to notify the driver about a UDP port and socket
* address family that vxlan is not listening to anymore. The operation
* is protected by the vxlan_net->sock_lock.
@@ -1093,10 +1093,10 @@ struct net_device_ops {
struct netdev_phys_port_id *ppid);
void (*ndo_add_vxlan_port)(struct net_device *dev,
sa_family_t sa_family,
- __u16 port);
+ __be16 port);
void (*ndo_del_vxlan_port)(struct net_device *dev,
sa_family_t sa_family,
- __u16 port);
+ __be16 port);
};
/*
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index d80e2753847c..9ac9fbde7b61 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -296,10 +296,12 @@ ip_set_eexist(int ret, u32 flags)
/* Match elements marked with nomatch */
static inline bool
-ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt)
+ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt, struct ip_set *set)
{
return adt == IPSET_TEST &&
- ret == -ENOTEMPTY && ((flags >> 16) & IPSET_FLAG_NOMATCH);
+ (set->type->features & IPSET_TYPE_NOMATCH) &&
+ ((flags >> 16) & IPSET_FLAG_NOMATCH) &&
+ (ret > 0 || ret == -ENOTEMPTY);
}
/* Check the NLA_F_NET_BYTEORDER flag */
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 535cecf1e02f..fcd63baee5f2 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -1,8 +1,6 @@
#ifndef __OF_IRQ_H
#define __OF_IRQ_H
-#if defined(CONFIG_OF)
-struct of_irq;
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/irq.h>
@@ -10,14 +8,6 @@ struct of_irq;
#include <linux/ioport.h>
#include <linux/of.h>
-/*
- * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
- * implements it differently. However, the prototype is the same for all,
- * so declare it here regardless of the CONFIG_OF_IRQ setting.
- */
-extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
-
-#if defined(CONFIG_OF_IRQ)
/**
* of_irq - container for device_node/irq_specifier pair for an irq controller
* @controller: pointer to interrupt controller device tree node
@@ -71,11 +61,17 @@ extern int of_irq_to_resource(struct device_node *dev, int index,
extern int of_irq_count(struct device_node *dev);
extern int of_irq_to_resource_table(struct device_node *dev,
struct resource *res, int nr_irqs);
-extern struct device_node *of_irq_find_parent(struct device_node *child);
extern void of_irq_init(const struct of_device_id *matches);
-#endif /* CONFIG_OF_IRQ */
+#if defined(CONFIG_OF)
+/*
+ * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
+ * implements it differently. However, the prototype is the same for all,
+ * so declare it here regardless of the CONFIG_OF_IRQ setting.
+ */
+extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
+extern struct device_node *of_irq_find_parent(struct device_node *child);
#else /* !CONFIG_OF */
static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
diff --git a/include/linux/smp.h b/include/linux/smp.h
index cfb7ca094b38..731f5237d5f4 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -155,6 +155,12 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
static inline void kick_all_cpus_sync(void) { }
+static inline void __smp_call_function_single(int cpuid,
+ struct call_single_data *data, int wait)
+{
+ on_each_cpu(data->func, data->info, wait);
+}
+
#endif /* !SMP */
/*
diff --git a/include/linux/timex.h b/include/linux/timex.h
index b3726e61368e..dd3edd7dfc94 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -141,6 +141,7 @@ extern int do_adjtimex(struct timex *);
extern void hardpps(const struct timespec *, const struct timespec *);
int read_current_timer(unsigned long *timer_val);
+void ntp_notify_cmos_timer(void);
/* The clock frequency of the i8253/i8254 PIT */
#define PIT_TICK_RATE 1193182ul
diff --git a/include/net/ip.h b/include/net/ip.h
index 48f55979d842..5e5268807a1c 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -264,9 +264,11 @@ int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
-static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, struct sock *sk)
+static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk)
{
- if (iph->frag_off & htons(IP_DF)) {
+ struct iphdr *iph = ip_hdr(skb);
+
+ if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
/* This is only to work around buggy Windows95/2000
* VJ compression implementations. If the ID field
* does not change, they drop every other packet in
@@ -278,9 +280,11 @@ static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, str
__ip_select_ident(iph, dst, 0);
}
-static inline void ip_select_ident_more(struct iphdr *iph, struct dst_entry *dst, struct sock *sk, int more)
+static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more)
{
- if (iph->frag_off & htons(IP_DF)) {
+ struct iphdr *iph = ip_hdr(skb);
+
+ if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
if (sk && inet_sk(sk)->inet_daddr) {
iph->id = htons(inet_sk(sk)->inet_id);
inet_sk(sk)->inet_id += 1 + more;
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index ff95434e50ca..88a1d4060d52 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -86,7 +86,7 @@ static inline void nf_ct_ext_destroy(struct nf_conn *ct)
static inline void nf_ct_ext_free(struct nf_conn *ct)
{
if (ct->ext)
- kfree(ct->ext);
+ kfree_rcu(ct->ext, rcu);
}
/* Add this type, returns pointer to data or NULL. */
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 60ae7c3db912..4c2301d2ef1a 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -618,6 +618,7 @@ TRACE_EVENT(block_rq_remap,
__field( unsigned int, nr_sector )
__field( dev_t, old_dev )
__field( sector_t, old_sector )
+ __field( unsigned int, nr_bios )
__array( char, rwbs, RWBS_LEN)
),
@@ -627,15 +628,16 @@ TRACE_EVENT(block_rq_remap,
__entry->nr_sector = blk_rq_sectors(rq);
__entry->old_dev = dev;
__entry->old_sector = from;
+ __entry->nr_bios = blk_rq_count_bios(rq);
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
),
- TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
+ TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
(unsigned long long)__entry->sector,
__entry->nr_sector,
MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
- (unsigned long long)__entry->old_sector)
+ (unsigned long long)__entry->old_sector, __entry->nr_bios)
);
#endif /* _TRACE_BLOCK_H */
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 45702c3c3837..f18b3b76e01e 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -42,6 +42,7 @@ struct extent_buffer;
{ BTRFS_TREE_LOG_OBJECTID, "TREE_LOG" }, \
{ BTRFS_QUOTA_TREE_OBJECTID, "QUOTA_TREE" }, \
{ BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \
+ { BTRFS_UUID_TREE_OBJECTID, "UUID_RELOC" }, \
{ BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
#define show_root_type(obj) \
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index fa8b3adf9ffb..46d41e8b0dcc 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -1007,4 +1007,6 @@ struct drm_radeon_info {
#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3
#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2
+#define CIK_TILE_MODE_DEPTH_STENCIL_1D 5
+
#endif
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index ca1d90bcb74d..009a655a5d35 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -324,7 +324,7 @@ struct perf_event_attr {
#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
-#define PERF_EVENT_IOC_ID _IOR('$', 7, u64 *)
+#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
enum perf_event_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,
@@ -380,10 +380,13 @@ struct perf_event_mmap_page {
union {
__u64 capabilities;
struct {
- __u64 cap_usr_time : 1,
- cap_usr_rdpmc : 1,
- cap_usr_time_zero : 1,
- cap_____res : 61;
+ __u64 cap_bit0 : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
+ cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */
+
+ cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */
+ cap_user_time : 1, /* The time_* fields are used */
+ cap_user_time_zero : 1, /* The time_zero field is used */
+ cap_____res : 59;
};
};
@@ -442,12 +445,13 @@ struct perf_event_mmap_page {
* ((rem * time_mult) >> time_shift);
*/
__u64 time_zero;
+ __u32 size; /* Header size up to __reserved[] fields. */
/*
* Hole for extension of the self monitor capabilities
*/
- __u64 __reserved[119]; /* align to 1k */
+ __u8 __reserved[118*8+4]; /* align to 1k. */
/*
* Control data for the mmap() data buffer.
@@ -528,6 +532,7 @@ enum perf_event_type {
* u64 len;
* u64 pgoff;
* char filename[];
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_MMAP = 1,
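Together with perf_event_init_userpage() further down in this series, the renamed capability bits let user space tell old kernels (where bit 0 meant cap_usr_time) apart from new ones (where bit 0 is pinned to zero and cap_bit0_is_deprecated is pinned to one). A hedged user-space sketch of that probe, assuming the UAPI struct from <linux/perf_event.h> is already mapped at pc:

    #include <linux/perf_event.h>
    #include <stdbool.h>

    /* True if the fine-grained cap_user_* bits in the mmap'ed page can be trusted. */
    static bool userpage_caps_usable(const volatile struct perf_event_mmap_page *pc)
    {
        if (pc->cap_bit0_is_deprecated)
            return true;    /* new layout: cap_user_rdpmc/time/time_zero are valid */
        /* old layout: bit 0 was cap_usr_time; treat capabilities conservatively */
        return false;
    }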
diff --git a/ipc/msg.c b/ipc/msg.c
index b0d541d42677..9e4310c546ae 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -165,6 +165,15 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
ipc_rmid(&msg_ids(ns), &s->q_perm);
}
+static void msg_rcu_free(struct rcu_head *head)
+{
+ struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+ struct msg_queue *msq = ipc_rcu_to_struct(p);
+
+ security_msg_queue_free(msq);
+ ipc_rcu_free(head);
+}
+
/**
* newque - Create a new msg queue
* @ns: namespace
@@ -189,15 +198,14 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
msq->q_perm.security = NULL;
retval = security_msg_queue_alloc(msq);
if (retval) {
- ipc_rcu_putref(msq);
+ ipc_rcu_putref(msq, ipc_rcu_free);
return retval;
}
/* ipc_addid() locks msq upon success. */
id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
if (id < 0) {
- security_msg_queue_free(msq);
- ipc_rcu_putref(msq);
+ ipc_rcu_putref(msq, msg_rcu_free);
return id;
}
@@ -276,8 +284,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
free_msg(msg);
}
atomic_sub(msq->q_cbytes, &ns->msg_bytes);
- security_msg_queue_free(msq);
- ipc_rcu_putref(msq);
+ ipc_rcu_putref(msq, msg_rcu_free);
}
/*
@@ -717,7 +724,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
rcu_read_lock();
ipc_lock_object(&msq->q_perm);
- ipc_rcu_putref(msq);
+ ipc_rcu_putref(msq, ipc_rcu_free);
if (msq->q_perm.deleted) {
err = -EIDRM;
goto out_unlock0;
diff --git a/ipc/sem.c b/ipc/sem.c
index 69b6a21f3844..19c8b980d1fe 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -243,6 +243,15 @@ static void merge_queues(struct sem_array *sma)
}
}
+static void sem_rcu_free(struct rcu_head *head)
+{
+ struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+ struct sem_array *sma = ipc_rcu_to_struct(p);
+
+ security_sem_free(sma);
+ ipc_rcu_free(head);
+}
+
/*
* If the request contains only one semaphore operation, and there are
* no complex transactions pending, lock only the semaphore involved.
@@ -374,12 +383,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
sem_lock(sma, NULL, -1);
- ipc_rcu_putref(sma);
-}
-
-static inline void sem_putref(struct sem_array *sma)
-{
- ipc_rcu_putref(sma);
+ ipc_rcu_putref(sma, ipc_rcu_free);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -458,14 +462,13 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_perm.security = NULL;
retval = security_sem_alloc(sma);
if (retval) {
- ipc_rcu_putref(sma);
+ ipc_rcu_putref(sma, ipc_rcu_free);
return retval;
}
id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
if (id < 0) {
- security_sem_free(sma);
- ipc_rcu_putref(sma);
+ ipc_rcu_putref(sma, sem_rcu_free);
return id;
}
ns->used_sems += nsems;
@@ -1047,8 +1050,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
- security_sem_free(sma);
- ipc_rcu_putref(sma);
+ ipc_rcu_putref(sma, sem_rcu_free);
}
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
@@ -1292,7 +1294,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
rcu_read_unlock();
sem_io = ipc_alloc(sizeof(ushort)*nsems);
if(sem_io == NULL) {
- sem_putref(sma);
+ ipc_rcu_putref(sma, ipc_rcu_free);
return -ENOMEM;
}
@@ -1328,20 +1330,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
if(nsems > SEMMSL_FAST) {
sem_io = ipc_alloc(sizeof(ushort)*nsems);
if(sem_io == NULL) {
- sem_putref(sma);
+ ipc_rcu_putref(sma, ipc_rcu_free);
return -ENOMEM;
}
}
if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) {
- sem_putref(sma);
+ ipc_rcu_putref(sma, ipc_rcu_free);
err = -EFAULT;
goto out_free;
}
for (i = 0; i < nsems; i++) {
if (sem_io[i] > SEMVMX) {
- sem_putref(sma);
+ ipc_rcu_putref(sma, ipc_rcu_free);
err = -ERANGE;
goto out_free;
}
@@ -1629,7 +1631,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 2: allocate new undo structure */
new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
if (!new) {
- sem_putref(sma);
+ ipc_rcu_putref(sma, ipc_rcu_free);
return ERR_PTR(-ENOMEM);
}
diff --git a/ipc/shm.c b/ipc/shm.c
index 2821cdf93adb..d69739610fd4 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -167,6 +167,15 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
ipc_lock_object(&ipcp->shm_perm);
}
+static void shm_rcu_free(struct rcu_head *head)
+{
+ struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+ struct shmid_kernel *shp = ipc_rcu_to_struct(p);
+
+ security_shm_free(shp);
+ ipc_rcu_free(head);
+}
+
static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
ipc_rmid(&shm_ids(ns), &s->shm_perm);
@@ -208,8 +217,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
user_shm_unlock(file_inode(shp->shm_file)->i_size,
shp->mlock_user);
fput (shp->shm_file);
- security_shm_free(shp);
- ipc_rcu_putref(shp);
+ ipc_rcu_putref(shp, shm_rcu_free);
}
/*
@@ -497,7 +505,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
shp->shm_perm.security = NULL;
error = security_shm_alloc(shp);
if (error) {
- ipc_rcu_putref(shp);
+ ipc_rcu_putref(shp, ipc_rcu_free);
return error;
}
@@ -566,8 +574,7 @@ no_id:
user_shm_unlock(size, shp->mlock_user);
fput(file);
no_file:
- security_shm_free(shp);
- ipc_rcu_putref(shp);
+ ipc_rcu_putref(shp, shm_rcu_free);
return error;
}
diff --git a/ipc/util.c b/ipc/util.c
index e829da9ed01f..fdb8ae740775 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -474,11 +474,6 @@ void ipc_free(void* ptr, int size)
kfree(ptr);
}
-struct ipc_rcu {
- struct rcu_head rcu;
- atomic_t refcount;
-} ____cacheline_aligned_in_smp;
-
/**
* ipc_rcu_alloc - allocate ipc and rcu space
* @size: size desired
@@ -505,27 +500,24 @@ int ipc_rcu_getref(void *ptr)
return atomic_inc_not_zero(&p->refcount);
}
-/**
- * ipc_schedule_free - free ipc + rcu space
- * @head: RCU callback structure for queued work
- */
-static void ipc_schedule_free(struct rcu_head *head)
-{
- vfree(container_of(head, struct ipc_rcu, rcu));
-}
-
-void ipc_rcu_putref(void *ptr)
+void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head))
{
struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
if (!atomic_dec_and_test(&p->refcount))
return;
- if (is_vmalloc_addr(ptr)) {
- call_rcu(&p->rcu, ipc_schedule_free);
- } else {
- kfree_rcu(p, rcu);
- }
+ call_rcu(&p->rcu, func);
+}
+
+void ipc_rcu_free(struct rcu_head *head)
+{
+ struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+
+ if (is_vmalloc_addr(p))
+ vfree(p);
+ else
+ kfree(p);
}
/**
diff --git a/ipc/util.h b/ipc/util.h
index c5f3338ba1fa..f2f5036f2eed 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -47,6 +47,13 @@ static inline void msg_exit_ns(struct ipc_namespace *ns) { }
static inline void shm_exit_ns(struct ipc_namespace *ns) { }
#endif
+struct ipc_rcu {
+ struct rcu_head rcu;
+ atomic_t refcount;
+} ____cacheline_aligned_in_smp;
+
+#define ipc_rcu_to_struct(p) ((void *)(p+1))
+
/*
* Structure that holds the parameters needed by the ipc operations
* (see after)
@@ -120,7 +127,8 @@ void ipc_free(void* ptr, int size);
*/
void* ipc_rcu_alloc(int size);
int ipc_rcu_getref(void *ptr);
-void ipc_rcu_putref(void *ptr);
+void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
+void ipc_rcu_free(struct rcu_head *head);
struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id);
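The new ipc_rcu_putref() signature makes the last reference holder name the callback that frees the object, so type-specific teardown (the security_*_free() hook) runs in the same deferred context as the generic free. A stand-alone sketch of that calling convention with invented names; the kernel version defers the callback through call_rcu() rather than invoking it directly:

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        int refcount;
        char *payload;
    };

    static void obj_free(struct obj *o)              /* generic free, like ipc_rcu_free() */
    {
        free(o);
    }

    static void obj_free_full(struct obj *o)         /* type-specific, like msg_rcu_free() */
    {
        free(o->payload);                            /* stands in for security_*_free() */
        obj_free(o);
    }

    static void obj_putref(struct obj *o, void (*freefn)(struct obj *))
    {
        if (--o->refcount == 0)
            freefn(o);
    }

    int main(void)
    {
        struct obj *o = calloc(1, sizeof(*o));
        if (!o)
            return 1;
        o->refcount = 1;
        o->payload = malloc(16);
        obj_putref(o, obj_free_full);   /* full teardown on the last reference */
        return 0;
    }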
diff --git a/kernel/audit.c b/kernel/audit.c
index 91e53d04b6a9..7b0e23a740ce 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1117,9 +1117,10 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
sleep_time = timeout_start + audit_backlog_wait_time -
jiffies;
- if ((long)sleep_time > 0)
+ if ((long)sleep_time > 0) {
wait_for_auditd(sleep_time);
- continue;
+ continue;
+ }
}
if (audit_rate_check() && printk_ratelimit())
printk(KERN_WARNING
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 247091bf0587..859c8dfd78a1 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -51,6 +51,15 @@ void context_tracking_user_enter(void)
unsigned long flags;
/*
+ * Repeat the user_enter() check here because some archs may be calling
+ * this from asm and if no CPU needs context tracking, they shouldn't
+ * go further. Repeat the check here until they support the static key
+ * check.
+ */
+ if (!static_key_false(&context_tracking_enabled))
+ return;
+
+ /*
* Some contexts may involve an exception occurring in an irq,
* leading to that nesting:
* rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
@@ -151,6 +160,9 @@ void context_tracking_user_exit(void)
{
unsigned long flags;
+ if (!static_key_false(&context_tracking_enabled))
+ return;
+
if (in_interrupt())
return;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index dd236b66ca3a..cb4238e85b38 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3660,6 +3660,26 @@ static void calc_timer_values(struct perf_event *event,
*running = ctx_time - event->tstamp_running;
}
+static void perf_event_init_userpage(struct perf_event *event)
+{
+ struct perf_event_mmap_page *userpg;
+ struct ring_buffer *rb;
+
+ rcu_read_lock();
+ rb = rcu_dereference(event->rb);
+ if (!rb)
+ goto unlock;
+
+ userpg = rb->user_page;
+
+ /* Allow new userspace to detect that bit 0 is deprecated */
+ userpg->cap_bit0_is_deprecated = 1;
+ userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
+
+unlock:
+ rcu_read_unlock();
+}
+
void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
{
}
@@ -4044,6 +4064,7 @@ again:
ring_buffer_attach(event, rb);
rcu_assign_pointer(event->rb, rb);
+ perf_event_init_userpage(event);
perf_event_update_userpage(event);
unlock:
diff --git a/kernel/params.c b/kernel/params.c
index 81c4e78c8f4c..c00d5b502aa4 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -254,11 +254,11 @@ int parse_args(const char *doing,
STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtoul);
+STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtol);
STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(int, int, "%i", long, kstrtoul);
+STANDARD_PARAM_DEF(int, int, "%i", long, kstrtol);
STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(long, long, "%li", long, kstrtoul);
+STANDARD_PARAM_DEF(long, long, "%li", long, kstrtol);
STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul);
int param_set_charp(const char *val, const struct kernel_param *kp)
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 269ed9384cc4..f813b3474646 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -32,7 +32,14 @@ EXPORT_SYMBOL(cad_pid);
#endif
enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
-int reboot_default;
+/*
+ * This variable is used privately to keep track of whether or not
+ * reboot_type is still set to its default value (i.e., reboot= hasn't
+ * been set on the command line). This is needed so that we can
+ * suppress DMI scanning for reboot quirks. Without it, it's
+ * impossible to override a faulty reboot quirk without recompiling.
+ */
+int reboot_default = 1;
int reboot_cpu;
enum reboot_type reboot_type = BOOT_ACPI;
int reboot_force;
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index e076bddd4c66..196559994f7c 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -124,7 +124,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
SEQ_printf(m, " ");
SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
- p->comm, p->pid,
+ p->comm, task_pid_nr(p),
SPLIT_NS(p->se.vruntime),
(long long)(p->nvcsw + p->nivcsw),
p->prio);
@@ -289,7 +289,7 @@ do { \
P(nr_load_updates);
P(nr_uninterruptible);
PN(next_balance);
- P(curr->pid);
+ SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
PN(clock);
P(cpu_load[0]);
P(cpu_load[1]);
@@ -492,7 +492,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
unsigned long nr_switches;
- SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
+ SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
get_nr_threads(p));
SEQ_printf(m,
"---------------------------------------------------------"
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9b3fe1cd8f40..7c70201fbc61 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4242,7 +4242,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
}
if (!se) {
- cfs_rq->h_load = rq->avg.load_avg_contrib;
+ cfs_rq->h_load = cfs_rq->runnable_load_avg;
cfs_rq->last_h_load_update = now;
}
@@ -4823,8 +4823,8 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
(busiest->load_per_task * SCHED_POWER_SCALE) /
busiest->group_power;
- if (busiest->avg_load - local->avg_load + scaled_busy_load_per_task >=
- (scaled_busy_load_per_task * imbn)) {
+ if (busiest->avg_load + scaled_busy_load_per_task >=
+ local->avg_load + (scaled_busy_load_per_task * imbn)) {
env->imbalance = busiest->load_per_task;
return;
}
@@ -4896,7 +4896,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* max load less than avg load(as we skip the groups at or below
* its cpu_power, while calculating max_load..)
*/
- if (busiest->avg_load < sds->avg_load) {
+ if (busiest->avg_load <= sds->avg_load ||
+ local->avg_load >= sds->avg_load) {
env->imbalance = 0;
return fix_small_imbalance(env, sds);
}
@@ -5928,11 +5929,15 @@ static void task_fork_fair(struct task_struct *p)
cfs_rq = task_cfs_rq(current);
curr = cfs_rq->curr;
- if (unlikely(task_cpu(p) != this_cpu)) {
- rcu_read_lock();
- __set_task_cpu(p, this_cpu);
- rcu_read_unlock();
- }
+ /*
+ * Not only the cpu but also the task_group of the parent might have
+ * been changed after parent->se.parent,cfs_rq were copied to
+ * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
+ * of child point to valid ones.
+ */
+ rcu_read_lock();
+ __set_task_cpu(p, this_cpu);
+ rcu_read_unlock();
update_curr(cfs_rq);
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 5aef494fc8b4..c7edee71bce8 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -104,8 +104,9 @@ static inline void sched_info_queued(struct task_struct *t)
}
/*
- * Called when a process ceases being the active-running process, either
- * voluntarily or involuntarily. Now we can calculate how long we ran.
+ * Called when a process ceases being the active-running process involuntarily
+ * due, typically, to expiring its time slice (this may also be called when
+ * switching to the idle task). Now we can calculate how long we ran.
* Also, if the process is still in the TASK_RUNNING state, call
* sched_info_queued() to mark that it has now again started waiting on
* the runqueue.
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 8f5b3b98577b..bb2215174f05 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -516,13 +516,13 @@ static void sync_cmos_clock(struct work_struct *work)
schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
}
-static void notify_cmos_timer(void)
+void ntp_notify_cmos_timer(void)
{
schedule_delayed_work(&sync_cmos_work, 0);
}
#else
-static inline void notify_cmos_timer(void) { }
+void ntp_notify_cmos_timer(void) { }
#endif
@@ -687,8 +687,6 @@ int __do_adjtimex(struct timex *txc, struct timespec *ts, s32 *time_tai)
if (!(time_status & STA_NANO))
txc->time.tv_usec /= NSEC_PER_USEC;
- notify_cmos_timer();
-
return result;
}
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 48b9fffabdc2..947ba25a95a0 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1703,6 +1703,8 @@ int do_adjtimex(struct timex *txc)
write_seqcount_end(&timekeeper_seq);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+ ntp_notify_cmos_timer();
+
return ret;
}
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 51c4f34d258e..4431610f049a 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -486,7 +486,52 @@ static struct smp_hotplug_thread watchdog_threads = {
.unpark = watchdog_enable,
};
-static int watchdog_enable_all_cpus(void)
+static void restart_watchdog_hrtimer(void *info)
+{
+ struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+ int ret;
+
+ /*
+ * No need to cancel and restart hrtimer if it is currently executing
+ * because it will reprogram itself with the new period now.
+ * We should never see it unqueued here because we are running per-cpu
+ * with interrupts disabled.
+ */
+ ret = hrtimer_try_to_cancel(hrtimer);
+ if (ret == 1)
+ hrtimer_start(hrtimer, ns_to_ktime(sample_period),
+ HRTIMER_MODE_REL_PINNED);
+}
+
+static void update_timers(int cpu)
+{
+ struct call_single_data data = {.func = restart_watchdog_hrtimer};
+ /*
+ * Make sure that perf event counter will adopt to a new
+ * sampling period. Updating the sampling period directly would
+ * be much nicer but we do not have an API for that now so
+ * let's use a big hammer.
+ * Hrtimer will adopt the new period on the next tick but this
+ * might be late already so we have to restart the timer as well.
+ */
+ watchdog_nmi_disable(cpu);
+ __smp_call_function_single(cpu, &data, 1);
+ watchdog_nmi_enable(cpu);
+}
+
+static void update_timers_all_cpus(void)
+{
+ int cpu;
+
+ get_online_cpus();
+ preempt_disable();
+ for_each_online_cpu(cpu)
+ update_timers(cpu);
+ preempt_enable();
+ put_online_cpus();
+}
+
+static int watchdog_enable_all_cpus(bool sample_period_changed)
{
int err = 0;
@@ -496,6 +541,8 @@ static int watchdog_enable_all_cpus(void)
pr_err("Failed to create watchdog threads, disabled\n");
else
watchdog_running = 1;
+ } else if (sample_period_changed) {
+ update_timers_all_cpus();
}
return err;
@@ -520,13 +567,15 @@ int proc_dowatchdog(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int err, old_thresh, old_enabled;
+ static DEFINE_MUTEX(watchdog_proc_mutex);
+ mutex_lock(&watchdog_proc_mutex);
old_thresh = ACCESS_ONCE(watchdog_thresh);
old_enabled = ACCESS_ONCE(watchdog_user_enabled);
err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (err || !write)
- return err;
+ goto out;
set_sample_period();
/*
@@ -535,7 +584,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
* watchdog_*_all_cpus() function takes care of this.
*/
if (watchdog_user_enabled && watchdog_thresh)
- err = watchdog_enable_all_cpus();
+ err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
else
watchdog_disable_all_cpus();
@@ -544,7 +593,8 @@ int proc_dowatchdog(struct ctl_table *table, int write,
watchdog_thresh = old_thresh;
watchdog_user_enabled = old_enabled;
}
-
+out:
+ mutex_unlock(&watchdog_proc_mutex);
return err;
}
#endif /* CONFIG_SYSCTL */
@@ -554,5 +604,5 @@ void __init lockup_detector_init(void)
set_sample_period();
if (watchdog_user_enabled)
- watchdog_enable_all_cpus();
+ watchdog_enable_all_cpus(false);
}
diff --git a/lib/kobject.c b/lib/kobject.c
index 962175134702..669bf190d4fb 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -933,10 +933,7 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
bool kobj_ns_current_may_mount(enum kobj_ns_type type)
{
- bool may_mount = false;
-
- if (type == KOBJ_NS_TYPE_NONE)
- return true;
+ bool may_mount = true;
spin_lock(&kobj_ns_type_lock);
if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
diff --git a/lib/lockref.c b/lib/lockref.c
index e2cd2c0a8821..6f9d434c1521 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -4,6 +4,22 @@
#ifdef CONFIG_CMPXCHG_LOCKREF
/*
+ * Allow weakly-ordered memory architectures to provide barrier-less
+ * cmpxchg semantics for lockref updates.
+ */
+#ifndef cmpxchg64_relaxed
+# define cmpxchg64_relaxed cmpxchg64
+#endif
+
+/*
+ * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
+ * This is useful for architectures with an expensive cpu_relax().
+ */
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
+#endif
+
+/*
* Note that the "cmpxchg()" reloads the "old" value for the
* failure case.
*/
@@ -14,12 +30,13 @@
while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
struct lockref new = old, prev = old; \
CODE \
- old.lock_count = cmpxchg(&lockref->lock_count, \
- old.lock_count, new.lock_count); \
+ old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \
+ old.lock_count, \
+ new.lock_count); \
if (likely(old.lock_count == prev.lock_count)) { \
SUCCESS; \
} \
- cpu_relax(); \
+ arch_mutex_cpu_relax(); \
} \
} while (0)
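The hunk above only relaxes the memory ordering of the compare-and-swap; correctness still comes from re-reading the whole (lock, count) word on failure and retrying only while the spinlock half looks unlocked. A stand-alone C11 sketch of that retry shape, with a plain 64-bit word standing in for struct lockref (this is the pattern, not the kernel code):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Low 32 bits: lock word (0 == unlocked); high 32 bits: reference count. */
    static int lockref_get_if_unlocked(_Atomic uint64_t *lock_count)
    {
        uint64_t old = atomic_load_explicit(lock_count, memory_order_relaxed);

        while ((uint32_t)old == 0) {                    /* lock half looks unlocked */
            uint64_t new = old + ((uint64_t)1 << 32);   /* bump the count half */
            if (atomic_compare_exchange_weak_explicit(lock_count, &old, new,
                                                      memory_order_relaxed,
                                                      memory_order_relaxed))
                return 1;                               /* count taken without the lock */
            /* 'old' was reloaded by the failed CAS; spin politely and retry */
        }
        return 0;                                       /* fall back to taking the lock */
    }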
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d5ff3ce13029..1c52ddbc839b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -39,6 +39,7 @@
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
+#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
@@ -160,6 +161,10 @@ struct mem_cgroup_per_zone {
struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
+ struct rb_node tree_node; /* RB tree node */
+ unsigned long long usage_in_excess;/* Set to the value by which */
+ /* the soft limit is exceeded*/
+ bool on_tree;
struct mem_cgroup *memcg; /* Back pointer, we cannot */
/* use container_of */
};
@@ -168,6 +173,26 @@ struct mem_cgroup_per_node {
struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};
+/*
+ * Cgroups above their limits are maintained in a RB-Tree, independent of
+ * their hierarchy representation
+ */
+
+struct mem_cgroup_tree_per_zone {
+ struct rb_root rb_root;
+ spinlock_t lock;
+};
+
+struct mem_cgroup_tree_per_node {
+ struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
+};
+
+struct mem_cgroup_tree {
+ struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
+};
+
+static struct mem_cgroup_tree soft_limit_tree __read_mostly;
+
struct mem_cgroup_threshold {
struct eventfd_ctx *eventfd;
u64 threshold;
@@ -303,22 +328,6 @@ struct mem_cgroup {
atomic_t numainfo_events;
atomic_t numainfo_updating;
#endif
- /*
- * Protects soft_contributed transitions.
- * See mem_cgroup_update_soft_limit
- */
- spinlock_t soft_lock;
-
- /*
- * If true then this group has increased parents' children_in_excess
- * when it got over the soft limit.
- * When a group falls below the soft limit, parents' children_in_excess
- * is decreased and soft_contributed changed to false.
- */
- bool soft_contributed;
-
- /* Number of children that are in soft limit excess */
- atomic_t children_in_excess;
struct mem_cgroup_per_node *nodeinfo[0];
/* WARNING: nodeinfo must be the last member here */
@@ -422,6 +431,7 @@ static bool move_file(void)
* limit reclaim to prevent infinite loops, if they ever occur.
*/
#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
+#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
enum charge_type {
MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
@@ -648,6 +658,164 @@ page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
return mem_cgroup_zoneinfo(memcg, nid, zid);
}
+static struct mem_cgroup_tree_per_zone *
+soft_limit_tree_node_zone(int nid, int zid)
+{
+ return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
+}
+
+static struct mem_cgroup_tree_per_zone *
+soft_limit_tree_from_page(struct page *page)
+{
+ int nid = page_to_nid(page);
+ int zid = page_zonenum(page);
+
+ return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
+}
+
+static void
+__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
+ struct mem_cgroup_per_zone *mz,
+ struct mem_cgroup_tree_per_zone *mctz,
+ unsigned long long new_usage_in_excess)
+{
+ struct rb_node **p = &mctz->rb_root.rb_node;
+ struct rb_node *parent = NULL;
+ struct mem_cgroup_per_zone *mz_node;
+
+ if (mz->on_tree)
+ return;
+
+ mz->usage_in_excess = new_usage_in_excess;
+ if (!mz->usage_in_excess)
+ return;
+ while (*p) {
+ parent = *p;
+ mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
+ tree_node);
+ if (mz->usage_in_excess < mz_node->usage_in_excess)
+ p = &(*p)->rb_left;
+ /*
+ * We can't avoid mem cgroups that are over their soft
+ * limit by the same amount
+ */
+ else if (mz->usage_in_excess >= mz_node->usage_in_excess)
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&mz->tree_node, parent, p);
+ rb_insert_color(&mz->tree_node, &mctz->rb_root);
+ mz->on_tree = true;
+}
+
+static void
+__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
+ struct mem_cgroup_per_zone *mz,
+ struct mem_cgroup_tree_per_zone *mctz)
+{
+ if (!mz->on_tree)
+ return;
+ rb_erase(&mz->tree_node, &mctz->rb_root);
+ mz->on_tree = false;
+}
+
+static void
+mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
+ struct mem_cgroup_per_zone *mz,
+ struct mem_cgroup_tree_per_zone *mctz)
+{
+ spin_lock(&mctz->lock);
+ __mem_cgroup_remove_exceeded(memcg, mz, mctz);
+ spin_unlock(&mctz->lock);
+}
+
+
+static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
+{
+ unsigned long long excess;
+ struct mem_cgroup_per_zone *mz;
+ struct mem_cgroup_tree_per_zone *mctz;
+ int nid = page_to_nid(page);
+ int zid = page_zonenum(page);
+ mctz = soft_limit_tree_from_page(page);
+
+ /*
+ * Necessary to update all ancestors when hierarchy is used,
+ * because their event counter is not touched.
+ */
+ for (; memcg; memcg = parent_mem_cgroup(memcg)) {
+ mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+ excess = res_counter_soft_limit_excess(&memcg->res);
+ /*
+ * We have to update the tree if mz is on RB-tree or
+ * mem is over its softlimit.
+ */
+ if (excess || mz->on_tree) {
+ spin_lock(&mctz->lock);
+ /* if on-tree, remove it */
+ if (mz->on_tree)
+ __mem_cgroup_remove_exceeded(memcg, mz, mctz);
+ /*
+ * Insert again. mz->usage_in_excess will be updated.
+ * If excess is 0, no tree ops.
+ */
+ __mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
+ spin_unlock(&mctz->lock);
+ }
+ }
+}
+
+static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
+{
+ int node, zone;
+ struct mem_cgroup_per_zone *mz;
+ struct mem_cgroup_tree_per_zone *mctz;
+
+ for_each_node(node) {
+ for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+ mz = mem_cgroup_zoneinfo(memcg, node, zone);
+ mctz = soft_limit_tree_node_zone(node, zone);
+ mem_cgroup_remove_exceeded(memcg, mz, mctz);
+ }
+ }
+}
+
+static struct mem_cgroup_per_zone *
+__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
+{
+ struct rb_node *rightmost = NULL;
+ struct mem_cgroup_per_zone *mz;
+
+retry:
+ mz = NULL;
+ rightmost = rb_last(&mctz->rb_root);
+ if (!rightmost)
+ goto done; /* Nothing to reclaim from */
+
+ mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
+ /*
+ * Remove the node now but someone else can add it back,
+ * we will add it back at the end of reclaim to its correct
+ * position in the tree.
+ */
+ __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
+ if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
+ !css_tryget(&mz->memcg->css))
+ goto retry;
+done:
+ return mz;
+}
+
+static struct mem_cgroup_per_zone *
+mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
+{
+ struct mem_cgroup_per_zone *mz;
+
+ spin_lock(&mctz->lock);
+ mz = __mem_cgroup_largest_soft_limit_node(mctz);
+ spin_unlock(&mctz->lock);
+ return mz;
+}
+
/*
* Implementation Note: reading percpu statistics for memcg.
*
@@ -822,48 +990,6 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
}
/*
- * Called from rate-limited memcg_check_events when enough
- * MEM_CGROUP_TARGET_SOFTLIMIT events are accumulated and it makes sure
- * that all the parents up the hierarchy will be notified that this group
- * is in excess or that it is not in excess anymore. mmecg->soft_contributed
- * makes the transition a single action whenever the state flips from one to
- * the other.
- */
-static void mem_cgroup_update_soft_limit(struct mem_cgroup *memcg)
-{
- unsigned long long excess = res_counter_soft_limit_excess(&memcg->res);
- struct mem_cgroup *parent = memcg;
- int delta = 0;
-
- spin_lock(&memcg->soft_lock);
- if (excess) {
- if (!memcg->soft_contributed) {
- delta = 1;
- memcg->soft_contributed = true;
- }
- } else {
- if (memcg->soft_contributed) {
- delta = -1;
- memcg->soft_contributed = false;
- }
- }
-
- /*
- * Necessary to update all ancestors when hierarchy is used
- * because their event counter is not touched.
- * We track children even outside the hierarchy for the root
- * cgroup because tree walk starting at root should visit
- * all cgroups and we want to prevent from pointless tree
- * walk if no children is below the limit.
- */
- while (delta && (parent = parent_mem_cgroup(parent)))
- atomic_add(delta, &parent->children_in_excess);
- if (memcg != root_mem_cgroup && !root_mem_cgroup->use_hierarchy)
- atomic_add(delta, &root_mem_cgroup->children_in_excess);
- spin_unlock(&memcg->soft_lock);
-}
-
-/*
* Check events in order.
*
*/
@@ -886,7 +1012,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
mem_cgroup_threshold(memcg);
if (unlikely(do_softlimit))
- mem_cgroup_update_soft_limit(memcg);
+ mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
if (unlikely(do_numainfo))
atomic_inc(&memcg->numainfo_events);
@@ -929,15 +1055,6 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
return memcg;
}
-static enum mem_cgroup_filter_t
-mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root,
- mem_cgroup_iter_filter cond)
-{
- if (!cond)
- return VISIT;
- return cond(memcg, root);
-}
-
/*
* Returns a next (in a pre-order walk) alive memcg (with elevated css
* ref. count) or NULL if the whole root's subtree has been visited.
@@ -945,7 +1062,7 @@ mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root,
* helper function to be used by mem_cgroup_iter
*/
static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
- struct mem_cgroup *last_visited, mem_cgroup_iter_filter cond)
+ struct mem_cgroup *last_visited)
{
struct cgroup_subsys_state *prev_css, *next_css;
@@ -963,31 +1080,11 @@ skip_node:
if (next_css) {
struct mem_cgroup *mem = mem_cgroup_from_css(next_css);
- switch (mem_cgroup_filter(mem, root, cond)) {
- case SKIP:
+ if (css_tryget(&mem->css))
+ return mem;
+ else {
prev_css = next_css;
goto skip_node;
- case SKIP_TREE:
- if (mem == root)
- return NULL;
- /*
- * css_rightmost_descendant is not an optimal way to
- * skip through a subtree (especially for imbalanced
- * trees leaning to right) but that's what we have right
- * now. More effective solution would be traversing
- * right-up for first non-NULL without calling
- * css_next_descendant_pre afterwards.
- */
- prev_css = css_rightmost_descendant(next_css);
- goto skip_node;
- case VISIT:
- if (css_tryget(&mem->css))
- return mem;
- else {
- prev_css = next_css;
- goto skip_node;
- }
- break;
}
}
@@ -1051,7 +1148,6 @@ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
* @root: hierarchy root
* @prev: previously returned memcg, NULL on first invocation
* @reclaim: cookie for shared reclaim walks, NULL for full walks
- * @cond: filter for visited nodes, NULL for no filter
*
* Returns references to children of the hierarchy below @root, or
* @root itself, or %NULL after a full round-trip.
@@ -1064,18 +1160,15 @@ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
* divide up the memcgs in the hierarchy among all concurrent
* reclaimers operating on the same zone and priority.
*/
-struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
struct mem_cgroup *prev,
- struct mem_cgroup_reclaim_cookie *reclaim,
- mem_cgroup_iter_filter cond)
+ struct mem_cgroup_reclaim_cookie *reclaim)
{
struct mem_cgroup *memcg = NULL;
struct mem_cgroup *last_visited = NULL;
- if (mem_cgroup_disabled()) {
- /* first call must return non-NULL, second return NULL */
- return (struct mem_cgroup *)(unsigned long)!prev;
- }
+ if (mem_cgroup_disabled())
+ return NULL;
if (!root)
root = root_mem_cgroup;
@@ -1086,9 +1179,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
if (!root->use_hierarchy && root != root_mem_cgroup) {
if (prev)
goto out_css_put;
- if (mem_cgroup_filter(root, root, cond) == VISIT)
- return root;
- return NULL;
+ return root;
}
rcu_read_lock();
@@ -1111,7 +1202,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
last_visited = mem_cgroup_iter_load(iter, root, &seq);
}
- memcg = __mem_cgroup_iter_next(root, last_visited, cond);
+ memcg = __mem_cgroup_iter_next(root, last_visited);
if (reclaim) {
mem_cgroup_iter_update(iter, last_visited, memcg, seq);
@@ -1122,11 +1213,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
reclaim->generation = iter->generation;
}
- /*
- * We have finished the whole tree walk or no group has been
- * visited because filter told us to skip the root node.
- */
- if (!memcg && (prev || (cond && !last_visited)))
+ if (prev && !memcg)
goto out_unlock;
}
out_unlock:
@@ -1767,7 +1854,6 @@ static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
return total;
}
-#if MAX_NUMNODES > 1
/**
* test_mem_cgroup_node_reclaimable
* @memcg: the target memcg
@@ -1790,6 +1876,7 @@ static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
return false;
}
+#if MAX_NUMNODES > 1
/*
* Always updating the nodemask is not very good - even if we have an empty
@@ -1857,50 +1944,104 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
return node;
}
+/*
+ * Check all nodes for whether they contain reclaimable pages.
+ * For a quick scan, we make use of scan_nodes. This allows us to skip
+ * unused nodes. But scan_nodes is lazily updated and may not contain
+ * enough new information. We need to double check.
+ */
+static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
+{
+ int nid;
+
+ /*
+ * quick check, making use of scan_nodes.
+ * We can skip unused nodes.
+ */
+ if (!nodes_empty(memcg->scan_nodes)) {
+ for (nid = first_node(memcg->scan_nodes);
+ nid < MAX_NUMNODES;
+ nid = next_node(nid, memcg->scan_nodes)) {
+
+ if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
+ return true;
+ }
+ }
+ /*
+ * Check the rest of the nodes.
+ */
+ for_each_node_state(nid, N_MEMORY) {
+ if (node_isset(nid, memcg->scan_nodes))
+ continue;
+ if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
+ return true;
+ }
+ return false;
+}
+
#else
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
return 0;
}
-#endif
-
-/*
- * A group is eligible for the soft limit reclaim under the given root
- * hierarchy if
- * a) it is over its soft limit
- * b) any parent up the hierarchy is over its soft limit
- *
- * If the given group doesn't have any children over the limit then it
- * doesn't make any sense to iterate its subtree.
- */
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
- struct mem_cgroup *root)
+static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
{
- struct mem_cgroup *parent;
-
- if (!memcg)
- memcg = root_mem_cgroup;
- parent = memcg;
-
- if (res_counter_soft_limit_excess(&memcg->res))
- return VISIT;
+ return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
+}
+#endif
- /*
- * If any parent up to the root in the hierarchy is over its soft limit
- * then we have to obey and reclaim from this group as well.
- */
- while ((parent = parent_mem_cgroup(parent))) {
- if (res_counter_soft_limit_excess(&parent->res))
- return VISIT;
- if (parent == root)
+static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
+ struct zone *zone,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned)
+{
+ struct mem_cgroup *victim = NULL;
+ int total = 0;
+ int loop = 0;
+ unsigned long excess;
+ unsigned long nr_scanned;
+ struct mem_cgroup_reclaim_cookie reclaim = {
+ .zone = zone,
+ .priority = 0,
+ };
+
+ excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
+
+ while (1) {
+ victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
+ if (!victim) {
+ loop++;
+ if (loop >= 2) {
+ /*
+ * If we have not been able to reclaim
+ * anything, it might be because there are
+ * no reclaimable pages under this hierarchy.
+ */
+ if (!total)
+ break;
+ /*
+ * We want to do more targeted reclaim.
+ * excess >> 2 is not too excessive, so we do not
+ * reclaim too much, nor too little, which would keep
+ * us coming back to reclaim from this cgroup.
+ */
+ if (total >= (excess >> 2) ||
+ (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
+ break;
+ }
+ continue;
+ }
+ if (!mem_cgroup_reclaimable(victim, false))
+ continue;
+ total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
+ zone, &nr_scanned);
+ *total_scanned += nr_scanned;
+ if (!res_counter_soft_limit_excess(&root_memcg->res))
break;
}
-
- if (!atomic_read(&memcg->children_in_excess))
- return SKIP_TREE;
- return SKIP;
+ mem_cgroup_iter_break(root_memcg, victim);
+ return total;
}
static DEFINE_SPINLOCK(memcg_oom_lock);
@@ -2812,7 +2953,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
unlock_page_cgroup(pc);
/*
- * "charge_statistics" updated event counter.
+ * "charge_statistics" updated event counter. Then, check it.
+ * Insert the ancestor (and the ancestor's ancestors) into the softlimit
+ * RB-tree if they exceed the softlimit.
*/
memcg_check_events(memcg, page);
}
@@ -4647,6 +4790,98 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
return ret;
}
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned)
+{
+ unsigned long nr_reclaimed = 0;
+ struct mem_cgroup_per_zone *mz, *next_mz = NULL;
+ unsigned long reclaimed;
+ int loop = 0;
+ struct mem_cgroup_tree_per_zone *mctz;
+ unsigned long long excess;
+ unsigned long nr_scanned;
+
+ if (order > 0)
+ return 0;
+
+ mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
+ /*
+ * This loop can run for a while, especially if memory cgroups continuously
+ * keep exceeding their soft limit and putting the system under
+ * pressure
+ */
+ do {
+ if (next_mz)
+ mz = next_mz;
+ else
+ mz = mem_cgroup_largest_soft_limit_node(mctz);
+ if (!mz)
+ break;
+
+ nr_scanned = 0;
+ reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
+ gfp_mask, &nr_scanned);
+ nr_reclaimed += reclaimed;
+ *total_scanned += nr_scanned;
+ spin_lock(&mctz->lock);
+
+ /*
+ * If we failed to reclaim anything from this memory cgroup
+ * it is time to move on to the next cgroup
+ */
+ next_mz = NULL;
+ if (!reclaimed) {
+ do {
+ /*
+ * Loop until we find yet another one.
+ *
+ * By the time we get the soft_limit lock
+ * again, someone might have added the
+ * group back on the RB tree. Iterate to
+ * make sure we get a different memcg.
+ * mem_cgroup_largest_soft_limit_node returns
+ * NULL if no other cgroup is present on
+ * the tree
+ */
+ next_mz =
+ __mem_cgroup_largest_soft_limit_node(mctz);
+ if (next_mz == mz)
+ css_put(&next_mz->memcg->css);
+ else /* next_mz == NULL or other memcg */
+ break;
+ } while (1);
+ }
+ __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
+ excess = res_counter_soft_limit_excess(&mz->memcg->res);
+ /*
+ * One school of thought says that we should not add
+ * back the node to the tree if reclaim returns 0.
+ * But our reclaim could return 0 simply because, due
+ * to priority, we are exposing a smaller subset of
+ * memory to reclaim from. Consider this as a longer
+ * term TODO.
+ */
+ /* If excess == 0, no tree ops */
+ __mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
+ spin_unlock(&mctz->lock);
+ css_put(&mz->memcg->css);
+ loop++;
+ /*
+ * Could not reclaim anything and there are no more
+ * mem cgroups to try or we seem to be looping without
+ * reclaiming anything.
+ */
+ if (!nr_reclaimed &&
+ (next_mz == NULL ||
+ loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
+ break;
+ } while (!nr_reclaimed);
+ if (next_mz)
+ css_put(&next_mz->memcg->css);
+ return nr_reclaimed;
+}
+
/**
* mem_cgroup_force_empty_list - clears LRU of a group
* @memcg: group to clear
@@ -5911,6 +6146,8 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
mz = &pn->zoneinfo[zone];
lruvec_init(&mz->lruvec);
+ mz->usage_in_excess = 0;
+ mz->on_tree = false;
mz->memcg = memcg;
}
memcg->nodeinfo[node] = pn;
@@ -5966,6 +6203,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
int node;
size_t size = memcg_size();
+ mem_cgroup_remove_from_trees(memcg);
free_css_id(&mem_cgroup_subsys, &memcg->css);
for_each_node(node)
@@ -6002,6 +6240,29 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
}
EXPORT_SYMBOL(parent_mem_cgroup);
+static void __init mem_cgroup_soft_limit_tree_init(void)
+{
+ struct mem_cgroup_tree_per_node *rtpn;
+ struct mem_cgroup_tree_per_zone *rtpz;
+ int tmp, node, zone;
+
+ for_each_node(node) {
+ tmp = node;
+ if (!node_state(node, N_NORMAL_MEMORY))
+ tmp = -1;
+ rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
+ BUG_ON(!rtpn);
+
+ soft_limit_tree.rb_tree_per_node[node] = rtpn;
+
+ for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+ rtpz = &rtpn->rb_tree_per_zone[zone];
+ rtpz->rb_root = RB_ROOT;
+ spin_lock_init(&rtpz->lock);
+ }
+ }
+}
+
static struct cgroup_subsys_state * __ref
mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
@@ -6031,7 +6292,6 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
mutex_init(&memcg->thresholds_lock);
spin_lock_init(&memcg->move_lock);
vmpressure_init(&memcg->vmpressure);
- spin_lock_init(&memcg->soft_lock);
return &memcg->css;
@@ -6109,13 +6369,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
mem_cgroup_invalidate_reclaim_iterators(memcg);
mem_cgroup_reparent_charges(memcg);
- if (memcg->soft_contributed) {
- while ((memcg = parent_mem_cgroup(memcg)))
- atomic_dec(&memcg->children_in_excess);
-
- if (memcg != root_mem_cgroup && !root_mem_cgroup->use_hierarchy)
- atomic_dec(&root_mem_cgroup->children_in_excess);
- }
mem_cgroup_destroy_all_caches(memcg);
vmpressure_cleanup(&memcg->vmpressure);
}
@@ -6790,6 +7043,7 @@ static int __init mem_cgroup_init(void)
{
hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
enable_swap_cgroup();
+ mem_cgroup_soft_limit_tree_init();
memcg_stock_init();
return 0;
}
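For readers skimming the mem_cgroup_soft_reclaim() hunk above, the exit conditions of its iterator loop are easier to follow in isolation. The following is a minimal standalone sketch of that logic, not the kernel function itself; the MEM_CGROUP_MAX_RECLAIM_LOOPS value of 100 is assumed from memcontrol.c and everything else is illustrative.

/* Illustrative sketch of the loop-exit logic in mem_cgroup_soft_reclaim(). */
#include <stdbool.h>

#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100	/* assumed value */

static bool soft_reclaim_should_stop(unsigned long excess_pages,
				     unsigned long total_reclaimed,
				     int full_walks)
{
	if (full_walks < 2)
		return false;	/* always give the hierarchy two full passes */
	if (!total_reclaimed)
		return true;	/* nothing reclaimable under this hierarchy */
	/*
	 * Stop once a quarter of the excess has been reclaimed (with an
	 * excess of 400 pages the walk ends after ~100 reclaimed pages),
	 * or once the walk counter runs away.
	 */
	return total_reclaimed >= (excess_pages >> 2) ||
	       full_walks > MEM_CGROUP_MAX_RECLAIM_LOOPS;
}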
diff --git a/mm/mlock.c b/mm/mlock.c
index d63802663242..67ba6da7d0e3 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -736,6 +736,7 @@ static int do_mlockall(int flags)
/* Ignore errors */
mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
+ cond_resched();
}
out:
return 0;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8ed1b775bdc9..beb35778c69f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -139,23 +139,11 @@ static bool global_reclaim(struct scan_control *sc)
{
return !sc->target_mem_cgroup;
}
-
-static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
-{
- struct mem_cgroup *root = sc->target_mem_cgroup;
- return !mem_cgroup_disabled() &&
- mem_cgroup_soft_reclaim_eligible(root, root) != SKIP_TREE;
-}
#else
static bool global_reclaim(struct scan_control *sc)
{
return true;
}
-
-static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
-{
- return false;
-}
#endif
unsigned long zone_reclaimable_pages(struct zone *zone)
@@ -2176,11 +2164,9 @@ static inline bool should_continue_reclaim(struct zone *zone,
}
}
-static int
-__shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
+static void shrink_zone(struct zone *zone, struct scan_control *sc)
{
unsigned long nr_reclaimed, nr_scanned;
- int groups_scanned = 0;
do {
struct mem_cgroup *root = sc->target_mem_cgroup;
@@ -2188,17 +2174,15 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
.zone = zone,
.priority = sc->priority,
};
- struct mem_cgroup *memcg = NULL;
- mem_cgroup_iter_filter filter = (soft_reclaim) ?
- mem_cgroup_soft_reclaim_eligible : NULL;
+ struct mem_cgroup *memcg;
nr_reclaimed = sc->nr_reclaimed;
nr_scanned = sc->nr_scanned;
- while ((memcg = mem_cgroup_iter_cond(root, memcg, &reclaim, filter))) {
+ memcg = mem_cgroup_iter(root, NULL, &reclaim);
+ do {
struct lruvec *lruvec;
- groups_scanned++;
lruvec = mem_cgroup_zone_lruvec(zone, memcg);
shrink_lruvec(lruvec, sc);
@@ -2218,7 +2202,8 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
mem_cgroup_iter_break(root, memcg);
break;
}
- }
+ memcg = mem_cgroup_iter(root, memcg, &reclaim);
+ } while (memcg);
vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
sc->nr_scanned - nr_scanned,
@@ -2226,37 +2211,6 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
} while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
sc->nr_scanned - nr_scanned, sc));
-
- return groups_scanned;
-}
-
-
-static void shrink_zone(struct zone *zone, struct scan_control *sc)
-{
- bool do_soft_reclaim = mem_cgroup_should_soft_reclaim(sc);
- unsigned long nr_scanned = sc->nr_scanned;
- int scanned_groups;
-
- scanned_groups = __shrink_zone(zone, sc, do_soft_reclaim);
- /*
- * memcg iterator might race with other reclaimer or start from
- * a incomplete tree walk so the tree walk in __shrink_zone
- * might have missed groups that are above the soft limit. Try
- * another loop to catch up with others. Do it just once to
- * prevent from reclaim latencies when other reclaimers always
- * preempt this one.
- */
- if (do_soft_reclaim && !scanned_groups)
- __shrink_zone(zone, sc, do_soft_reclaim);
-
- /*
- * No group is over the soft limit or those that are do not have
- * pages in the zone we are reclaiming so we have to reclaim everybody
- */
- if (do_soft_reclaim && (sc->nr_scanned == nr_scanned)) {
- __shrink_zone(zone, sc, false);
- return;
- }
}
/* Returns true if compaction should go ahead for a high-order request */
@@ -2320,6 +2274,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
{
struct zoneref *z;
struct zone *zone;
+ unsigned long nr_soft_reclaimed;
+ unsigned long nr_soft_scanned;
bool aborted_reclaim = false;
/*
@@ -2359,6 +2315,18 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
continue;
}
}
+ /*
+ * This steals pages from memory cgroups over softlimit
+ * and returns the number of reclaimed pages and
+ * scanned pages. This works for global memory pressure
+ * and balancing, not for a memcg's limit.
+ */
+ nr_soft_scanned = 0;
+ nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+ sc->order, sc->gfp_mask,
+ &nr_soft_scanned);
+ sc->nr_reclaimed += nr_soft_reclaimed;
+ sc->nr_scanned += nr_soft_scanned;
/* need some check for avoid more shrink_zone() */
}
@@ -2952,6 +2920,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
{
int i;
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
+ unsigned long nr_soft_reclaimed;
+ unsigned long nr_soft_scanned;
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
.priority = DEF_PRIORITY,
@@ -3066,6 +3036,15 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
sc.nr_scanned = 0;
+ nr_soft_scanned = 0;
+ /*
+ * Call soft limit reclaim before calling shrink_zone.
+ */
+ nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+ order, sc.gfp_mask,
+ &nr_soft_scanned);
+ sc.nr_reclaimed += nr_soft_reclaimed;
+
/*
* There should be no need to raise the scanning
* priority if enough pages are already being scanned
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 4493913f0d5c..813db4e64602 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -168,6 +168,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
case ETH_P_8021Q:
vhdr = (struct vlan_ethhdr *)skb->data;
vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+ vid |= BATADV_VLAN_HAS_TAG;
if (vhdr->h_vlan_encapsulated_proto != ethertype)
break;
@@ -331,6 +332,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
case ETH_P_8021Q:
vhdr = (struct vlan_ethhdr *)skb->data;
vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+ vid |= BATADV_VLAN_HAS_TAG;
if (vhdr->h_vlan_encapsulated_proto != ethertype)
break;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index b9259efa636e..e74ddc1c29a8 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -207,7 +207,7 @@ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u32 filter_mask)
{
int err = 0;
- struct net_bridge_port *port = br_port_get_rcu(dev);
+ struct net_bridge_port *port = br_port_get_rtnl(dev);
/* not a bridge port and */
if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN))
@@ -451,7 +451,7 @@ static size_t br_get_link_af_size(const struct net_device *dev)
struct net_port_vlans *pv;
if (br_port_exists(dev))
- pv = nbp_get_vlan_info(br_port_get_rcu(dev));
+ pv = nbp_get_vlan_info(br_port_get_rtnl(dev));
else if (dev->priv_flags & IFF_EBRIDGE)
pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev));
else
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 598cb0b333c6..efb57d911569 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -202,13 +202,10 @@ struct net_bridge_port
static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev)
{
- struct net_bridge_port *port =
- rcu_dereference_rtnl(dev->rx_handler_data);
-
- return br_port_exists(dev) ? port : NULL;
+ return rcu_dereference(dev->rx_handler_data);
}
-static inline struct net_bridge_port *br_port_get_rtnl(struct net_device *dev)
+static inline struct net_bridge_port *br_port_get_rtnl(const struct net_device *dev)
{
return br_port_exists(dev) ?
rtnl_dereference(dev->rx_handler_data) : NULL;
@@ -746,6 +743,7 @@ extern struct net_bridge_port *br_get_port(struct net_bridge *br,
extern void br_init_port(struct net_bridge_port *p);
extern void br_become_designated_port(struct net_bridge_port *p);
+extern void __br_set_forward_delay(struct net_bridge *br, unsigned long t);
extern int br_set_forward_delay(struct net_bridge *br, unsigned long x);
extern int br_set_hello_time(struct net_bridge *br, unsigned long x);
extern int br_set_max_age(struct net_bridge *br, unsigned long x);
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 1c0a50f13229..3c86f0538cbb 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -209,7 +209,7 @@ static void br_record_config_information(struct net_bridge_port *p,
p->designated_age = jiffies - bpdu->message_age;
mod_timer(&p->message_age_timer, jiffies
- + (p->br->max_age - bpdu->message_age));
+ + (bpdu->max_age - bpdu->message_age));
}
/* called under bridge lock */
@@ -544,18 +544,27 @@ int br_set_max_age(struct net_bridge *br, unsigned long val)
}
+void __br_set_forward_delay(struct net_bridge *br, unsigned long t)
+{
+ br->bridge_forward_delay = t;
+ if (br_is_root_bridge(br))
+ br->forward_delay = br->bridge_forward_delay;
+}
+
int br_set_forward_delay(struct net_bridge *br, unsigned long val)
{
unsigned long t = clock_t_to_jiffies(val);
+ int err = -ERANGE;
+ spin_lock_bh(&br->lock);
if (br->stp_enabled != BR_NO_STP &&
(t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY))
- return -ERANGE;
+ goto unlock;
- spin_lock_bh(&br->lock);
- br->bridge_forward_delay = t;
- if (br_is_root_bridge(br))
- br->forward_delay = br->bridge_forward_delay;
+ __br_set_forward_delay(br, t);
+ err = 0;
+
+unlock:
spin_unlock_bh(&br->lock);
- return 0;
+ return err;
}
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index d45e760141bb..108084a04671 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -129,6 +129,14 @@ static void br_stp_start(struct net_bridge *br)
char *envp[] = { NULL };
r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
+
+ spin_lock_bh(&br->lock);
+
+ if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY)
+ __br_set_forward_delay(br, BR_MIN_FORWARD_DELAY);
+ else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY)
+ __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY);
+
if (r == 0) {
br->stp_enabled = BR_USER_STP;
br_debug(br, "userspace STP started\n");
@@ -137,10 +145,10 @@ static void br_stp_start(struct net_bridge *br)
br_debug(br, "using kernel STP\n");
/* To start timers on any ports left in blocking */
- spin_lock_bh(&br->lock);
br_port_state_selection(br);
- spin_unlock_bh(&br->lock);
}
+
+ spin_unlock_bh(&br->lock);
}
static void br_stp_stop(struct net_bridge *br)
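The br_stp_start() hunk above clamps bridge_forward_delay to the 802.1D range before STP takes over. Below is a minimal sketch of that clamp, with BR_MIN_FORWARD_DELAY and BR_MAX_FORWARD_DELAY assumed to be 2*HZ and 30*HZ as defined elsewhere in the bridge code (the values are not shown in this hunk):

/* Illustrative clamp applied to forward_delay when STP is enabled. */
#define HZ			100		/* assumed tick rate for the sketch */
#define BR_MIN_FORWARD_DELAY	(2 * HZ)
#define BR_MAX_FORWARD_DELAY	(30 * HZ)

static unsigned long clamp_forward_delay(unsigned long delay)
{
	if (delay < BR_MIN_FORWARD_DELAY)
		return BR_MIN_FORWARD_DELAY;	/* e.g. 0 is raised to 2*HZ */
	if (delay > BR_MAX_FORWARD_DELAY)
		return BR_MAX_FORWARD_DELAY;	/* e.g. 60*HZ is cut to 30*HZ */
	return delay;
}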
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 1606f740d6ae..2b4b32aaa893 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2216,6 +2216,17 @@ void ceph_osdc_sync(struct ceph_osd_client *osdc)
EXPORT_SYMBOL(ceph_osdc_sync);
/*
+ * Call all pending notify callbacks - for use after a watch is
+ * unregistered, to make sure no more callbacks for it will be invoked
+ */
+extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
+{
+ flush_workqueue(osdc->notify_wq);
+}
+EXPORT_SYMBOL(ceph_osdc_flush_notifies);
+
+
+/*
* init, shutdown
*/
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 2c637e9a0b27..fc75c9e461b8 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -550,7 +550,7 @@ static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo
return;
proto = ntohs(eth_hdr(skb)->h_proto);
- if (proto == ETH_P_IP) {
+ if (proto == ETH_P_ARP) {
struct arphdr *arp;
unsigned char *arp_ptr;
/* No arp on this interface */
@@ -1284,15 +1284,14 @@ EXPORT_SYMBOL_GPL(__netpoll_free_async);
void netpoll_cleanup(struct netpoll *np)
{
- if (!np->dev)
- return;
-
rtnl_lock();
+ if (!np->dev)
+ goto out;
__netpoll_cleanup(np);
- rtnl_unlock();
-
dev_put(np->dev);
np->dev = NULL;
+out:
+ rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 9c61f9c02fdb..6cf9f7782ad4 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -135,6 +135,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (dst)
dst->ops->redirect(dst, sk, skb);
+ goto out;
}
if (type == ICMPV6_PKT_TOOBIG) {
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d6c0e64ec97f..dace87f06e5f 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -369,7 +369,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
pip->saddr = fl4.saddr;
pip->protocol = IPPROTO_IGMP;
pip->tot_len = 0; /* filled in later */
- ip_select_ident(pip, &rt->dst, NULL);
+ ip_select_ident(skb, &rt->dst, NULL);
((u8 *)&pip[1])[0] = IPOPT_RA;
((u8 *)&pip[1])[1] = 4;
((u8 *)&pip[1])[2] = 0;
@@ -714,7 +714,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
iph->daddr = dst;
iph->saddr = fl4.saddr;
iph->protocol = IPPROTO_IGMP;
- ip_select_ident(iph, &rt->dst, NULL);
+ ip_select_ident(skb, &rt->dst, NULL);
((u8 *)&iph[1])[0] = IPOPT_RA;
((u8 *)&iph[1])[1] = 4;
((u8 *)&iph[1])[2] = 0;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 000e3d239d64..33d5537881ed 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -32,8 +32,8 @@
* At the moment of writing this notes identifier of IP packets is generated
* to be unpredictable using this code only for packets subjected
* (actually or potentially) to defragmentation. I.e. DF packets less than
- * PMTU in size uses a constant ID and do not use this code (see
- * ip_select_ident() in include/net/ip.h).
+ * PMTU in size when local fragmentation is disabled use a constant ID and do
+ * not use this code (see ip_select_ident() in include/net/ip.h).
*
* Route cache entries hold references to our nodes.
* New cache entries get references via lookup by destination IP address in
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9ee17e3d11c3..a04d872c54f9 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -148,7 +148,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
iph->saddr = saddr;
iph->protocol = sk->sk_protocol;
- ip_select_ident(iph, &rt->dst, sk);
+ ip_select_ident(skb, &rt->dst, sk);
if (opt && opt->opt.optlen) {
iph->ihl += opt->opt.optlen>>2;
@@ -386,7 +386,7 @@ packet_routed:
ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
}
- ip_select_ident_more(iph, &rt->dst, sk,
+ ip_select_ident_more(skb, &rt->dst, sk,
(skb_shinfo(skb)->gso_segs ?: 1) - 1);
skb->priority = sk->sk_priority;
@@ -1316,7 +1316,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
else
ttl = ip_select_ttl(inet, &rt->dst);
- iph = (struct iphdr *)skb->data;
+ iph = ip_hdr(skb);
iph->version = 4;
iph->ihl = 5;
iph->tos = inet->tos;
@@ -1324,7 +1324,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
iph->ttl = ttl;
iph->protocol = sk->sk_protocol;
ip_copy_addrs(iph, fl4);
- ip_select_ident(iph, &rt->dst, sk);
+ ip_select_ident(skb, &rt->dst, sk);
if (opt) {
iph->ihl += opt->optlen>>2;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 9ae54b09254f..62212c772a4b 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1658,7 +1658,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
iph->protocol = IPPROTO_IPIP;
iph->ihl = 5;
iph->tot_len = htons(skb->len);
- ip_select_ident(iph, skb_dst(skb), NULL);
+ ip_select_ident(skb, skb_dst(skb), NULL);
ip_send_check(iph);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index a86c7ae71881..bfec521c717f 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -387,7 +387,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
iph->check = 0;
iph->tot_len = htons(length);
if (!iph->id)
- ip_select_ident(iph, &rt->dst, NULL);
+ ip_select_ident(skb, &rt->dst, NULL);
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 4a22f3e715df..52f3c6b971d2 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -502,7 +502,9 @@ reset:
* ACKs, wait for troubles.
*/
if (crtt > tp->srtt) {
- inet_csk(sk)->icsk_rto = crtt + max(crtt >> 2, tcp_rto_min(sk));
+ /* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
+ crtt >>= 3;
+ inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
} else if (tp->srtt == 0) {
/* RFC6298: 5.7 We've failed to get a valid RTT sample from
* 3WHS. This is most likely due to retransmission,
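The tcp_metrics.c hunk above seeds the RTO from a cached RTT the same way tcp_rtt_estimator() would. Here is a small worked sketch of the arithmetic, assuming tcp_rto_min() returns 200 ticks (HZ/5) as in mainline; the helper name is made up for illustration.

/* Illustrative sketch: seeding icsk_rto from a cached, <<3-scaled RTT. */
static unsigned long seed_rto_from_cached_rtt(unsigned long crtt,
					      unsigned long rto_min)
{
	crtt >>= 3;	/* cached RTT is stored left-shifted by 3, like srtt */
	return crtt + (2 * crtt > rto_min ? 2 * crtt : rto_min);
}

/*
 * Example: crtt == 800 (a 100-tick RTT stored scaled by 8) and rto_min == 200
 * give 100 + max(200, 200) == 300, whereas the old code computed
 * 800 + max(800 >> 2, 200) == 1000 from the unscaled value.
 */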
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index eb1dd4d643f2..b5663c37f089 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -117,7 +117,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
- ip_select_ident(top_iph, dst->child, NULL);
+ ip_select_ident(skb, dst->child, NULL);
top_iph->ttl = ip4_dst_hoplimit(dst->child);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 61355f7f4da5..2d8f4829575b 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1656,9 +1656,9 @@ static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
nla_put(skb, IFLA_IPTUN_LOCAL, sizeof(struct in6_addr),
- &parm->raddr) ||
- nla_put(skb, IFLA_IPTUN_REMOTE, sizeof(struct in6_addr),
&parm->laddr) ||
+ nla_put(skb, IFLA_IPTUN_REMOTE, sizeof(struct in6_addr),
+ &parm->raddr) ||
nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
diff --git a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
index 61aaf70f376e..2205e8eeeacf 100644
--- a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
@@ -69,8 +69,8 @@ icmpv6_manip_pkt(struct sk_buff *skb,
hdr = (struct icmp6hdr *)(skb->data + hdroff);
l3proto->csum_update(skb, iphdroff, &hdr->icmp6_cksum,
tuple, maniptype);
- if (hdr->icmp6_code == ICMPV6_ECHO_REQUEST ||
- hdr->icmp6_code == ICMPV6_ECHO_REPLY) {
+ if (hdr->icmp6_type == ICMPV6_ECHO_REQUEST ||
+ hdr->icmp6_type == ICMPV6_ECHO_REPLY) {
inet_proto_csum_replace2(&hdr->icmp6_cksum, skb,
hdr->icmp6_identifier,
tuple->src.u.icmp.id, 0);
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index f77139007983..f2e30fb31e78 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1052,7 +1052,7 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
* Not an artificial restriction anymore, as we must prevent
* possible loops created by swapping in setlist type of sets. */
if (!(from->type->features == to->type->features &&
- from->type->family == to->type->family))
+ from->family == to->family))
return -IPSET_ERR_TYPE_MISMATCH;
strncpy(from_name, from->name, IPSET_MAXNAMELEN);
@@ -1489,8 +1489,7 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
if (ret == -EAGAIN)
ret = 1;
- return (ret < 0 && ret != -ENOTEMPTY) ? ret :
- ret > 0 ? 0 : -IPSET_ERR_EXIST;
+ return ret > 0 ? 0 : -IPSET_ERR_EXIST;
}
/* Get headed data of a set */
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
index 6fdf88ae2353..dac156f819ac 100644
--- a/net/netfilter/ipset/ip_set_getport.c
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -116,12 +116,12 @@ ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
{
int protoff;
u8 nexthdr;
- __be16 frag_off;
+ __be16 frag_off = 0;
nexthdr = ipv6_hdr(skb)->nexthdr;
protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
&frag_off);
- if (protoff < 0)
+ if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
return false;
return get_port(skb, nexthdr, protoff, src, port, proto);
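The ip_set_get_ip6_port() hunk above rejects non-first IPv6 fragments, which carry no transport header to read a port from. A brief sketch of why the (frag_off & htons(~0x7)) test does that, written here on a host-order value for clarity; the helper is illustrative, not kernel code.

/* Illustrative: detect a non-first IPv6 fragment from its frag_off field. */
#include <stdbool.h>
#include <stdint.h>

static bool is_later_fragment(uint16_t frag_off_host)
{
	/*
	 * The low 3 bits hold the M flag and reserved bits; the upper 13 bits
	 * hold the fragment offset in 8-byte units. A nonzero offset means
	 * this is not the first fragment, so no TCP/UDP header is present.
	 */
	return (frag_off_host & ~0x7u) != 0;
}

The kernel performs the same mask on the network-order value, which is why the constant is wrapped in htons().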
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 57beb1762b2d..707bc520d629 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -325,18 +325,22 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length)
static void
mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length)
{
- u8 i, j;
-
- for (i = 0; i < nets_length - 1 && h->nets[i].cidr != cidr; i++)
- ;
- h->nets[i].nets--;
-
- if (h->nets[i].nets != 0)
- return;
-
- for (j = i; j < nets_length - 1 && h->nets[j].nets; j++) {
- h->nets[j].cidr = h->nets[j + 1].cidr;
- h->nets[j].nets = h->nets[j + 1].nets;
+ u8 i, j, net_end = nets_length - 1;
+
+ for (i = 0; i < nets_length; i++) {
+ if (h->nets[i].cidr != cidr)
+ continue;
+ if (h->nets[i].nets > 1 || i == net_end ||
+ h->nets[i + 1].nets == 0) {
+ h->nets[i].nets--;
+ return;
+ }
+ for (j = i; j < net_end && h->nets[j].nets; j++) {
+ h->nets[j].cidr = h->nets[j + 1].cidr;
+ h->nets[j].nets = h->nets[j + 1].nets;
+ }
+ h->nets[j].nets = 0;
+ return;
}
}
#endif
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index c6a525373be4..f15f3e28b9c3 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -260,7 +260,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
e.ip = htonl(ip);
e.ip2 = htonl(ip2_from & ip_set_hostmask(e.cidr + 1));
ret = adtfn(set, &e, &ext, &ext, flags);
- return ip_set_enomatch(ret, flags, adt) ? 1 :
+ return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
@@ -544,7 +544,7 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
ret = adtfn(set, &e, &ext, &ext, flags);
- return ip_set_enomatch(ret, flags, adt) ? 1 :
+ return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index da740ceb56ae..223e9f546d0f 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -199,7 +199,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
e.ip = htonl(ip & ip_set_hostmask(e.cidr));
ret = adtfn(set, &e, &ext, &ext, flags);
- return ip_set_enomatch(ret, flags, adt) ? 1 :
+ return ip_set_enomatch(ret, flags, adt, set) ? -ret:
ip_set_eexist(ret, flags) ? 0 : ret;
}
@@ -396,7 +396,7 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
ret = adtfn(set, &e, &ext, &ext, flags);
- return ip_set_enomatch(ret, flags, adt) ? 1 :
+ return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index 84ae6f6ce624..7d798d5d5cd3 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -368,7 +368,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
e.ip = htonl(ip & ip_set_hostmask(e.cidr));
ret = adtfn(set, &e, &ext, &ext, flags);
- return ip_set_enomatch(ret, flags, adt) ? 1 :
+ return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
@@ -634,7 +634,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
ret = adtfn(set, &e, &ext, &ext, flags);
- return ip_set_enomatch(ret, flags, adt) ? 1 :
+ return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 9a0869853be5..09d6690bee6f 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -244,7 +244,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) {
e.ip = htonl(ip & ip_set_hostmask(e.cidr + 1));
ret = adtfn(set, &e, &ext, &ext, flags);
- return ip_set_enomatch(ret, flags, adt) ? 1 :
+ return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
@@ -489,7 +489,7 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
ret = adtfn(set, &e, &ext, &ext, flags);
- return ip_set_enomatch(ret, flags, adt) ? 1 :
+ return ip_set_enomatch(ret, flags, adt, set) ? -ret :
ip_set_eexist(ret, flags) ? 0 : ret;
}
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index b75ff6429a04..c47444e4cf8c 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -883,7 +883,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
iph->daddr = cp->daddr.ip;
iph->saddr = saddr;
iph->ttl = old_iph->ttl;
- ip_select_ident(iph, &rt->dst, NULL);
+ ip_select_ident(skb, &rt->dst, NULL);
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 95a98c8c1da6..ae2e5c11d01a 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -1009,7 +1009,7 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
verdict = NF_DROP;
if (ct)
- nfqnl_ct_seq_adjust(skb, ct, ctinfo, diff);
+ nfqnl_ct_seq_adjust(entry->skb, ct, ctinfo, diff);
}
if (nfqa[NFQA_MARK])
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 5f2068679f83..98b69bbecdd9 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -634,8 +634,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
break;
case ICMP_REDIRECT:
sctp_icmp_redirect(sk, transport, skb);
- err = 0;
- break;
+ /* Fall through to out_unlock. */
default:
goto out_unlock;
}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index da613ceae28c..e7b2d4fe2b6a 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -183,7 +183,7 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
break;
case NDISC_REDIRECT:
sctp_icmp_redirect(sk, transport, skb);
- break;
+ goto out_unlock;
default:
break;
}
@@ -204,44 +204,23 @@ out:
in6_dev_put(idev);
}
-/* Based on tcp_v6_xmit() in tcp_ipv6.c. */
static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
{
struct sock *sk = skb->sk;
struct ipv6_pinfo *np = inet6_sk(sk);
- struct flowi6 fl6;
-
- memset(&fl6, 0, sizeof(fl6));
-
- fl6.flowi6_proto = sk->sk_protocol;
-
- /* Fill in the dest address from the route entry passed with the skb
- * and the source address from the transport.
- */
- fl6.daddr = transport->ipaddr.v6.sin6_addr;
- fl6.saddr = transport->saddr.v6.sin6_addr;
-
- fl6.flowlabel = np->flow_label;
- IP6_ECN_flow_xmit(sk, fl6.flowlabel);
- if (ipv6_addr_type(&fl6.saddr) & IPV6_ADDR_LINKLOCAL)
- fl6.flowi6_oif = transport->saddr.v6.sin6_scope_id;
- else
- fl6.flowi6_oif = sk->sk_bound_dev_if;
-
- if (np->opt && np->opt->srcrt) {
- struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
- fl6.daddr = *rt0->addr;
- }
+ struct flowi6 *fl6 = &transport->fl.u.ip6;
pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb,
- skb->len, &fl6.saddr, &fl6.daddr);
+ skb->len, &fl6->saddr, &fl6->daddr);
- SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
+ IP6_ECN_flow_xmit(sk, fl6->flowlabel);
if (!(transport->param_flags & SPP_PMTUD_ENABLE))
skb->local_df = 1;
- return ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
+ SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
+
+ return ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
}
/* Returns the dst cache entry for the given source and destination ip
@@ -254,10 +233,12 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
struct dst_entry *dst = NULL;
struct flowi6 *fl6 = &fl->u.ip6;
struct sctp_bind_addr *bp;
+ struct ipv6_pinfo *np = inet6_sk(sk);
struct sctp_sockaddr_entry *laddr;
union sctp_addr *baddr = NULL;
union sctp_addr *daddr = &t->ipaddr;
union sctp_addr dst_saddr;
+ struct in6_addr *final_p, final;
__u8 matchlen = 0;
__u8 bmatchlen;
sctp_scope_t scope;
@@ -281,7 +262,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
pr_debug("src=%pI6 - ", &fl6->saddr);
}
- dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
+ final_p = fl6_update_dst(fl6, np->opt, &final);
+ dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
if (!asoc || saddr)
goto out;
@@ -333,10 +315,12 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
}
}
rcu_read_unlock();
+
if (baddr) {
fl6->saddr = baddr->v6.sin6_addr;
fl6->fl6_sport = baddr->v6.sin6_port;
- dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
+ final_p = fl6_update_dst(fl6, np->opt, &final);
+ dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
}
out:
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index fcac5d141717..084656671d6e 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1075,6 +1075,15 @@ gss_destroy(struct rpc_auth *auth)
kref_put(&gss_auth->kref, gss_free_callback);
}
+/*
+ * Auths may be shared between rpc clients that were cloned from a
+ * common client with the same xprt, if they also share the flavor and
+ * target_name.
+ *
+ * The auth is looked up from the oldest parent sharing the same
+ * cl_xprt, and the auth itself references only that common parent
+ * (which is guaranteed to last as long as any of its descendants).
+ */
static struct gss_auth *
gss_auth_find_or_add_hashed(struct rpc_auth_create_args *args,
struct rpc_clnt *clnt,
@@ -1088,6 +1097,8 @@ gss_auth_find_or_add_hashed(struct rpc_auth_create_args *args,
gss_auth,
hash,
hashval) {
+ if (gss_auth->client != clnt)
+ continue;
if (gss_auth->rpc_auth.au_flavor != args->pseudoflavor)
continue;
if (gss_auth->target_name != args->target_name) {
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 47016c304c84..66cad506b8a2 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3975,8 +3975,8 @@ sub string_find_replace {
# check for new externs in .h files.
if ($realfile =~ /\.h$/ &&
$line =~ /^\+\s*(extern\s+)$Type\s*$Ident\s*\(/s) {
- if (WARN("AVOID_EXTERNS",
- "extern prototypes should be avoided in .h files\n" . $herecurr) &&
+ if (CHK("AVOID_EXTERNS",
+ "extern prototypes should be avoided in .h files\n" . $herecurr) &&
$fix) {
$fixed[$linenr - 1] =~ s/(.*)\bextern\b\s*(.*)/$1$2/;
}
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 98969541cbcc..bea523a5d852 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -139,6 +139,18 @@ static int snd_compr_open(struct inode *inode, struct file *f)
static int snd_compr_free(struct inode *inode, struct file *f)
{
struct snd_compr_file *data = f->private_data;
+ struct snd_compr_runtime *runtime = data->stream.runtime;
+
+ switch (runtime->state) {
+ case SNDRV_PCM_STATE_RUNNING:
+ case SNDRV_PCM_STATE_DRAINING:
+ case SNDRV_PCM_STATE_PAUSED:
+ data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
+ break;
+ default:
+ break;
+ }
+
data->stream.ops->free(&data->stream);
kfree(data->stream.runtime->buffer);
kfree(data->stream.runtime);
@@ -837,7 +849,8 @@ static int snd_compress_dev_disconnect(struct snd_device *device)
struct snd_compr *compr;
compr = device->device_data;
- snd_unregister_device(compr->direction, compr->card, compr->device);
+ snd_unregister_device(SNDRV_DEVICE_TYPE_COMPRESS, compr->card,
+ compr->device);
return 0;
}
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index b524f89a1f13..18d972501585 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -111,6 +111,9 @@ enum {
/* 0x0009 - 0x0014 -> 12 test regs */
/* 0x0015 - visibility reg */
+/* Cirrus Logic CS4208 */
+#define CS4208_VENDOR_NID 0x24
+
/*
* Cirrus Logic CS4210
*
@@ -223,6 +226,16 @@ static const struct hda_verb cs_coef_init_verbs[] = {
{} /* terminator */
};
+static const struct hda_verb cs4208_coef_init_verbs[] = {
+ {0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */
+ {0x24, AC_VERB_SET_PROC_STATE, 0x01}, /* VPW: processing on */
+ {0x24, AC_VERB_SET_COEF_INDEX, 0x0033},
+ {0x24, AC_VERB_SET_PROC_COEF, 0x0001}, /* A1 ICS */
+ {0x24, AC_VERB_SET_COEF_INDEX, 0x0034},
+ {0x24, AC_VERB_SET_PROC_COEF, 0x1C01}, /* A1 Enable, A Thresh = 300mV */
+ {} /* terminator */
+};
+
/* Errata: CS4207 rev C0/C1/C2 Silicon
*
* http://www.cirrus.com/en/pubs/errata/ER880C3.pdf
@@ -295,6 +308,8 @@ static int cs_init(struct hda_codec *codec)
/* init_verb sequence for C0/C1/C2 errata*/
snd_hda_sequence_write(codec, cs_errata_init_verbs);
snd_hda_sequence_write(codec, cs_coef_init_verbs);
+ } else if (spec->vendor_nid == CS4208_VENDOR_NID) {
+ snd_hda_sequence_write(codec, cs4208_coef_init_verbs);
}
snd_hda_gen_init(codec);
@@ -434,6 +449,29 @@ static const struct hda_pintbl mba42_pincfgs[] = {
{} /* terminator */
};
+static const struct hda_pintbl mba6_pincfgs[] = {
+ { 0x10, 0x032120f0 }, /* HP */
+ { 0x11, 0x500000f0 },
+ { 0x12, 0x90100010 }, /* Speaker */
+ { 0x13, 0x500000f0 },
+ { 0x14, 0x500000f0 },
+ { 0x15, 0x770000f0 },
+ { 0x16, 0x770000f0 },
+ { 0x17, 0x430000f0 },
+ { 0x18, 0x43ab9030 }, /* Mic */
+ { 0x19, 0x770000f0 },
+ { 0x1a, 0x770000f0 },
+ { 0x1b, 0x770000f0 },
+ { 0x1c, 0x90a00090 },
+ { 0x1d, 0x500000f0 },
+ { 0x1e, 0x500000f0 },
+ { 0x1f, 0x500000f0 },
+ { 0x20, 0x500000f0 },
+ { 0x21, 0x430000f0 },
+ { 0x22, 0x430000f0 },
+ {} /* terminator */
+};
+
static void cs420x_fixup_gpio_13(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -556,22 +594,23 @@ static int patch_cs420x(struct hda_codec *codec)
/*
* CS4208 support:
- * Its layout is no longer compatible with CS4206/CS4207, and the generic
- * parser seems working fairly well, except for trivial fixups.
+ * Its layout is no longer compatible with CS4206/CS4207
*/
enum {
+ CS4208_MBA6,
CS4208_GPIO0,
};
static const struct hda_model_fixup cs4208_models[] = {
{ .id = CS4208_GPIO0, .name = "gpio0" },
+ { .id = CS4208_MBA6, .name = "mba6" },
{}
};
static const struct snd_pci_quirk cs4208_fixup_tbl[] = {
/* codec SSID */
- SND_PCI_QUIRK(0x106b, 0x7100, "MacBookPro 6,1", CS4208_GPIO0),
- SND_PCI_QUIRK(0x106b, 0x7200, "MacBookPro 6,2", CS4208_GPIO0),
+ SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
+ SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
{} /* terminator */
};
@@ -588,18 +627,35 @@ static void cs4208_fixup_gpio0(struct hda_codec *codec,
}
static const struct hda_fixup cs4208_fixups[] = {
+ [CS4208_MBA6] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = mba6_pincfgs,
+ .chained = true,
+ .chain_id = CS4208_GPIO0,
+ },
[CS4208_GPIO0] = {
.type = HDA_FIXUP_FUNC,
.v.func = cs4208_fixup_gpio0,
},
};
+/* correct the 0dB offset of input pins */
+static void cs4208_fix_amp_caps(struct hda_codec *codec, hda_nid_t adc)
+{
+ unsigned int caps;
+
+ caps = query_amp_caps(codec, adc, HDA_INPUT);
+ caps &= ~(AC_AMPCAP_OFFSET);
+ caps |= 0x02;
+ snd_hda_override_amp_caps(codec, adc, HDA_INPUT, caps);
+}
+
static int patch_cs4208(struct hda_codec *codec)
{
struct cs_spec *spec;
int err;
- spec = cs_alloc_spec(codec, 0); /* no specific w/a */
+ spec = cs_alloc_spec(codec, CS4208_VENDOR_NID);
if (!spec)
return -ENOMEM;
@@ -609,6 +665,12 @@ static int patch_cs4208(struct hda_codec *codec)
cs4208_fixups);
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
+ snd_hda_override_wcaps(codec, 0x18,
+ get_wcaps(codec, 0x18) | AC_WCAP_STEREO);
+ cs4208_fix_amp_caps(codec, 0x18);
+ cs4208_fix_amp_caps(codec, 0x1b);
+ cs4208_fix_amp_caps(codec, 0x1c);
+
err = cs_parse_auto_config(codec);
if (err < 0)
goto error;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 3d8cd04455a6..7ea0245fc6bd 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1149,32 +1149,43 @@ static int hdmi_choose_cvt(struct hda_codec *codec,
}
static void haswell_config_cvts(struct hda_codec *codec,
- int pin_id, int mux_id)
+ hda_nid_t pin_nid, int mux_idx)
{
struct hdmi_spec *spec = codec->spec;
- struct hdmi_spec_per_pin *per_pin;
- int pin_idx, mux_idx;
- int curr;
- int err;
+ hda_nid_t nid, end_nid;
+ int cvt_idx, curr;
+ struct hdmi_spec_per_cvt *per_cvt;
- for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
- per_pin = get_pin(spec, pin_idx);
+ /* configure all pins, including "no physical connection" ones */
+ end_nid = codec->start_nid + codec->num_nodes;
+ for (nid = codec->start_nid; nid < end_nid; nid++) {
+ unsigned int wid_caps = get_wcaps(codec, nid);
+ unsigned int wid_type = get_wcaps_type(wid_caps);
- if (pin_idx == pin_id)
+ if (wid_type != AC_WID_PIN)
continue;
- curr = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
+ if (nid == pin_nid)
+ continue;
+
+ curr = snd_hda_codec_read(codec, nid, 0,
AC_VERB_GET_CONNECT_SEL, 0);
+ if (curr != mux_idx)
+ continue;
- /* Choose another unused converter */
- if (curr == mux_id) {
- err = hdmi_choose_cvt(codec, pin_idx, NULL, &mux_idx);
- if (err < 0)
- return;
- snd_printdd("HDMI: choose converter %d for pin %d\n", mux_idx, pin_idx);
- snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
+ /* choose an unassigned converter. The converters in the
+ * connection list are in the same order as in the codec.
+ */
+ for (cvt_idx = 0; cvt_idx < spec->num_cvts; cvt_idx++) {
+ per_cvt = get_cvt(spec, cvt_idx);
+ if (!per_cvt->assigned) {
+ snd_printdd("choose cvt %d for pin nid %d\n",
+ cvt_idx, nid);
+ snd_hda_codec_write_cache(codec, nid, 0,
AC_VERB_SET_CONNECT_SEL,
- mux_idx);
+ cvt_idx);
+ break;
+ }
}
}
}
@@ -1216,7 +1227,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
/* configure unused pins to choose other converters */
if (is_haswell(codec))
- haswell_config_cvts(codec, pin_idx, mux_idx);
+ haswell_config_cvts(codec, per_pin->pin_nid, mux_idx);
snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index bc07d369fac4..0e303b99a47c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3439,6 +3439,9 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
/* Set to manual mode */
val = alc_read_coef_idx(codec, 0x06);
alc_write_coef_idx(codec, 0x06, val & ~0x000c);
+ /* Enable Line1 input control by verb */
+ val = alc_read_coef_idx(codec, 0x1a);
+ alc_write_coef_idx(codec, 0x1a, val | (1 << 4));
break;
}
}
@@ -3531,6 +3534,7 @@ enum {
ALC269VB_FIXUP_ORDISSIMO_EVE2,
ALC283_FIXUP_CHROME_BOOK,
ALC282_FIXUP_ASUS_TX300,
+ ALC283_FIXUP_INT_MIC,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -3790,6 +3794,16 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc282_fixup_asus_tx300,
},
+ [ALC283_FIXUP_INT_MIC] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ {0x20, AC_VERB_SET_COEF_INDEX, 0x1a},
+ {0x20, AC_VERB_SET_PROC_COEF, 0x0011},
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -3874,7 +3888,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
- SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
diff --git a/tools/lib/lk/debugfs.c b/tools/lib/lk/debugfs.c
index 099e7cd022e4..7c4347962353 100644
--- a/tools/lib/lk/debugfs.c
+++ b/tools/lib/lk/debugfs.c
@@ -5,7 +5,6 @@
#include <stdbool.h>
#include <sys/vfs.h>
#include <sys/mount.h>
-#include <linux/magic.h>
#include <linux/kernel.h>
#include "debugfs.h"
diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c
index 9570c2b0f83c..b2519e49424f 100644
--- a/tools/perf/arch/x86/util/tsc.c
+++ b/tools/perf/arch/x86/util/tsc.c
@@ -32,7 +32,7 @@ u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc)
int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
struct perf_tsc_conversion *tc)
{
- bool cap_usr_time_zero;
+ bool cap_user_time_zero;
u32 seq;
int i = 0;
@@ -42,7 +42,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
tc->time_mult = pc->time_mult;
tc->time_shift = pc->time_shift;
tc->time_zero = pc->time_zero;
- cap_usr_time_zero = pc->cap_usr_time_zero;
+ cap_user_time_zero = pc->cap_user_time_zero;
rmb();
if (pc->lock == seq && !(seq & 1))
break;
@@ -52,7 +52,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
}
}
- if (!cap_usr_time_zero)
+ if (!cap_user_time_zero)
return -EOPNOTSUPP;
return 0;
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 423875c999b2..afe377b2884f 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -321,8 +321,6 @@ found:
return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}
-extern volatile int session_done;
-
static void sig_handler(int sig __maybe_unused)
{
session_done = 1;
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index c2dff9cb1f2c..9b5f077fee5b 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -101,7 +101,7 @@ static int setup_cpunode_map(void)
dir1 = opendir(PATH_SYS_NODE);
if (!dir1)
- return -1;
+ return 0;
while ((dent1 = readdir(dir1)) != NULL) {
if (dent1->d_type != DT_DIR ||
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 8e50d8d77419..72eae7498c09 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -401,8 +401,6 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
return 0;
}
-extern volatile int session_done;
-
static void sig_handler(int sig __maybe_unused)
{
session_done = 1;
@@ -568,6 +566,9 @@ static int __cmd_report(struct perf_report *rep)
}
}
+ if (session_done())
+ return 0;
+
if (nr_samples == 0) {
ui__error("The %s file has no samples!\n", session->filename);
return 0;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 7f31a3ded1b6..9c333ff3dfeb 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -553,8 +553,6 @@ static struct perf_tool perf_script = {
.ordering_requires_timestamps = true,
};
-extern volatile int session_done;
-
static void sig_handler(int sig __maybe_unused)
{
session_done = 1;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index f5aa6375e3e9..71aa3e35406b 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -16,6 +16,23 @@
#include <sys/mman.h>
#include <linux/futex.h>
+/* For older distros: */
+#ifndef MAP_STACK
+# define MAP_STACK 0x20000
+#endif
+
+#ifndef MADV_HWPOISON
+# define MADV_HWPOISON 100
+#endif
+
+#ifndef MADV_MERGEABLE
+# define MADV_MERGEABLE 12
+#endif
+
+#ifndef MADV_UNMERGEABLE
+# define MADV_UNMERGEABLE 13
+#endif
+
static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
unsigned long arg,
u8 arg_idx __maybe_unused,
@@ -1038,6 +1055,7 @@ static int trace__replay(struct trace *trace)
trace->tool.sample = trace__process_sample;
trace->tool.mmap = perf_event__process_mmap;
+ trace->tool.mmap2 = perf_event__process_mmap2;
trace->tool.comm = perf_event__process_comm;
trace->tool.exit = perf_event__process_exit;
trace->tool.fork = perf_event__process_fork;
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 214e17e97e5c..5f6f9b3271bb 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -87,7 +87,7 @@ CFLAGS += -Wall
CFLAGS += -Wextra
CFLAGS += -std=gnu99
-EXTLIBS = -lelf -lpthread -lrt -lm
+EXTLIBS = -lelf -lpthread -lrt -lm -ldl
ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y)
CFLAGS += -fstack-protector-all
@@ -180,6 +180,9 @@ FLAGS_LIBELF=$(CFLAGS) $(LDFLAGS) $(EXTLIBS)
ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_LIBELF),-DLIBELF_MMAP),y)
CFLAGS += -DLIBELF_MMAP
endif
+ifeq ($(call try-cc,$(SOURCE_ELF_GETPHDRNUM),$(FLAGS_LIBELF),-DHAVE_ELF_GETPHDRNUM),y)
+ CFLAGS += -DHAVE_ELF_GETPHDRNUM
+endif
# include ARCH specific config
-include $(src-perf)/arch/$(ARCH)/Makefile
diff --git a/tools/perf/config/feature-tests.mak b/tools/perf/config/feature-tests.mak
index 708fb8e9822a..d5a8dd44945f 100644
--- a/tools/perf/config/feature-tests.mak
+++ b/tools/perf/config/feature-tests.mak
@@ -61,6 +61,15 @@ int main(void)
}
endef
+define SOURCE_ELF_GETPHDRNUM
+#include <libelf.h>
+int main(void)
+{
+ size_t dst;
+ return elf_getphdrnum(0, &dst);
+}
+endef
+
ifndef NO_SLANG
define SOURCE_SLANG
#include <slang.h>
@@ -210,6 +219,7 @@ define SOURCE_LIBAUDIT
int main(void)
{
+ printf(\"error message: %s\n\", audit_errno_to_name(0));
return audit_open();
}
endef
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index bfc5a27597d6..7eae5488ecea 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -809,7 +809,7 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
end = map__rip_2objdump(map, sym->end);
offset = line_ip - start;
- if (offset < 0 || (u64)line_ip > end)
+ if ((u64)line_ip < start || (u64)line_ip > end)
offset = -1;
else
parsed_line = tmp2 + 1;
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 3e5f5430a28a..e23bde19d590 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -263,6 +263,21 @@ bool die_is_signed_type(Dwarf_Die *tp_die)
}
/**
+ * die_is_func_def - Ensure that this DIE is a subprogram and definition
+ * @dw_die: a DIE
+ *
+ * Ensure that this DIE is a subprogram and NOT a declaration. This
+ * returns true if @dw_die is a function definition.
+ **/
+bool die_is_func_def(Dwarf_Die *dw_die)
+{
+ Dwarf_Attribute attr;
+
+ return (dwarf_tag(dw_die) == DW_TAG_subprogram &&
+ dwarf_attr(dw_die, DW_AT_declaration, &attr) == NULL);
+}
+
+/**
* die_get_data_member_location - Get the data-member offset
* @mb_die: a DIE of a member of a data structure
* @offs: The offset of the member in the data structure
@@ -392,6 +407,10 @@ static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
{
struct __addr_die_search_param *ad = data;
+ /*
+ * Since a declaration entry doesn't have a given pc, this always returns
+ * a function definition entry.
+ */
if (dwarf_tag(fn_die) == DW_TAG_subprogram &&
dwarf_haspc(fn_die, ad->addr)) {
memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die));
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index 6ce1717784b7..8658d41697d2 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -38,6 +38,9 @@ extern int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr,
extern int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
int (*callback)(Dwarf_Die *, void *), void *data);
+/* Ensure that this DIE is a subprogram and definition (not declaration) */
+extern bool die_is_func_def(Dwarf_Die *dw_die);
+
/* Compare diename and tname */
extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 26441d0e571b..ce69901176d8 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -199,9 +199,11 @@ static int write_buildid(char *name, size_t name_len, u8 *build_id,
return write_padded(fd, name, name_len + 1, len);
}
-static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
- u16 misc, int fd)
+static int __dsos__write_buildid_table(struct list_head *head,
+ struct machine *machine,
+ pid_t pid, u16 misc, int fd)
{
+ char nm[PATH_MAX];
struct dso *pos;
dsos__for_each_with_build_id(pos, head) {
@@ -215,6 +217,10 @@ static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
if (is_vdso_map(pos->short_name)) {
name = (char *) VDSO__MAP_NAME;
name_len = sizeof(VDSO__MAP_NAME) + 1;
+ } else if (dso__is_kcore(pos)) {
+ machine__mmap_name(machine, nm, sizeof(nm));
+ name = nm;
+ name_len = strlen(nm) + 1;
} else {
name = pos->long_name;
name_len = pos->long_name_len + 1;
@@ -240,10 +246,10 @@ static int machine__write_buildid_table(struct machine *machine, int fd)
umisc = PERF_RECORD_MISC_GUEST_USER;
}
- err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
- kmisc, fd);
+ err = __dsos__write_buildid_table(&machine->kernel_dsos, machine,
+ machine->pid, kmisc, fd);
if (err == 0)
- err = __dsos__write_buildid_table(&machine->user_dsos,
+ err = __dsos__write_buildid_table(&machine->user_dsos, machine,
machine->pid, umisc, fd);
return err;
}
@@ -375,23 +381,31 @@ out_free:
return err;
}
-static int dso__cache_build_id(struct dso *dso, const char *debugdir)
+static int dso__cache_build_id(struct dso *dso, struct machine *machine,
+ const char *debugdir)
{
bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
bool is_vdso = is_vdso_map(dso->short_name);
+ char *name = dso->long_name;
+ char nm[PATH_MAX];
- return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
- dso->long_name, debugdir,
- is_kallsyms, is_vdso);
+ if (dso__is_kcore(dso)) {
+ is_kallsyms = true;
+ machine__mmap_name(machine, nm, sizeof(nm));
+ name = nm;
+ }
+ return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name,
+ debugdir, is_kallsyms, is_vdso);
}
-static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
+static int __dsos__cache_build_ids(struct list_head *head,
+ struct machine *machine, const char *debugdir)
{
struct dso *pos;
int err = 0;
dsos__for_each_with_build_id(pos, head)
- if (dso__cache_build_id(pos, debugdir))
+ if (dso__cache_build_id(pos, machine, debugdir))
err = -1;
return err;
@@ -399,8 +413,9 @@ static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
{
- int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
- ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
+ int ret = __dsos__cache_build_ids(&machine->kernel_dsos, machine,
+ debugdir);
+ ret |= __dsos__cache_build_ids(&machine->user_dsos, machine, debugdir);
return ret;
}
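The header.c hunks above make the build-id table and cache record a kcore DSO under the machine's kernel map name rather than its on-disk kcore path, so later sessions treat it like a kernel map. A simplified sketch of that name-selection dispatch follows; the struct and helper names are invented for illustration and are not perf's types.

/* Hypothetical, simplified sketch of the name selection the patch adds:
 * vdso maps and kcore copies get stable synthetic names, everything else
 * keeps its on-disk path.  Types and helpers are invented for illustration. */
#include <stdio.h>
#include <string.h>

struct fake_dso {
	const char *long_name;
	int is_vdso;
	int is_kcore;
};

static const char *buildid_name(const struct fake_dso *dso,
				const char *machine_mmap_name,
				char *buf, size_t len)
{
	if (dso->is_vdso)
		return "[vdso]";
	if (dso->is_kcore) {
		/* mirrors machine__mmap_name(): e.g. "[kernel.kallsyms]" */
		snprintf(buf, len, "%s", machine_mmap_name);
		return buf;
	}
	return dso->long_name;
}

int main(void)
{
	char buf[64];
	struct fake_dso kcore = { "/proc/kcore", 0, 1 };
	struct fake_dso lib = { "/usr/lib/libc.so.6", 0, 0 };

	printf("%s\n", buildid_name(&kcore, "[kernel.kallsyms]", buf, sizeof(buf)));
	printf("%s\n", buildid_name(&lib, "[kernel.kallsyms]", buf, sizeof(buf)));
	return 0;
}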
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 46a0d35a05e1..9ff6cf3e9a99 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -611,6 +611,8 @@ void hists__collapse_resort(struct hists *hists)
next = rb_first(root);
while (next) {
+ if (session_done())
+ break;
n = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&n->rb_node_in);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 933d14f287ca..6188d2876a71 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -792,7 +792,7 @@ static int machine__create_modules(struct machine *machine)
modules = path;
}
- if (symbol__restricted_filename(path, "/proc/modules"))
+ if (symbol__restricted_filename(modules, "/proc/modules"))
return -1;
file = fopen(modules, "r");
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index be0329394d56..371476cb8ddc 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -118,7 +118,6 @@ static const Dwfl_Callbacks offline_callbacks = {
static int debuginfo__init_offline_dwarf(struct debuginfo *self,
const char *path)
{
- Dwfl_Module *mod;
int fd;
fd = open(path, O_RDONLY);
@@ -129,11 +128,11 @@ static int debuginfo__init_offline_dwarf(struct debuginfo *self,
if (!self->dwfl)
goto error;
- mod = dwfl_report_offline(self->dwfl, "", "", fd);
- if (!mod)
+ self->mod = dwfl_report_offline(self->dwfl, "", "", fd);
+ if (!self->mod)
goto error;
- self->dbg = dwfl_module_getdwarf(mod, &self->bias);
+ self->dbg = dwfl_module_getdwarf(self->mod, &self->bias);
if (!self->dbg)
goto error;
@@ -676,37 +675,42 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
}
/* Convert subprogram DIE to trace point */
-static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr,
- bool retprobe, struct probe_trace_point *tp)
+static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
+ Dwarf_Addr paddr, bool retprobe,
+ struct probe_trace_point *tp)
{
Dwarf_Addr eaddr, highaddr;
- const char *name;
-
- /* Copy the name of probe point */
- name = dwarf_diename(sp_die);
- if (name) {
- if (dwarf_entrypc(sp_die, &eaddr) != 0) {
- pr_warning("Failed to get entry address of %s\n",
- dwarf_diename(sp_die));
- return -ENOENT;
- }
- if (dwarf_highpc(sp_die, &highaddr) != 0) {
- pr_warning("Failed to get end address of %s\n",
- dwarf_diename(sp_die));
- return -ENOENT;
- }
- if (paddr > highaddr) {
- pr_warning("Offset specified is greater than size of %s\n",
- dwarf_diename(sp_die));
- return -EINVAL;
- }
- tp->symbol = strdup(name);
- if (tp->symbol == NULL)
- return -ENOMEM;
- tp->offset = (unsigned long)(paddr - eaddr);
- } else
- /* This function has no name. */
- tp->offset = (unsigned long)paddr;
+ GElf_Sym sym;
+ const char *symbol;
+
+ /* Verify the address is correct */
+ if (dwarf_entrypc(sp_die, &eaddr) != 0) {
+ pr_warning("Failed to get entry address of %s\n",
+ dwarf_diename(sp_die));
+ return -ENOENT;
+ }
+ if (dwarf_highpc(sp_die, &highaddr) != 0) {
+ pr_warning("Failed to get end address of %s\n",
+ dwarf_diename(sp_die));
+ return -ENOENT;
+ }
+ if (paddr > highaddr) {
+ pr_warning("Offset specified is greater than size of %s\n",
+ dwarf_diename(sp_die));
+ return -EINVAL;
+ }
+
+ /* Get an appropriate symbol from symtab */
+ symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
+ if (!symbol) {
+ pr_warning("Failed to find symbol at 0x%lx\n",
+ (unsigned long)paddr);
+ return -ENOENT;
+ }
+ tp->offset = (unsigned long)(paddr - sym.st_value);
+ tp->symbol = strdup(symbol);
+ if (!tp->symbol)
+ return -ENOMEM;
/* Return probe must be on the head of a subprogram */
if (retprobe) {
@@ -734,7 +738,7 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
}
/* If not a real subprogram, find a real one */
- if (dwarf_tag(sc_die) != DW_TAG_subprogram) {
+ if (!die_is_func_def(sc_die)) {
if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) {
pr_warning("Failed to find probe point in any "
"functions.\n");
@@ -980,12 +984,10 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
struct dwarf_callback_param *param = data;
struct probe_finder *pf = param->data;
struct perf_probe_point *pp = &pf->pev->point;
- Dwarf_Attribute attr;
/* Check tag and diename */
- if (dwarf_tag(sp_die) != DW_TAG_subprogram ||
- !die_compare_name(sp_die, pp->function) ||
- dwarf_attr(sp_die, DW_AT_declaration, &attr))
+ if (!die_is_func_def(sp_die) ||
+ !die_compare_name(sp_die, pp->function))
return DWARF_CB_OK;
/* Check declared file */
@@ -1151,7 +1153,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
tev = &tf->tevs[tf->ntevs++];
/* Trace point should be converted from subprogram DIE */
- ret = convert_to_trace_point(&pf->sp_die, pf->addr,
+ ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr,
pf->pev->point.retprobe, &tev->point);
if (ret < 0)
return ret;
@@ -1183,7 +1185,7 @@ int debuginfo__find_trace_events(struct debuginfo *self,
{
struct trace_event_finder tf = {
.pf = {.pev = pev, .callback = add_probe_trace_event},
- .max_tevs = max_tevs};
+ .mod = self->mod, .max_tevs = max_tevs};
int ret;
/* Allocate result tevs array */
@@ -1252,7 +1254,7 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
vl = &af->vls[af->nvls++];
/* Trace point should be converted from subprogram DIE */
- ret = convert_to_trace_point(&pf->sp_die, pf->addr,
+ ret = convert_to_trace_point(&pf->sp_die, af->mod, pf->addr,
pf->pev->point.retprobe, &vl->point);
if (ret < 0)
return ret;
@@ -1291,6 +1293,7 @@ int debuginfo__find_available_vars_at(struct debuginfo *self,
{
struct available_var_finder af = {
.pf = {.pev = pev, .callback = add_available_vars},
+ .mod = self->mod,
.max_vls = max_vls, .externs = externs};
int ret;
@@ -1474,7 +1477,7 @@ static int line_range_inline_cb(Dwarf_Die *in_die, void *data)
return 0;
}
-/* Search function from function name */
+/* Search for a function definition by function name */
static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
{
struct dwarf_callback_param *param = data;
@@ -1485,7 +1488,7 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
if (lr->file && strtailcmp(lr->file, dwarf_decl_file(sp_die)))
return DWARF_CB_OK;
- if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
+ if (die_is_func_def(sp_die) &&
die_compare_name(sp_die, lr->function)) {
lf->fname = dwarf_decl_file(sp_die);
dwarf_decl_line(sp_die, &lr->offset);
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 17e94d0c36f9..3b7d63018960 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -23,6 +23,7 @@ static inline int is_c_varname(const char *name)
/* debug information structure */
struct debuginfo {
Dwarf *dbg;
+ Dwfl_Module *mod;
Dwfl *dwfl;
Dwarf_Addr bias;
};
@@ -77,6 +78,7 @@ struct probe_finder {
struct trace_event_finder {
struct probe_finder pf;
+ Dwfl_Module *mod; /* For solving symbols */
struct probe_trace_event *tevs; /* Found trace events */
int ntevs; /* Number of trace events */
int max_tevs; /* Max number of trace events */
@@ -84,6 +86,7 @@ struct trace_event_finder {
struct available_var_finder {
struct probe_finder pf;
+ Dwfl_Module *mod; /* For solving symbols */
struct variable_list *vls; /* Found variable lists */
int nvls; /* Number of variable lists */
int max_vls; /* Max no. of variable lists */
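The reworked convert_to_trace_point() stops trusting the DWARF name and instead asks the module's symbol table, via dwfl_module_addrsym(), which symbol contains the probe address, then records symbol + offset. A minimal offline libdwfl sketch of that lookup follows, mirroring the offline_callbacks setup shown earlier; it assumes a linked object such as vmlinux and takes the address as a command-line argument (build with -ldw).

/* Sketch: resolve an address in an offline object to a symtab symbol,
 * the way the reworked convert_to_trace_point() does. */
#include <elfutils/libdwfl.h>
#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

static const Dwfl_Callbacks offline_callbacks = {
	.find_debuginfo = dwfl_standard_find_debuginfo,
	.section_address = dwfl_offline_section_address,
};

int main(int argc, char **argv)
{
	GElf_Sym sym;
	GElf_Addr addr;
	const char *name;

	if (argc != 3)
		return 1;

	int fd = open(argv[1], O_RDONLY);
	Dwfl *dwfl = dwfl_begin(&offline_callbacks);

	if (fd < 0 || !dwfl)
		return 1;

	Dwfl_Module *mod = dwfl_report_offline(dwfl, "", "", fd);

	dwfl_report_end(dwfl, NULL, NULL);
	if (!mod)
		return 1;

	addr = strtoull(argv[2], NULL, 16);
	/* The call the patch adds: find the containing symtab symbol and
	 * express the probe address as symbol + offset. */
	name = dwfl_module_addrsym(mod, addr, &sym, NULL);
	if (!name) {
		fprintf(stderr, "no symbol at %#" PRIx64 "\n", (uint64_t)addr);
		return 1;
	}
	printf("%s+%#" PRIx64 "\n", name, (uint64_t)(addr - sym.st_value));

	dwfl_end(dwfl);
	return 0;
}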
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 51f5edf2a6d0..70ffa41518f3 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -531,6 +531,9 @@ static int flush_sample_queue(struct perf_session *s,
return 0;
list_for_each_entry_safe(iter, tmp, head, list) {
+ if (session_done())
+ return 0;
+
if (iter->timestamp > limit)
break;
@@ -1160,7 +1163,6 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
}
}
-#define session_done() (*(volatile int *)(&session_done))
volatile int session_done;
static int __perf_session__process_pipe_events(struct perf_session *self,
@@ -1372,10 +1374,13 @@ more:
"Processing events...");
}
+ err = 0;
+ if (session_done())
+ goto out_err;
+
if (file_pos < file_size)
goto more;
- err = 0;
/* do the final flush for ordered samples */
session->ordered_samples.next_flush = ULLONG_MAX;
err = flush_sample_queue(session, tool);
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 3aa75fb2225f..04bf7373a7e5 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -124,4 +124,8 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session,
#define perf_session__set_tracepoints_handlers(session, array) \
__perf_session__set_tracepoints_handlers(session, array, ARRAY_SIZE(array))
+
+extern volatile int session_done;
+
+#define session_done() (*(volatile int *)(&session_done))
#endif /* __PERF_SESSION_H */
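Moving the session_done() macro into session.h lets both flush_sample_queue() and hists__collapse_resort() poll the same flag and stop early, typically after the user interrupts perf report. A generic, self-contained sketch of that pattern follows (a signal handler flips a volatile flag, long loops poll it); the names here are illustrative, not perf's.

/* Sketch of the interrupt-a-long-loop pattern behind session_done():
 * a signal handler sets a volatile flag, worker loops poll it and
 * bail out cleanly instead of being killed mid-update. */
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t done;	/* plays the role of session_done */

static void on_sigint(int sig)
{
	(void)sig;
	done = 1;
}

int main(void)
{
	struct sigaction sa = { .sa_handler = on_sigint };

	sigemptyset(&sa.sa_mask);
	sigaction(SIGINT, &sa, NULL);

	for (long i = 0; i < 1000000000L; i++) {
		/* same check the patch adds to the flush/resort loops */
		if (done)
			break;
		/* ... process one "event" ... */
	}

	puts(done ? "interrupted, partial results" : "finished");
	return 0;
}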
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index a7b9ab557380..a9c829be5216 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -8,6 +8,22 @@
#include "symbol.h"
#include "debug.h"
+#ifndef HAVE_ELF_GETPHDRNUM
+static int elf_getphdrnum(Elf *elf, size_t *dst)
+{
+ GElf_Ehdr gehdr;
+ GElf_Ehdr *ehdr;
+
+ ehdr = gelf_getehdr(elf, &gehdr);
+ if (!ehdr)
+ return -1;
+
+ *dst = ehdr->e_phnum;
+
+ return 0;
+}
+#endif
+
#ifndef NT_GNU_BUILD_ID
#define NT_GNU_BUILD_ID 3
#endif
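The HAVE_ELF_GETPHDRNUM fallback above emulates elf_getphdrnum() on older libelf by reading e_phnum from the ELF header. The short standalone sketch below shows the call being emulated (build with -lelf); error handling is kept minimal.

/* Sketch: print the number of program headers of an ELF file with
 * elf_getphdrnum(); on old libelf the patch's fallback reads
 * ehdr->e_phnum instead. */
#include <fcntl.h>
#include <libelf.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	if (argc != 2 || elf_version(EV_CURRENT) == EV_NONE)
		return 1;

	int fd = open(argv[1], O_RDONLY);
	Elf *elf = elf_begin(fd, ELF_C_READ, NULL);
	size_t phnum;

	if (!elf || elf_getphdrnum(elf, &phnum) != 0) {
		fprintf(stderr, "%s\n", elf_errmsg(-1));
		return 1;
	}
	printf("%zu program headers\n", phnum);
	elf_end(elf);
	return 0;
}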
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index fe7a27d67d2b..e9e1c03f927d 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -186,7 +186,7 @@ void parse_proc_kallsyms(struct pevent *pevent,
char *next = NULL;
char *addr_str;
char *mod;
- char *fmt;
+ char *fmt = NULL;
line = strtok_r(file, "\n", &next);
while (line) {
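Initializing fmt to NULL fixes a may-be-used-uninitialized problem in the kallsyms parser, which splits each /proc/kallsyms line ("address type name [module]") with strtok_r(). A tiny standalone sketch of that tokenizing loop follows, reading such lines from stdin; the real parser additionally registers each function with pevent.

/* Sketch: tokenize /proc/kallsyms-style lines the way
 * parse_proc_kallsyms() does, with strtok_r and a per-line saveptr. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];

	while (fgets(line, sizeof(line), stdin)) {
		char *save = NULL;
		char *addr = strtok_r(line, " \t\n", &save);
		char *type = strtok_r(NULL, " \t\n", &save);
		char *name = strtok_r(NULL, " \t\n", &save);
		char *mod  = strtok_r(NULL, " \t\n", &save);	/* "[module]" or NULL */

		if (!addr || !type || !name)
			continue;
		printf("%s %s %s%s%s\n", addr, type, name,
		       mod ? " in " : "", mod ? mod : "");
	}
	return 0;
}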
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index ea475cd03511..8a39dda7a325 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -101,8 +101,11 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
typeof(*work), queue);
cancel_work_sync(&work->work);
list_del(&work->queue);
- if (!work->done) /* work was canceled */
+ if (!work->done) { /* work was canceled */
+ mmdrop(work->mm);
+ kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
kmem_cache_free(async_pf_cache, work);
+ }
}
spin_lock(&vcpu->async_pf.lock);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index bf040c4e02b3..979bff485fb0 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1058,11 +1058,15 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
EXPORT_SYMBOL_GPL(gfn_to_hva);
/*
- * The hva returned by this function is only allowed to be read.
- * It should pair with kvm_read_hva() or kvm_read_hva_atomic().
+ * If writable is set to false, the hva returned by this function is only
+ * allowed to be read.
*/
-static unsigned long gfn_to_hva_read(struct kvm *kvm, gfn_t gfn)
+unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
{
+ struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
+ if (writable)
+ *writable = !memslot_is_readonly(slot);
+
return __gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL, false);
}
@@ -1430,7 +1434,7 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int r;
unsigned long addr;
- addr = gfn_to_hva_read(kvm, gfn);
+ addr = gfn_to_hva_prot(kvm, gfn, NULL);
if (kvm_is_error_hva(addr))
return -EFAULT;
r = kvm_read_hva(data, (void __user *)addr + offset, len);
@@ -1468,7 +1472,7 @@ int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
gfn_t gfn = gpa >> PAGE_SHIFT;
int offset = offset_in_page(gpa);
- addr = gfn_to_hva_read(kvm, gfn);
+ addr = gfn_to_hva_prot(kvm, gfn, NULL);
if (kvm_is_error_hva(addr))
return -EFAULT;
pagefault_disable();
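gfn_to_hva_prot() now also reports whether the backing memslot is writable by checking memslot_is_readonly(). The sketch below illustrates that contract with invented, simplified types (they are not KVM's): translate a guest frame number to a host virtual address within one slot and report the slot's writability through an optional out-parameter.

/* Hypothetical sketch of what gfn_to_hva_prot() reports: the host
 * virtual address backing a guest frame plus whether the memslot
 * allows writes.  Types below are invented for illustration. */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct fake_memslot {
	uint64_t base_gfn;
	uint64_t npages;
	uint64_t userspace_addr;	/* host VA of the slot's first page */
	bool readonly;			/* set for read-only slots */
};

static uint64_t fake_gfn_to_hva_prot(const struct fake_memslot *slot,
				     uint64_t gfn, bool *writable)
{
	if (gfn < slot->base_gfn || gfn >= slot->base_gfn + slot->npages)
		return 0;			/* "error hva" stand-in */
	if (writable)
		*writable = !slot->readonly;	/* the bit the patch exposes */
	return slot->userspace_addr + ((gfn - slot->base_gfn) << PAGE_SHIFT);
}

int main(void)
{
	struct fake_memslot slot = { 0x100, 16, 0x7f0000000000ULL, true };
	bool writable;
	uint64_t hva = fake_gfn_to_hva_prot(&slot, 0x105, &writable);

	printf("hva=%#" PRIx64 " writable=%d\n", hva, writable);
	return 0;
}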